code stringlengths 38 801k | repo_path stringlengths 6 263 |
|---|---|
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: jupyter
# language: python
# name: jupyter
# ---
# # word2vec
# **Importing libraries**
from gensim.models import KeyedVectors
from sklearn.metrics.pairwise import cosine_similarity
from numpy import dot
from numpy.linalg import norm
import gensim.downloader as api
# Downloads (~1.6 GB on first run) the pretrained Google News word2vec model
model = api.load('word2vec-google-news-300')
# **Loading model**
# +
#model = KeyedVectors.load_word2vec_format('/home/juanluis/projects/embeddings/GoogleNews-vectors-negative300.bin',binary=True)
# -
model
# **queen=king-man+woman**
# Classic analogy query: nearest neighbours of (king - man + woman)
model.most_similar(positive=['woman', 'king'], negative=['man'])
# **princess=prince-man+woman**
model.most_similar(positive=['woman', 'prince'], negative=['man'])
# Calculating **queen=king-man+woman**
# Same analogy done by hand on the raw 300-d vectors
queen_vector = model['queen']
king_vector= model['king']
man_vector = model['man']
woman_vector = model['woman']
result_vector = king_vector-man_vector+woman_vector
# cosine_similarity expects 2-D inputs, hence the single-element lists
cosine_similarity([result_vector],[queen_vector])
# **Example (Conference)**
# Related cities should score higher than an unrelated word (bicycle)
puebla_vector = model['Puebla']
guadalajara_vector = model['Guadalajara']
bicycle_vector = model['bicycle']
cosine_similarity([puebla_vector],[guadalajara_vector])
cosine_similarity([puebla_vector],[bicycle_vector])
| 2_Word_Embedding/word2vec(queen=king-man+woman).ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# language: python
# name: python3
# ---
# ***Building DCNN for Facial Emotion Recognition (FER)***
import math
import numpy as np
import pandas as pd
import scikitplot
import seaborn as sns
from matplotlib import pyplot
from sklearn.model_selection import train_test_split
from sklearn.preprocessing import LabelEncoder
from sklearn.metrics import classification_report
import tensorflow as tf
from tensorflow.keras import optimizers
from tensorflow.keras.models import Sequential
from tensorflow.keras.layers import Flatten, Dense, Conv2D, MaxPooling2D
from tensorflow.keras.layers import Dropout, BatchNormalization, LeakyReLU, Activation
from tensorflow.keras.preprocessing.image import ImageDataGenerator
# NOTE(review): keras.utils.np_utils was removed in modern Keras;
# tf.keras.utils.to_categorical is the replacement — confirm pinned version.
from keras.utils import np_utils
# Read the FER2013 data (one row per image: emotion label + space-separated pixel string)
df = pd.read_csv('../input/facial-expression-recognitionferchallenge/fer2013/fer2013/fer2013.csv')
print(df.shape)
df.head()
df
df.shape
# Parse each 'pixels' string into a 48x48x1 float32 image and stack into (N, 48, 48, 1)
img_array = df.pixels.apply(lambda x: np.array(x.split(' ')).reshape(48, 48, 1).astype('float32'))
img_array = np.stack(img_array, axis=0)
img_array.shape
df.emotion
# Integer-encode emotion labels, then one-hot encode for categorical crossentropy
le = LabelEncoder()
img_labels = le.fit_transform(df.emotion)
img_labels = np_utils.to_categorical(img_labels)
img_labels.shape
img_labels[1]
# +
# Stratified 90/10 train/validation split (stratify keeps class ratios)
X_train, X_valid, y_train, y_valid = train_test_split(
    img_array, img_labels,
    shuffle=True, stratify=img_labels,
    test_size=0.1, random_state=42)
X_train.shape, X_valid.shape, y_train.shape, y_valid.shape
# -
img_width = X_train.shape[1]
img_height = X_train.shape[2]
img_depth = X_train.shape[3]
num_classes = y_train.shape[1]
# Normalizing results, as neural networks are very sensitive to unnormalized data.
X_train = X_train / 255.
X_valid = X_valid / 255.
# +
# Initializing the CNN: two conv blocks (64 then 128 filters), each followed
# by batch-norm, max-pooling and dropout, then a dense head.
cnn = Sequential()
# First layer: 5x5 conv over the 48x48x1 grayscale input, ELU activation
cnn.add(
    Conv2D(
        filters = 64,
        kernel_size=(5,5),
        input_shape=(img_width, img_height, img_depth),
        activation = 'elu',
        name = 'Conv2D_1'
    )
)
cnn.add(BatchNormalization(name='batchnorm_1'))
# Second Layer
cnn.add(
    Conv2D(
        filters=64,
        kernel_size=(5,5),
        activation='elu',
        name='conv2d_2'
    )
)
cnn.add(BatchNormalization(name='batchnorm_2'))
cnn.add(MaxPooling2D(pool_size=(2,2), name='maxpool2d_1'))
cnn.add(Dropout(0.4, name='dropout_1'))
# Third Layer
cnn.add(
    Conv2D(
        filters=128,
        kernel_size=(3,3),
        activation='elu',
        name='conv2d_3'
    )
)
cnn.add(BatchNormalization(name='batchnorm_3'))
# Fourth Layer
cnn.add(
    Conv2D(
        filters=128,
        kernel_size=(3,3),
        activation='elu',
        name='conv2d_4'
    )
)
cnn.add(BatchNormalization(name='batchnorm_4'))
cnn.add(MaxPooling2D(pool_size=(2,2), name='maxpool2d_2'))
cnn.add(Dropout(0.4, name='dropout_2'))
# Flatten the feature maps for the dense head
cnn.add(Flatten(name='flatten'))
# Dense head: 128-unit hidden layer with He initialization for ELU
cnn.add(
    Dense(
        128,
        activation='elu',
        kernel_initializer='he_normal',
        name='dense_1'
    )
)
cnn.add(BatchNormalization(name='batchnorm_7'))
cnn.add(Dropout(0.6, name='dropout_4'))
# Softmax output over the emotion classes
cnn.add(
    Dense(
        num_classes,
        activation='softmax',
        name='out_layer'
    )
)
cnn.compile(
    loss='categorical_crossentropy',
    optimizer='adam',
    metrics=['accuracy']
)
# +
# Data augmentation: small rotations, shifts, shears, zooms and mirroring
train_datagen = ImageDataGenerator(
    rotation_range=15,
    width_shift_range=0.15,
    height_shift_range=0.15,
    shear_range=0.15,
    zoom_range=0.15,
    horizontal_flip=True,
)
train_datagen.fit(X_train)
# -
cnn.summary()
batch_size = 32 #batch size of 32 performs the best.
epochs = 70
# NOTE(review): fit_generator is deprecated in TF >= 2.1 (Model.fit accepts
# generators directly) and use_multiprocessing with in-memory generators is
# discouraged — confirm the TF version this notebook targets.
history = cnn.fit_generator(
    train_datagen.flow(X_train, y_train, batch_size=batch_size),
    validation_data=(X_valid, y_valid),
    steps_per_epoch=len(X_train) / batch_size,
    epochs=epochs,
    use_multiprocessing=True
)
cnn.save("MY_CNN_1.h5")
| building-cnn.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# language: python
# name: python3
# ---
import numpy as np
import matplotlib.pyplot as plt
import plotutils as pu
# FIX: mpmath has been a standalone package since SymPy 1.0 removed the
# bundled `sympy.mpmath` module — importing `sympy.mpmath` raises ImportError
# on any modern SymPy. The names (nsum, inf, mp) are unchanged.
from mpmath import nsum, inf, mp
from sympy import *
init_printing()
# %matplotlib inline
# The **Leibniz formula** states that:
#
# $$
# 1 - \frac{1}{3} + \frac{1}{5} - \frac{1}{7} + \frac{1}{9} - \ldots = \frac{\pi}{4}
# $$
#
# So in other words:
#
# $$
# \sum^{\infty}_{n = 0}\frac{(-1)^n}{2n + 1} = \frac{\pi}{4}
# $$
#
# Let's see if we can understand the sum formula first. What we'll do is just take the top part of the fraction $(-1)^n$ and the bottom part $2n + 1$ and plot them separately for some values of $n$:
with plt.xkcd():
    x = np.arange(0, 10, 1)
    fig, axes = plt.subplots(1, 2, figsize=(10, 4))
    for ax in axes: pu.setup_axes(ax)
    # numerator: alternates between +1 and -1
    axes[0].plot(x, (-1)**x, 'bo', zorder=10)
    axes[0].set_xlim(0, 9)
    axes[0].set_ylim(-5, 5)
    # denominator: the odd numbers 2n + 1
    axes[1].plot(x, (2*x) + 1)
    axes[1].set_xlim(0, 9)
    axes[1].set_ylim(0, 10)
# So as $n$ gets bigger and bigger we have two things. One flips between $1$ and $-1$ and the other is just a linear value $y = 2n + 1$ that just keeps on getting bigger and bigger. Now the equation above tells us that if we take an near infinite sum of these values we will get closer and closer to the value of $\frac{\pi}{4}$ so let's see if that's true.
#
# Below are two lines, one line represents $y = \frac{(-1)^n}{2n + 1}$ and the other line is the sum of all the values of that equation for $y$ at $n = 0, 1, 2, \ldots, n$. You can see that it (slowly) converges to some value, namely the value $\frac{\pi}{4}$.
n = np.arange(0, 10, 1)
f = lambda x: ((-1)**x) / (2*x + 1)
with plt.xkcd():
    fig, axes = plt.subplots(1, figsize=(8, 8))
    pu.setup_axes(axes, xlim=(-1, 9), ylim=(-0.5, 1.2), yticks=[1], yticklabels=[1], xticks=[1,2,3,4,5,6,7,8])
    plt.plot(n, f(n), zorder=10, label='THE INFINITE SERIES')
    # FIX: the comprehension variable previously shadowed the array `n`;
    # renamed to `m` for clarity (behavior identical).
    plt.plot(n, [nsum(f, [0, m]) for m in n], label='SUMMATION OF THE SERIES')
    plt.annotate('THE LEIBNIZ FORMULA FOR PI', (1, 1))
    axes.set_aspect(4.0)
    axes.legend(loc=4)
# Now if we sum up all the terms of that line above for $x = 0, 1, 2, 3, \ldots, n$ we'll get closer and closer to $\frac{\pi}{4}$. Using `mpmath` we can calculate $\pi$ with pretty good detail using the `mp.dps` setting to control the precision.
leibniz = lambda n: ((-1)**n) / (2 * n + 1)
mp.dps = 50  # 50 decimal places of working precision
nsum(leibniz, [0, inf]) * 4
# Of course we can compute it symbolically as well. These fractions get pretty crazy real quickly.
leibniz = S('((-1)^n)/(2*n+1)')
n = S('n')
sum([leibniz.subs(n, i) for i in range(100)])
| leibniz_formula.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# name: python3
# ---
# + [markdown] id="view-in-github" colab_type="text"
# <a href="https://colab.research.google.com/github/martin-fabbri/colab-notebooks/blob/master/gaussian-processes/01-fitting-gaussian-processes.ipynb" target="_parent"><img src="https://colab.research.google.com/assets/colab-badge.svg" alt="Open In Colab"/></a>
# + id="uFt47sOZ49B4" colab_type="code" outputId="80beb6c4-3af9-4040-cb77-2786fd96574f" colab={"base_uri": "https://localhost:8080/", "height": 317}
# !nvidia-smi
# + id="jjZsFXR8BWfM" colab_type="code" outputId="cf57123b-bffd-4949-b89b-a5429b775e44" colab={"base_uri": "https://localhost:8080/", "height": 140}
# !git clone https://github.com/martin-fabbri/colab-notebooks
# + id="-Hi0gIkKBbH4" colab_type="code" colab={}
import sys
import os
import numpy as np
import matplotlib.pyplot as plt
import pymc3 as pm
import theano.tensor as tt
# %matplotlib inline
# + [markdown] id="spTtx-t-CoHr" colab_type="text"
# ### Sampling from a Gaussian Process
#
# To make this notion of a "distribution over functions" more concrete, let's quickly demonstrate how we obtain realizations from a Gaussian process, which result in an evaluation of a function over a set of points.
# + id="9R2SMC5tQoUm" colab_type="code" colab={"base_uri": "https://localhost:8080/", "height": 123} outputId="73f7897c-0f9c-42f9-d9d8-fa28d7963b33"
# Warm-up: np.subtract.outer builds the matrix of pairwise differences a_i - b_j,
# the building block of the covariance kernel below.
a = (10,4)
b = (2,3)
print(np.subtract.outer(a,b))
X = np.arange(3)
Y = X + 0.5
print(Y)
Z = np.subtract.outer(X, Y)
print(Z)
# + id="PIYKwGoXZ_8R" colab_type="code" colab={"base_uri": "https://localhost:8080/", "height": 70} outputId="b9916dc2-828a-47f0-a595-a695b71fbbf6"
# np.squeeze drops the size-1 axes of an array
x = np.array([[[0], [1], [2]]])
print(x.shape)
print(np.squeeze(x).shape)
print(np.squeeze(x))
def exponential_cov(x, y, params):
    """Squared-exponential covariance between point sets x and y.

    params[0] is the amplitude, params[1] scales the squared distance.
    Returns a scalar for scalar inputs, or the full pairwise covariance
    matrix when x and/or y are sequences.
    """
    amplitude, scale = params[0], params[1]
    pairwise_diff = np.subtract.outer(x, y)
    return amplitude * np.exp(-0.5 * scale * pairwise_diff ** 2)
# + id="FVP5iTsARjo_" colab_type="code" colab={}
def conditional(x_new, x, y, params):
    """GP posterior (conditional) mean and covariance at the points x_new.

    Given observed inputs x with values y and kernel hyperparameters
    `params` (see exponential_cov), returns the tuple
    (posterior mean, posterior covariance), both squeezed to drop
    singleton dimensions.
    """
    B = exponential_cov(x_new, x, params)      # cross-covariance K(x_new, x)
    C = exponential_cov(x, x, params)          # training covariance K(x, x)
    A = exponential_cov(x_new, x_new, params)  # prior covariance K(x_new, x_new)
    # FIX: solve the linear systems directly instead of forming inv(C) twice —
    # numerically more stable and cheaper, with identical results:
    # B C^{-1} y == B @ solve(C, y) and B C^{-1} B^T == B @ solve(C, B.T).
    mu = B.dot(np.linalg.solve(C, np.asarray(y)))
    sigma = A - B.dot(np.linalg.solve(C, B.T))
    return (mu.squeeze(), sigma.squeeze())
# + id="L0750yqtb9ce" colab_type="code" colab={"base_uri": "https://localhost:8080/", "height": 304} outputId="54673ace-1186-48c8-d2bb-89279ce4d49d"
# Kernel hyperparameters [amplitude, squared-distance scale]
θ = [1, 10]
# Prior variance at a single point (kernel evaluated at zero distance)
σ_0 = exponential_cov(0, 0, θ)
print('σ_0', σ_0)
xpts = np.arange(-3, 3, step=0.01)
# Zero-mean GP prior: flat mean with ±σ_0 error bars everywhere
plt.errorbar(xpts, np.zeros(len(xpts)), yerr=σ_0, capsize=0)
# + id="MydGc5r-gKql" colab_type="code" colab={"base_uri": "https://localhost:8080/", "height": 34} outputId="b6a8437e-ebf7-47ca-ae27-c078e920ba5b"
# Draw the first observation from the prior at x = 1
x = [1.]
y = [np.random.normal(scale=σ_0)]
y
# + id="MyrZ3JhNgpfV" colab_type="code" colab={}
# Covariance of the observed points so far
σ_1 = exponential_cov(x, x, θ)
def predict(x, data, kernel, params, sigma, t):
    """Posterior mean and variance of a GP at a single point x.

    `data` holds the observed inputs, `t` their observed values, `sigma`
    the kernel matrix of the observed inputs, and `kernel(a, b, params)`
    the covariance function. Returns (y_pred, sigma_new).
    """
    cross_cov = [kernel(x, obs, params) for obs in data]
    sigma_inv = np.linalg.inv(sigma)
    # Shared factor k^T Σ^{-1}: weights each observation's contribution.
    weights = np.dot(cross_cov, sigma_inv)
    y_pred = weights.dot(t)
    sigma_new = kernel(x, x, params) - weights.dot(cross_cov)
    return y_pred, sigma_new
# Predict the GP posterior on a dense grid given the single observation
x_pred = np.linspace(-3, 3, 1000)
predictions = [predict(i, x, exponential_cov, θ, σ_1, y) for i in x_pred]
# + id="kH1oh6KMgs8d" colab_type="code" colab={"base_uri": "https://localhost:8080/", "height": 286} outputId="7ea40f0b-afd6-4ac2-fa8b-d976f96a15b7"
# Posterior mean with ±sigma error bars; observed points in red
y_pred, sigmas = np.transpose(predictions)
plt.errorbar(x_pred, y_pred, yerr=sigmas, capsize=0)
plt.plot(x, y, "ro")
# + id="qXY9L4nnhC22" colab_type="code" colab={"base_uri": "https://localhost:8080/", "height": 370} outputId="e893c1b1-9d42-44e1-d54b-9b43105613dd"
# Sample a second observation at x = -0.7 from the conditional distribution
m, s = conditional([-0.7], x, y, θ)
y2 = np.random.normal(m, s)
y2
# + id="JLru9Y9WhIkB" colab_type="code" colab={"base_uri": "https://localhost:8080/", "height": 370} outputId="2a40757d-1682-45e4-e692-df9fc6db183e"
# Add the new observation and recompute the posterior over the grid
x.append(-0.7)
y.append(y2)
σ_2 = exponential_cov(x, x, θ)
predictions = [predict(i, x, exponential_cov, θ, σ_2, y) for i in x_pred]
# + id="SA1vaJCahKzV" colab_type="code" colab={"base_uri": "https://localhost:8080/", "height": 622} outputId="5f2ffa15-5dc4-4c16-f98c-eff395ae9ddc"
y_pred, sigmas = np.transpose(predictions)
plt.errorbar(x_pred, y_pred, yerr=sigmas, capsize=0)
plt.plot(x, y, "ro")
# + id="l5fEdQMhhVCO" colab_type="code" colab={"base_uri": "https://localhost:8080/", "height": 388} outputId="8e154dc5-2eb3-4717-9611-f464d24dbd51"
# Sample several more points jointly from the multivariate conditional
x_more = [-2.1, -1.5, 0.3, 1.8, 2.5]
mu, s = conditional(x_more, x, y, θ)
y_more = np.random.multivariate_normal(mu, s)
y_more
# + id="Td42OO94hajE" colab_type="code" colab={"base_uri": "https://localhost:8080/", "height": 238} outputId="493776cf-c481-4cc6-a887-131d1884fdad"
# Fold the new samples into the observation set and re-plot the posterior
x += x_more
y += y_more.tolist()
σ_new = exponential_cov(x, x, θ)
predictions = [predict(i, x, exponential_cov, θ, σ_new, y) for i in x_pred]
y_pred, sigmas = np.transpose(predictions)
plt.errorbar(x_pred, y_pred, yerr=sigmas, capsize=0)
plt.plot(x, y, "ro")
# + id="TWsN0xngiZzB" colab_type="code" colab={}
# Bayesian GP fit in PyMC3: Gamma hyperpriors on the Matern-3/2 length
# scale ρ and amplitude η
with pm.Model() as gp_fit:
    ρ = pm.Gamma('ρ', 1, 1)
    η = pm.Gamma('η', 1, 1)
    K = η * pm.gp.cov.Matern32(1, ρ)
# + id="5NEHKuyDiZl5" colab_type="code" colab={}
# Zero mean function and a HalfCauchy prior on the observation noise σ
with gp_fit:
    M = pm.gp.mean.Zero()
    σ = pm.HalfCauchy('σ', 2.5)
# + id="ogFhmpN7v7SW" colab_type="code" colab={"base_uri": "https://localhost:8080/", "height": 223} outputId="552bf256-8013-460f-ee2d-bb61712030d8"
with gp_fit:
    # NOTE(review): `pm.gp` is a module, not a callable, in released PyMC3 —
    # this looks like it targets a pre-release GP API; current versions use
    # pm.gp.Marginal(...).marginal_likelihood(...). Confirm the pinned version.
    y_obs = pm.gp('y_obs', mean_func=M, cov_func=K, sigma=σ, observed={'X':X, 'Y':y})
| gaussian-processes/01-fitting-gaussian-processes.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: .env
# language: python
# name: .env
# ---
# # Predict Script
#
# Scaling:
# - max and mival are saved in model_params['scaling_dict']
# - this dict is used in encode_data and predict funcs
# +
from __future__ import print_function
# %load_ext autoreload
# %autoreload 2
import tensorflow as tf
import pandas as pd
import numpy as np
import scipy, pickle
import matplotlib.pyplot as plt
import seaborn as sns
import random, sys, os, json
from models import BiRNN_new, mlp, logreg
from data_util import get_data_set, one_hot_dataset, scale, unscale, int_dataset
from palettes import godsnot_64, zeileis_26
from bidirectional_lstm import predict
from data_util import Delta_t95, RMSE, Delta_tr95
from predict_util import get_color, plot_att
sns.set(rc={"axes.facecolor":"#e6e6e6",
            "axes.grid":True,
            })
# Paths: trained-model directory and the pickled test set to score
model_dir = './out/Tests_200206_ward_min2_PTtest_2/'
file_to_predict = 'data_final/Tests/4/2_test.pkl'
datasetname='4_batch'
# Load the hyperparameters saved at training time
with open(model_dir+'model_params.json') as f:
    model_params = json.load(f)
print(model_params)
# Optionally override the test file recorded in the saved params
if file_to_predict != None:
    model_params['test_file'] = file_to_predict
model = BiRNN_new
print('Model: ', model)
figure_dir = model_params['model_dir'] + '/figures/'
lab_name = model_params['lab_name']
timesteps = model_params['timesteps']
# Label encoder fitted at training time; maps class names to color indices
with open('data_final/enc.pickle', 'rb') as handle:
    label_encoder = pickle.load(handle)
c_dict = {}
for c,i in enumerate(label_encoder.classes_):
    c_dict[i]=godsnot_64[c]
# -
model_params
# +
from predict_util import encode_data, get_tf_dataset
test_data = pd.read_pickle(model_params['test_file'])
print('Using %s' % (model_params['test_file']))
test_sequences = test_data['Modified_sequence'].values
data = test_data
org_columns = data.columns
replaced_charge = False
# If the label column is missing from the test set, fill it with zeros so the
# pipeline still runs (prediction only, no ground truth available).
# NOTE(review): bare except hides real errors; a column-membership check or
# `except KeyError` would be safer.
try:
    data[model_params['lab_name']]
except:
    data[model_params['lab_name']]=np.zeros(len(data))
    replaced_charge = True
one_dat, lab, meta_data, test_size = encode_data(data, model_params)
#build iterator on testdata
dataset_test = get_tf_dataset(one_dat, lab, meta_data, data, model_params)
iter_test = dataset_test.make_initializable_iterator()
next_element_test = iter_test.get_next()
# +
#build graph (TF1-style static graph with feed placeholders)
#tf.reset_default_graph()
# Input placeholder: flat feature vector (simple) vs token-index sequence
if model_params['simple']:
    X = tf.placeholder("float", [None, model_params['num_input']])
else:
    X = tf.placeholder("float", [None, model_params['timesteps']])
# Targets: float for regression (num_classes == 1), int labels otherwise
if model_params['num_classes'] == 1:
    Y = tf.placeholder("float", [None, 1])
else:
    Y = tf.placeholder("int64", [None, 1])
# Optional per-sample task index for multitask models (-1 = single task)
if model_params['num_tasks'] != -1:
    T = tf.placeholder("int32", [None])
else:
    T=None
C = tf.placeholder("float", [None, meta_data.shape[1]])  # per-sample metadata
L = tf.placeholder("int32", [None])                      # sequence lengths
dropout = tf.placeholder("float", ())                    # keep-probability fed at runtime
# Build the model graph; the multitask variant takes num_tasks as an
# extra positional argument.
if model_params['num_tasks'] == -1:
    prediction, logits, weights, biases, attention, cert = model(X, C, L, model_params['num_layers'], model_params['num_hidden'], meta_data,
                                                                 model_params['num_classes'],
                                                                 model_params['timesteps'], keep_prob=dropout,
                                                                 uncertainty=model_params['use_uncertainty'], is_train=True)
else:
    prediction, logits, weights, biases, attention, cert = model(X, C, L, model_params['num_tasks'], model_params['num_layers'], model_params['num_hidden'], meta_data,
                                                                 model_params['num_classes'],
                                                                 model_params['timesteps'], keep_prob=dropout,
                                                                 uncertainty=model_params['use_uncertainty'], is_train=True)
if model_params['num_classes'] == 1:
    if model_params['num_tasks'] == -1:
        loss_op = tf.losses.mean_squared_error(predictions=prediction, labels=Y)
    else: #multitask regression.
        # Select each sample's prediction for its own task via a one-hot mask
        pp = tf.reshape(tf.stack(prediction, axis=1), [-1, model_params['num_tasks']])
        ppp = tf.reshape(tf.reduce_sum(pp * tf.one_hot(T, model_params['num_tasks']), axis=1), [-1, 1])
        loss_op = tf.losses.mean_squared_error(predictions=ppp, labels=Y)
else:
    loss_op = tf.nn.sparse_softmax_cross_entropy_with_logits(labels=tf.reshape(Y,[-1]), logits=prediction)
    loss_op = tf.reduce_mean(loss_op)
    prediction = tf.nn.softmax(prediction)
# Saver restores only trainable variables from the training checkpoint
saver = loader = tf.train.Saver(tf.get_collection(tf.GraphKeys.TRAINABLE_VARIABLES), max_to_keep=10)
#init model: variable initializer plus the test-set iterator
init = [tf.global_variables_initializer(), iter_test.initializer]
# Start the session (used for inference below)
sess = tf.Session()
# -
#predictions
sess.run(init)
# Restore the newest checkpoint from the model directory
model_file = tf.train.latest_checkpoint(model_params['model_dir'])
if model_file:
    ind1 = model_file.index('model')
    # checkpoint files are named "model<iter>"; recover the iteration count
    resume_itr = int(model_file[ind1+5:])
    print("Restoring model weights from " + model_file)
    saver.restore(sess, model_file)
else:
    print('no model found!')
# Run n_preds prediction passes (dropout kept active, so >1 pass would give
# Monte Carlo dropout samples); each pass re-initializes the test iterator.
n_preds = 1
for i in range(n_preds):
    label, preds, last, seq, charge, loss, att, unc, task = predict(sess, X, Y, C, L, T, test_size, model_params, next_element_test, loss_op, prediction, logits, attention, meta_data, dropout, cert, dropout_rate = model_params['dropout_keep_prob'])
    #label, preds, last, seq, charge, loss, att, unc, task = predict(sess, X, Y, C, L, T, test_size, model_params, next_element_test, loss_op, prediction, logits, attention, meta_data, dropout, cert, dropout_rate = 1.0)
    # For classification, collapse the softmax output to the argmax class
    if model_params['num_classes'] != 1:
        preds = np.argmax(preds,axis=1).reshape(-1,1)
    data['label Prediction ' + str(i)] = preds[:,0]
    sess.run(iter_test.initializer)
#data['attention'] = att.tolist()
#data['last'] = last.tolist()
#data[lab_name+' Prediction']= np.mean(data[['label Prediction ' + str(i) for i in range(n_preds)]], axis=1)
#data.to_hdf(figure_dir+'data.h5', key='data')
#data.to_csv(figure_dir+'data.csv')
# +
df = data
# Reset matplotlib to defaults, then switch seaborn to a clean white style
import matplotlib as mpl
mpl.rcParams.update(mpl.rcParamsDefault)
inline_rc = dict(mpl.rcParams)
sns.set(rc={"axes.facecolor":"#ffffff",
            "axes.grid":False,
            })
sns.set_style('ticks')
sns.despine()
#df = pd.read_hdf(figure_dir+'data.h5')
# If the label column was synthesized earlier, backfill Charge from predictions
if replaced_charge:
    data['Charge'] = data['Charge Prediction']
# -
# NOTE(review): this expects a column '<lab_name> Prediction 0', but the loop
# above wrote 'label Prediction 0' — only consistent when lab_name == 'label';
# confirm against the trained model's lab_name.
print(data[['Modified_sequence',model_params['lab_name'],model_params['lab_name']+' Prediction 0']].head())
set(data['Charge'].values)
if model_params['lab_name'] != 'Charge':
    data['label'] = data['CCS']
data.head()
# Persist the per-peptide predictions to a CSV in the figures directory
data[['Modified_sequence','Charge','label Prediction 0']].to_csv(figure_dir + 'prediction_'+model_params['lab_name']+'_'+datasetname+'.csv')
print('saved to', figure_dir + 'prediction_'+model_params['lab_name']+'_'+datasetname+'.csv')
# # Plots
# +
# Relative (%) and absolute deviation of predictions from the observed label.
# NOTE(review): '<lab_name> Prediction' is only created by the commented-out
# averaging line in the prediction cell — as written this would KeyError;
# confirm which column naming is intended.
df['rel'] = (df[lab_name] / df[lab_name+' Prediction'] ) * 100 - 100
df['abs'] = np.abs(df[lab_name] - df[lab_name+' Prediction'])
rel = df['rel'].values
rel_abs = np.abs(df['rel'].values)
abs = df['abs'].values  # NOTE(review): shadows the builtin abs() for the rest of the script
print(np.median(rel_abs))
# Histogram of the relative deviation
ax = sns.distplot(rel, norm_hist=False, kde = False, bins=50) #, bins=200
ax.set(xlabel='deviation (%)', ylabel='Counts')
ax.set(xlim = [-10, 10])
sns.despine()
plt.title('Peptide CCS Prediction Deviation')
plt.savefig(figure_dir + '/rel_error.svg', dpi=300)
plt.show()
ppred = df[lab_name+' Prediction']
llabel = df[lab_name]
# +
# Scatter + regression of predicted vs observed, annotated with Pearson r
ax = sns.regplot(x=df[lab_name], y=df[lab_name+' Prediction'],scatter_kws={'s':0.02})#, scatter_kws={'color' : ccs}
pearson = scipy.stats.pearsonr(df[lab_name+' Prediction'], df[lab_name])
print('Pearson', pearson[0])
ax.set(xlabel='observed CCS', ylabel='predicted CCS')
plt.text(700,450,'Pearson: {:.4f}'.format(pearson[0]))
plt.title('Pearson Correlation')
sns.despine()
plt.savefig(figure_dir + '/pearson.png', dpi=300)
plt.show()
# -
df['maxval']  # NOTE(review): no 'maxval' column is created anywhere visible here — confirm upstream
| evaluate.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# language: python
# name: python3
# ---
# +
import numpy as np
import h5py
import scipy.io
np.random.seed(1337) # for reproducibility
import keras
import tensorflow as tf
from keras.preprocessing import sequence
from keras.optimizers import RMSprop
from keras.models import Sequential
from keras.layers.core import Dense, Dropout, Activation, Flatten
from keras.layers.convolutional import Convolution1D, MaxPooling1D
from keras.regularizers import l2
from keras.constraints import maxnorm
from keras.layers.recurrent import LSTM, GRU
from keras.callbacks import ModelCheckpoint, EarlyStopping
from keras.layers import Bidirectional
import os
os.environ['CUDA_VISIBLE_DEVICES'] = '1'
# +
# FIX: converted Python 2 `print` statements to print() calls — the rest of
# this script uses the Python 3-era Keras API (units=, filters=, epochs=),
# so the old print syntax was a SyntaxError under the interpreter it targets.
print('loading data')
# DeepSEA data: the training set ships as HDF5, valid/test as MATLAB .mat
trainmat = h5py.File('deepsea_data/train.mat')
validmat = scipy.io.loadmat('deepsea_data/valid.mat')
testmat = scipy.io.loadmat('deepsea_data/test.mat')
# Reorder axes to (samples, 1000 bp, 4 channels) one-hot DNA encoding
X_train = np.transpose(np.array(trainmat['trainxdata']),axes=(2,0,1))
y_train = np.array(trainmat['traindata']).T
# +
# DanQ architecture: conv -> maxpool -> dropout -> bidirectional LSTM -> dense
forward_lstm = LSTM(units=320, return_sequences=True)
# backward_lstm = LSTM(input_dim=320, output_dim=320, return_sequences=True)
brnn = Bidirectional(forward_lstm)
print('building model')
model = Sequential()
model.add(Convolution1D(activation="relu",
                        input_shape=(1000, 4),
                        padding="valid", strides=1,
                        filters=320, kernel_size=26))
model.add(MaxPooling1D(strides=13, pool_size=13))
model.add(Dropout(0.2))
model.add(brnn)
model.add(Dropout(0.5))
model.add(Flatten())
model.add(Dense(input_dim=75*640, units=925))
model.add(Activation('relu'))
# 919 chromatin-feature outputs with independent sigmoids (multi-label)
model.add(Dense(input_dim=925, units=919))
model.add(Activation('sigmoid'))
print('compiling model')
model.compile(loss='binary_crossentropy', optimizer='adam', metrics=['cosine'])
print('running at most 60 epochs')
# Keep the best model by validation loss; stop early after 5 stale epochs
checkpointer = ModelCheckpoint(filepath="DanQ_bestmodel.hdf5", verbose=1, save_best_only=True)
earlystopper = EarlyStopping(monitor='val_loss', patience=5, verbose=1)
model.fit(X_train, y_train, batch_size=2048, epochs=60,
          shuffle=True,
          validation_data=(np.transpose(validmat['validxdata'],axes=(0,2,1)), validmat['validdata']),
          callbacks=[checkpointer,earlystopper])
# +
tresults = model.evaluate(np.transpose(testmat['testxdata'],axes=(0,2,1)),
                          testmat['testdata'])
print(tresults)
# -
# Binarize the sigmoid outputs at 0.5 and score flattened labels
preds = model.predict(np.transpose(testmat['testxdata'],axes=(0,2,1)))
preds[preds>=0.5] = 1
preds[preds<0.5] = 0
preds_flat = preds.flatten()
test_flat = testmat['testdata'].flatten()
from sklearn.metrics import accuracy_score
from sklearn.metrics import confusion_matrix
print(accuracy_score(test_flat, preds_flat))
print(confusion_matrix(test_flat, preds_flat))
| 2.CNN_RNN_sequence_analysis/DNA_sequence_function_prediction.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# language: python
# name: python3
# ---
# + [markdown] slideshow={"slide_type": "slide"}
# # A toy example
# + [markdown] slideshow={"slide_type": "slide"}
# **Purpose of this demo**: Motivate that abstract notions, such as sparse projection, are useful in practice.
#
# # + Disclaimer: I'm not expert in Python - I use Python/Matlab as tools to validate algorithms and theorems.
# # + Thus, my implementations are not the most efficient ones + there might be bugs
# + [markdown] slideshow={"slide_type": "slide"}
# **Problem definition: Linear regression**.
#
# \begin{align}
# y = A x^\star + w
# \end{align}
#
# # + $A \in \mathbb{R}^{n \times p}$
# # + $x^\star \in \mathbb{R}^p$
# # + $w \in \mathbb{R}^n$
#
# Assume $n = p$, and $A$ is in general position.
# Given $y$ and $A$:
#
# \begin{equation*}
# \begin{aligned}
# & \underset{x \in \mathbb{R}^p}{\text{min}}
# & & f(x) \triangleq \|y - A x\|_2^2
# \end{aligned}
# \end{equation*}
# + slideshow={"slide_type": "slide"}
# %matplotlib inline
import warnings
warnings.filterwarnings('ignore')
import numpy as np
import scipy as sp
import matplotlib.pyplot as plt
import random
from scipy import stats
from scipy.optimize import fmin
from matplotlib import rc
#rc('font',**{'family':'sans-serif','sans-serif':['Helvetica']})
## for Palatino and other serif fonts use:
rc('font',**{'family':'serif','serif':['Palatino']})
rc('text', usetex=True)
from PIL import Image
import random
from numpy import linalg as la
p = 100 # Ambient dimension
n = 100 # Number of samples
# Generate a dense random Gaussian p-dimensional ground-truth vector
x_star = np.random.randn(p)
# Normalize to unit l2-norm
x_star = (1 / la.norm(x_star, 2)) * x_star
# + slideshow={"slide_type": "slide"}
# Stem plot of the ground-truth vector
xs = range(p)
markerline, stemlines, baseline = plt.stem(xs, x_star, '-.')
plt.setp(markerline, 'alpha', 0.3, 'ms', 6)
plt.setp(markerline, 'markerfacecolor', 'b')
plt.setp(baseline, 'color', 'r', 'linewidth', 1, 'alpha', 0.3)
plt.rc('text', usetex=True)
#plt.rc('font', family='serif')
plt.xlabel('Index')
plt.ylabel('Amplitude')
plt.show()
# + [markdown] slideshow={"slide_type": "slide"}
# <center> How would you solve this problem? </center>
# + [markdown] slideshow={"slide_type": "slide"}
# <center>Closed form solution using matrix inverse</center>
#
# \begin{align}
# \widehat{x} = A^{-1} y
# \end{align}
# + slideshow={"slide_type": "slide"}
A = np.random.randn(n, p)
y = A.dot(x_star)
A_inv = la.inv(A)
widehat_x = A_inv.dot(y)
# Plot
xs = range(p)
markerline, stemlines, baseline = plt.stem(xs, widehat_x, '-.')
plt.setp(markerline, 'alpha', 0.3, 'ms', 6)
plt.setp(markerline, 'markerfacecolor', 'b')
plt.setp(baseline, 'color', 'r', 'linewidth', 1, 'alpha', 0.3)
plt.xlabel('Index')
plt.ylabel('Amplitude')
plt.show()
print('\|x^\star - x\|_2 = {0}'.format(la.norm(x_star - widehat_x)))
# + [markdown] slideshow={"slide_type": "slide"}
# **Problem definition: Sparse linear regression**.
#
# \begin{align}
# y = A x^\star + w
# \end{align}
#
# # + $A \in \mathbb{R}^{n \times p}$, but now $n \ll p$
# # + $x^\star \in \mathbb{R}^p$ but $k$-sparse, where $k \ll p$
# # + $w \in \mathbb{R}^n$
#
# Assume $n = p$, and $A$ is in general position.
# Given $y$ and $A$:
#
# \begin{equation*}
# \begin{aligned}
# & \underset{x \in \mathbb{R}^p}{\text{min}}
# & & f(x) \triangleq \|y - A x\|_2^2
# \end{aligned}
# \end{equation*}
#
# <center> Would a similar technique solve the problem? </center>
# + slideshow={"slide_type": "slide"}
p = 100 # Ambient dimension
n = 40 # Number of samples
k = 5 # Sparsity level
# Generate a p-dimensional zero vector
x_star = np.zeros(p)
# Randomly sample k indices in the range [1:p]
x_star_ind = random.sample(range(p), k)
# Set x_star_ind with k random elements from Gaussian distribution
x_star[x_star_ind] = np.random.randn(k)
# Normalize
x_star = (1 / la.norm(x_star, 2)) * x_star
# Plot
xs = range(p)
markerline, stemlines, baseline = plt.stem(xs, x_star, '-.')
plt.setp(markerline, 'alpha', 0.3, 'ms', 6)
plt.setp(markerline, 'markerfacecolor', 'b')
plt.setp(baseline, 'color', 'r', 'linewidth', 1, 'alpha', 0.3)
plt.xlabel('Index')
plt.ylabel('Amplitude')
plt.show()
# + [markdown] slideshow={"slide_type": "slide"}
# We will use the pseudo-inverse of $A$:
#
# \begin{align}
# A^\dagger = A^\top (AA^\top)^{-1}
# \end{align}
# + slideshow={"slide_type": "slide"}
A = np.random.randn(n, p)
y = A.dot(x_star)
A_inv = la.pinv(A)
widehat_x = A_inv.dot(y)
# Plot
xs = range(p)
markerline, stemlines, baseline = plt.stem(xs, widehat_x, '-.')
plt.setp(markerline, 'alpha', 0.3, 'ms', 6)
plt.setp(markerline, 'markerfacecolor', 'b')
plt.setp(baseline, 'color', 'r', 'linewidth', 1, 'alpha', 0.3)
plt.xlabel('Index')
plt.ylabel('Amplitude')
plt.show()
la.norm(x_star - widehat_x)
# + [markdown] slideshow={"slide_type": "slide"}
# # + The reconstruction of $x^\star$ from $y$ is an ill-posed problem since $n < p$ and there is no hope in finding the *true vector* without ambiguity.
# # + Additional prior information is needed.
# # + We might want to use the fact that $\|x\|_0 \leq k$ where $k \ll p$ and $\|\cdot\|_0$ is the $\ell_0$-"norm".
# # + It turns out that, under proper assumptions on the sensing matrix $A$ and the sparsity level $k$, one can still recover $x^\star$!
# + [markdown] slideshow={"slide_type": "slide"}
# ### Why sparsity?
#
# Let us consider the following practical case: image processing.
# + slideshow={"slide_type": "slide"}
# %matplotlib inline
import numpy as np
import scipy as sp
import matplotlib.pyplot as plt
import random
from scipy import stats
from scipy.optimize import fmin
from PIL import Image
# Open image using Image package
x_mush_orig = Image.open("./SupportFiles/mushroom.png").convert("L")
# Transform to a np array
x_mush_star = np.fromstring(x_mush_orig.tobytes(), np.uint8)
# Set the shape of np array
x_mush_star.shape = (x_mush_orig.size[1], x_mush_orig.size[0])
# Show the image
plt.imshow(x_mush_star, interpolation = "nearest", cmap = plt.cm.gray)
# + [markdown] slideshow={"slide_type": "skip"}
# Obviously, this is a simple image case: the "mushroom" image is sparse by itself (do you see the black pixels? Yes, they are zeros). To see this more clearly, let's sort the true coeffients in decreasing order.
# + slideshow={"slide_type": "slide"}
from bokeh.plotting import figure, show, output_file
from bokeh.palettes import brewer
# Get the absolute value of a flatten array (vectorize)
x_mush_abs = abs(x_mush_star.flatten())
# Sort the absolute values (ascending order)
x_mush_abs.sort()
# Descending order
x_mush_abs_sort = np.array(x_mush_abs[::-1])
plt.style.use('bmh')
fig, ax = plt.subplots()
# Generate an array with elements 1:len(...)
xs = np.arange(len(x_mush_abs_sort))
# Fill plot - alpha is transparency (might take some time to plot)
ax.fill_between(xs, 0, x_mush_abs_sort, alpha = 0.2)
# Plot - alpha is transparency (might take some time to plot)
ax.plot(xs, x_mush_abs_sort, alpha = 0.8)
plt.show()
# + [markdown] slideshow={"slide_type": "skip"}
# For this 64 x 64 image, the total number of pixels sums up to 4096. As you can observe, by default almost half of the pixels are zero, which constitutes "mushroom" image sparse (but still the sparsity level is quite high: more than half the ambient dimension).
#
# Since this seems to be a "cooked"-up example, let us consider a more *realistic* scenario: a brick house. (*Does anyone know where is this house?*)
# -
# Load the (dense) house test image as 8-bit grayscale
x_house_orig = Image.open("./SupportFiles/house128.png").convert("L")
# FIX: np.frombuffer replaces the deprecated np.fromstring for raw bytes.
x_house_star = np.frombuffer(x_house_orig.tobytes(), np.uint8)
x_house_star.shape = (x_house_orig.size[1], x_house_orig.size[0])
plt.imshow(x_house_star, interpolation = "nearest", cmap = plt.cm.gray)
# + [markdown] slideshow={"slide_type": "skip"}
# ...and here is the bar plot of the coefficients.
# +
x_house_abs = abs(x_house_star.flatten())
x_house_abs.sort()
x_house_abs_sort = np.array(x_house_abs[::-1])
plt.style.use('bmh')
fig, ax = plt.subplots()
xs = np.arange(len(x_house_abs_sort))
ax.fill_between(xs, 0, x_house_abs_sort, alpha = 0.2)
plt.plot(xs, x_house_abs_sort, alpha=0.8)
plt.show()
# + [markdown] slideshow={"slide_type": "slide"}
# # + All the coefficients are non-zero! Is there anything we can do in this case?
#
# # + However: under proper orthonormal transformations, natural images become sparse.
# + slideshow={"slide_type": "slide"}
import pywt
# Load the full-size house image as 8-bit grayscale.
x_house_orig = Image.open("./SupportFiles/house.png").convert("L")
# np.frombuffer replaces the deprecated np.fromstring for raw byte buffers.
x_house_star = np.frombuffer(x_house_orig.tobytes(), np.uint8)
x_house_star.shape = (x_house_orig.size[1], x_house_orig.size[0])
# Defines a wavelet object - 'db1' is the Haar / Daubechies-1 wavelet
wavelet = pywt.Wavelet('db1')
# Two-level 2-D wavelet decomposition of the input matrix
coeffs = pywt.wavedec2(x_house_star, wavelet, level=2)
cA2, (cH2, cV2, cD2), (cH1, cV1, cD1) = coeffs
# Tile the level-2 approximation/detail submatrices into one big matrix for display
x_house_star_wav = np.bmat([[cA2, cH2], [cV2, cD2]])
# NOTE(review): origin='image' is not a documented matplotlib value ('upper'/'lower');
# confirm against the matplotlib version in use.
plt.imshow(np.flipud(x_house_star_wav), origin='image', interpolation="nearest", cmap=plt.cm.gray)
plt.title("Wavelet representation of image", fontsize=10)
plt.tight_layout()
# + [markdown] slideshow={"slide_type": "skip"}
# After wavelet transformation, let's plot the wavelet coefficients.
# + slideshow={"slide_type": "slide"}
# Flatten and show the histogram
# `x_house_star_wav` is an np.matrix (np.bmat returns a matrix), so .flatten()
# keeps a (1, N) shape; the flipud/flatten gymnastics below account for that.
# (The original also called `x_house_abs_wav.flatten()` and discarded the
# result; that no-op statement has been removed.)
x_house_abs_wav = abs(x_house_star_wav.flatten())
x_house_abs_wav.sort()
x_house_abs_wav_sort = np.array(x_house_abs_wav[::-1])
plt.style.use('bmh')
fig, ax = plt.subplots()
xs = np.arange(len(x_house_abs_wav_sort.flatten()))
# Plot the coefficients in descending magnitude order.
ax.fill_between(xs, 0, np.flipud(x_house_abs_wav_sort.flatten()), alpha = 0.2)
plt.plot(xs, np.flipud(x_house_abs_wav_sort.transpose()), alpha = 0.8)
plt.show()
# + [markdown] slideshow={"slide_type": "skip"}
# It is obvious that much less number of coefficients are non-zero! (...and this holds generally for naturally images.)
# + slideshow={"slide_type": "fragment"}
from mpl_toolkits.mplot3d import Axes3D
import matplotlib.cm as cm
fig = plt.figure()
ax = fig.add_subplot(111, projection = '3d')
# Overlay the sorted wavelet-coefficient decay of four test images,
# one curve per depth value zi along the y axis.
for c, z, zi in zip(['r', 'g', 'b', 'y'], ['./SupportFiles/house128.png', './SupportFiles/peppers128.png', './SupportFiles/man128.png', './SupportFiles/pedro128.png'], [4, 3, 2, 1]):
    y = Image.open(z).convert("L")
    # np.frombuffer replaces the deprecated np.fromstring for raw byte buffers.
    y_star = np.frombuffer(y.tobytes(), np.uint8)
    y_star.shape = (y.size[1], y.size[0])
    # Multilevel decomposition of the input matrix
    y_coeffs = pywt.wavedec2(y_star, wavelet, level=2)
    y_cA2, (y_cH2, y_cV2, y_cD2), (y_cH1, y_cV1, y_cD1) = y_coeffs
    # Concatenate the level-2 submatrices into a big one
    y_star_wav = np.bmat([[y_cA2, y_cH2], [y_cV2, y_cD2]])
    y_abs_wav = abs(y_star_wav.flatten())
    y_abs_wav.sort()
    y_abs_wav_sort = np.array(y_abs_wav[::-1])
    xs = np.arange(len(y_abs_wav_sort.flatten()))
    ys = np.array([zi] * len(xs))
    ax.plot(xs, ys = ys.flatten(), zs = np.flipud(y_abs_wav_sort.flatten()), zdir = 'z', color = c, alpha = 0.5)
ax.set_xlabel('X axis')
ax.set_ylabel('Y axis')
ax.set_zlabel('Z axis')
# Fix: the original `plt.show` (no parentheses) was a no-op attribute access;
# the call actually renders the figure.
plt.show()
# + [markdown] slideshow={"slide_type": "skip"}
# In the above picture, the y values (1.0 to 4.0) correspond to four different image cases (as a sanity check, observe that the red curve is the same curve as for the house.png case presented above).
#
# One can observe that most of the coeffs are close to zero and only few of them (compared to the ambient dimension) are significantly large. This has led to the observation that keeping only the most important coefficients (even truncating the non-zero entries further) leads to a significant compression of the image. At the same time, only these coefficients can lead to a pretty good reconstruction of the original image.
# + [markdown] slideshow={"slide_type": "slide"}
# ### Using sparse projections
# + slideshow={"slide_type": "skip"}
import math
# Build a Gaussian sensing matrix whose columns have unit norm in expectation.
# `n` (number of measurements), `p` (ambient dimension) and `x_star` (the
# k-sparse ground-truth signal) are defined in earlier cells of the notebook.
# Generate sensing matrix
A = (1 / math.sqrt(n)) * np.random.randn(n, p)
# Observation model
y = A @ x_star
# + [markdown] slideshow={"slide_type": "slide"}
# **Gradient descent with sparse projections[7-8]**. Solve the criterion
# \begin{align}
# \min_{x} ~ ~f(x) := \frac{1}{2}\|y - Ax\|_2^2 \quad \text{s.t.} \quad \|x\|_0 \leq k
# \end{align}
#
# *The IHT method* <br>
# 1: Choose initial guess $x_0$ <br>
# 2: <b>for</b> i = 0, 1, 2, ... <b>do</b> <br>
# 3: Compuete $\nabla f(x_i) = -A^\top \cdot (y - A x_i)$ <br>
# 4: $\widehat{x}_{i+1} = x_i - \nabla f(x_i)$ <br>
# 5: $x_{i+1} = \arg \min_{x \text{ is } k-\text{sparse}} \|\widehat{x}_{i+1} - x\|_2$ <br>
# 5: <b>end for</b>
#
# Let's use this algorithm and see how it performs in practice.
#
# + slideshow={"slide_type": "slide"}
from numpy import linalg as la
# Hard thresholding function
def hardThreshold(x, k):
    """Keep the k largest-magnitude entries of x and zero out the rest.

    Operates on a copy, so the caller's array is left untouched (the
    original version zeroed entries of the input array in place).
    Entries tied with the k-th largest magnitude are all kept, so the
    result may contain slightly more than k nonzeros when duplicates
    occur. Edge cases: k <= 0 returns the all-zero vector; k >= len(x)
    returns x unchanged.
    """
    x = np.array(x, copy=True)
    p = x.shape[0]
    if k <= 0:
        return np.zeros(p)
    if k >= p:
        return x
    # k-th largest magnitude is the cut-off value.
    t = np.sort(np.abs(x))[::-1]
    threshold = t[k-1]
    x[np.abs(x) < threshold] = 0
    return x
def f(y, A, x):
    """Least-squares objective: 0.5 * ||y - A x||_2^2."""
    residual = y - A @ x
    return 0.5 * math.pow(la.norm(residual, 2), 2)
# + slideshow={"slide_type": "slide"}
def IHT(y, A, k, iters, epsilon, verbose, x_star):
    """Iterative Hard Thresholding for min 0.5*||y - Ax||_2^2 s.t. ||x||_0 <= k.

    Parameters
    ----------
    y : measurement vector (length n).
    A : sensing matrix (n x p).
    k : target sparsity level, forwarded to hardThreshold.
    iters : maximum number of iterations.
    epsilon : stop when the relative change ||x_new - x_old|| / ||x_new|| drops below it.
    verbose : if True, print the per-iteration change.
    x_star : ground-truth signal, used only to record the recovery error.

    Returns
    -------
    (x_new, x_list, f_list) : final estimate, per-iteration distances to
    x_star, and per-iteration objective values.
    """
    p = A.shape[1] # Length of original signal
    n = A.shape[0] # Length of measurement vector
    x_new = np.zeros(p) # Initial estimate
    At = np.transpose(A) # Transpose of A
    # NOTE(review): x_list is seeded with the constant 1, not ||x_0 - x_star||_2;
    # confirm this is intended.
    x_list, f_list = [1], [f(y, A, x_new)]
    for i in range(iters):
        x_old = x_new
        # Compute gradient
        grad = -At @ (y - A @ x_new)
        # Perform gradient step (fixed step size 0.5)
        x_temp = x_old - 0.5 * grad
        # Perform hard thresholding step
        x_new = hardThreshold(x_temp, k)
        # Stop on small relative change. Note the break happens BEFORE the
        # bookkeeping below, so the final iterate is not appended to the lists.
        if (la.norm(x_new - x_old, 2) / la.norm(x_new, 2)) < epsilon:
            break
        # Keep track of solutions and objective values
        x_list.append(la.norm(x_new - x_star, 2))
        f_list.append(f(y, A, x_new))
        if verbose:
            print("iter# = "+ str(i) + ", ||x_new - x_old||_2 = " + str(la.norm(x_new - x_old, 2)))
    print("Number of steps:", len(f_list))
    return x_new, x_list, f_list
# + slideshow={"slide_type": "slide"}
# Run algorithm
# `y`, `A`, `k` and `x_star` come from earlier cells of the notebook.
epsilon = 1e-6 # Precision parameter
iters = 100
x_IHT, x_list, f_list = IHT(y, A, k, iters, epsilon, True, x_star)
# Plot
# NOTE(review): usetex=True requires a working LaTeX installation.
plt.rc('text', usetex=True)
plt.rc('font', family='serif')
xs = range(p)
# Stem plot of the recovered signal's entries.
markerline, stemlines, baseline = plt.stem(xs, x_IHT, '-.x')
plt.setp(markerline, 'alpha', 0.3, 'ms', 6)
plt.setp(markerline, 'markerfacecolor', 'b')
plt.setp(baseline, 'linewidth', 1, 'alpha', 0.3)
plt.xlabel('Index')
plt.ylabel('Amplitude')
#plt.title(r"$\|x^\star - \widehat{x}\|_2 = %s$" %(la.norm(x_star - x_IHT, 2)), fontsize=16)
# Make room for the ridiculously large title.
plt.subplots_adjust(top=0.8)
plt.show()
# + [markdown] slideshow={"slide_type": "slide"}
# This is great! IHT finds $\mathbf{x}^\star$ fast and 'accurately'. How fast? Let's create a convergence plot.
# + slideshow={"slide_type": "slide"}
# Plot
# Convergence curve: distance to x_star per iteration on a log scale, so a
# straight line indicates linear (geometric) convergence.
# NOTE(review): usetex=True requires a working LaTeX installation.
plt.rc('text', usetex=True)
plt.rc('font', family='serif')
xs = range(len(x_list))
plt.plot(xs, x_list, '-o', color = '#3399FF', linewidth = 4, alpha = 0.7, markerfacecolor = 'b')
plt.yscale('log')
plt.xlabel('Iterations')
plt.ylabel(r"$\|x^\star - \widehat{x}\|_2$")
# Make room for the ridiculously large title.
plt.subplots_adjust(top=0.8)
plt.show()
# -
| schedule/images/Chapter 1b.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# language: python
# name: python3
# ---
# +
import numpy as np
import matplotlib.pyplot as plt
import statsmodels.api as sm
import scipy.stats as stats
from vuong_tests import *
# -
#generate the sample
def gen_data():
    """Simulate a binary-choice dataset: y = 1{1 + 3*x + e >= 0}.

    Both the regressor x and the error e are drawn i.i.d. Uniform(-1, 1);
    the draw order (x first, then e) matches the original implementation
    so seeded runs are reproducible.
    """
    nobs = 1000
    beta = 3
    x = np.random.uniform(low=-1., high=1., size=nobs)
    e = np.random.uniform(low=-1., high=1., size=nobs)
    latent_index = 1 + beta * x + e
    y = (latent_index >= 0).astype(int)
    return y, x, nobs
# +
def compute_llr(yn,xn):
    """Fit Probit and Logit on (yn, xn) and return (llr, omega).

    llr is the difference of summed log-likelihoods (Probit minus Logit);
    omega is the standard deviation of the pointwise log-likelihood
    differences, as used in Vuong-type model comparison tests.
    """
    regressors = sm.add_constant(xn)
    per_obs_ll = []
    for model_cls in (sm.Probit, sm.Logit):
        model = model_cls(yn, regressors)
        fitted = model.fit(disp=False)
        per_obs_ll.append(model.loglikeobs(fitted.params))
    ll_probit, ll_logit = per_obs_ll
    llr = ll_probit.sum() - ll_logit.sum()
    omega2 = (ll_probit - ll_logit).var()
    return llr, np.sqrt(omega2)
# Quick smoke test of the LLR computation on one simulated dataset.
yn,xn,nobs = gen_data()
print(compute_llr(yn,xn))
# +
def regular_test(yn,xn,nobs,hist=False):
    """Two-sided normal-approximation test of the normalized LLR statistic.

    Returns 1 when the statistic exceeds 1.96, 2 when it falls below -1.96,
    and 0 otherwise (the same 0/1/2 coding consumed by the Monte Carlo loop).
    When hist=True, overlays the standard-normal reference density.
    """
    llr, omega = compute_llr(yn, xn)
    test_stat = llr / (omega * np.sqrt(nobs))
    if hist:
        grid = np.linspace(-2.5, 2.5, 100)
        plt.plot(grid, stats.norm.pdf(grid, 0, 1), label="Normal")
    return 1*(test_stat >= 1.96) + 2*( test_stat <= -1.96)
def bootstrap_test(yn,xn,nobs,hist=False):
    """Bootstrap version of the normalized-LLR test.

    Resamples the data `trials` times, recomputes the statistic on each
    resample, and forms reflected percentile-bootstrap critical values
    around the full-sample statistic. Returns the same 0/1/2 coding as
    regular_test: 1 if 0 <= cv_lower, 2 if 0 >= cv_upper, else 0.
    """
    test_stats = []
    trials = 100
    for i in range(trials):
        # Resample size equals nobs here, i.e. a standard (not m-out-of-n) bootstrap.
        subn = 1000
        # NOTE(review): reseeds from OS entropy on every draw, so results are
        # non-reproducible across runs -- confirm this is intentional.
        np.random.seed()
        sample = np.random.choice(np.arange(0,nobs),subn,replace=True)
        ys,xs = yn[sample],xn[sample]
        llr, omega = compute_llr(ys,xs)
        test_stat = llr/(omega*np.sqrt(subn))
        test_stats.append(test_stat)
    # Full-sample statistic.
    llr, omega = compute_llr(yn,xn)
    test_stat = llr/(omega*np.sqrt(nobs))
    #plot
    if hist:
        plt.hist( 2*test_stat - test_stats, density=True,bins=10, label="Bootstrap")
    # Reflected percentile-bootstrap 95% critical values.
    cv_lower = 2*test_stat - np.percentile(test_stats, 97.5, axis=0)
    cv_upper = 2*test_stat - np.percentile(test_stats, 2.5, axis=0)
    return 2*(0 >= cv_upper) + 1*(0 <= cv_lower)
# Run both tests once on a fresh simulated dataset and save the comparison
# figure (bootstrap histogram vs normal density).
# NOTE(review): savefig assumes the ./figs directory already exists.
yn,xn,nobs = gen_data()
print(bootstrap_test(yn,xn,nobs,hist=True))
print(regular_test(yn,xn,nobs,hist=True))
plt.title("Comparison with bootstrap")
plt.xlabel("Test Statistic")
plt.ylabel("Density")
plt.legend()
plt.savefig('figs/bootstrap_compare1')
plt.show()
# +
# Monte Carlo study: over `total` simulated datasets, tally how often each
# test returns outcome 0, 1 or 2, and average the llr/omega estimates.
# Slow: each repetition refits both models 101 times (1 + 100 bootstrap draws).
reg = np.array([0, 0 ,0])
boot = np.array([0, 0 ,0])
omega = 0
llr = 0
total = 1000
for i in range(total):
    np.random.seed()
    yn,xn,nobs = gen_data()
    llrn,omegan = compute_llr(yn,xn)
    #update the llr
    llr = llr +llrn
    omega = omega +omegan
    reg_index = regular_test(yn,xn,nobs)
    #update test results
    boot_index = bootstrap_test(yn,xn,nobs)
    reg[reg_index] = reg[reg_index] + 1
    boot[boot_index] = boot[boot_index] + 1
print("reg: %s, boot: %s, llr:%s, omega:%s"%(reg/total,boot/total,llr/total,omega/total))
# +
#Dgp1 - reg: [0.737 0.263 0. ], boot: [0.88 0.114 0.006], llr:0.7785740338656467, omega:0.02975939594282737
#dgp2 - reg: [0.756 0.242 0.002], boot: [0.839 0.152 0.009], llr:0.47658608905951694, omega:0.020173926657762808
#dgp3 - [0.003 0.997 0. ], boot: [0.017 0.983 0. ], llr:2.677881131428181, omega:0.015645737393878697
# -
| logit_probit/weakly_overlapping.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3 (ipykernel)
# language: python
# name: python3
# ---
# # NumPy
#
# - This file contains the basics of Numpy.
#
#
# +
# NumPy is a library for scientific computations in Python.
# Numpy is one of the packages you have to know if you're going to do data science with Python.
# It is a Python library that provides support for large, multidimensional arrays along with masked
# arrays and matrices, and provides extensive functions for performing array manipulation, including
# mathematical, logical, and shape calculations, sorting, selecting, I/O, discrete Fourier transforms,
# linear algebra, basic statistical operations, random simulations, and so on.
# -
# Importing the Numpy library
import numpy as np
# Numpy version
np.__version__
# # Function - numpy.array()
# +
# Function - numpy.array() (Note only most frequently parameters are explain below)
# numpy.array(object, dtype=None, *, copy=True, order='K', subok=False, ndmin=0, like=None)
# Parameters :
# object : array_like
# An array, any object exposing the array interface, an object whose __array__ method returns an array,
# or any (nested) sequence. If object is a scalar, a 0-dimensional array containing object is returned.
# Example - list object [1,2,3] or list of tuple [(1,2,3),(4,5,6)]
# dtype : data-type (optional)
# The desired data-type for the array. If not given, then the type will be determined as the minimum
# type required to hold the objects in the sequence.
# Example - dtype - float or str
# -
# `a`: 1-D int array; `b`: 2-D float array built from a list of tuples;
# `c`: 3-D array coerced to strings (dtype becomes a fixed-width unicode type).
a = np.array([1,2,3])
b = np.array([(1,2,3),(6,7,8)],dtype = float)
c = np.array([[(1,2,3),(4,5,6)],[(7,8,9),(10,11,12)]], dtype= str)
print(a, " \t" , type(a))
print("\n",b, " \t" , type(b))
print("\n",c, " \t" , type(c))
# # Numpy supports - element-wise operations
# When a*2 is used, it performs element-wise operations rather than duplicating the content as with lists.
print(a*2)
print(b*2)
# # Function required for inspecting the Numpy Array
# - a.shape
# - a.ndim
# - a.size
# - a.dtype
# - a.dtype.name
# - a.astype(float)
# a.shape - Tuple of array dimensions.
## The shape property is usually used to get the current shape of an array
## Example - (3,) - What is the magnitude of each dimension.
c.shape
# a.ndim - Dimension of the array (1d,2d or nth array)
## Example - a.ndim - Array of dimension 1 (1d).
c.ndim
# a.size - Number of elements in the array.
## Equal to np.prod(a.shape), i.e., the product of the array’s dimensions.
## Example c.size - shape of c is (2,2,3) = 2*2*3 = 12
c.size
# a.dtype - Data-type of the array’s elements.
## Example a.dtype - dtype('int32') and b.dtype - dtype('float64')
b.dtype
# a.dtype.name - A bit-width name for this data-type.
## Un-sized flexible data-type objects do not have this attribute.
## Example x = np.dtype(float) x.name is float64
x = np.dtype(float)
y = np.dtype([('a', np.int32, 8), ('b', np.float64, 6)])
print(x.name,y.name)
# a.astype(float)
## Convert the array's datatype according to the parameter given
## b.astype - When creating the array b, we used float, and then used astype to convert to integer.
b.astype(int)
# # Different methods to Initialize the numpy array
# ## numpy.arange
# numpy.arange([start, ]stop, [step, ]dtype=None, *, like=None)
## Return evenly spaced values within a given interval.
## Values are generated within the half-open interval [start, stop)
## (In other words, the interval including start but excluding stop).
## For integer arguments the function is equivalent to the Python built-in range function,
## but returns an ndarray rather than a list.
print(np.arange(0,10,2,dtype = int))
print(np.arange(0,10,0.5,dtype=float))
# ## numpy.linspace
# numpy.linspace(start, stop, num=50, endpoint=True, retstep=False, dtype=None, axis=0)[source]
## Return evenly spaced numbers over a specified interval.
## Returns num evenly spaced samples, calculated over the interval [start, stop].
## The endpoint of the interval can optionally be excluded.
print(np.linspace(2.0, 3.0, num=5))
print(np.linspace(2.0, 3.0, num=5, endpoint=False))
print(np.linspace(2.0, 3.0, num=5, retstep=True))
# ## Difference between the linspace and arange.
# +
# Difference between the linspace and arange.
## arange allow you to define the size of the step. linspace allow you to define the number of steps.
## Example where arange might fail are -
print(" Using arange ",np.arange(0, 5, 0.5, dtype=int))
print(" Using arange ",np.arange(-3, 3, 0.5, dtype=int))
print(" Using Linspace ", np.linspace(0, 5, num = 5))
print(" Using Linspace ",np.linspace(-3, 3,num = 5))
# -
# ## Difference between numpy Array and Lists
# |Numpy Array|List|
# |-----|-------|
# |Numpy data structures consume less memory|List take more memory than numpy array|
# |Numpy are faster |Lists are slower as compared to numpy array|
# |NumPy have optimized functions such as linear algebra operations built in||
# |Element wise operation is possible||
# |Array are by default Homogeneous, which means data inside an array must be of the same Datatype.|A list can store different data types|
# ||A list can consist of different nested data size|
# |We can create a N-dimensional array in python using numpy.array().||
# ||A list is easier to modify|
# |Array can handle mathematical operations|A list cannot directly handle a mathematical operations|
# ## numpy.zeros
# numpy.zeros(shape, dtype=float, order='C', *, like=None)
## Return (ndarray) a new array of given shape and type, filled with zeros.
arr_1d_zeros = np.zeros(5)
arr_2d_zeros = np.zeros((2,5),dtype="int64")
arr_3d_zeros = np.zeros((2,3,4),dtype = int)
print("\n 1D Array\n",arr_1d_zeros)
print("\n 2D Array\n", arr_2d_zeros)
print("\n 3D Array\n",arr_3d_zeros)
# ## numpy.ones
# numpy.ones(shape, dtype=None, order='C', *, like=None)
## Return a new array of given shape and type, filled with ones.
arr_1d_ones = np.ones(5)
arr_2d_ones = np.ones((2,5),dtype="int64")
arr_3d_ones = np.ones((2,3,4),dtype = int)
print("\n 1D Array\n",arr_1d_ones)
print("\n 2D Array\n", arr_2d_ones)
print("\n 3D Array\n",arr_3d_ones)
# ## numpy.full
# numpy.full(shape, fill_value, dtype=None, order='C', *, like=None)
## Return a new array of given shape and type, filled with fill_value.
arr_1d_full = np.full(2, np.inf)
arr_2d_full = np.full((2, 2), 5)
arr_3d_full = np.full((2, 2,2), [1, 2])
print("\n 1D Array\n",arr_1d_full)
print("\n 2D Array\n", arr_2d_full)
print("\n 3D Array\n",arr_3d_full)
# ## numpy.eye
# numpy.eye(N, M=None, k=0, dtype=<class 'float'>, order='C', *, like=None)
## Return a 2-D array with ones on the diagonal and zeros elsewhere
arr_2d_diag0 = np.eye(2, dtype=int)
arr_2d_diag1 = np.eye(3, k=1)
print("\n 2D Array\n", arr_2d_diag0)
print("\n 2D Array\n", arr_2d_diag1)
# ## random.random
# random.random(size=None)
## Return random floats in the half-open interval [0.0, 1.0).
## Alias for random_sample to ease forward-porting to the new random API.
np.random.random()
# # Array Manipulation
#
# ## numpy.transpose
# +
# numpy.transpose(a, axes=None)
# Reverse or permute the axes of an array; returns the modified array.
# Parameters | aarray_like | Input array.
# axes | tuple or list of ints, optional
# If specified, it must be a tuple or list which contains a permutation of [0,1,..,N-1] where N is the
# number of axes of a. The i’th axis of the returned array will correspond to the axis numbered
# axes[i] of the input. If not specified, defaults to range(a.ndim)[::-1], which reverses the order
# of the axes.
x = np.arange(4).reshape((2,2))
print(x)
np.transpose(x)
# -
x = np.ones((1, 2, 3))
print("Original Array\n",x)
print("\n After Transpose \n",np.transpose(x, (1, 0, 2)))
# ## numpy.vstack
# +
# numpy.vstack(tup)
# Stack arrays in sequence vertically (row wise).
# Parameters | tup | sequence of ndarrays
# The arrays must have the same shape along all but the first axis.
# 1-D arrays must have the same length.
# Returns | stacked | ndarray
# The array formed by stacking the given arrays, will be at least 2-D.
# Stack two 2x2 blocks vertically into a 4x2 array.
top_stack = np.linspace(0, 3, 4).reshape(2,2)
bottom_stack = np.linspace(5, 8, 4).reshape(2,2)
# Fix: the original referenced the undefined name `bottom_right`
# (a NameError at runtime); the intended variable is `bottom_stack`.
vstack = np.vstack((top_stack,bottom_stack))
# -
print("Array \n",top_stack)
print("\nArray \n",bottom_stack)
print("\nMerged Array \n",vstack)
# ## numpy.hstack
# +
# numpy.hstack(tup)
# Stack arrays in sequence horizontally (column wise).
# Parameters | tup | sequence of ndarrays
# The arrays must have the same shape along all but the second axis,
#except 1-D arrays which can be any length.
# Returns | stacked | ndarray
# The array formed by stacking the given arrays.
# Stack two 2x2 blocks horizontally (column-wise) into a 2x4 array.
left_stack = np.linspace(0, 3, 4).reshape(2,2)
right_stack = np.linspace(5, 8, 4).reshape(2,2)
hstack = np.hstack((left_stack,right_stack))
# -
print("Array \n",left_stack)
print("\nArray \n",right_stack)
print("\nMerged Array \n",hstack)
# ## numpy.concatenate
# +
# numpy.concatenate((a1, a2, ...), axis=0, out=None, dtype=None, casting="same_kind")
# Join a sequence of arrays along an existing axis
# Parameters | a1, a2, …sequence of array_like
# The arrays must have the same shape, except in the dimension corresponding to axis
# (the first, by default).
# axis | int, optional
# The axis along which the arrays will be joined. If axis is None, arrays are flattened before use.
# Default is 0.
# Returns | res | ndarray | he concatenated array.
# Join along rows (axis=0): b is appended as a third row.
a = np.array([[1, 2], [3, 4]])
b = np.array([[5, 6]])
np.concatenate((a, b), axis=0)
# -
# Join along columns (axis=1) requires matching row counts, hence b.T.
np.concatenate((a, b.T), axis=1)
# axis=None flattens both inputs before joining.
np.concatenate((a, b), axis=None)
| Numpy/Numpy Basics.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# language: python
# name: python3
# ---
# # Example Model Servers with Seldon
# ## Setup Seldon Core
#
# Use the setup notebook to [Setup Cluster](seldon_core_setup.ipynb#Setup-Cluster) with [Ambassador Ingress](seldon_core_setup.ipynb#Ambassador) and [Seldon Core](seldon_core_setup.ipynb#Install-Seldon-Core). Instructions [also online](./seldon_core_setup.html).
# ## Serve SKlearn Iris Model
# We can deploy a sklearn model uploaded to an object store by using the sklearn model server implementation as the config below:
# %%writefile ../servers/sklearnserver/samples/iris.yaml
apiVersion: machinelearning.seldon.io/v1alpha2
kind: SeldonDeployment
metadata:
name: sklearn
spec:
name: iris
predictors:
- graph:
children: []
implementation: SKLEARN_SERVER
modelUri: gs://seldon-models/sklearn/iris
name: classifier
name: default
replicas: 1
svcOrchSpec:
env:
- name: SELDON_LOG_LEVEL
value: DEBUG
# And then we apply it to deploy it to our kubernetes cluster
# !kubectl apply -f ../servers/sklearnserver/samples/iris.yaml
# !kubectl rollout status deploy/$(kubectl get deploy -l seldon-deployment-id=sklearn -o jsonpath='{.items[0].metadata.name}')
# Once it's deployed we can send our sklearn model requests
# Send a test prediction through the Ambassador ingress to the deployed
# "sklearn" SeldonDeployment. shape=(1,4) matches one iris feature row;
# presumably the client generates a random payload of that shape -- confirm
# against the seldon_core client docs.
from seldon_core.seldon_client import SeldonClient
sc = SeldonClient(deployment_name="sklearn",namespace="seldon")
r = sc.predict(gateway="ambassador",transport="rest",shape=(1,4))
print(r)
# Fails the notebook run early if the deployment did not respond successfully.
assert(r.success==True)
# And delete the model we deployed
# !kubectl delete -f ../servers/sklearnserver/samples/iris.yaml
# ## Serve XGBoost Iris Model
# We can deploy a xgboost model uploaded to an object store by using the xgboost model server implementation as the config below:
# %%writefile ../servers/xgboostserver/samples/iris.yaml
apiVersion: machinelearning.seldon.io/v1alpha2
kind: SeldonDeployment
metadata:
name: xgboost
spec:
name: iris
predictors:
- graph:
children: []
implementation: XGBOOST_SERVER
modelUri: gs://seldon-models/xgboost/iris
name: classifier
name: default
replicas: 1
# And then we apply it to deploy it to our kubernetes cluster
# !kubectl apply -f ../servers/xgboostserver/samples/iris.yaml
# !kubectl rollout status deploy/$(kubectl get deploy -l seldon-deployment-id=xgboost -o jsonpath='{.items[0].metadata.name}')
# Once it's deployed we can send our xgboost model requests
from seldon_core.seldon_client import SeldonClient
sc = SeldonClient(deployment_name="xgboost",namespace="seldon")
r = sc.predict(gateway="ambassador",transport="rest",shape=(1,4))
print(r)
assert(r.success==True)
# And delete the model we deployed
# !kubectl delete -f ../servers/xgboostserver/samples/iris.yaml
# ## Serve Tensorflow MNIST Model
# We can deploy a tensorflow model uploaded to an object store by using the tensorflow model server implementation as the config below.
#
# This notebook contains two examples, one which shows how you can use the TFServing prepackaged serve with the Seldon Protocol, and a second one which shows how you can deploy it using the tensorlfow protocol (so you can send requests of the exact format as you would to a tfserving server).
#
# ### Serve Tensorflow MNIST Model with Seldon Protocol
#
# The config file below shows how you can deploy your Tensorflow model which exposes the Seldon protocol.
# %%writefile ../servers/tfserving/samples/mnist_rest.yaml
apiVersion: machinelearning.seldon.io/v1alpha2
kind: SeldonDeployment
metadata:
name: tfserving
spec:
name: mnist
predictors:
- graph:
children: []
implementation: TENSORFLOW_SERVER
modelUri: gs://seldon-models/tfserving/mnist-model
name: mnist-model
parameters:
- name: signature_name
type: STRING
value: predict_images
- name: model_name
type: STRING
value: mnist-model
name: default
replicas: 1
# Once it's deployed we can send our sklearn model requests
# !kubectl apply -f ../servers/tfserving/samples/mnist_rest.yaml
# !kubectl rollout status deploy/$(kubectl get deploy -l seldon-deployment-id=tfserving -o jsonpath='{.items[0].metadata.name}')
# Once it's deployed we can send our sklearn model requests
from seldon_core.seldon_client import SeldonClient
sc = SeldonClient(deployment_name="tfserving",namespace="seldon")
r = sc.predict(gateway="ambassador",transport="rest",shape=(1,784))
print(r)
assert(r.success==True)
# And delete the model we deployed
# !kubectl delete -f ../servers/tfserving/samples/mnist_rest.yaml
# ## Serve Tensorflow MNIST Model with Tensorflow protocol
#
# The config file below shows how you can deploy your Tensorflow model which exposes the Tensorflow protocol.
# %%writefile ../servers/tfserving/samples/halfplustwo_rest.yaml
apiVersion: machinelearning.seldon.io/v1alpha2
kind: SeldonDeployment
metadata:
name: hpt
spec:
name: hpt
protocol: tensorflow
transport: rest
predictors:
- graph:
children: []
implementation: TENSORFLOW_SERVER
modelUri: gs://seldon-models/tfserving/half_plus_two
name: halfplustwo
parameters:
- name: model_name
type: STRING
value: halfplustwo
name: default
replicas: 1
# !kubectl apply -f ../servers/tfserving/samples/halfplustwo_rest.yaml
# !kubectl rollout status deploy/$(kubectl get deploy -l seldon-deployment-id=hpt -o jsonpath='{.items[0].metadata.name}')
import json
# X=!curl -s -d '{"instances": [1.0, 2.0, 5.0]}' \
# -X POST http://localhost:8003/seldon/seldon/hpt/v1/models/halfplustwo/:predict \
# -H "Content-Type: application/json"
# NOTE(review): `X` is produced by the `!curl` cell magic above, which jupytext
# renders as comments -- this cell only executes inside Jupyter/IPython.
d=json.loads("".join(X))
print(d)
# half_plus_two maps input 1.0 -> 1.0/2 + 2 = 2.5.
assert(d["predictions"][0] == 2.5)
# !kubectl delete -f ../servers/tfserving/samples/halfplustwo_rest.yaml
# ## Serve MLFlow Elasticnet Wines Model
# We can deploy an MLFlow model uploaded to an object store by using the MLFlow model server implementation as the config below:
# %%writefile ../servers/mlflowserver/samples/elasticnet_wine.yaml
apiVersion: machinelearning.seldon.io/v1alpha2
kind: SeldonDeployment
metadata:
name: mlflow
spec:
name: wines
predictors:
- graph:
children: []
implementation: MLFLOW_SERVER
modelUri: gs://seldon-models/mlflow/elasticnet_wine
name: classifier
name: default
replicas: 1
# !kubectl apply -f ../servers/mlflowserver/samples/elasticnet_wine.yaml
# !kubectl rollout status deploy/$(kubectl get deploy -l seldon-deployment-id=mlflow -o jsonpath='{.items[0].metadata.name}')
from seldon_core.seldon_client import SeldonClient
sc = SeldonClient(deployment_name="mlflow",namespace="seldon")
r = sc.predict(gateway="ambassador",transport="rest",shape=(1,11))
print(r)
assert(r.success==True)
# !kubectl delete -f ../servers/mlflowserver/samples/elasticnet_wine.yaml
| notebooks/server_examples.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# language: python
# name: python3
# ---
# ### PROJECT DESCRIPTION
#
# ##The CCTE Programme for Syrians and other refugees in Turkey is being implemented in partnership with the Turkish Red Crescent, the Ministry of National Education (MoNE), the Ministry of Family, Labor and Social Services, and UNICEF. In May 2017, the National CCTE Programme was extended to cover school-age refugee children residing in Turkey under temporary/international protection. It aims to ensure that children have access to school and attend classes regularly through the cash assistance offered under the programme. The cash assistance amounts vary by gender and school level for each month. Payments are made on a bi-monthly basis and are conditional on regular attendance, i.e. cash is not provided if the child is absent for more than four days in a school month. The CCTE programme is open to refugees and persons under temporary/international protection of all nationalities. Applicants for the CCTE must be registered with the Directorate General of Migration Management (DGMM), have a foreigner's ID number beginning with 99, and be registered in the MERNIS database. Applicants must meet the eligibility criteria for the CCTE programme and send their children to school regularly in order to receive CCTE payments regularly. Children attending the Accelerated Learning Program (ALP) at Public Education Centers can also benefit from the CCTE.
#
#
# +
# Calling the required packages
import os
import pandas as pd
import numpy as np
import re
import matplotlib.pyplot as plt
from sklearn.linear_model import LinearRegression
import seaborn as sns
import plotly.graph_objects as go
from pandas.plotting import autocorrelation_plot
from datetime import datetime
from statsmodels.graphics.tsaplots import plot_acf
from statsmodels.graphics.tsaplots import plot_pacf
from statsmodels.stats.stattools import jarque_bera
from statsmodels.tsa.stattools import adfuller
from statsmodels.graphics.gofplots import qqplot
from scipy.stats import gaussian_kde, norm
import scipy
#import sys
# #!{sys.executable} -m pip install statsmodels
import statsmodels
from statsmodels.tsa.arima_model import ARIMA
from statsmodels.tsa.statespace.sarimax import SARIMAX
from dateutil.relativedelta import relativedelta
# -
##Sinif bilgisi Class information
# CCTE transfer amounts by school level and gender (Turkish identifiers:
# kiz = girl, erkek = boy; Anasinif/Ilkokul = preschool/primary; lise = high
# school; HEP = Accelerated Learning Program). Presumably amounts are in TRY
# per payment period -- TODO confirm against programme documentation.
Anasinif_Ilkokul_kiz=50
Anasinif_Ilkokul_erkek=45
lise_kiz=75
lise_erkek=55
Hep_kiz=75
Hep_erkek=75
# Additional-payment rates (ek odeme orani); exact semantics not shown in
# this file -- TODO confirm.
Ek_odeme_orani=0.96
Hep_ek_orani=0.99
# Top-up amounts for high school (lise) and middle school (orta).
top_up_lise=150
top_up_orta=100
import chart_studio
import cufflinks as cf
cf.go_offline()
# +
#calling Arima package
import pmdarima as pm
from pmdarima import auto_arima
# +
###Historical data
historic = pd.read_excel(r"D:\desktop\Sey\ŞEY_Forecasting 24032021.xlsx",sheet_name="Sheet1")
# -
sinif_cinsiyet=pd.read_excel(r"D:\desktop\Sey\seyforecast.xlsx",sheet_name="cinsiyet-sinif")
sinif_cinsiyet.head()
idx = pd.date_range(start='05/2017', end='03/2021', freq='2m')
idx = pd.to_datetime(idx, format = '%m/%d/%Y').strftime('%b %Y')
all_fay = pd.Series((sinif_cinsiyet['Toplam'][0:24]),index = idx)
# +
ax = sinif_cinsiyet.plot(fontsize=15, linewidth=3);
ax.set_xlabel('AY', fontsize=10);
ax.legend(loc='upper left', bbox_to_anchor=(0.7, 1.5), ncol=3, fontsize=7);
# +
###Sinif_cinsiyet forecasting inputs: one bi-monthly Series per school level
###and gender, indexed by the payment-period labels in `idx`.
Anaokul_kiz=pd.Series((sinif_cinsiyet['ANASINIFI_Kadın'][:24]),index = idx)
Anaokul_erkek=pd.Series((sinif_cinsiyet['ANASINIFI_Erkek'][:24]),index = idx)
##############################
ILKOKUL_Kadın=pd.Series((sinif_cinsiyet['ILKOKUL_Kadın'][:24]),index = idx)
ILKOKUL_Erkek=pd.Series((sinif_cinsiyet['ILKOKUL_Erkek'][:24]),index = idx)
###################
HEP_Kadın=pd.Series((sinif_cinsiyet['HEP_Kadın'][:24]),index = idx)
# BUG FIX: the second assignment previously re-bound HEP_Kadın with the
# men's column, clobbering the women's series and never defining HEP_Erkek.
HEP_Erkek=pd.Series((sinif_cinsiyet['HEP_Erkek'][:24]),index = idx)
######################################
LISE_Kadın=pd.Series((sinif_cinsiyet['LISE_Kadın'][:24]),index = idx)
LISE_Erkek=pd.Series((sinif_cinsiyet['LISE_Erkek'][:24]),index = idx)
###########################################
ORTAOKUL_Kadın=pd.Series((sinif_cinsiyet['ORTAOKUL_Kadın'][:24]),index = idx)
ORTAOKUL_Erkek=pd.Series((sinif_cinsiyet['ORTAOKUL_Erkek'][:24]),index = idx)
# -
plt.style.use('fivethirtyeight')
# Static overview of every class/gender series.
ax = sinif_cinsiyet.plot(figsize=(13,4), fontsize=15, linewidth=3)
plt.show()
# +
# Interactive (cufflinks) view of the per-level, per-gender columns.
_level_cols = ['ANASINIFI_Kadın', 'ANASINIFI_Erkek', 'ILKOKUL_Kadın', 'ILKOKUL_Erkek',
               'HEP_Kadın', 'HEP_Erkek', 'LISE_Kadın', 'LISE_Erkek',
               'ORTAOKUL_Kadın', 'ORTAOKUL_Erkek']
pd.concat([sinif_cinsiyet[col] for col in _level_cols], axis=1).iplot()
# -
# # Arima models according to class
# +
##Arima
# Fit one seasonal auto-ARIMA per class/gender series (first 24 bi-monthly
# observations) and forecast the next 10 payment periods.  The differencing
# orders d/D differ per series (tuned by inspection); everything else is a
# shared stepwise search with a 12-period seasonal cycle.
reg = auto_arima(sinif_cinsiyet['ANASINIFI_Kadın'][0:24],
                 start_p=0, start_q=0, max_p=15, max_q=15, m=12,
                 start_P=0, start_Q=0, seasonal=True, d=1, D=1,
                 trace=True, error_action='ignore', suppress_warnings=True,
                 stepwise=True, random_state=20, n_fits=50)
print(reg.aic())
# -
Ana_kiz_model = reg.predict(n_periods=10)
Ana_kiz_model
# +
reg = auto_arima(sinif_cinsiyet['ANASINIFI_Erkek'][0:24],
                 start_p=0, start_q=0, max_p=10, max_q=10, m=12,
                 start_P=0, start_Q=0, seasonal=True, d=2, D=1,
                 trace=True, error_action='ignore', suppress_warnings=True,
                 stepwise=True, random_state=20, n_fits=50)
print(reg.aic())
# -
Ana_erkek_model = reg.predict(n_periods=10)
Ana_erkek_model
# # ILK OKUL
# +
reg = auto_arima(sinif_cinsiyet['ILKOKUL_Kadın'][0:24],
                 start_p=0, start_q=0, max_p=10, max_q=10, m=12,
                 start_P=1, start_Q=0, seasonal=True, d=2, D=1,
                 trace=True, error_action='ignore', suppress_warnings=True,
                 stepwise=True, random_state=20, n_fits=50)
print(reg.aic())
# -
ILK_Kadın_model = reg.predict(n_periods=10)
ILK_Kadın_model
# +
reg = auto_arima(sinif_cinsiyet['ILKOKUL_Erkek'][0:24],
                 start_p=0, start_q=0, max_p=10, max_q=10, m=12,
                 start_P=0, start_Q=0, seasonal=True, d=2, D=1,
                 trace=True, error_action='ignore', suppress_warnings=True,
                 stepwise=True, random_state=20, n_fits=50)
print(reg.aic())
# -
ILK_erkek_model = reg.predict(n_periods=10)
ILK_erkek_model
# # ORTA OKUL
reg = auto_arima(sinif_cinsiyet['ORTAOKUL_Kadın'][0:24],
                 start_p=0, start_q=0, max_p=10, max_q=10, m=12,
                 start_P=0, start_Q=0, seasonal=True, d=2, D=0,
                 trace=True, error_action='ignore', suppress_warnings=True,
                 stepwise=True, random_state=20, n_fits=50)
print(reg.aic())
Orta_kiz_model = reg.predict(n_periods=10)
Orta_kiz_model
reg = auto_arima(sinif_cinsiyet['ORTAOKUL_Erkek'][0:24],
                 start_p=0, start_q=0, max_p=10, max_q=10, m=12,
                 start_P=0, start_Q=0, seasonal=True, d=2, D=0,
                 trace=True, error_action='ignore', suppress_warnings=True,
                 stepwise=True, random_state=20, n_fits=50)
print(reg.aic())
Orta_erk_model = reg.predict(n_periods=10)
Orta_erk_model
# # Lise
reg = auto_arima(sinif_cinsiyet['LISE_Kadın'][0:24],
                 start_p=0, start_q=0, max_p=10, max_q=10, m=12,
                 start_P=0, start_Q=0, seasonal=True, d=0, D=1,
                 trace=True, error_action='ignore', suppress_warnings=True,
                 stepwise=True, random_state=20, n_fits=50)
print(reg.aic())
lise_kiz_model = reg.predict(n_periods=10)
lise_kiz_model
reg = auto_arima(sinif_cinsiyet['LISE_Erkek'][0:24],
                 start_p=0, start_q=0, max_p=10, max_q=10, m=12,
                 start_P=0, start_Q=0, seasonal=True, d=1, D=1,
                 trace=True, error_action='ignore', suppress_warnings=True,
                 stepwise=True, random_state=20, n_fits=50)
print(reg.aic())
lise_erk_model = reg.predict(n_periods=10)
lise_erk_model
# # HEP
reg = auto_arima(sinif_cinsiyet['HEP_Kadın'][0:24],
                 start_p=0, start_q=0, max_p=10, max_q=10, m=12,
                 start_P=0, start_Q=0, seasonal=True, d=1, D=1,
                 trace=True, error_action='ignore', suppress_warnings=True,
                 stepwise=True, random_state=20, n_fits=50)
print(reg.aic())
hep_kiz_model = reg.predict(n_periods=10)
hep_kiz_model
reg = auto_arima(sinif_cinsiyet['HEP_Erkek'][0:24],
                 start_p=0, start_q=0, max_p=10, max_q=10, m=12,
                 start_P=0, start_Q=0, seasonal=True, d=1, D=1,
                 trace=True, error_action='ignore', suppress_warnings=True,
                 stepwise=True, random_state=20, n_fits=50)
print(reg.aic())
hep_erk_model = reg.predict(n_periods=10)
hep_erk_model
# # Addition of all classes
# Total forecast across every level/gender series.
# BUG FIX: this sum was split over two physical lines without a line
# continuation, so the second line was a separate no-op expression and
# lise_erk/hep_kiz/hep_erk were silently dropped from y_pred.
y_pred = (Ana_kiz_model + Ana_erkek_model + ILK_Kadın_model + ILK_erkek_model
          + Orta_kiz_model + Orta_erk_model + lise_kiz_model
          + lise_erk_model + hep_kiz_model + hep_erk_model)
# Kindergarten + primary combined, per gender.
Ana_ilk_kiz = Ana_kiz_model + ILK_Kadın_model
Ana_ilk_erkek = Ana_erkek_model + ILK_erkek_model
y_pred
# +
# Observed beneficiary totals (grey) vs the 10 forecast periods (red).
hfont = {'fontname':'Trebuchet MS'}
fig = plt.figure(figsize=(10,5))
plt.scatter(np.arange(len(sinif_cinsiyet.Toplam))+1, sinif_cinsiyet.Toplam,color='gray',linewidth=0.7, s = 10)
plt.scatter(np.arange(len(y_pred))+len(sinif_cinsiyet.Toplam)+1,y_pred,color='red',linewidth=0.7, s = 10)
plt.ylim(0, 600000)
plt.xlabel('ŞEY ÖDEME DÖNEM AY', **hfont)
plt.ylabel('FAYDA SAHIBI SAYISI', **hfont)
plt.show()
# -
# Labels for the 10 forecast payment periods (bi-monthly from May 2021).
# NOTE(review): `closed=None` is the default and is deprecated in newer
# pandas (replaced by `inclusive`) — kept for the pinned environment.
months_to_pred = pd.date_range(start='05/2021', periods = 10, freq='2m', closed = None)
months_to_pred = pd.to_datetime(months_to_pred, format = '%m/%d/%Y').strftime('%b %Y')
months_to_pred
# Assemble the per-level forecasts into a single prediction frame.
d = {'Months of Payment':months_to_pred,'# of Beneficiaries- Post Verification' : np.zeros(len(y_pred)),'Anasinifi_İLK OKUL_Kiz': Ana_ilk_kiz,
     'Anasinifi_İLK OKUL_Erkek': Ana_ilk_erkek }
predictions = pd.DataFrame(data = d)
# Continue the row numbering where the historical frame left off.
predictions.index += sinif_cinsiyet.index[-1]
predictions['# of Beneficiaries- Post Verification'] = [np.ceil(i) for i in y_pred]
predictions['ORTA_OKUL_Kız']= Orta_kiz_model
predictions['ORTA_OKUL_Erkek']= Orta_erk_model
predictions['Lise_OKUL_Kız']= lise_kiz_model
predictions['Lise_OKUL_Erkek']= lise_erk_model
predictions['HEP_OKUL_Kız']= hep_kiz_model
predictions['HEP_OKUL_Erkek']= hep_erk_model
# BUG FIX: this sum was split over two lines without a continuation, so the
# second line was a stand-alone no-op and three series were dropped.
y_pred = (Ana_kiz_model + Ana_erkek_model + ILK_Kadın_model + ILK_erkek_model
          + Orta_kiz_model + Orta_erk_model + lise_kiz_model
          + lise_erk_model + hep_kiz_model + hep_erk_model)
predictions
# Base payment per period = headcount x per-child monthly rate, summed over
# school levels.
# BUG FIX: the original split this expression over two lines with no line
# continuation, so the high-school and HEP terms formed a discarded no-op
# statement; wrapping the sum in parentheses restores them.
# NOTE(review): middle-school headcounts are priced at the kindergarten/
# primary rates here (as in the original) — confirm that is intended.
predictions['Payment Amount for Beneficiaries (TL)'] = (Anasinif_Ilkokul_kiz*predictions['Anasinifi_İLK OKUL_Kiz'] + Anasinif_Ilkokul_erkek*predictions['Anasinifi_İLK OKUL_Erkek'] + predictions['ORTA_OKUL_Kız']*Anasinif_Ilkokul_kiz + predictions['ORTA_OKUL_Erkek']*Anasinif_Ilkokul_erkek
    + predictions['Lise_OKUL_Kız']*lise_kiz + predictions['Lise_OKUL_Erkek']*lise_erkek + predictions['HEP_OKUL_Kız']*Hep_kiz + predictions['HEP_OKUL_Erkek']*Hep_erkek)
month_to_quart = predictions['Months of Payment']
month_to_quart = pd.to_datetime(month_to_quart, format = '%b %Y').dt.month
month_to_quart
# Periods whose calendar month is in `nn` cover a double entitlement.
nn=[3,5,7]
# NOTE(review): each element of this comprehension is a whole Series, so the
# result is a 2-D array being assigned to one column — this line looks
# broken as written (kept unchanged); revisit.
predictions['Payment Amount for Beneficiaries (TL)']= np.array([predictions['Payment Amount for Beneficiaries (TL)']*2 if i in nn else predictions['Payment Amount for Beneficiaries (TL)'] for i in month_to_quart])
month_to_quart = predictions['Months of Payment']
month_to_quart
# BUG FIX: re-enabled this conversion (it had been commented out); the
# modulo arithmetic below needs integer calendar months, and `(str + 3)`
# would raise a TypeError on the 'May 2021'-style labels.
month_to_quart = pd.to_datetime(month_to_quart, format = '%b %Y').dt.month
month_to_quart
# +
month_to_quart1 = (month_to_quart+3)%3
# Flag the periods (months 1 and 9) that carry the additional payment.
l=[1,9]
quarter1= [1 if i in l else 0 for i in month_to_quart]
quarter1
# -
# Flag the periods whose month number is divisible by 5 (HEP-only extra).
month_to_quart2 = (month_to_quart)%5
quarter2= [1 if i==0 else 0 for i in month_to_quart2]
quarter2
# + active=""
# quarter1=[1,0,0,0,1,0,1,0,0,0]
# quarter2=[0,0,1,0,0,0,0,0,1,0]
# quarter3=[1,0,1,0,1,0,1,0,1,0]
# +
predictions.columns
Ek_odeme_orani=0.96
Hep_ek_orani=0.99
top_up_lise=150
top_up_orta=100
# -
# Additional (ek) payments: a flat 100 TL per eligible child, granted in the
# quarter1 periods for all levels and additionally in quarter2 periods for
# HEP/ALP students, discounted by the expected collection ratios.
okul_sayisi = (predictions['Anasinifi_İLK OKUL_Kiz'] + predictions['Anasinifi_İLK OKUL_Erkek']
               + predictions['ORTA_OKUL_Kız'] + predictions['ORTA_OKUL_Erkek']
               + predictions['Lise_OKUL_Kız'] + predictions['Lise_OKUL_Erkek'])
hep_sayisi = predictions['HEP_OKUL_Kız'] + predictions['HEP_OKUL_Erkek']
ek_odeme = quarter1*(okul_sayisi*100*Ek_odeme_orani + hep_sayisi*100*Hep_ek_orani)
ek_odeme
ek_odeme2 = (quarter2*hep_sayisi*100*Hep_ek_orani)
ek_odeme2
predictions['Ek Ödeme Tutar (TL)'] = ek_odeme + ek_odeme2
# Top-up payments: level-specific one-off amounts in the same periods.
orta_sayisi = predictions['ORTA_OKUL_Kız'] + predictions['ORTA_OKUL_Erkek']
lise_sayisi = predictions['Lise_OKUL_Kız'] + predictions['Lise_OKUL_Erkek']
Topup1 = quarter1*(orta_sayisi*top_up_orta*Ek_odeme_orani + lise_sayisi*top_up_lise*Ek_odeme_orani + hep_sayisi*top_up_lise*Hep_ek_orani)
Topup2 = quarter2*(hep_sayisi*top_up_lise*Hep_ek_orani)
predictions['Top-Up Ödemesi (TL)'] = Topup1 + Topup2
# Grand total per payment period.
predictions["Toplam Tutar"] = predictions['Ek Ödeme Tutar (TL)'] + predictions['Top-Up Ödemesi (TL)'] + predictions['Payment Amount for Beneficiaries (TL)']
predictions["Toplam Tutar"][predictions.index[0]]
predictions.to_excel('C:/Users/fedaa.elderdesawe/desktop/Forecast CCTE(model8).xlsx')
Anasinif_Ilkokul_erkek*predictions['Anasinifi_İLK OKUL_Erkek']
# Historic beneficiary counts from the payment sheet.
Eligibile_Individuals = historic['TRC Ödeme alan faydalanıcı sayısı']
Eligibile_Individuals.head()
idx
# Bi-monthly payment dates for Jan 2018 – Nov 2020.
index = pd.DatetimeIndex(['01/2018', '03/2018', '05/2018', '07/2018', '09/2018', '11/2018',
                          '01/2019', '03/2019', '05/2019', '07/2019', '09/2019', '11/2019',
                          '01/2020', '03/2020', '05/2020', '07/2020', '09/2020', '11/2020'],
                         freq='2MS')
index
historic['TRC Ödeme alan faydalanıcı sayısı'][0:20]
# Label the first 20 observations with 'Mon YYYY' period names.
idxn = pd.date_range(start='01/2018', end='05/2021', freq='2m')
idxn = pd.to_datetime(idxn, format='%m/%d/%Y').strftime('%b %Y')
all_hist = pd.Series(list(historic['TRC Ödeme alan faydalanıcı sayısı'][0:20]), index=idxn)
idxn
# +
# Bar chart of historic eligible individuals per payment period.
hfont = {'fontname':'Trebuchet MS','size' : 12}
plt.figure(figsize=(15,7))
all_hist.plot.bar( color="#f9af2b",edgecolor="#f05131", rot = 0)
plt.xlabel('months', **hfont)
# Reference gridlines at 100k / 300k / 500k.
plt.axhline(y=100000, color='dimgray', linestyle='dashed')
plt.axhline(y=300000, color='dimgray', linestyle='dashed')
plt.axhline(y=500000, color='dimgray', linestyle='dashed')
plt.xticks([1,3,5,7,9,11,13,15,17,19,21], [idx[1],idx[3],idx[5],idx[7],idx[9],idx[11],idx[13],idx[15],idx[17],idx[19],idx[21]], rotation= 0)
plt.ylabel('number of eligible individuals', **hfont)
plt.savefig('all_historic_individuals.png')
# -
# NOTE(review): pd.to_datetime on a series of beneficiary counts
# reinterprets those counts as timestamps — this looks unintended; confirm
# whether plain `all_hist.iplot(...)` was meant.
Eligibile_Individuals = pd.to_datetime(all_hist)
Eligibile_Individuals.iplot(title="# of Eligibile Individuals - Post Verification")
predictions.head()
# +
## Accuracy of the beneficiary forecast vs the realised counts.
from sklearn.metrics import accuracy_score
y_pred = predictions["# of Beneficiaries- Post Verification"]
actual_y = historic['TRC Ödeme alan faydalanıcı sayısı'][0:20]
# BUG FIX: the original passed an undefined name `y_true` (NameError); the
# realised series created above is `actual_y`.
# NOTE(review): accuracy_score is a classification metric — it rejects
# continuous counts, and the two series differ in length (20 vs 10) and in
# the periods they cover.  A regression error (e.g. MAPE on the overlapping
# periods) is almost certainly what is wanted here — confirm.
accuracy_score(actual_y, y_pred)
accuracy_score(actual_y, y_pred, normalize=False)
# -
# # New eligibles by Registration
historic.head(13)
# +
# Train/test split of the beneficiary series for ARIMA back-testing.
# NOTE(review): row 10 falls into neither slice (0:10 vs 11:18) — confirm
# whether it was meant to be skipped.
train=historic['TRC Ödeme alan faydalanıcı sayısı'][0:10]
test=historic['TRC Ödeme alan faydalanıcı sayısı'][11:18]
plt.plot(train)
plt.plot(test)
# -
test
# Back-test: fit on the first 10 periods, forecast the 7 held-out periods.
train_model = auto_arima(train, start_p=0, start_q=0,
                         max_p=10, max_q=10, m=12,
                         start_P=0, seasonal=True, start_Q=0,
                         d=1, D=0, trace=True,
                         error_action='ignore',
                         suppress_warnings=True,
                         stepwise=True, random_state=20, n_fits=50)
print(train_model.aic())
prediction1 = pd.DataFrame(train_model.predict(n_periods=7),index=test.index)
prediction1.columns=['Predicted # of Applications']
prediction1
# Overlay training data, held-out data, and the forecast.
plt.figure(figsize=(8,5))
plt.plot(train,label="Training")
plt.plot(test,label="Test")
plt.plot(prediction1,label="Predicted")
# BUG FIX: 'upperleft' is not a valid matplotlib legend location (it raises
# ValueError on current matplotlib); the valid string is 'upper left'.
plt.legend(loc='upper left')
plt.show()
pd.concat([test,prediction1],axis=1).iplot()
historic.head(24)
# Expected eligible applications per month = applications x eligibility rate.
app_month = historic['Aylık Başvuru Sayısı'][0:22]
elig_ratio = historic['Yüzde'][0:22]
y = np.array(app_month*elig_ratio)
y
plt.plot(y)
y
#reg = SARIMAX(list(y[12:]), order=(0,1,0), seasonal_order=(0,1, 0, 12), enforce_invertibility=False, enforce_stationarity=False).fit()
# NOTE(review): the SARIMAX rebinding above is commented out, so `reg` here
# is still the last model fitted in the previous section — confirm intended.
reg.aic
# Seasonal auto-ARIMA on the monthly eligible-application counts, followed
# by a 10-period forecast.
reg = auto_arima(y, start_p=0, start_q=0,
                 max_p=10, max_q=10, m=12,
                 start_P=0, start_Q=0, seasonal=True,
                 d=1, D=0, trace=True,
                 error_action='ignore', suppress_warnings=True,
                 stepwise=True, random_state=20, n_fits=50)
print(reg.aic())
len(y)
reg.summary()
y_pred_new_reg = reg.predict(n_periods=10)
y_pred_new_reg
# +
# History (grey) vs forecast (red) of new eligible households.
hfont = {'fontname': 'Trebuchet MS'}
fig = plt.figure(figsize=(10, 5))
plt.scatter(np.arange(len(y)) + 1, y, color='gray', linewidth=0.7, s=7)
plt.scatter(np.arange(len(y_pred_new_reg)) + len(y) + 1, y_pred_new_reg, color='red', linewidth=0.7, s=7)
plt.ylim(0, 30000)
plt.xlabel('months since program start', **hfont)
plt.ylabel('number of new eligible households by service centers', **hfont)
plt.show()
# -
historic.head(2)
# ### Bakanlık gelen
# Ministry-referred eligible count = total applications x eligibility ratio
# x ministry share (ORAN_b).
total_app = historic['Toplam Başvuru Sayısı'][0:22]
elig_ratio = historic['Yüzde'][0:22]
elig_app = np.array(total_app * elig_ratio)
bakanliksayi = np.array(historic['ORAN_b'][0:22] * elig_app)
bakanliksayi
y = np.array(bakanliksayi)
reg = auto_arima(y, start_p=1, start_q=0,
                 max_p=4, max_q=4, m=12,
                 start_P=0, start_Q=0, seasonal=True,
                 d=1, D=1, trace=True,
                 error_action='ignore', suppress_warnings=True,
                 stepwise=True)
print(reg.aic())
reg.summary()
y_pred_bakanlik = reg.predict(n_periods=10)
y_pred_bakanlik
# # TRC Ödeme alan faydalanıcı sayısı (# of Beneficiaries- Post Verification)
# +
fayda = historic['TRC Ödeme alan faydalanıcı sayısı'][0:22]
app_month = historic['Aylık Başvuru Sayısı'][0:22]
# Assume 97% of ministry-referred households actually receive payment.
elig_ratio = 0.97
odeme_alan = y_pred_bakanlik * elig_ratio
# PARTIAL MODIFICATION - TRC to explain further why the eligible number of
# HH in January 2017 is 19145, if the total number of applicants in December
# was 85629, and the eligibility ratio was 28.8%?  We assume the new
# eligibles registered at service centers in June were 19145 - 589 (the
# number eligible in the first month).
# new_reg[1] = historic['# of Eligibile Household - Post Verification'][1]-historic['# of Eligibile Household - Post Verification'][0]
# -
y_pred_bakanlik
# Forecast frame for the ministry flow and resulting paid beneficiaries.
f = {'Months of Payment': months_to_pred,
     'Bakanlıktan Gelen': y_pred_bakanlik,
     'TRC Ödeme alan faydalanıcı sayısı': odeme_alan}
predictions2 = pd.DataFrame(data=f)
predictions2
# Direct auto-ARIMA on the paid-beneficiary series as a cross-check.
reg = auto_arima(fayda, start_p=0, start_q=0, start_Q=0,
                 max_p=5, max_q=5, m=12,
                 start_P=0, seasonal=True,
                 d=2, D=0, trace=True,
                 error_action='ignore', suppress_warnings=True,
                 stepwise=True)
print(reg.aic())
reg.summary()
fayda_predict = reg.predict(n_periods=10)
fayda_predict
predictions.to_excel('C:/Users/fedaa.elderdesawe/desktop/Forecast ESSN(model10).xlsx')
| SEY_FORECAST_LAST.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# language: python
# name: python3
# ---
# +
# Batch-convert notebooks to several formats via `jupyter nbconvert`.
import subprocess
import shutil
import os
import time
# Full menu of nbconvert target formats and the file extensions they emit
# (kept for reference; subF below uses a trimmed copy).
ftypes_ori=['asciidoc', 'html', 'latex', 'markdown', 'notebook', 'pdf', 'python','slides'];
ptypes_ori=['asciidoc', 'html', 'tex', 'md', 'ipynb', 'pdf', 'py','slides.html'];
# Default notebook to convert when run stand-alone.
originalF='lec1_step1.ipynb';
# originalF='x_publish_all_codes_sub.ipynb';
def subF(originalF):
    """Convert notebook `originalF` into several formats and collect the
    outputs under a folder named after the notebook's basename.

    Side effects: spawns one `jupyter nbconvert` subprocess per format
    (fire-and-forget), creates ./<basename>/ if missing, moves the produced
    files into it, removes stray duplicates left in the cwd, and copies the
    asciidoc output to <basename>.txt.
    """
    #ftypes=['asciidoc', 'html', 'latex', 'markdown', 'notebook', 'pdf', 'python','slides'];
    #ptypes=['asciidoc', 'html', 'tex', 'md', 'ipynb', 'pdf', 'py','slides.html'];
    ftypes=['asciidoc', 'html', 'latex', 'markdown', 'pdf', 'python','slides'];
    ptypes=['asciidoc', 'html', 'tex', 'md', 'pdf','slides.html'];
    cmd0=['jupyter','nbconvert','--to'];
    ftag=originalF.split('.')[0];
    print(ftag)
    # Launch all conversions concurrently.
    # NOTE(review): the Popen handles are never waited on; the fixed 10 s
    # sleeps below are a race — slow conversions (e.g. pdf) may not have
    # finished when their outputs are moved.  Consider .wait()/.communicate().
    for key in ftypes:
        cmd=cmd0+[key]+[originalF];
        Pcmd=' '.join(cmd)
        print(Pcmd)
        subprocess.Popen(cmd)
    if not os.path.isdir(ftag):
        print(' '.join(['The target folder \"',ftag,'\" does not exist...']))
        os.mkdir(ftag);
    else:
        print(' '.join(['There exists',ftag]));
    time.sleep(10);
    # Move finished outputs into the target folder.
    for key in ptypes:
        target='.'.join([ftag,key]);
        print(target);
        if os.path.isfile(target):
            shutil.move(target,os.path.join(ftag,target));
    time.sleep(10);
    # Remove late-arriving duplicates left in the working directory.
    for key in ptypes:
        target='.'.join([ftag,key]);
        print(target);
        if os.path.isfile(target):
            os.remove(target);
    # Keep a plain-text twin of the asciidoc output.
    target='.'.join([ftag,ptypes[0]]);
    target2='.'.join([ftag,'txt']);
    shutil.copy(os.path.join(ftag,target),os.path.join(ftag,target2));
# Driver: pick every non-scratch .py notebook twin in the cwd and convert
# the matching .ipynb ('x_'-prefixed and .ipynb_checkpoints entries excluded).
flist=os.listdir(path='.')
flist1= [key for key in flist if '.py' in key or '.ipy' in key ]
flist2= [key for key in flist1 if 'x_' not in key and '.ipynb_' not in key ]
flist3= [key for key in flist2 if '.py' in key ]
ftaglist=[key.split('.')[0] for key in flist3]
print(ftaglist)
for key in ftaglist:
    subF(".".join([key,ptypes_ori[4]])) # + 'ipynb'
# subF(originalF)
print("\n All processes were completed!")
# -
# Scratch check of the directory-listing filter.
flist=os.listdir(path='.')
print(flist)
flist[0].find('py')
# BUG FIX: `'py' or 'ipynb' in key` evaluated the non-empty literal 'py' as
# always-True, so the filter kept every entry; test both substrings instead.
[key for key in flist if 'py' in key or 'ipynb' in key]
# Exploratory listing checks (scratch work for the driver filters above).
flist=os.listdir(path='.')
flist1= [key for key in flist if '.py' in key or '.ipy' in key ]
print(flist1)
print(' ')
flist2= [key for key in flist1 if 'x_' not in key and '.ipynb_' not in key ]
print(flist2)
flist3= [key for key in flist2 if '.py' in key ]
print(flist3)
ftaglist=[key.split('.')[0] for key in flist3]
print(ftaglist)
# +
[key for key in flist if 'x_' not in key ]
# -
# NOTE(review): in Python 3 `key` does not leak out of the comprehension
# above; this line relies on the leftover loop variable from the
# `for key in ftaglist` driver loop earlier in the file.
".".join([key,ptypes_ori[4]])
| x_publish_all_codes_SCRACH.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# language: python
# name: python3
# ---
import padasip as pa
import numpy as np
import os
import librosa
from mir_eval.separation import bss_eval_sources
import scipy.io.wavfile as wavfile
import pandas as pd
# # Metrics before ANC use
def signaltonoise(a, axis, ddof):
    """Mean-to-standard-deviation ratio of `a` along `axis`, rounded to 4
    decimal places; returns 0 when the standard deviation is zero."""
    data = np.asanyarray(a)
    avg = data.mean(axis)
    spread = data.std(axis = axis, ddof = ddof)
    # np.where keeps the zero-spread case pinned at 0 instead of dividing by 0.
    ratio = np.where(spread == 0, 0, avg / spread)
    return round(float(ratio), 4)
history_metrics = []
# Baseline separation quality: compare each model prediction (pred*.wav)
# against its ground truth (gt*.wav) for every sample folder under
# ./visualization, before any adaptive noise cancellation is applied.
for folder in os.listdir(os.path.join(os.getcwd(),"visualization")):
    if(os.path.isdir(os.path.join(os.getcwd(),"visualization",folder))):
        fold_path = os.path.join(os.getcwd(),"visualization",folder)
        #required gt1.wav, gt2.wav, pred1.wav, pred2.wav
        gt1,sr = librosa.load(os.path.join(fold_path,'gt1.wav'), mono=True)
        gt2,sr = librosa.load(os.path.join(fold_path,'gt2.wav'), mono=True)
        pred1,sr = librosa.load(os.path.join(fold_path,'pred1.wav'), mono=True)
        pred2,sr = librosa.load(os.path.join(fold_path,'pred2.wav'), mono=True)
        mix_wav, sr = librosa.load(os.path.join(fold_path, 'mix.wav'), mono=True)
        L = len(gt1)
        #check the output sir, sar, sdr
        gts_wav = [gt1, gt2]
        preds_wav = [pred1, pred2]
        # Guard against silent clips: bss_eval is undefined for all-zero sources.
        valid = True
        for n in range(0,2):
            valid *= np.sum(np.abs(gts_wav[n])) > 1e-5
            valid *= np.sum(np.abs(preds_wav[n])) > 1e-5
        if valid:
            sdr, sir, sar, _ = bss_eval_sources(
                np.asarray(gts_wav),
                np.asarray(preds_wav),
                False)
            # Metrics of the raw mixture as a reference floor.
            sdr_mix, _, _, _ = bss_eval_sources(
                np.asarray(gts_wav),
                np.asarray([mix_wav[0:L] for n in range(2)]),
                False)
            #formula for calculation of MSE, then take mean of 2
            MSE_baseline = np.array([np.dot(gt2-pred2, gt2-pred2) / float(len(gt2)), np.dot(gt1-pred1, gt1-pred1) / float(len(gt1))])
            #formula for calculation of SNR using scipy and take mean of 2
            snr = np.array([signaltonoise(pred1, axis = 0, ddof = 0),signaltonoise(pred2, axis = 0, ddof = 0)])
            history_metrics.append([sdr_mix.mean(),sdr.mean(),sir.mean(),sar.mean(), snr.mean(), MSE_baseline.mean()])
df = pd.DataFrame(np.array(history_metrics),columns = ['sdr_mix', 'sdr', 'sir', 'sar', 'snr', 'mse'])
df.head()
df.to_csv("Baseline_model_results.csv")
sdr_mix, sdr, sir, sar, snr, mse_baseline = np.mean(history_metrics, axis = 0) #mean over all data items
print('before applying ANC: ',sdr_mix, sdr, sir,sar, snr, mse_baseline)
# # Metrics after ANC used
#
# * Table for 2 audios with different lr and weights filter size
# * table for final metrics on whole dataset using the parameters selected above
# * box plot of metrics on complete dataset using parameters selected above
# ----------------
# * Version 1: iterations = 10000, lr = 0.01 was used which is saved with names `pred1_anc.wav`
# * Version 2: iterations = 50000, lr = 0.0001 was used, which is saved with names `pred1_anc50000.wav`
def adaptive_noice_cancellation(filter_type, input_with_noise, noise, length_of_signal, m, learning_rate):
    """
    Adaptive noise cancellation via an adaptive FIR filter.

    Parameters:
        filter_type: 'lms' or 'rls' — the weight-adaptation rule to use
        input_with_noise: primary signal (desired signal + interference)
        noise: reference interference fed to the adaptive filter
        length_of_signal: number of samples to process
        m: number of taps (filter length)
        learning_rate: step size governing the per-iteration weight change
    Returns:
        filtered_signal: the ANC output (primary minus estimated noise)
        hist_lmserror: mean-squared output power recorded at every step
    """
    primary = np.asarray(input_with_noise)       # input to the canceller
    reference = np.asarray(noise)                # input to the adaptive filter
    weights = np.zeros(shape = m)
    mu = 1 - learning_rate                       # RLS forgetting factor
    eps = 0.5                                    # RLS initialisation constant
    noise_estimate = np.zeros(shape = length_of_signal)
    filtered_signal = primary.copy()
    R = 1/eps*np.identity(m)                     # inverse-correlation estimate
    hist_lmserror = []
    hist_sample_error = []
    # Slide the m-tap window over the reference and cancel sample by sample.
    for i in range(m-1, length_of_signal):
        noise_sample = reference[i-m+1:i+1]
        assert noise_sample.shape == weights.shape , "shape of noise_sample and weights should be same"
        noise_estimate[i] = np.dot(noise_sample,weights)
        filtered_signal[i] = primary[i] - noise_estimate[i]
        error = filtered_signal[i]
        if(filter_type == 'lms'):
            # LMS weight adaptation: w += 2*mu*e*x
            weights = weights + 2*learning_rate*error*noise_sample
        elif(filter_type == 'rls'):
            # RLS weight adaptation via the inverse-correlation recursion.
            # NOTE(review): with 1-D arrays `.T` is a no-op, so R1 collapses
            # to a scalar times R rather than the textbook outer product —
            # kept exactly as the original computes it.
            R1 = np.dot(np.dot(np.dot(R,noise_sample),noise_sample.T),R)
            R2 = mu + np.dot(np.dot(noise_sample,R),noise_sample.T)
            R = 1/mu * (R - R1/R2)
            dw = np.dot(R, noise_sample.T) * error
            weights = weights + dw
        else:
            print('invalid filter argument')
            return [0],[0]
        # Track output power (proxy for residual noise) and the raw error.
        lms_error = np.sum(filtered_signal**2) / len(filtered_signal)
        hist_lmserror.append(lms_error)
        hist_sample_error.append(error)
    return filtered_signal, hist_lmserror
# # Taking a sample of 3 mixed signals to determine optimum parameters for ANC
# ### Random sampling of mixed signals
# Randomly pick 3 of the first 40 sample folders for the parameter sweep.
target_path = os.path.join(os.getcwd(), 'visualization')
count = 0
sample = []
i = np.random.randint(0,40, 3)
k = 0
for folder in os.listdir(target_path):
    if(os.path.isdir(os.path.join(target_path,folder))):
        #print(folder)
        if(k in i):
            sample.append(folder)
        k += 1
print(sample)
# Hyper-parameter grid: learning rates 1e-1..1e-5, four filter lengths, and
# the two adaptive-filter flavours.
learning_rate = np.logspace(-1, -5, num = 5)
m = np.array([4,16,64,128])
filter_type = ['rls', 'lms']
# # change here
# +
# TAKE ANY 1 FOLDER AND APPLY SNR FORMULA ON IT WITH LMS
# AND WITH RLS FILTER TO FILL THE COLUMN IN REPORT APPENDIX PARAMETER
# SELECTION
#run same code as below
#just put sample = ['accordion-XZ1rUpbdu-Y+tuba-G46O0IzYtt0'] manually
# Overrides the random sample with one fixed folder for the report appendix.
sample = ['accordion-XZ1rUpbdu-Y+tuba-G46O0IzYtt0']
# +
history_metrics = []
# Hyper-parameter sweep (filter type x taps x learning rate) over the
# sampled folders; per-combination BSS metrics, SNR, and MSE are logged.
for folder in sample:
    fold_path = os.path.join(os.getcwd(),"visualization",folder)
    # PERF FIX: the five wav files depend only on `folder`, so they are
    # loaded once here instead of being re-read inside the innermost
    # (filter, taps, lr) loop as the original did.
    #required gt1.wav, gt2.wav, pred1.wav, pred2.wav
    gt1,sr = librosa.load(os.path.join(fold_path,'gt1.wav'), mono=True)
    gt2,sr = librosa.load(os.path.join(fold_path,'gt2.wav'), mono=True)
    pred1,sr = librosa.load(os.path.join(fold_path,'pred1.wav'), mono=True)
    pred2,sr = librosa.load(os.path.join(fold_path,'pred2.wav'), mono=True)
    mix_wav, sr = librosa.load(os.path.join(fold_path, 'mix.wav'), mono=True)
    L = len(gt1)
    for f in filter_type:
        for taps in m:
            for lr in learning_rate:
                #apply adaptive noice cancellation to pred1 and pred2
                pred1_anc, lms_error = adaptive_noice_cancellation(f, pred1, pred2, L, taps, lr)
                pred2_anc, lms_error = adaptive_noice_cancellation(f, pred2, pred1, L, taps, lr)
                if(f=='rls'):
                    name1 = 'pred1_ancRLS.wav'
                    name2 = 'pred2_ancRLS.wav'
                elif(f=='lms'):
                    name1 = 'pred1_ancLMS.wav'
                    name2 = 'pred2_ancLMS.wav'
                # NOTE(review): every grid point overwrites the same output
                # file per filter type, so only the last (taps, lr) result
                # survives on disk — confirm intended.
                wavfile.write(os.path.join(fold_path, name1),sr,pred1_anc)
                wavfile.write(os.path.join(fold_path, name2),sr,pred2_anc)
                #check the output sir, sar, sdr, mse
                gts_wav = [gt1, gt2]
                preds_wav = [pred1_anc, pred2_anc]
                # Guard against silent clips (bss_eval is undefined for them).
                valid = True
                for n in range(0,2):
                    valid *= np.sum(np.abs(gts_wav[n])) > 1e-5
                    valid *= np.sum(np.abs(preds_wav[n])) > 1e-5
                if valid:
                    sdr, sir, sar, _ = bss_eval_sources(
                        np.asarray(gts_wav),
                        np.asarray(preds_wav),
                        False)
                    sdr_mix, _, _, _ = bss_eval_sources(
                        np.asarray(gts_wav),
                        np.asarray([mix_wav[0:L] for n in range(2)]),
                        False)
                    MSE_anc = np.array([np.dot(gt1 - pred1_anc, gt1-pred1_anc) / float(len(gt1)), np.dot(gt2 - pred2_anc, gt2-pred2_anc) / float(len(gt2))])
                    snr = np.array([signaltonoise(pred1_anc, axis = 0, ddof = 0),signaltonoise(pred2_anc, axis = 0, ddof = 0)])
                    history_metrics.append([f, taps, lr, sdr_mix.mean(),sdr.mean(),sir.mean(),sar.mean(), snr.mean(), MSE_anc.mean()])
                print(f, taps, lr, 'done')
# -
# Persist the sweep results.
# NOTE(review): snr_learning and explore_learning are built from the same
# history_metrics list, so snrSelection.csv and parameterSelection.csv are
# identical — confirm both exports are needed.
snr_learning = pd.DataFrame(history_metrics, columns = ['filter', 'size', 'lr', 'sdr_mix', 'sdr', 'sir', 'sar', 'snr', 'MSE ANC'])
display(snr_learning)
snr_learning.to_csv('snrSelection.csv')
explore_learning = pd.DataFrame(history_metrics, columns = ['filter', 'size', 'lr', 'sdr_mix', 'sdr', 'sir', 'sar', 'snr', 'MSE ANC'])
display(explore_learning)
explore_learning.to_csv('parameterSelection.csv')
# Mean metrics per (filter, size, lr) combination.
overall_performance = explore_learning.groupby(['filter', 'size', 'lr']).mean()
# +
lms_performance = overall_performance.loc['lms'].copy()
rls_performance = overall_performance.loc['rls'].copy()
rls_performance.to_csv("rls_parameter.csv")
lms_performance.to_csv("lms_parameter.csv")
# -
# # by observation size = 128, lr = 0.00001 are the best parameter for rls
rls_performance.loc[128, 0.00001]
# # by observation size = 128, lr = 0.0001 are the best parameters for lms filter
lms_performance.loc[128, 0.0001]
# # Applying the optimal lms on complete dataset
history_metrics = []
# Apply the chosen LMS configuration (128 taps, lr=1e-4) to every folder and
# record post-ANC metrics.
for folder in os.listdir(os.path.join(os.getcwd(),"visualization")):
    if(os.path.isdir(os.path.join(os.getcwd(),"visualization",folder))):
        fold_path = os.path.join(os.getcwd(),"visualization",folder)
        #required gt1.wav, gt2.wav, pred1.wav, pred2.wav
        gt1,sr = librosa.load(os.path.join(fold_path,'gt1.wav'), mono=True)
        gt2,sr = librosa.load(os.path.join(fold_path,'gt2.wav'), mono=True)
        pred1,sr = librosa.load(os.path.join(fold_path,'pred1.wav'), mono=True)
        pred2,sr = librosa.load(os.path.join(fold_path,'pred2.wav'), mono=True)
        mix_wav, sr = librosa.load(os.path.join(fold_path, 'mix.wav'), mono=True)
        L = len(gt1)
        #apply adaptive noice cancellation to pred1 and pred2
        pred1_anc, lms_error1 = adaptive_noice_cancellation('lms', pred1, pred2, L, m = 128, learning_rate = 0.0001)
        pred2_anc, lms_error2 = adaptive_noice_cancellation('lms', pred2, pred1, L, m = 128, learning_rate = 0.0001)
        wavfile.write(os.path.join(fold_path, 'pred1_ancLMS.wav'),sr,pred1_anc)
        wavfile.write(os.path.join(fold_path, 'pred2_ancLMS.wav'),sr,pred2_anc)
        #check the output sir, sar, sdr
        gts_wav = [gt1, gt2]
        preds_wav = [pred1_anc, pred2_anc]
        # Guard against silent clips (bss_eval is undefined for them).
        valid = True
        for n in range(0,2):
            valid *= np.sum(np.abs(gts_wav[n])) > 1e-5
            valid *= np.sum(np.abs(preds_wav[n])) > 1e-5
        if valid:
            sdr, sir, sar, _ = bss_eval_sources(
                np.asarray(gts_wav),
                np.asarray(preds_wav),
                False)
            sdr_mix, _, _, _ = bss_eval_sources(
                np.asarray(gts_wav),
                np.asarray([mix_wav[0:L] for n in range(2)]),
                False)
            MSE_anc = np.array([np.dot(gt1 - pred1_anc, gt1-pred1_anc) / float(len(gt1)), np.dot(gt2 - pred2_anc, gt2-pred2_anc) / float(len(gt2))])
            snr = np.array([signaltonoise(pred1_anc, axis = 0, ddof = 0),signaltonoise(pred2_anc, axis = 0, ddof = 0)])
            history_metrics.append([sdr_mix.mean(),sdr.mean(),sir.mean(),sar.mean(), snr.mean(),MSE_anc.mean()])
        print(folder, " done")
sdr_mix, sdr, sir, sar, snr, mse = np.mean(history_metrics, axis = 0)
print('after applying ANC with lms filter: ',sdr_mix, sdr, sir,sar, snr, mse)
lms_performance = pd.DataFrame(history_metrics, columns = ['sdr_mix', 'sdr', 'sir', 'sar', 'snr', 'mse'])
lms_performance.to_csv('lms_results.csv')
# # Applying RLS optimal filter on complete dataset
history_metrics_rls = []
# Apply the chosen RLS configuration (128 taps, lr=1e-5) to every folder and
# record post-ANC metrics.
for folder in os.listdir(os.path.join(os.getcwd(),"visualization")):
    if(os.path.isdir(os.path.join(os.getcwd(),"visualization",folder))):
        fold_path = os.path.join(os.getcwd(),"visualization",folder)
        #required gt1.wav, gt2.wav, pred1.wav, pred2.wav
        gt1,sr = librosa.load(os.path.join(fold_path,'gt1.wav'), mono=True)
        gt2,sr = librosa.load(os.path.join(fold_path,'gt2.wav'), mono=True)
        pred1,sr = librosa.load(os.path.join(fold_path,'pred1.wav'), mono=True)
        pred2,sr = librosa.load(os.path.join(fold_path,'pred2.wav'), mono=True)
        mix_wav, sr = librosa.load(os.path.join(fold_path, 'mix.wav'), mono=True)
        L = len(gt1)
        #apply adaptive noice cancellation to pred1 and pred2
        pred1_anc, lms_error1 = adaptive_noice_cancellation('rls', pred1, pred2, L, m = 128, learning_rate = 0.00001)
        pred2_anc, lms_error2 = adaptive_noice_cancellation('rls', pred2, pred1, L, m = 128, learning_rate = 0.00001)
        wavfile.write(os.path.join(fold_path, 'pred1_ancRLS.wav'),sr,pred1_anc)
        wavfile.write(os.path.join(fold_path, 'pred2_ancRLS.wav'),sr,pred2_anc)
        #check the output sir, sar, sdr
        gts_wav = [gt1, gt2]
        preds_wav = [pred1_anc, pred2_anc]
        # Guard against silent clips (bss_eval is undefined for them).
        valid = True
        for n in range(0,2):
            valid *= np.sum(np.abs(gts_wav[n])) > 1e-5
            valid *= np.sum(np.abs(preds_wav[n])) > 1e-5
        if valid:
            sdr, sir, sar, _ = bss_eval_sources(
                np.asarray(gts_wav),
                np.asarray(preds_wav),
                False)
            sdr_mix, _, _, _ = bss_eval_sources(
                np.asarray(gts_wav),
                np.asarray([mix_wav[0:L] for n in range(2)]),
                False)
            MSE_anc = np.array([np.dot(gt1 - pred1_anc, gt1-pred1_anc) / float(len(gt1)), np.dot(gt2 - pred2_anc, gt2-pred2_anc) / float(len(gt2))])
            snr = np.array([signaltonoise(pred1_anc, axis = 0, ddof = 0),signaltonoise(pred2_anc, axis = 0, ddof = 0)])
            history_metrics_rls.append([sdr_mix.mean(),sdr.mean(),sir.mean(),sar.mean(), snr.mean(),MSE_anc.mean()])
        print(folder, " done")
sdr_mix, sdr, sir, sar, snr, mse = np.mean(history_metrics_rls, axis = 0)
print('after applying ANC with rls filter: ', sdr_mix, sdr, sir, sar, snr, mse)
rls_performance = pd.DataFrame(history_metrics_rls, columns = ['sdr_mix', 'sdr', 'sir', 'sar', 'snr', 'mse'])
rls_performance.to_csv('rls_results.csv')
# # Analysis of results
# Summary statistics: baseline vs the two ANC variants.
df.describe()
lms_performance.describe()
rls_performance.describe()
import seaborn as sns
import matplotlib.pyplot as plt
# Tag each frame with its model name and build one long comparison frame.
df['model'] = ['baseline']*len(df)
rls_performance['model'] = ['rls']*len(rls_performance)
lms_performance['model'] = ['lms']*len(lms_performance)
combined_df = pd.concat([df,lms_performance, rls_performance], ignore_index = True)
# The tag and sdr_mix columns are only needed for combined_df; drop them so
# the plotting frames below stay purely numeric.
df.drop(columns = ['model'], inplace = True);
rls_performance.drop(columns = ['model'], inplace = True);
lms_performance.drop(columns=['model'], inplace = True)
df.drop(columns = ['sdr_mix'], inplace = True);
rls_performance.drop(columns = ['sdr_mix'], inplace = True);
lms_performance.drop(columns = ['sdr_mix'], inplace = True);
# Split the metrics into the BSS-eval ones and the ANC-specific ones.
df_bss = df[['sdr', 'sir','sar']]
lms_performance_bss = lms_performance[['sdr', 'sir','sar']]
rls_performance_bss = rls_performance[['sdr', 'sir','sar']]
df_anc = df[['snr', 'mse']]
lms_performance_anc = lms_performance[['snr', 'mse']]
rls_performance_anc = rls_performance[['snr', 'mse']]
# +
# Grouped boxplot comparing baseline / LMS / RLS on the BSS-eval metrics.
ticks = ['sdr', 'sir', 'sar']
def set_box_color(bp, color):
    """Paint every artist of boxplot *bp* (boxes, whiskers, caps, medians)
    in the given *color*."""
    plt.setp(bp['boxes'], color=color)
    plt.setp(bp['whiskers'], color=color)
    plt.setp(bp['caps'], color=color)
    plt.setp(bp['medians'], color=color)
plt.figure(figsize = (18,10))
# One box per metric; the +-0.7 offsets place the three models side by side
# around each tick position (ticks are spaced 2.0 apart).
bpl = plt.boxplot(df_bss.T, positions=np.array(range(len(df_bss.T)))*2.0-0.7, sym='', widths=0.4)
bpm = plt.boxplot(lms_performance_bss.T, positions=np.array(range(len(lms_performance_bss.T)))*2.0, sym='', widths=0.4)
bpr = plt.boxplot(rls_performance_bss.T, positions=np.array(range(len(rls_performance_bss.T)))*2.0+0.7, sym='', widths=0.4)
set_box_color(bpl, '#D7191C') # colors are from http://colorbrewer2.org/
set_box_color(bpr, '#2C7BB6')
set_box_color(bpm, '#58a832')
# draw temporary red and blue lines and use them to create a legend
plt.plot([], c='#D7191C', label='baseline')
plt.plot([], c='#2C7BB6', label='rls')
plt.plot([], c='#58a832', label = 'lms')
plt.legend(ncol = 3, fontsize = 'xx-large')
plt.xticks(range(0, len(ticks) * 2, 2), ticks, fontsize = 'xx-large')
#plt.xlim(-2, len(ticks)*3)
plt.tight_layout()
plt.savefig('resultsComparisonBSS.png')
# +
# Split the ANC metrics into one single-column frame per metric so that MSE
# and SNR can be drawn on separately scaled figures.
df_anc_mse, df_anc_snr = df_anc[['mse']], df_anc[['snr']]
lms_performance_anc_mse, lms_performance_anc_snr = lms_performance_anc[['mse']], lms_performance_anc[['snr']]
rls_performance_anc_mse, rls_performance_anc_snr = rls_performance_anc[['mse']], rls_performance_anc[['snr']]
# -
# +
# Grouped boxplot comparing baseline / LMS / RLS on MSE only.
ticks = ['mse']
# NOTE: identical redefinition of set_box_color — each notebook cell is kept
# self-contained so it can be re-run in isolation.
def set_box_color(bp, color):
    """Paint every artist of boxplot *bp* in the given *color*."""
    plt.setp(bp['boxes'], color=color)
    plt.setp(bp['whiskers'], color=color)
    plt.setp(bp['caps'], color=color)
    plt.setp(bp['medians'], color=color)
plt.figure(figsize = (18,10))
bpl = plt.boxplot(df_anc_mse.T, positions=np.array(range(len(df_anc_mse.T)))*2.0-0.7, sym='', widths=0.4)
bpm = plt.boxplot(lms_performance_anc_mse.T, positions=np.array(range(len(lms_performance_anc_mse.T)))*2.0, sym='', widths=0.4)
bpr = plt.boxplot(rls_performance_anc_mse.T, positions=np.array(range(len(rls_performance_anc_mse.T)))*2.0+0.7, sym='', widths=0.4)
set_box_color(bpl, '#D7191C') # colors are from http://colorbrewer2.org/
set_box_color(bpr, '#2C7BB6')
set_box_color(bpm, '#58a832')
# draw temporary red and blue lines and use them to create a legend
plt.plot([], c='#D7191C', label='baseline')
plt.plot([], c='#2C7BB6', label='rls')
plt.plot([], c='#58a832', label = 'lms')
plt.legend(ncol = 3, fontsize = 'xx-large')
plt.xticks(range(0, len(ticks) * 2, 2), ticks, fontsize = 'xx-large')
#plt.xlim(-2, len(ticks)*3)
plt.tight_layout()
plt.savefig('resultsComparisonANCmse.png')
# +
# Grouped boxplot comparing baseline / LMS / RLS on SNR only.
ticks = ['snr']
# NOTE: identical redefinition of set_box_color — each notebook cell is kept
# self-contained so it can be re-run in isolation.
def set_box_color(bp, color):
    """Paint every artist of boxplot *bp* in the given *color*."""
    plt.setp(bp['boxes'], color=color)
    plt.setp(bp['whiskers'], color=color)
    plt.setp(bp['caps'], color=color)
    plt.setp(bp['medians'], color=color)
plt.figure(figsize = (18,10))
bpl = plt.boxplot(df_anc_snr.T, positions=np.array(range(len(df_anc_snr.T)))*2.0-0.7, sym='', widths=0.4)
bpm = plt.boxplot(lms_performance_anc_snr.T, positions=np.array(range(len(lms_performance_anc_snr.T)))*2.0, sym='', widths=0.4)
bpr = plt.boxplot(rls_performance_anc_snr.T, positions=np.array(range(len(rls_performance_anc_snr.T)))*2.0+0.7, sym='', widths=0.4)
set_box_color(bpl, '#D7191C') # colors are from http://colorbrewer2.org/
set_box_color(bpr, '#2C7BB6')
set_box_color(bpm, '#58a832')
# draw temporary red and blue lines and use them to create a legend
plt.plot([], c='#D7191C', label='baseline')
plt.plot([], c='#2C7BB6', label='rls')
plt.plot([], c='#58a832', label = 'lms')
plt.legend(loc = 'upper right', fontsize = 'xx-large')
plt.xticks(range(0, len(ticks) * 2, 2), ticks, fontsize = 'xx-large')
#plt.xlim(-2, len(ticks)*3)
plt.tight_layout()
plt.savefig('resultsComparisonANCsnr.png')
# -
| ckptEval/MUSIC-2mix-LogFreq-resnet18dilated-unet7-linear-frames3stride24-maxpool-binary-weightedLoss-channels32-epoch100-step40_80/AdaptiveFilter.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# ---
# + [markdown] application/vnd.databricks.v1+cell={"inputWidgets": {}, "nuid": "e3c276a0-faa4-449b-a9ba-8b5dd58f5140", "showTitle": false, "title": ""}
# Import Libraries for PySpark
# + application/vnd.databricks.v1+cell={"inputWidgets": {}, "nuid": "2ce7cf8b-07f5-4eda-90b1-9084e6d53dd1", "showTitle": false, "title": ""}
from pyspark import SparkContext
from pyspark import SparkConf
import numpy as np
# + [markdown] application/vnd.databricks.v1+cell={"inputWidgets": {}, "nuid": "9380f758-35c8-4d16-9195-9853b936cb9e", "showTitle": false, "title": ""}
# ## (b) Now, come back to your notebook HW3Q3 and create a new SparkContext. Note that if you are using python,you will need to import SparkContext from pyspark
# + application/vnd.databricks.v1+cell={"inputWidgets": {}, "nuid": "fe0f7eae-e408-4db5-87c1-26741ef51821", "showTitle": false, "title": ""}
# Reuse the context Databricks already created rather than constructing a new one.
sc = SparkContext.getOrCreate()
# + [markdown] application/vnd.databricks.v1+cell={"inputWidgets": {}, "nuid": "16bcc4ee-4055-48fc-b460-b9ad24b6b441", "showTitle": false, "title": ""}
# ## (c) Create a RDD (using textFile method from your SparkContext) with the file you just dropped in item (a). For that, you will need to use the path you took note before.
# + application/vnd.databricks.v1+cell={"inputWidgets": {}, "nuid": "ffe3c58f-515b-4c57-8cec-74d0e44344da", "showTitle": false, "title": ""}
# DBFS path of the uploaded input file; each element of the RDD is one line.
text_rdd = sc.textFile('/FileStore/tables/inputHW3.txt')
# + [markdown] application/vnd.databricks.v1+cell={"inputWidgets": {}, "nuid": "88d8e520-abc4-4619-9ba5-61162eac5329", "showTitle": false, "title": ""}
# ### Displaying first few lines from text file
# + application/vnd.databricks.v1+cell={"inputWidgets": {}, "nuid": "3b25cf17-21b0-4ba8-801e-42541137e3f7", "showTitle": false, "title": ""}
text_rdd.take(100)
# + [markdown] application/vnd.databricks.v1+cell={"inputWidgets": {}, "nuid": "78b24506-59c5-4125-9e33-d16b41430197", "showTitle": false, "title": ""}
# ## (d)Using Spark operations on this RDD (transformation, actions or persistence), count the words of this text file and print its words and their corresponding frequencies.
# + [markdown] application/vnd.databricks.v1+cell={"inputWidgets": {}, "nuid": "894cc32d-3c0c-42b1-8f50-3c35db4bf155", "showTitle": false, "title": ""}
# #### Splitting the entire file into words
# + application/vnd.databricks.v1+cell={"inputWidgets": {}, "nuid": "abad671f-4fd3-4ef9-95c7-4f63f9411347", "showTitle": false, "title": ""}
#Split with no parameters splits by whitespaces.
words = text_rdd.flatMap(lambda line: line.split())
# + [markdown] application/vnd.databricks.v1+cell={"inputWidgets": {}, "nuid": "a6023cfa-71c5-4fc3-b612-ff1e32593eb2", "showTitle": false, "title": ""}
# #### Mapping each word to a single count and calculating the frequency of a word
# + application/vnd.databricks.v1+cell={"inputWidgets": {}, "nuid": "8ee1298a-b765-422d-b4e8-416447459247", "showTitle": false, "title": ""}
# Classic word count: emit (word, 1) pairs and sum the counts per key.
counts = words.map(lambda word: (word, 1)).reduceByKey(lambda a,b:a + b)
# + [markdown] application/vnd.databricks.v1+cell={"inputWidgets": {}, "nuid": "b9b5b082-4ad3-4ae1-8bd4-f44eb5d9dd04", "showTitle": false, "title": ""}
# #### Converting RDD to dictionary to print word and its frequency
# + application/vnd.databricks.v1+cell={"inputWidgets": {}, "nuid": "f2df7f0d-599a-4ecf-a048-2751f4930f26", "showTitle": false, "title": ""}
# collectAsMap pulls the whole result to the driver — fine for small inputs.
words_and_count = counts.collectAsMap()
words_and_count
# + [markdown] application/vnd.databricks.v1+cell={"inputWidgets": {}, "nuid": "58b0f011-26c6-4af7-9c78-b0c002380ac7", "showTitle": false, "title": ""}
# ## (e)Using Spark operations on this RDD (transformation, actions or persistence), select and print the top 30 most frequent words and their corresponding frequencies in the RDD with the text file.
# + [markdown] application/vnd.databricks.v1+cell={"inputWidgets": {}, "nuid": "f963a2b1-6b7c-45ce-89ce-b74e9b8d9304", "showTitle": false, "title": ""}
# #### Print 30 Most Frequent Words
# + application/vnd.databricks.v1+cell={"inputWidgets": {}, "nuid": "0ae8e785-49fa-442c-bac6-54bd1cec7fa9", "showTitle": false, "title": ""}
# Sort by descending count, then take the first 30 (word, count) pairs.
top_30_most_frequent_words = counts.sortBy(lambda keyVal: keyVal[1] , ascending = False).take(30)
top_30_most_frequent_words
| HW3Q3.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3.9.7 64-bit
# name: python3
# ---
# +
#imports
from functions import blurring, one_plot
import numpy as np
import matplotlib.pyplot as plt
from sklearn.model_selection import LeaveOneOut
# -
#data loading for experiment 57
a = blurring("data/exp57/control/")
b = blurring('data/exp57/Treated/')
# Rows: control samples first, then treated samples.
image_matrix = np.concatenate((a, b))
# +
from sklearn.linear_model import LogisticRegression
from sklearn.model_selection import cross_val_predict
from sklearn.metrics import accuracy_score
lr = LogisticRegression()
# Binary labels: False = control, True = treated.
# assumes blurring() returns exactly 36 control samples — TODO confirm
Y = np.zeros(image_matrix.shape[0], dtype=bool)
Y[36::] = 1
# Leave-one-out cross-validated predictions, then overall accuracy.
Y_pred = cross_val_predict(lr, image_matrix, Y, cv=LeaveOneOut())
print(accuracy_score(Y, Y_pred))
# Refit on all data to inspect the learned coefficients.
lr.fit(image_matrix, Y)
plt.plot(lr.coef_.T);
# -
# Same evaluation restricted to the first 20 features.
Y_pred = cross_val_predict(lr, image_matrix[:, 0:20], Y, cv=LeaveOneOut())
print(accuracy_score(Y, Y_pred))
# +
#generate plots
y_val55 = blurring('data/exp55/')
one_plot(y_val55, 'experiment 55')
y_val4 = blurring('data/exp48.49//')
one_plot(y_val4, 'experiment 48 49')
# question: what is being compared within ex 55 and 48/49? what are the different var groups
| testnotes.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# language: python
# name: python3
# ---
import numpy as np
from keras.models import Model
from keras.layers import Input
from keras.layers.pooling import GlobalMaxPooling2D
from keras import backend as K
import json
from collections import OrderedDict
def format_decimal(arr, places=6):
    """Round every value in *arr* to *places* decimal places and
    return the rounded values as a new list."""
    scale = 10 ** places
    rounded = []
    for value in arr:
        rounded.append(round(value * scale) / scale)
    return rounded
DATA = OrderedDict()
# ### GlobalMaxPooling2D

# The three test cases below follow one recipe: build a single-layer model,
# feed it seeded random data, print the formatted tensors and record them in
# DATA.  The shared logic lives in _record_case to avoid triplicating it.


def _record_case(key, data_in_shape, data_format, seed):
    """Run GlobalMaxPooling2D over seeded random input and store the
    formatted input/output pair in DATA under *key*.

    :param key: DATA dictionary key, e.g. 'pooling.GlobalMaxPooling2D.0'
    :param data_in_shape: shape of the random input tensor (no batch dim)
    :param data_format: 'channels_last' or 'channels_first'
    :param seed: numpy RNG seed for reproducibility
    """
    layer_0 = Input(shape=data_in_shape)
    layer_1 = GlobalMaxPooling2D(data_format=data_format)(layer_0)
    model = Model(inputs=layer_0, outputs=layer_1)

    # seed for reproducibility (the pooling layer itself has no weights)
    np.random.seed(seed)
    data_in = 2 * np.random.random(data_in_shape) - 1
    result = model.predict(np.array([data_in]))
    data_out_shape = result[0].shape
    data_in_formatted = format_decimal(data_in.ravel().tolist())
    data_out_formatted = format_decimal(result[0].ravel().tolist())

    print('')
    print('in shape:', data_in_shape)
    print('in:', data_in_formatted)
    print('out shape:', data_out_shape)
    print('out:', data_out_formatted)

    DATA[key] = {
        'input': {'data': data_in_formatted, 'shape': data_in_shape},
        'expected': {'data': data_out_formatted, 'shape': data_out_shape}
    }


# **[pooling.GlobalMaxPooling2D.0] input 6x6x3, data_format='channels_last'**
_record_case('pooling.GlobalMaxPooling2D.0', (6, 6, 3), 'channels_last', 270)

# **[pooling.GlobalMaxPooling2D.1] input 3x6x6, data_format='channels_first'**
_record_case('pooling.GlobalMaxPooling2D.1', (3, 6, 6), 'channels_first', 271)

# **[pooling.GlobalMaxPooling2D.2] input 5x3x2, data_format='channels_last'**
_record_case('pooling.GlobalMaxPooling2D.2', (5, 3, 2), 'channels_last', 272)
# ### export for Keras.js tests
# +
import os
filename = '../../../test/data/layers/pooling/GlobalMaxPooling2D.json'
# exist_ok avoids the check-then-create race of the original
# `if not os.path.exists(...): os.makedirs(...)` pattern.
os.makedirs(os.path.dirname(filename), exist_ok=True)
# Serialize all recorded test cases for the Keras.js test suite.
with open(filename, 'w') as f:
    json.dump(DATA, f)
# -
print(json.dumps(DATA))
| notebooks/layers/pooling/GlobalMaxPooling2D.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# language: python
# name: python3
# ---
# Set the seed value for the notebook so the results are reproducible
from numpy.random import seed
seed(1)
# %matplotlib inline
import matplotlib.pyplot as plt
import numpy as np
import sklearn
import sklearn.datasets
import pandas as pd
# +
# Load the red-wine quality dataset (one physicochemical feature vector per
# wine plus an integer quality score).
redquality_df = pd.read_csv('../winequality-red.csv')
redquality_df.head()
redquality_df.dtypes
# +
# Target is the quality score; all remaining columns are features.
y = redquality_df["quality"]
X = redquality_df.drop("quality", axis=1)
# -
print(f"Labels: {y[:10]}")
print(f"Data: {X[:10]}")
# +
y_arr = y.to_numpy()
y_arr
X_arr = X
X_arr
# +
# Use train_test_split to create training and testing data
from sklearn.model_selection import train_test_split
X_train, X_test, y_train, y_test = train_test_split(X_arr, y_arr, random_state=1)
# +
from sklearn.preprocessing import StandardScaler
# Create a StandardScaler model and fit it to the training data
X_scaler = StandardScaler().fit(X_train)
# +
# Transform the training and testing data using the X_scaler
X_train_scaled = X_scaler.transform(X_train)
X_test_scaled = X_scaler.transform(X_test)
# -
from tensorflow.keras.utils import to_categorical
# +
# One-hot encoding of the integer quality labels
y_train_categorical = to_categorical(y_train)
y_test_categorical = to_categorical(y_test)
# + nbgrader={"grade": false, "grade_id": "cell-ec399a95e133cb58", "locked": false, "schema_version": 1, "solution": true}
# Baseline network: 11 input features, one hidden layer of 6 ReLU units and a
# 9-way softmax output (the one-hot quality labels have 9 columns).
from tensorflow.keras.models import Sequential
from tensorflow.keras.layers import Dense
model = Sequential()
model.add(Dense(units=6, activation='relu', input_dim=11))
model.add(Dense(units=9, activation='softmax'))
# -
model.summary()
# Compile the model for multi-class classification
model.compile(optimizer='adam',
              loss='categorical_crossentropy',
              metrics=['accuracy'])
# + nbgrader={"grade": false, "grade_id": "cell-5cf2fbdbea0ed50b", "locked": false, "schema_version": 1, "solution": true}
# Fit the model to the training data, holding out 33% of it for validation
history = model.fit(
    X_train_scaled,
    y_train_categorical,
    validation_split = 0.33,
    epochs=20,
    shuffle=True,
    verbose=2
)
# +
# Training vs validation accuracy per epoch for the baseline model.
fig = plt.figure()
acc = history.history["accuracy"]
val_acc = history.history["val_accuracy"]
Epochs = range(1, len(acc)+1)
plt.plot(Epochs, acc, "b", label="Normal Training Accuracy")
plt.plot(Epochs, val_acc, "r", label="Normal Validation Accuracy")
plt.xlabel("Epochs")
plt.ylabel("Accuracy")
plt.title("Red Wine - Normal Training Model Accuracy Comparison")
plt.legend()
plt.show()
fig.savefig('NormalRedWineAccuracyComparison.png')
# +
# Training vs validation loss per epoch for the baseline model.
fig = plt.figure()
loss = history.history["loss"]
val_loss = history.history["val_loss"]
Epochs = range(1, len(loss)+1)
plt.plot(Epochs, loss, "b", label="Normal Training Loss")
plt.plot(Epochs, val_loss, "r", label="Normal Validation Loss")
plt.xlabel("Epochs")
plt.ylabel("Loss")
plt.title("Red Wine - Normal Training Model Loss Comparison")
plt.legend()
plt.show()
fig.savefig('NormalRedWineLossComparison.png')
# -
# # Deep Learning
# This network uses two hidden layers of 20 ReLU units each (the code below,
# not "an additional hidden layer of 6 nodes" as originally described).
deep_model = Sequential()
deep_model.add(Dense(units=20, activation='relu', input_dim=11))
deep_model.add(Dense(units=20, activation='relu'))
deep_model.add(Dense(units=9, activation='softmax'))
deep_model.summary()
# +
deep_model.compile(optimizer='adam',
                   loss='categorical_crossentropy',
                   metrics=['accuracy'])
# Same training regime as the baseline so the two runs are comparable.
deep_history = deep_model.fit(
    X_train_scaled,
    y_train_categorical,
    validation_split = 0.33,
    epochs=20,
    shuffle=True,
    verbose=2
)
# +
# Training vs validation accuracy per epoch for the deep model.
fig = plt.figure()
acc = deep_history.history["accuracy"]
val_acc = deep_history.history["val_accuracy"]
Epochs = range(1, len(acc)+1)
plt.plot(Epochs, acc, "b", label="Deep Training Accuracy")
plt.plot(Epochs, val_acc, "r", label="Deep Validation Accuracy")
plt.xlabel("Epochs")
plt.ylabel("Accuracy")
plt.title("Red Wine - Deep Training Model Accuracy Comparison")
plt.legend()
plt.show()
fig.savefig('DeepRedWineAccuracyComparison.png')
# +
# Training vs validation loss per epoch for the deep model.
fig = plt.figure()
loss = deep_history.history["loss"]
val_loss = deep_history.history["val_loss"]
Epochs = range(1, len(loss)+1)
plt.plot(Epochs, loss, "b", label="Deep Training Loss")
plt.plot(Epochs, val_loss, "r", label="Deep Validation Loss")
plt.xlabel("Epochs")
plt.ylabel("Loss")
plt.title("Red Wine - Deep Training Model Loss Comparison")
plt.legend()
plt.show()
fig.savefig('DeepRedWineLossComparison.png')
# -
deep_history.history.keys()
# # Compare the models below
# Final held-out test-set evaluation of both networks.
model_loss, model_accuracy = model.evaluate(
    X_test_scaled, y_test_categorical, verbose=2)
print(
    f"Normal Neural Network - Loss: {model_loss}, Accuracy: {model_accuracy}")
model_loss, model_accuracy = deep_model.evaluate(
    X_test_scaled, y_test_categorical, verbose=2)
print(f"Deep Neural Network - Loss: {model_loss}, Accuracy: {model_accuracy}")
| assets/Wine_Quality_Predictions_NN_Red.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# language: python
# name: python3
# ---
# # Cost evaluation
import os
import random
import numpy as np
import scipy.io as scio
import networkx as nx
import matplotlib
import matplotlib.pyplot as plt
# Constellation under evaluation; also used as the output directory name.
constellation_name = "StarLink"
# +
fac_id_list = [1584, 1585, 1586, 1587]            # ground-facility node ids
user_num_list = [10, 100, 200, 300, 400, 500, 1000]
number_of_satellites = 66 * 24
city_num = 1
bound = 2.71     # max sat-to-facility delay treated as reachable

G = nx.Graph()
edge = []
time = 1  # second — snapshot index of the precomputed delay matrix

# BUG FIX: the original passed the single string 'matlab_code\StarLink\delay'
# with unescaped backslashes, which is Windows-only and fragile; join each
# path component so os.path.join builds a portable path.
path = os.path.join('matlab_code', 'StarLink', 'delay', str(time) + '.mat')
data = scio.loadmat(path)
delay = data['delay']

G.add_nodes_from(range(number_of_satellites + city_num))
# construct constellation network, links from sat to sat
# (delay[i][j] > 0 marks an existing inter-satellite link; the delay is the
# edge weight used by Dijkstra below)
for i in range(number_of_satellites):
    for j in range(i + 1, number_of_satellites):
        if delay[i][j] > 0:
            edge.append((i, j, delay[i][j]))
G.add_weighted_edges_from(edge)

fac_id = 1584
# satellites whose delay to the chosen facility is within the bound
available_index = np.where(delay[fac_id][:1584] < bound)[0]
print(available_index)

# Precompute, for each reachable satellite, the shortest paths to every node.
shortest_paths = {}
for sat in available_index:
    G.nodes[sat]['nusers'] = 0        # user counter, filled in later
    shortest_paths[sat] = nx.single_source_dijkstra_path(G, sat)
# +
# Compare the signalling cost of Geo-SLAAC against GAMS as the number of
# users in the city/area grows.
slaac_cost_list = []
gams_cost_list = []

for user_num in user_num_list:
    slaac_total = 0
    gams_total = 0
    for _ in range(user_num):
        # each user attaches to a random satellite within the delay bound
        access_sat = random.choice(available_index)
        G.nodes[access_sat]['nusers'] += 1
        # Geo-SLAAC: hop count from the access satellite to every candidate
        slaac_total += sum(len(shortest_paths[access_sat][sat])
                           for sat in available_index)
        # GAMS: flat two-message exchange per candidate satellite
        gams_total += 2 * len(available_index)
    slaac_cost_list.append(slaac_total)
    gams_cost_list.append(gams_total)
# -
# Persist both cost series as two CSV rows (SLAAC first, then GAMS).
with open('./data/'+constellation_name+'/cost_cmp.csv', 'w') as fcost:
    slaac = [str(s) for s in slaac_cost_list]
    gams = [str(g) for g in gams_cost_list]
    fcost.write(','.join(slaac)+'\n')
    fcost.write(','.join(gams)+'\n')
print(slaac_cost_list)
print(gams_cost_list)
# +
# Plot both cost curves against the number of users.
fig, ax = plt.subplots()
ax.set(xlabel='Number of Users', ylabel='Number of Hops',
       title='Cost of Geographical SLAAC in Starlink')
line1 = ax.plot(user_num_list, slaac_cost_list, marker='+', label='Geo-SLAAC')
line2 = ax.plot(user_num_list, gams_cost_list, marker='*',label='GAMS')
ax.legend(bbox_to_anchor=(0.05, 0.95), loc='upper left', borderaxespad=0.)
plt.show()
| SLAAC_GAMS_hop_cost.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# language: python
# name: python3
# ---
# # Input Validation
# * Proper testing of any input provided by the user.
# * Ensure properly formed data is entered into the system.
# * Prevent malformed data being stored in the database.
# ## Strategies for validation
# * Syntactical
# * Enforce correct syntax of the field, e.g. a phone number (515-282-2929) or a date of birth: 02-28-2020 (valid) but 11-31-2020 (invalid — November has only 30 days)
#
# * Semantic validation
# * Enforce correctness of their values in the specific business context e.g. Minimum age of driver license is 14 years.
#
# ## Implementing the Strategies
# * Data Type validation
# * Type Conversion
# * Maximum and Minimum Range checks
#
#
#
# Teaching demo: semantic validation of user input (age must be non-negative).
# NOTE: int() itself raises ValueError on non-numeric input — handled later
# in the exception-handling section.
age = int(input("Enter an age: "))
if(age < 0 ):
    print('Please enter a valid age.')
# ---
# # Exception
# * Errors occured during execution of the program
# * Various types of Exception - Built-in
# * NameError
# * TypeError
# * ZeroDivisionError
# ---
# Each of these lines deliberately raises an exception for demonstration:
4 + newVariable * 3   # NameError: newVariable is not defined
'2' + 2               # TypeError: cannot concatenate str and int
10 + 1/0              # ZeroDivisionError: division by zero
# ---
# ## Handling Errors
# * Use try/except blocks
# * Code inside the try block is executed
# * If no error then the except block is not executed
# * If an error occurs then the code inside the except block is executed.
# * Can have multiple except block.
# * Finally block: Clean - up exception
# * finally clause will execute as the last task before the try statement completes.
#
#
#
# ---
# Demo 1: a single targeted except clause; execution continues afterwards.
try:
    x = int(input("Please enter a value:"))
    y = 100/x
    print(f'The value of y is {y}')
except ValueError:
    print("Enter an integer")
print('Code executed')
# Demo 2: multiple except clauses plus else (success path) and finally
# (always runs, exception or not).
try:
    x = int(input("Please enter a value:"))
    y = 100/x
    print(f'The value of y is {y}')
except ValueError:
    print("Enter an integer")
except ZeroDivisionError:
    print('Enter a value greater than 0')
else:
    print('Else Block Executed')
finally:
    print('Finally Code executed')
# ## Summarize
# * try always runs
# * if the try results in an exception, the except section runs. If there is no exception, this section is skipped
# * else will run if there are no exceptions from the try. If there are exceptions, else does not run
# * finally will always run regardless of exceptions
# * else and finally are optional sections.
# ---
# ## Raise exception
# * Allows the programmer to force a specified exception to occur.
#
# +
# Demo: forcing an exception with `raise` instead of printing a message.
age = int(input("Enter an age: "))
if(age < 0 ):
    raise ValueError('Age should be greater than Zero.')
    #print('Age should be greater than zero')
print('No Errors; Continue execution')
# -
# ---
# # Functions
# * Block of reusable code.
# * Built-in Functions i.e. int(), str(), float()
# * User Defined Functions
# * Custom functions created by us for a specific purpose.
# ## Defining a function
# * Define a function using the def keyword.
# * e.g. def calculate_tax():
# * The : indicates the beginning of the function
# * Function names should be lowercase, with words separated by underscores as necessary to improve readability.
# * The body of the function start at the next line, and must be indented.
# * The first statement can be a string literal, this string literal is the function’s documentation string, or docstring.
#
# ## _ _main_ _
# * No main() function in python
#
# ```python
# __name__   # built-in variable holding the name of the current module
# __main__   # the value __name__ takes when the module is run directly
#
# ```
# * A Module can define functions, classes and Variables, identified by .py file
# * A module’s __name__ variable is set to '__main__' when read from standard input, a script, or from an interactive prompt.
# e.g python helloWorld.py
# * We can check the __name__ variable's value to be __main__ and then invoke the function
#
# ```python
# def myfunction():
# pass
#
# if __name__ == '__main__' :
# myfunction()
# ```
#
# ## Invoke a function
#
# ## Parameter(s)
# * Are inputs to the Functions.
# * Variables defined in the function definition
# * The arguments that are passed to the function
# * Multiple parameters can be passed to the function
# * Can have default values(more to come later)
#
#
# ## Return Values
# * Use keyword return
# * Can return string, integer, float
#
# ## Return multiple values
# * Return multiple values from a function
# * Values are separated by comma
#
#
# ---
# +
def calculate_tax(income):
    """Calculates the federal income tax and
    returns the dollar amount to be paid.

    : param income: The yearly income before taxes
    : returns: The tax to be paid in dollars
    : raises ValueError: When negative numbers are provided
    """
    if income < 0:
        raise ValueError('Income should be greater than zero')
    if income < 50000:
        # lowest bracket: no tax owed
        return 0
    elif income < 75000:
        # BUG FIX: the original condition `50001 < income < 75000` left a
        # gap at exactly 50000 and 50001, which fell through to the 20%
        # bracket instead of the intended 10% one.
        return income * 0.1
    else:
        return income * 0.2


if __name__ == "__main__":
    income = -1000
    try:
        tax = calculate_tax(income)
    except ValueError as err:
        print(err)
    else:
        print(f'The tax to be paid is {tax}')
    # print('The tax amount is ' + str(calculate_tax(40000)))

help(calculate_tax)
# +
def calculate_tax(income):
    """Calculates the federal income tax and
    returns the dollar amount and tax rate (in percent)."""
    if income < 50000:
        # BUG FIX: the original returned a bare 0 here, which crashed the
        # callers' `tax, rate = calculate_tax(income)` unpacking with a
        # TypeError; return a (tax, rate) pair like the other branches.
        return 0, 0
    elif income < 75000:
        # BUG FIX: the original `50001 < income < 75000` condition dropped
        # incomes of exactly 50000 and 50001 into the 20% bracket.
        return income * 0.1, 10
    else:
        return income * 0.2, 20


if __name__ == "__main__":
    income = 80000
    tax, rate = calculate_tax(income)
    print(f'The tax for {income} is {tax}. Rate: {rate}')

    income = 60000
    tax, rate = calculate_tax(income)
    print(f'The tax for {income} is {tax}. Rate: {rate}')
# -
# ## Assignments:
# Due Date: 16th Feb
# ## Topic 1: Input validation with Try
# ## Topic 2: Basic Function Assignment
# ## Topic 3: Function Return Value
# ## Topic 4: Function Parameter and Return Value
#
# ---
#
# ## Classwork (Group)
#
# Create a function get_user_input().
# * Prompts user to enter the name.
# * Prompts user to enter the age.
# * Validates the age. Only integers are allowed.
# * Returns a string 'Hello {name}. Your age is {age}'
#
# In the main method:
# * Invoke the function
# * Print the results returned from the function.
#
# * Handle Exceptions
#
# ### Food for thought:
# * Can the inputs be accepted in the main method and passed to a function?
# * When should we accept the value in main?
# * Think about Single Responsibility principle while writing the functions.
#
#
#
| Module04/topic_notes.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# language: python
# name: python3
# ---
from openfermion import hermitian_conjugated, QubitOperator
# R is a fixed linear combination of Pauli strings (one real identity term
# plus four imaginary-coefficient terms).
R=QubitOperator('0.7250957503591403 [] +\n0.6850066491388216j [Y0 X2] +\n0.054241956789800254j [Z0 Y1] +\n0.035531648304549975j [X1 Y2] +\n-0.02823748464530462j [Z1 Y2]')
R
# +
# Conjugate H_test by R and count how many Pauli terms each operator has.
R_dag = hermitian_conjugated(R)
H_test = QubitOperator('Z1 Z2') + QubitOperator('Z0 Z2') + QubitOperator('X0 X2') + QubitOperator('Y0 Y1 Y2')
new = R*H_test*R_dag
print(len(list(H_test)))
print(len(list(R)))
print(len(list(new)))
# +
# Term-count bookkeeping for the product R * H * R†.
H_size = len(list(H_test))
R_size = len(list(R))
I_term = H_size
# exclude I term in R
equal_term = (R_size-1)*(H_size)
remaining = (R_size-1)*(H_size)*(R_size-1)
# equal_term + I_term
remaining - I_term - equal_term
# -
# Maximum possible term count versus the count with the identity term of H
# excluded; the difference is the number of terms attributable to it.
max_poss = len(list(R))*len(list(H_test))*len(list(R))
reduced = len(list(R))*(len(list(H_test))-1)*len(list(R))
max_poss - reduced
| Projects/Misc/RHR_counting.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# language: python
# name: python3
# ---
# genipynb
#
# script to turn py files into ipynb files.
#
# used to turn rst files i write into py files, then into ipynb.
#
# Open up all ipynbs - Filter all markdown text into one notebook.
# +
import os
from base64 import encodestring
from IPython.nbformat.v4.nbbase import (
new_code_cell, new_markdown_cell, new_notebook,
new_output, new_raw_cell
)
import IPython.nbformat as nbf
import codecs
# -
# Read the reStructuredText source that becomes the notebook's first
# markdown cell.
# NOTE(review): hard-coded absolute path — only works on the author's machine.
oprsf = open('/home/wcmckee/github/bb.com/posts/js13k.rst','r')
opr = oprsf.read()
# +
# -*- coding: utf-8 -*-

# BUG FIX: base64.encodestring was deprecated and removed in Python 3.9;
# import the modern equivalent locally so this cell works regardless of the
# stale top-of-file `from base64 import encodestring` import.
# NOTE(review): IPython.nbformat is deprecated in favour of the standalone
# `nbformat` package — worth migrating when dependencies can change.
from base64 import encodebytes

# some random base64-encoded *text*
png = encodebytes(os.urandom(5)).decode('ascii')
jpeg = encodebytes(os.urandom(6)).decode('ascii')

cells = []

# First cell: the rst file's contents rendered as markdown.
cells.append(new_markdown_cell(
    source= opr))
cells.append(new_code_cell(
    source='import numpy',
    execution_count=1,
))
cells.append(new_markdown_cell(
    source='A random array',
))
cells.append(new_markdown_cell(
    source=u'## My Heading',
))
cells.append(new_code_cell(
    source='a = numpy.random.rand(100)',
    execution_count=2,
))
cells.append(new_code_cell(
    source='a = 10\nb = 5\n',
    execution_count=3,
))
cells.append(new_code_cell(
    source='a = 10\nb = 5',
    execution_count=4,
))
# This cell exercises every output type: execute_result, display_data,
# error, and both stdout/stderr streams.
cells.append(new_code_cell(
    source=u'print "ünîcødé"',
    execution_count=3,
    outputs=[new_output(
        output_type=u'execute_result',
        data={
            'text/plain': u'<array a>',
            'text/html': u'The HTML rep',
            'text/latex': u'$a$',
            'image/png': png,
            'image/jpeg': jpeg,
            'image/svg+xml': u'<svg>',
            'application/json': {
                'key': 'value'
            },
            'application/javascript': u'var i=0;'
        },
        execution_count=3
    ),new_output(
        output_type=u'display_data',
        data={
            'text/plain': u'<array a>',
            'text/html': u'The HTML rep',
            'text/latex': u'$a$',
            'image/png': png,
            'image/jpeg': jpeg,
            'image/svg+xml': u'<svg>',
            'application/json': {
                'key': 'value'
            },
            'application/javascript': u'var i=0;'
        },
    ),new_output(
        output_type=u'error',
        ename=u'NameError',
        evalue=u'NameError was here',
        traceback=[u'frame 0', u'frame 1', u'frame 2']
    ),new_output(
        output_type=u'stream',
        text='foo\rbar\r\n'
    ),new_output(
        output_type=u'stream',
        name='stderr',
        text='\rfoo\rbar\n'
    )]
))

nb0 = new_notebook(cells=cells,
    metadata={
        'language': 'python',
    }
)

# Write the assembled notebook out in nbformat v4.
f = codecs.open('test.ipynb', encoding='utf-8', mode='w')
nbf.write(nb0, f, 4)
f.close()
# -

# cat test.ipynb
| genipynb.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# language: python
# name: python3
# ---
# # 第十二讲:图和网络
#
# ## 图和网络
# +
import networkx as nx
import matplotlib.pyplot as plt
# %matplotlib inline

# Directed graph with 4 nodes and 5 edges; edge_labels number the edges so
# the drawing matches the incidence-matrix discussion that follows.
dg = nx.DiGraph()
dg.add_edges_from([(1,2), (2,3), (1,3), (1,4), (3,4)])
edge_labels = {(1, 2): 1, (1, 3): 3, (1, 4): 4, (2, 3): 2, (3, 4): 5}
pos = nx.spring_layout(dg)
nx.draw_networkx_edge_labels(dg,pos,edge_labels=edge_labels, font_size=16)
nx.draw_networkx_labels(dg, pos, font_size=20, font_color='w')
nx.draw(dg, pos, node_size=1500, node_color="gray")
# -
# 该图由4个节点与5条边组成,
#
# $$
# \begin{array}{c | c c c c}
# & node_1 & node_2 & node_3 & node_4 \\
# \hline
# edge_1 & -1 & 1 & 0 & 0 \\
# edge_2 & 0 & -1 & 1 & 0 \\
# edge_3 & -1 & 0 & 1 & 0 \\
# edge_4 & -1 & 0 & 0 & 1 \\
# edge_5 & 0 & 0 & -1 & 1 \\
# \end{array}
# $$
#
# 我们可以建立$5 \times 4$矩阵
# $
# A=
# \begin{bmatrix}
# -1 & 1 & 0 & 0 \\
# 0 & -1 & 1 & 0 \\
# -1 & 0 & 1 & 0 \\
# -1 & 0 & 0 & 1 \\
# 0 & 0 & -1 & 1 \\
# \end{bmatrix}
# $
#
# 观察前三行,易看出这三个行向量线性相关,也就是这三个向量可以形成回路(loop)。
#
# 现在,解$Ax=0$:
# $
# Ax=
# \begin{bmatrix}
# -1 & 1 & 0 & 0 \\
# 0 & -1 & 1 & 0 \\
# -1 & 0 & 1 & 0 \\
# -1 & 0 & 0 & 1 \\
# 0 & 0 & -1 & 1 \\
# \end{bmatrix}
# \begin{bmatrix}
# x_1\\x_2\\x_3\\x_4\\
# \end{bmatrix}
# $。
#
# 展开得到:
# $\begin{bmatrix}x_2-x_1 \\x_3-x_2 \\x_3-x_1 \\x_4-x_1 \\x_4-x_3 \\ \end{bmatrix}=\begin{bmatrix}0\\0\\0\\0\\0\\ \end{bmatrix}$
#
# 引入矩阵的实际意义:将$x=\begin{bmatrix}x_1 & x_2 & x_3 & x_4\end{bmatrix}$设为各节点电势(Potential at the Nodes)。
#
# 则式子中的诸如$x_2-x_1$的元素,可以看做该边上的电势差(Potential Differences)。
#
# 容易看出其中一个解$x=\begin{bmatrix}1\\1\\1\\1\end{bmatrix}$,即等电势情况,此时电势差为$0$。
#
# 化简$A$易得$rank(A)=3$,所以其零空间维数应为$n-r=4-3=1$,即$\begin{bmatrix}1\\1\\1\\1\end{bmatrix}$就是其零空间的一组基。
#
# 其零空间的物理意义为,当电位相等时,不存在电势差,图中无电流。
#
# 当我们把图中节点$4$接地后,节点$4$上的电势为$0$,此时的
# $
# A=
# \begin{bmatrix}
# -1 & 1 & 0 \\
# 0 & -1 & 1 \\
# -1 & 0 & 1 \\
# -1 & 0 & 0 \\
# 0 & 0 & -1 \\
# \end{bmatrix}
# $,各列线性无关,$rank(A)=3$。
#
# 现在看看$A^Ty=0$(这是应用数学里最常用的式子):
#
# $A^Ty=0=\begin{bmatrix}-1 & 0 & -1 & -1 & 0 \\1 & -1 & 0 & 0 & 0 \\0 & 1 & 1 & 0 & -1 \\0 & 0 & 0 & 1 & 1 \\ \end{bmatrix}\begin{bmatrix}y_1\\y_2\\y_3\\y_4\\y_5\end{bmatrix}=\begin{bmatrix}0\\0\\0\\0\end{bmatrix}$,对于转置矩阵有$dim N(A^T)=m-r=5-3=2$。
#
# 接着说上文提到的的电势差,矩阵$C$将电势差与电流联系起来,电流与电势差的关系服从欧姆定律:边上的电流值是电势差的倍数,这个倍数就是边的电导(conductance)即电阻(resistance)的倒数。
#
# $
# 电势差
# \xrightarrow[欧姆定律]{矩阵C}
# 各边上的电流y_1, y_2, y_3, y_4, y_5
# $,而$A^Ty=0$的另一个名字叫做“基尔霍夫电流定律”(Kirchoff's Law, 简称KCL)。
#
# 再把图拿下来观察:
# +
import networkx as nx
import matplotlib.pyplot as plt
# %matplotlib inline

# Same 4-node / 5-edge graph as above, redrawn here so the reader has it in
# view for the electrical-network (Kirchhoff's law) discussion.
dg = nx.DiGraph()
dg.add_edges_from([(1,2), (2,3), (1,3), (1,4), (3,4)])
edge_labels = {(1, 2): 1, (1, 3): 3, (1, 4): 4, (2, 3): 2, (3, 4): 5}
pos = nx.spring_layout(dg)
nx.draw_networkx_edge_labels(dg,pos,edge_labels=edge_labels, font_size=16)
nx.draw_networkx_labels(dg, pos, font_size=20, font_color='w')
nx.draw(dg, pos, node_size=1500, node_color="gray")
# -
# 将$A^Ty=0$中的方程列出来:
# $
# \left\{
# \begin{aligned}
# y_1 + y_3 + y_4 &= 0 \\
# y_1 - y_2 &= 0 \\
# y_2 + y_3 - y_5 &= 0 \\
# y_4 - y_5 &= 0 \\
# \end{aligned}
# \right.
# $
#
# 对比看$A^Ty=0$的第一个方程,$-y_1-y_3-y_4=0$,可以看出这个方程是关于节点$1$上的电流的,方程指出节点$1$上的电流和为零,基尔霍夫定律是一个平衡方程、守恒定律,它说明了流入等于流出,电荷不会在节点上累积。
#
# 对于$A^T$,有上文得出其零空间的维数是$2$,则零空间的基应该有两个向量。
#
# * 现在假设$y_1=1$,也就是令$1$安培的电流在边$1$上流动;
# * 由图看出$y_2$也应该为$1$;
# * 再令$y_3=-1$,也就是让$1$安培的电流流回节点$1$;
# * 令$y_4=y_5=0$;
#
# 得到一个符合KCL的向量$\begin{bmatrix}1\\1\\-1\\0\\0\end{bmatrix}$,代回方程组发现此向量即为一个解,这个解发生在节点$1,2,3$组成的回路中,该解即为零空间的一个基。
#
# 根据上一个基的经验,可以利用$1,3,4$组成的节点求另一个基:
#
# * 令$y_1=y_2=0$;
# * 令$y_3=1$;
# * 由图得$y_5=1$;
# * 令$y_4=-1$;
#
# 得到另一个符合KCL的向量$\begin{bmatrix}0\\0\\1\\-1\\1\end{bmatrix}$,代回方程可知此为另一个解。
#
# 则$N(A^T)$的一组基为$\begin{bmatrix}1\\1\\-1\\0\\0\end{bmatrix}\quad\begin{bmatrix}0\\0\\1\\-1\\1\end{bmatrix}$。
#
# 看图,利用节点$1,2,3,4$组成的大回路(即边$1,2,5,4$):
#
# * 令$y_3=0$;
# * 令$y_1=1$;
# * 则由图得$y_2=1, y_5=1, y_4=-1$;
#
# 得到符合KCL的向量$\begin{bmatrix}1\\1\\0\\-1\\1\end{bmatrix}$,易看出此向量为求得的两个基之和。
#
# 接下来观察$A$的行空间,即$A^T$的列空间,方便起见我们直接计算
# $
# A^T=
# \begin{bmatrix}
# -1 & 0 & -1 & -1 & 0 \\
# 1 & -1 & 0 & 0 & 0 \\
# 0 & 1 & 1 & 0 & -1 \\
# 0 & 0 & 0 & 1 & 1 \\
# \end{bmatrix}
# $
# 的列空间。
#
# 易从基的第一个向量看出前三列$A^T$的线性相关,则$A^T$的主列为第$1,2,4$列,对应在图中就是边$1,2,4$,可以发现这三条边没有组成回路,则在这里可以说**线性无关等价于没有回路**。由$4$个节点与$3$条边组成的图没有回路,就表明$A^T$的对应列向量线性无关,也就是节点数减一($rank=nodes-1$)条边线性无关。另外,没有回路的图也叫作树(Tree)。
#
# 再看左零空间的维数公式:$dim N(A^T)=m-r$,左零空间的维数就是相互无关的回路的数量,于是得到$loops=edges-(nodes-1)$,整理得:
#
# $$
# nodes-edges+loops=1
# $$
#
# 此等式对任何图均有效,任何图都有此拓扑性质,这就是著名的欧拉公式(Euler's Formula)。$零维(节点)-一维(边)+二维(回路)=1$便于记忆。
#
# 总结:
#
# * 将电势记为$e$,则在引入电势的第一步中,有$e=Ax$;
# * 电势差导致电流产生,$y=Ce$;
# * 电流满足基尔霍夫定律方程,$A^Ty=0$;
#
# 这些是在无电源情况下的方程。
#
# 电源可以通过:在边上加电池(电压源),或在节点上加外部电流 两种方式接入。
#
# 如果在边上加电池,会体现在$e=Ax$中;如果在节点上加电流,会体现在$A^Ty=f$中,$f$向量就是外部电流。
#
# 将以上三个等式连起来得到$A^TCAx=f$。另外,最后一个方程是一个平衡方程,还需要注意的是,方程仅描述平衡状态,方程并不考虑时间。最后,$A^TA$是一个对称矩阵。
| notes-linear-algebra/chapter12.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# name: python3
# ---
# + [markdown] id="_wutUJGEIBJW" colab_type="text"
# # ImageMagick on Colab
#
# The immortal cockroach known as ImageMagick.
#
# This started as an experiment to see if [ImageMagick](https://imagemagick.org/index.php) can be used as part of the dev environment on Colab.
#
# ## History
# - 2020-05-11: Experiments with Fred's whiteboard scripts
# - 2020-04-18: base at least installs
# + [markdown] id="GslFDNt92Zjj" colab_type="text"
# ## Install
#
# ImageMagick does not come pre-installed on Colab, but it will install:
# + id="TDc2V3WKiEAm" colab_type="code" outputId="bbe9386d-d441-44c9-b354-fea2ed79a576" colab={"base_uri": "https://localhost:8080/", "height": 34}
# !convert -version
# + [markdown] id="4nZPZGYu2hTa" colab_type="text"
# So, the above test proves not pre-installed.
# + id="Ge7Z7W09hzjT" colab_type="code" colab={}
# !apt install -qq -y imagemagick
# + id="QEydWho-icT2" colab_type="code" outputId="fe7f3cf7-93af-47f3-c225-c0355f9400ff" colab={"base_uri": "https://localhost:8080/", "height": 102}
# !convert -version
# + [markdown] id="heG0Wqix24AA" colab_type="text"
# So, indeed it can be installed and interacted with on the CLI.
# + [markdown] id="d1HrPl3utCpE" colab_type="text"
# ## Fred's whiteboard
#
# Note the file `whiteboard`, which is a bash shell script downloaded from [Fred](http://www.fmwconcepts.com/imagemagick/downloadcounter.php?scriptname=whiteboard&dirname=whiteboard). It is his unmodified code, used here in this non-commercial notebook. It's cached to help this page init smoothly.
#
# + [markdown] id="h0Ig_AgT3FKK" colab_type="text"
# Years ago, <NAME> cranked out the tool which originally inspired this notebook to happen. His ImageMagick script, [Whiteboard](http://www.fmwconcepts.com/imagemagick/whiteboard/index.php), is described as "Processes a picture of a whiteboard to clean up the background and correct the perspective." Old but nonetheless it was "last modified: December 16, 2018" so it is also long maintained.
#
# [2020-04-23: kicked Fred's whiteboard around a bit but results were unimpressive, although didn't get to quality of results shown in fmwconcepts.com. So, further exercising might get somewhere but this turned into an exercise in modern ML techniques before thoroughly exploring ImageMagick.]
# + id="LBWBzOsd3BK0" colab_type="code" colab={}
# Peek inside that shell script file downloaded from Fred:
#freds_whiteboard_file_name = gdrive_data_root_path + 'whiteboard'
# #!head -n 4 "{freds_whiteboard_file_name}"
# + [markdown] id="bSbens2obUhI" colab_type="text"
# #### Install Fred's
# + id="9YlNDO884GHp" colab_type="code" colab={}
# !apt -q install bc # Fred's whiteboard wants this
# + id="Wj2_yC7qbOdz" colab_type="code" colab={}
# TODO: this commented out next line fails. A file is created but not usable.
# # !wget -O freds_whiteboard http://www.fmwconcepts.com/imagemagick/downloadcounter.php?scriptname=whiteboard&dirname=whiteboard
#
# Take #2 was to manual test: download from fmwconcepts.com, cache in private gDrive, download same to Colab FS works.
# That worked so that's why it's cached in gDrive as freds_whiteboard_file_name.
# So install is just a cp
# Destination path for Fred's script on the Colab filesystem.
freds_dest = '/content/whiteboard' # whiteboard is file's orig name
# NOTE(review): the shell line below uses `freds_whiteboard_file_name`, which is
# only defined in a commented-out cell above -- define it before running this cell.
# !cp "{freds_whiteboard_file_name}" {freds_dest}
# !chmod u+x {freds_dest} # mark it as executable
# !ls -lh /content
# + [markdown] id="dpt3i7FXa9VR" colab_type="text"
# #### Run Fred's
# + id="xWZoi-X7a0R7" colab_type="code" colab={}
# # !bash {freds_dest} -o 33 -e none -f 10 -S 150 -p 0.1 "{in_file_name}" out.jpg
# #!bash {freds_dest} -S 300 "{in_file_name}" out.jpg
# + id="OXSbWlKbFwZR" colab_type="code" colab={}
#out_img = PIL.Image.open("out.jpg")
#_ = plt.figure(figsize=(5,5))
#_ = plt.imshow(out_img)
#display(out_img)
# + [markdown] id="xDwv97Y52NPM" colab_type="text"
# ## Python to ImageMagick
#
# [TBD]
#
# - [wand](http://docs.wand-py.org/en/0.5.9/)
# - PythonMagick
#
| probings/imagemagick_on_colab.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# name: python3
# ---
# + [markdown] id="view-in-github" colab_type="text"
# <a href="https://colab.research.google.com/github/nikito060195/Chaos-and-Attractors/blob/main/elastomers_attractors.ipynb" target="_parent"><img src="https://colab.research.google.com/assets/colab-badge.svg" alt="Open In Colab"/></a>
# + id="ofDEyC-k2Dmn"
# %matplotlib inline
import numpy as np
import matplotlib.pyplot as plt
import matplotlib.pylab
from mpl_toolkits.mplot3d import Axes3D
from scipy.interpolate import make_interp_spline
from scipy.stats import gaussian_kde
from matplotlib.collections import LineCollection
from scipy import stats
from scipy import ndimage
import pandas as pd
# + [markdown] id="5qz76Yk12cXt"
# # **Read Data**
# + colab={"base_uri": "https://localhost:8080/", "height": 451} id="PgXKSBdo2fuj" outputId="fc19fb53-c39c-4b43-a1db-f9597ce36c6c"
# Load the first column of the LabVIEW measurement file into a one-column frame.
# (Presumably voltage samples from the 1 Hz elastomer experiment -- TODO confirm.)
file_path = 't6_1hz.lvm'
df = pd.read_csv(file_path, usecols = [0], names = ['t6_1hz'], header = None)
# x axis is simply the sample index; y axis the measured signal.
xaxis = range(0, len(df['t6_1hz']), 1)
yaxis = df['t6_1hz']
# + [markdown] id="e06VbHjD3GNw"
# # **Plot routine**
# + id="FLHq-Aee3Nsw"
def plot(xaxis, yaxis, limit=None, name='', atr=None):
    """Plot a time series or an attractor and save the figure to disk.

    Parameters
    ----------
    xaxis, yaxis : array-like
        Data for the horizontal / vertical axis.
    limit : sequence, optional
        (xmin, xmax) limits for the x axis; full range when None.
    name : str, optional
        Output file name; defaults to 'semnome.png' when empty.
    atr : optional
        Any non-None value switches to attractor labelling and colour
        (V(t) vs V(t + dt)); otherwise a plain time-series view is drawn.
    """
    fig = plt.figure(figsize=(8, 6))
    ax = fig.add_subplot(111)
    # Identity comparison with None (PEP 8) instead of the original `!= None`.
    if limit is not None:
        ax.set_xlim(limit)
    if atr is not None:
        # Attractor view: axes are the delayed-coordinate embedding.
        ax.set_ylabel(r'$V(t+\Delta t)$', fontsize=14)
        ax.set_xlabel('V(t)', fontsize=14)
        color = 'brown'
    else:
        # Plain signal-versus-time view.
        ax.set_ylabel('V(t)', fontsize=14)
        ax.set_xlabel('t', fontsize=14)
        color = 'navy'
    ax.plot(xaxis, yaxis, c=color, linewidth=2)
    ax.tick_params(axis='both', which='major', labelsize=13)
    if name == '':
        plt.savefig('semnome.png', dpi=700)
    else:
        plt.savefig(name, dpi=700)
    plt.show()
# + [markdown] id="f5FvGoVU3_9i"
# # **Attractor code**
# + [markdown] id="ZsTE5ba9fEvY"
# ## Attractor subroutine
# + id="ewrVN__tfNxd"
def attr_routine(yaxis, min, max):
    """Build a smoothed delay embedding of the signal.

    For every index in [min, max) past the first `dt` samples, pairs the
    sample V(t) with the delayed sample V(t - dt), then Gaussian-smooths
    both series.

    NOTE: the parameters shadow the built-ins ``min``/``max``; the names
    are kept for call compatibility.

    Returns a tuple (v_delayed, v_current) of numpy arrays.
    """
    delay = 10    # lag dt used for V(t + dt)
    smoothing = 6 # sigma of the 1-D Gaussian smoother
    current = [yaxis[step] for step in np.arange(min, max, 1) if step > min + delay]
    delayed = [yaxis[step - delay] for step in np.arange(min, max, 1) if step > min + delay]
    delayed = ndimage.gaussian_filter1d(np.array(delayed), smoothing)
    current = ndimage.gaussian_filter1d(np.array(current), smoothing)
    return delayed, current
# + [markdown] id="3aVGq4pCb1OT"
# ## Subroutine to periodic data create
# + id="5au1zBf1bz6o"
def signal(y0, A, omega, t):
    """Sample the sinusoid y0 + A*sin(omega*t) at each instant in t.

    Parameters
    ----------
    y0 : float
        Vertical offset (mean level) of the wave.
    A : float
        Amplitude.
    omega : float
        Angular frequency in rad/s.
    t : iterable of float
        Time instants at which to sample.

    Returns
    -------
    list of float
        One sample per entry of t, in order.
    """
    # List comprehension replaces the manual append loop (same values, same order).
    return [y0 + A * np.sin(omega * time) for time in t]
# + [markdown] id="9BBZskq2c4d_"
# ## Create a virtual system
# + colab={"base_uri": "https://localhost:8080/", "height": 395} id="mr9NICb3c9Oq" outputId="793573cf-c389-4693-9a78-5d96a84b4cda"
# Synthesize a reference signal: ~8.17 s sampled at 120 Hz.
time = np.arange(0,8.17,1./120.)
f = 1.          # frequency in Hz
y0 = -12.       # vertical offset
amp = -12.      # amplitude
omega = 2. * np.pi * f
y = signal(y0, amp, omega, time)
plot(time, y,name='wave')
# + [markdown] id="sY_YCAjle3-L"
# ## Attractor to virtual data
# + id="q8D6H3M9e8AP"
# Delay-embed the synthetic wave over its full length.
min_virt = 0
max_virt = len(time)
x_virt, y_virt = attr_routine(y, min_virt, max_virt)
# + colab={"base_uri": "https://localhost:8080/", "height": 395} id="oQZn6tFPhRRW" outputId="95a43c6a-ad23-4602-e7ec-366fb0198567"
plot(x_virt, y_virt,name='atr_simul', atr=True)
# + [markdown] id="F6fsXNPj7F0k"
# ## Plot data
# + colab={"base_uri": "https://localhost:8080/", "height": 388} id="MpwJ330X4EMh" outputId="8834aa07-7da3-425c-cae3-37a435258a9c"
# Zoom into a window of the measured signal.
plot(xaxis, yaxis, [0.99e6,1.004e6])
# + [markdown] id="aBhcw3yj7JAi"
# ## Create Attractor
#
# + id="6f9YW0Yo7N8y"
# NOTE(review): these names shadow the built-ins min/max at module level, and
# the float bounds mean attr_routine indexes the Series with float positions --
# verify this works with the installed pandas version.
min = 0.980e6 #initial time to attractor
max = 0.988e6 #final time to attractor
x_att, y_att = attr_routine(yaxis, min, max)
#print(att)
# + colab={"base_uri": "https://localhost:8080/", "height": 388} id="OQnIOt0v7wbG" outputId="dbad8760-7581-45c0-a32d-6ba0a0be9a23"
plot(x_att, y_att)
| elastomers_attractors.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: conda_learnable_ai
# language: python
# name: conda_learnable_ai
# ---
# ## Terminology
# - encoding channels / encodings
# - a way of communicating with people or getting something done
# - (交流)途徑,管道
#
# - markings
# - a mark that makes it possible to recognize something
# - 標記;標識
#
# > Altair’s main purpose is to convert plot specifications to a JSON string that conforms to the Vega-Lite schema.
#
# > The key to creating meaningful visualizations is to map properties of the data to visual properties in order to effectively communicate information. In Altair, this mapping of visual properties to data columns is referred to as an encoding, and is most often expressed through the Chart.encode() method.
#
# > To visually separate the points, we can map various encoding channels, or channels for short, to columns in the dataset.
#
# > encode the variable a of the data with the x channel
#
# > Specifying the correct type for your data is important, as it affects the way Altair represents your encoding in the resulting plot. the importance of thinking carefully about your data types when visualizing data
#
# > Altair’s visualizations are built on the concept of the database-style grouping and aggregation; that is, the split-apply-combine abstraction that underpins many data analysis approaches.
import pandas as pd
# Toy dataset: three categories, three values each.
data = pd.DataFrame({'a': list('CCCDDDEEE'),
                     'b': [2, 7, 4, 1, 2, 6, 8, 4, 7]})
data
import altair as alt
chart = alt.Chart(data)
# A Chart with a mark but no encodings draws every row at the same spot.
alt.Chart(data).mark_point()
# Encode column 'a' on the x channel only.
alt.Chart(data).mark_point().encode(
    x='a',
)
# Encode both columns: one point per row.
alt.Chart(data).mark_point().encode(
    x='a',
    y='b'
)
# Shorthand aggregate: mean of 'b' per category.
alt.Chart(data).mark_point().encode(
    x='a',
    y='average(b)'
)
# Same aggregation, drawn as a bar chart.
alt.Chart(data).mark_bar().encode(
    x='a',
    y='average(b)'
)
# Swap channels for horizontal bars.
alt.Chart(data).mark_bar().encode(
    y='a',
    x='average(b)'
)
# Shorthand and long-form channel definitions emit identical Vega-Lite JSON.
y = alt.Y('average(b):Q')
print(y.to_json())
y = alt.Y(field='b', type='quantitative', aggregate='average')
print(y.to_json())
# Long-form encodings with explicit types.
alt.Chart(data).mark_bar().encode(
    alt.Y('a', type='nominal'),
    alt.X('b', type='quantitative', aggregate='average')
)
# Mark-level styling plus axis titles.
alt.Chart(data).mark_bar(color='firebrick').encode(
    alt.Y('a', title='category'),
    alt.X('average(b)', title='avg(b) by category')
)
| notebooks/garage/altair_notes.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# language: python
# name: python3
# ---
# # Restaurants and cafes in Stuttgart
# ## Influence of location and type of cuisine on future ratings
# # Report
# ## 1. Introduction of the problem
# For sure, the ratings of a restaurant are highly dependent on the quality of the food, the service and the whole atmosphere.<br>
# **BUT** beside these criteria of a running business, are there fundamental conditions I can set before opening a business which are influencing the future rating?<br>
# E.g. before I'm opening a restaurant business I have to decide<br>
# - where to locate it and <br>
# - which kind of cusine I want to offer.<br>
# <br>
# When taking the decision I should consider that this cuisine is demanded by customers in this area of the city.<br>
# <br>
# Of course, I can't look in the future or asking all the residents, but I can check how the existing businesses perform.
# **Target audience:**<br>
# Future business owner
# ## 2. Data
# I want to investigate this problem for the city of Stuttgart in Germany.<br>
# - As the level of detail for the locations I'm choosing the ZIP-codes in the city area. These ZIP-code areas should be sufficient to represent the different neighborhoods --> Web-source
# NOTE(review): these report cells display dataframes that are only built in the
# NOTEBOOK section further down -- run that section first, then this summary.
df_zip_merged.head()
# - For each ZIP-code area I can pull the geo coordinates to get reference points per neighborhood. --> GEOPY
df_coords.head()
# - With the geo coordinates I can get a list of food-related businesses from Foursquare which are close to each neighborhood
df_venues.info()
df_venues.head(5)
# - For exotic cuisine I won't have enough data that's why I'm focusing on the top10-cuisines only.
df_top10cat
# - For each food-related business I'm pulling the rating from Foursquare.
df_venues_con.head(5)
# +
fig, ax = plt.subplots(5, 2, figsize=(15,20))
fig.suptitle("Number of restaurant for each category per ZIP-code", fontsize=14)

# One (x-labels, counts, category name) triple per subplot, in the same order
# as the original copy-pasted calls; ax.flat walks the 5x2 grid row by row,
# matching ax[0][0], ax[0][1], ax[1][0], ...
panels = [
    (x_bins_cat0, series_cat0, cat0),
    (x_bins_cat1, series_cat1, cat1),
    (x_bins_cat2, series_cat2, cat2),
    (x_bins_cat3, series_cat3, cat3),
    (x_bins_cat4, series_cat4, cat4),
    (x_bins_cat5, series_cat5, cat5),
    (x_bins_cat6, series_cat6, cat6),
    (x_bins_cat7, series_cat7, cat7),
    (x_bins_cat8, series_cat8, cat8),
    (x_bins_cat9, series_cat9, cat9),
]
for panel_ax, (bins, counts, category) in zip(ax.flat, panels):
    panel_ax.bar(bins, counts, color='b', width=0.5)
    panel_ax.set_title(str(category))
plt.show()
# -
# - My dataset consists of "rating" (as target) and "close neighborhoods" and "cuisine" (as features)
df_str_group.head()
# ## 3. Methodology
# **Methodology:**<br>
# I'm choosing the Linear Regression-method to predict the ratings, because the coefficients for each feature will show me if this feature plays a crucial role and it's worth considering.
# ## 4. Results
# --> but the MSE is quite small with 0.66
# Let's have a look on the coefficients
# Show the sorted regression coefficients (built in the NOTEBOOK section below).
df_coeff_lr.head(30)
# NOTE(review): x_values and y_lin are defined further down -- this report cell
# assumes the notebook section already ran.
plt.plot(x_values, y_lin, '-r', label='Linear regression line')
plt.title('Linear regression line for ZIP-area ' + str(df_coeff_lr.index[0]) + " dependent of type of cuisine")
plt.xlabel('Type of cusines', color='#1C2833')
plt.ylabel('Rating', color='#1C2833')
plt.legend(loc='upper left')
plt.grid()
plt.show()
# Linear regression line shows the likely range of a restaurant rating in ZIP-area 70184.<br>
# ZIP-area is the area, where the restaurants have the best rating
# > min. rating is 6.75<br>
# > max. rating is 7.3<br>
# > type of cuisine has only limited impact as the MSE is 0.66
# ## 5. Discussion
# Unfortunately the results are not really significant due to limited data.<br>
# It would be very helpful to have the number of ratings since existance of the restaurant.<br>
# But at least the results show in which areas are the best restaurants.
# ## 6. Conclusion
# **Conclusion:<br>
# The impact of location and cuisine on the rating is not significant enough to be considered as crucial criteria**
# <br>
# <br>
# <br>
# <br>
# <br>
# <br>
# <br>
# <br>
# <br>
# <br>
# <br>
# <br>
# # NOTEBOOK
# +
import pandas as pd
import numpy as np
from geopy.geocoders import Nominatim # convert an address into latitude and longitude values
from geopy.distance import geodesic
import requests # library to handle requests
from pandas.io.json import json_normalize # tranform JSON file into a pandas dataframe
# Matplotlib and associated plotting modules
import matplotlib.cm as cm
import matplotlib.colors as colors
# -
# ## 1. Hoods in Stuttgart, GER
# ### 1.1 Load ZIP-codes in Stuttgart city
# Load the ZIP-code / district ("Stadtteil") mapping for Stuttgart.
df_zip = pd.read_excel("701_Stuttgart_ZIP.xlsx", header = 0)
df_zip.info()
df_zip.head()
# No distinct ZIP-codes -> One Hot Encoding of hoods
df_onehot_hoods = pd.get_dummies(df_zip["Stadtteil"])
df_onehot_hoods.head()
df_onehot_hoods.shape
# Attach the one-hot district columns alongside the ZIP codes.
df_zip_merged = df_zip.join(df_onehot_hoods, how="left")
df_zip_merged.head()
df_zip_merged.shape
#remove col "Stadtteil" (now redundant -- it is fully encoded)
df_zip_merged.drop("Stadtteil", inplace=True, axis=1)
df_zip_merged.info()
# Distinct ZIPs: summing the one-hot flags marks every district in each ZIP.
df_zip_grouped = df_zip_merged.groupby(["PLZ"]).sum().reset_index()
df_zip_grouped.head()
df_zip_grouped.shape
# ### 1.2 Get geo coords for ZIP-codes
# Function to pull geo coordinates based on ZIP codes
def getCoordsSTR(zip_codes):
    """Geocode Stuttgart ZIP codes via Nominatim.

    Parameters
    ----------
    zip_codes : iterable
        ZIP codes (ints or strings) within Stuttgart, Germany.

    Returns
    -------
    pandas.DataFrame
        Columns PLZ, Latitude, Longitude -- one row per input code.
    """
    address_suffix = " Stuttgart, Germany"
    list_coords = []
    # Create the geocoder once; instantiating it inside the loop (as before)
    # redid the same setup work for every single ZIP code.
    geolocator = Nominatim(user_agent="foursquare_agent")
    for code in zip_codes:
        address = str(code) + address_suffix
        # NOTE(review): geocode() returns None for unresolvable addresses,
        # which would raise AttributeError below -- assumed not to happen
        # for valid Stuttgart ZIP codes.
        location = geolocator.geocode(address)
        list_coords.append([code, location.latitude, location.longitude])
    return pd.DataFrame(list_coords, columns=["PLZ", "Latitude", "Longitude"])
# Geocode every distinct ZIP and merge the coordinates onto the grouped table.
df_coords = getCoordsSTR(df_zip_grouped["PLZ"])
df_coords.head()
df_str_geo = pd.merge(df_zip_grouped, df_coords, how="left", on="PLZ")
df_str_geo.info()
# ### 1.3 Get distances between ZIPs
df_source = df_str_geo.loc[:, ["PLZ"]]
# +
# Create distance matrix with all source-sink-relations
# (full cross product of ZIP codes, including self-pairs, dropped later)
list_relations = []
for index_source, row_source in df_source.iterrows():
    for index_sink, row_sink in df_source.iterrows():
        list_relations.append([row_source[0], row_sink[0]])
df_dist = pd.DataFrame(list_relations, columns=["Source", "Sink"])
df_dist.info()
# -
df_dist.head()
df_provide_geo = df_str_geo.loc[:, ["PLZ", "Latitude", "Longitude"]]
df_provide_geo.info()
# Map coordinates for source
df_dist1 = pd.merge(df_dist, df_provide_geo, how="left", left_on="Source", right_on="PLZ", copy=False)
df_dist1.info()
df_dist1.rename(columns={"Latitude": "Source_lat", "Longitude":"Source_lngt"}, inplace=True)
df_dist1.drop("PLZ", axis=1, inplace=True)
df_dist1.info()
# Map coordinates for sink
df_dist2 = pd.merge(df_dist1, df_provide_geo, how="left", left_on="Sink", right_on="PLZ", copy=False)
df_dist2.info()
df_dist2.rename(columns={"Latitude": "Sink_lat", "Longitude":"Sink_lngt"}, inplace=True)
df_dist2.drop("PLZ", axis=1, inplace=True)
df_dist2.head()
# +
# Now we have a distance matrix we can use to pull the distances
# (geodesic distance in metres, rounded; positional row indices 2..5 are
# Source_lat, Source_lngt, Sink_lat, Sink_lngt)
list_distances = []
for index, row in df_dist2.iterrows():
    source_lat = row[2]
    source_lngt = row[3]
    sink_lat = row[4]
    sink_lngt = row[5]
    source = (source_lat, source_lngt)
    sink = (sink_lat, sink_lngt)
    list_distances.append(np.round(geodesic(source, sink).meters, 0))
len(list_distances)
# -
df_result_dist = pd.DataFrame(list_distances, columns=["Distance_m"])
df_result_dist.head()
# Join on the shared positional index -- row order was preserved above.
df_dist3 = df_dist2.join(df_result_dist, how="left")
df_dist3.info()
# Drop zero distances (the self-pairs from the cross product)
index_zero = df_dist3[df_dist3["Distance_m"] == 0].index
df_dist3.drop(index_zero, axis=0, inplace=True)
df_dist3.info()
# Group by Source to get distance to closest adjacent ZIP-area (min distance)
df_dist_grouped = df_dist3.groupby(["Source"])["Distance_m"].min().reset_index()
# Attach the minimum distance to the closest adjacent ZIP area to the geo table.
df_str_input = pd.merge(df_str_geo, df_dist_grouped, how="left", left_on="PLZ", right_on="Source")
df_str_input.info()
# Bug fix: DataFrame.drop is not in-place by default, so the original bare
# `df_str_input.drop("Source", axis=1)` discarded its result and the redundant
# merge key was never removed -- assign the result back.
df_str_input = df_str_input.drop("Source", axis=1)
# ## 2. Foursquare information on restaurants in ZIP-areas
# Foursquare credentials
# Secrets are kept outside the repository in a private Excel file.
credentials = pd.read_excel("../Credentials.xlsx", header=0)
credentials.columns
# +
# Pull the Foursquare rows; .values[0][2] takes the value column of the first match.
provider = "Foursquare"
cred_fsquare = credentials[credentials["Provider"] == provider]
CLIENT_ID = cred_fsquare[cred_fsquare["Key"] == "CLIENT_ID"].values[0][2] # your Foursquare ID
CLIENT_SECRET = cred_fsquare[cred_fsquare["Key"] == "CLIENT_SECRET"].values[0][2] # your Foursquare Secret
ACCESS_TOKEN = cred_fsquare[cred_fsquare["Key"] == "ACCESS_TOKEN"].values[0][2] # your FourSquare Access Token
VERSION = '20210101' # Foursquare API version
# -
# ### 2.1 Get list of restaurants from foursquare
# +
#Pull food-venues for each ZIP from Foursquare; using max. distance as radius
def exploreFoodVenues(zip_code, zip_lat, zip_lng, radius):
    """Query the Foursquare 'explore' endpoint for food venues per ZIP centroid.

    Parameters
    ----------
    zip_code, zip_lat, zip_lng, radius : iterables of equal length
        ZIP code, centroid latitude/longitude, and search radius in metres
        for each neighbourhood.

    Returns
    -------
    pandas.DataFrame
        One row per returned venue: anchor ZIP/coords plus venue id, name,
        location, ZIP and category.
    """
    # Explore Top100
    LIMIT = 100
    # Category ID for category "FOOD"
    CAT_ID = "4d4b7105d754a06374d81259"
    venues_list = []
    for code, lat, lng, r_m in zip(zip_code, zip_lat, zip_lng, radius):
        # create the API request URL
        ven_expl_url = 'https://api.foursquare.com/v2/venues/explore?&client_id={}&client_secret={}&v={}&ll={},{}&radius={}&limit={}&categoryId={}'.format(
            CLIENT_ID, CLIENT_SECRET, VERSION, lat, lng, r_m, LIMIT, CAT_ID)
        # make the GET request
        ven_results = requests.get(ven_expl_url).json()["response"]['groups'][0]['items']
        try:
            # try to get ZIP-code from the venue's formatted address
            venues_list.append([(code, lat, lng, v['venue']['id'], v['venue']['name'], v['venue']['location']['lat'],
                             v['venue']['location']['lng'], v['venue']['location']['formattedAddress'][1],
                             v['venue']['categories'][0]['name']) for v in ven_results])
        except (IndexError, KeyError):
            # A venue's address/category data is incomplete -- fall back to a
            # dummy ZIP. (Was a bare `except:`, which also hid unrelated bugs.)
            venues_list.append([(code, lat, lng, v['venue']['id'], v['venue']['name'], v['venue']['location']['lat'],
                             v['venue']['location']['lng'], "n/a",
                             v['venue']['categories'][0]['name']) for v in ven_results])
    # Flatten the per-ZIP lists into one frame.
    nearby_venues = pd.DataFrame([item for venue_list in venues_list for item in venue_list])
    nearby_venues.columns = ['PLZ', 'Latitude', 'Longitude', 'Venue_id',
                  'Venue_name', 'Venue_latitude', 'Venue_longitude', 'Venue_PLZ', 'Venue_category']
    return nearby_venues
# -
# Pull venues around every ZIP centroid, using the nearest-neighbour distance
# as search radius.
df_venues = exploreFoodVenues(df_str_input["PLZ"], df_str_input["Latitude"],
                              df_str_input["Longitude"], df_str_input["Distance_m"])
df_venues.info()
# List of unique Venue_IDs with no. of appearances in Explore-search
df_venues_unique = df_venues.groupby(df_venues.columns.to_list()[3:9])["PLZ"].count().to_frame().reset_index()
df_venues_unique.rename(columns={"PLZ":"Appearances"}, inplace=True)
df_venues_unique.info()
# ### 2.2 Identify restaurant belonging to Top10-categories
# +
# Count distinct venues per category, most common first.
df_all_cat = df_venues_unique.groupby(["Venue_category"])["Venue_id"].count().to_frame().reset_index().sort_values(
    by="Venue_id", ascending=False)
df_all_cat.rename(columns={"Venue_id":"Appearances"}, inplace=True)
# Remove category "Restaurant" because it is too generic to be meaningful
index_restaurant = df_all_cat[df_all_cat["Venue_category"] == "Restaurant"].index
df_all_cat.drop(index_restaurant, axis=0, inplace=True)
df_top10cat = df_all_cat.iloc[0:10, :]
df_top10cat
# -
list_top10 = df_top10cat["Venue_category"].to_list()
list_top10
# Venues belonging to top10 categories
bool_series= df_venues_unique["Venue_category"].isin(list_top10)
df_venues_top = df_venues_unique[bool_series]
df_venues_top.info()
# ### 2.3 Get ratings for selected venues from Foursquare
#Pull venue-ratings
# The API function below is deliberately disabled (kept as a string literal):
# ratings were fetched once and cached, so re-running must not burn API quota.
"""
def pullVenueInfos(list_venues):
    info_list = []
    for venue_id in list_venues:
        # create the API request URL
        ven_info_url = 'https://api.foursquare.com/v2/venues/{}?&client_id={}&client_secret={}&v={}'.format(
            venue_id, CLIENT_ID, CLIENT_SECRET, VERSION)
        # make the GET request
        ven_info = requests.get(ven_info_url).json()['response']['venue']
        try:
            # try to get rating
            info_list.append([venue_id, ven_info['rating']])
        except:
            # use dummy rating
            info_list.append([venue_id, "n/a"])
    return_ven_info = pd.DataFrame(info_list, columns=["Venue_id", "Rating"])
    return(return_ven_info)
"""
# Load the cached ratings export instead of calling the API again.
df_venue_ratings = pd.read_excel("EXPORT-venue-ratings.xlsx", header=0)
df_venue_ratings.drop("Unnamed: 0", axis=1, inplace=True)
df_venue_ratings.head()
df_venue_ratings.shape
# +
# Make a backup of ratings
#df_venue_ratings.to_excel("EXPORT-venue-ratings.xlsx")
# -
# Merge ratings with other information
df_venues_con = pd.merge(df_venues_top, df_venue_ratings, how="left", on="Venue_id")
df_venues_con.info()
# Drop venues with rating = "n/a" (dummy value set when Foursquare had no rating)
index_na = df_venues_con[df_venues_con["Rating"] == "n/a"].index
df_venues_con.drop(index_na, axis=0, inplace=True)
df_venues_con.dropna(how="any", axis=0, inplace=True)
df_venues_con.info()
# ### 2.4 Final preparation of input data
# Taking venue information and information on adjacent hoods
df_venues_cut = df_venues.loc[:, ["PLZ", "Venue_id"]]
df_str_venues = pd.merge(df_venues_con, df_venues_cut, how="left", on="Venue_id")
df_str_venues.rename(columns={"PLZ":"Adj_zip_codes"}, inplace=True)
df_str_venues.info()
# Venue counts per (ZIP area, category) pair.
df_venues_per_zip = df_str_venues.groupby(["Adj_zip_codes", "Venue_category"])["Venue_id"].count().reset_index()
df_venues_per_zip.sort_values(by="Adj_zip_codes", inplace=True)
df_venues_per_zip.head()
# +
import numpy as np
import matplotlib.pyplot as plt

# List of all categories
categories = df_venues_per_zip["Venue_category"].unique()


def _category_bars(category):
    """Return (ZIP-code labels, venue counts) for one venue category."""
    subset = df_venues_per_zip[df_venues_per_zip["Venue_category"] == category]
    return subset["Adj_zip_codes"].astype(str).tolist(), subset["Venue_id"].tolist()


# Keep the individual cat0..cat9 / x_bins_cat* / series_cat* names because the
# report section above refers to them; only the 10x copy-pasted extraction and
# plotting code is collapsed.
cat0, cat1, cat2, cat3, cat4, cat5, cat6, cat7, cat8, cat9 = categories[:10]
x_bins_cat0, series_cat0 = _category_bars(cat0)
x_bins_cat1, series_cat1 = _category_bars(cat1)
x_bins_cat2, series_cat2 = _category_bars(cat2)
x_bins_cat3, series_cat3 = _category_bars(cat3)
x_bins_cat4, series_cat4 = _category_bars(cat4)
x_bins_cat5, series_cat5 = _category_bars(cat5)
x_bins_cat6, series_cat6 = _category_bars(cat6)
x_bins_cat7, series_cat7 = _category_bars(cat7)
x_bins_cat8, series_cat8 = _category_bars(cat8)
x_bins_cat9, series_cat9 = _category_bars(cat9)

fig, ax = plt.subplots(5, 2, figsize=(15,20))
fig.suptitle("Number of restaurant for each category per ZIP-code", fontsize=14)

# One bar chart per category; ax.flat walks the 5x2 grid row by row, matching
# the original ax[0][0], ax[0][1], ax[1][0], ... order.
panels = [
    (x_bins_cat0, series_cat0, cat0),
    (x_bins_cat1, series_cat1, cat1),
    (x_bins_cat2, series_cat2, cat2),
    (x_bins_cat3, series_cat3, cat3),
    (x_bins_cat4, series_cat4, cat4),
    (x_bins_cat5, series_cat5, cat5),
    (x_bins_cat6, series_cat6, cat6),
    (x_bins_cat7, series_cat7, cat7),
    (x_bins_cat8, series_cat8, cat8),
    (x_bins_cat9, series_cat9, cat9),
]
for panel_ax, (bins, counts, category) in zip(ax.flat, panels):
    panel_ax.bar(bins, counts, color='b', width=0.5)
    panel_ax.set_title(str(category))
plt.show()
# -
# One hot encoding of adj_zip_codes
onehot_zip_codes = pd.get_dummies(df_str_venues["Adj_zip_codes"])
onehot_zip_codes.info()
onehot_zip_codes.shape
df_str_venues_oh = df_str_venues.join(onehot_zip_codes, how="left")
df_str_venues_oh.info()
# Ratings arrive as strings from the cached export; make them numeric.
df_str_venues_oh["Rating"] = pd.to_numeric(df_str_venues_oh["Rating"])
df_str_venues_oh.describe()
# Drop unnecessary columns
df_str_reduced = df_str_venues_oh.drop(["Venue_latitude", "Venue_longitude", "Venue_PLZ", "Appearances", "Adj_zip_codes"], axis=1)
df_str_reduced.info()
zip_cols = df_str_reduced.columns.to_list()[4:20]
# Group by Venues -- max() of the one-hot flags marks every ZIP a venue appears in.
df_str_group = df_str_reduced.groupby(["Venue_id", "Venue_name", "Venue_category", "Rating"])[zip_cols].max().reset_index()
# Transform "Venue_category" into numeric
from sklearn import preprocessing
le_category = preprocessing.LabelEncoder()
le_category.fit(df_str_group["Venue_category"])
df_str_group["Num_category"] = le_category.transform(df_str_group["Venue_category"])
df_str_group.head()
df_str_group.info()
# Summary of Categories (label -> encoded integer)
orig_categories = le_category.classes_
new_labels = le_category.transform(orig_categories)
category_legend = pd.DataFrame({'Original': orig_categories, 'New': new_labels})
category_legend
# ## 3. ML to predict rating based on location and restaurant category
# ### 3.1 Prepare input data for ML-model
#Prepare features: one-hot ZIP columns plus the numeric category code.
x_data = np.asanyarray(df_str_group.iloc[:, 4:22])
x_cols = df_str_group.columns.to_list()[4:22]
# Prepare target labels
y_data = np.asanyarray(df_str_group.loc[:, ["Rating"]])
# +
from sklearn.model_selection import train_test_split
# Fixed random_state keeps the split reproducible.
x_train, x_test, y_train, y_test = train_test_split(x_data, y_data, test_size=0.2, random_state=1)
# -
y_test.shape
# ### 3.2 Linear regression
from sklearn.linear_model import LinearRegression
LR_model = LinearRegression(fit_intercept = True)
LR_model.fit(x_train, y_train)
LR_model.coef_
LR_model.intercept_
LR_model.score(x_train, y_train)
# --> Very poor R-squared value. Means the model doesn't really fit
y_predict = LR_model.predict(x_test)
from sklearn.metrics import mean_squared_error
mse_lr = mean_squared_error(y_test, y_predict)
mse_lr
# --> but the MSE is quite small with 0.66
# Let's have a look on the coefficients, largest first
df_coeff_lr = pd.DataFrame({"Feature": x_cols, "Coefficients": np.round(LR_model.coef_[0], 3)}).set_index("Feature")
df_coeff_lr.sort_values(by="Coefficients", ascending=False, inplace=True)
df_coeff_lr.head(30)
# ### 3.3 Conclusion of Linear regression model
x_values = np.linspace(0, 9, num=10)
y_lin = df_coeff_lr.loc["Num_category", "Coefficients"] * x_values + LR_model.intercept_
plt.plot(x_values, y_lin, '-r', label='Linear regression line')
plt.title('Linear regression line for ZIP-area ' + str(df_coeff_lr.index[0]) + " dependent of type of cuisine")
plt.xlabel('Type of cusines', color='#1C2833')
plt.ylabel('Rating', color='#1C2833')
plt.legend(loc='upper left')
plt.grid()
plt.show()
# Linear regression line shows the likely range of a restaurant rating in ZIP-area 70184.<br>
# ZIP-area is the area, where the restaurants have the best rating
# > min. rating is 6.75<br>
# > max. rating is 7.3<br>
# > type of cusine has only limited impact as the MSE is 0.66
# **Conclusion:<br>
# The impact of location and cusine on the rating is not significant enough to be considered as crucial criteria**
| .ipynb_checkpoints/03c_Csr_STR_report-checkpoint.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# language: python
# name: python3
# ---
# + [markdown] id="view-in-github" colab_type="text"
# <a href="https://colab.research.google.com/github/adnanmasood/AIF360/blob/master/AutoML_AutoKeras_MNIST.ipynb" target="_parent"><img src="https://colab.research.google.com/assets/colab-badge.svg" alt="Open In Colab"/></a>
# + colab_type="code" id="tyRK8ckP57Q3" colab={}
# Install AutoKeras and the keras-tuner release it depends on (notebook magics).
# !pip install autokeras
# !pip install git+https://github.com/keras-team/keras-tuner.git@1.0.2rc1
# + [markdown] colab_type="text" id="-Fwut17Y57Q7"
# ## A Simple Example
# The first step is to prepare your data. Here we use the MNIST dataset as an example
#
# + colab_type="code" id="XBYxmQwn57Q7" colab={}
import numpy as np
import tensorflow as tf
from tensorflow.keras.datasets import mnist
from tensorflow.python.keras.utils.data_utils import Sequence
import autokeras as ak
# Load MNIST: 60k training and 10k test grayscale digits, each a (28, 28) matrix.
(x_train, y_train), (x_test, y_test) = mnist.load_data()
print(x_train.shape) # (60000, 28, 28)
print(y_train.shape) # (60000,)
print(y_train[:3]) # array([5, 0, 4], dtype=uint8) -- first three *training* labels
# + [markdown] colab_type="text" id="yB9Ad-K-57Q-"
# The second step is to run the ImageClassifier.
# It is recommended to have more trials for more complicated datasets.
# This is just a quick demo of MNIST, so we set max_trials to 1.
#
# + colab_type="code" id="8C9EGtF857Q-" colab={}
# Initialize the image classifier.
clf = ak.ImageClassifier(
    overwrite=True,
    max_trials=1)
# Feed the image classifier with training data.
clf.fit(x_train, y_train, epochs=10)
# Predict with the best model.
predicted_y = clf.predict(x_test)
print(predicted_y)
# Evaluate the best model with testing data.
print(clf.evaluate(x_test, y_test))
# + [markdown] colab_type="text" id="gMiVwtnO57RB"
# ## Validation Data
# By default, AutoKeras uses the last 20% of training data as validation data. As shown in
# the example below, you can use validation_split to specify the percentage.
#
# + colab_type="code" id="ThC3onzG57RB" colab={}
clf.fit(
    x_train,
    y_train,
    # Split the training data and use the last 15% as validation data.
    validation_split=0.15,
    epochs=10,
)
# + [markdown] colab_type="text" id="3cszduJl57RE"
# You can also use your own validation set instead of splitting it from the training data
# with validation_data.
#
# + colab_type="code" id="4FkCxcMK57RF" colab={}
# Hold out the final 10k training examples as an explicit validation set.
split = 50000
x_val = x_train[split:]
y_val = y_train[split:]
x_train = x_train[:split]
y_train = y_train[:split]
clf.fit(
    x_train,
    y_train,
    # Use your own validation set.
    validation_data=(x_val, y_val),
    epochs=10,
)
# + [markdown] colab_type="text" id="YkXQtWpz57RH"
# ## Customized Search Space
# For advanced users, you may customize your search space by using AutoModel instead of
# ImageClassifier. You can configure the ImageBlock for some high-level configurations,
# e.g., block_type for the type of neural network to search, normalize for whether to do
# data normalization, augment for whether to do data augmentation. You can also do not
# specify these arguments, which would leave the different choices to be tuned
# automatically. See the following example for detail.
#
# + colab_type="code" id="6LRJN07c57RH" colab={}
input_node = ak.ImageInput()
output_node = ak.ImageBlock(
    # Only search ResNet architectures.
    block_type="resnet",
    # Normalize the dataset.
    normalize=True,
    # Do not do data augmentation.
    augment=False,
)(input_node)
output_node = ak.ClassificationHead()(output_node)
clf = ak.AutoModel(
    inputs=input_node,
    outputs=output_node,
    overwrite=True,
    max_trials=1)
clf.fit(x_train, y_train, epochs=10)
# + [markdown] colab_type="text" id="HUXlJCGW57RK"
# The usage of AutoModel is similar to the functional API of Keras. Basically, you are
# building a graph, whose edges are blocks and the nodes are intermediate outputs of
# blocks. To add an edge from input_node to output_node with output_node =
# ak.[some_block]([block_args])(input_node).
#
# You can even also use more fine grained blocks to customize the search space even
# further. See the following example.
#
# + colab_type="code" id="wmL7KgR_57RL" colab={}
# Fully explicit pipeline: normalize -> augmentation (no flips) -> ResNet-v2 -> classify.
input_node = ak.ImageInput()
output_node = ak.Normalization()(input_node)
output_node = ak.ImageAugmentation(horizontal_flip=False)(output_node)
output_node = ak.ResNetBlock(version="v2")(output_node)
output_node = ak.ClassificationHead()(output_node)
clf = ak.AutoModel(
    inputs=input_node,
    outputs=output_node,
    overwrite=True,
    max_trials=1)
clf.fit(x_train, y_train, epochs=10)
# + [markdown] colab_type="text" id="-4pwB_Fx57RO"
# ## Data Format
# The AutoKeras ImageClassifier is quite flexible for the data format.
#
# For the image, it accepts data formats both with and without the channel dimension. The
# images in the MNIST dataset do not have the channel dimension. Each image is a matrix
# with shape (28, 28). AutoKeras also accepts images of three dimensions with the channel
# dimension at last, e.g., (32, 32, 3), (28, 28, 1).
#
# For the classification labels, AutoKeras accepts both plain labels, i.e. strings or
# integers, and one-hot encoded labels, i.e. vectors of 0s and 1s.
#
# So if you prepare your data in the following way, the ImageClassifier should still work.
#
# + colab_type="code" id="Bz-FteTM57RP" colab={}
# Reload the full dataset (earlier cells truncated x_train to 50k rows).
(x_train, y_train), (x_test, y_test) = mnist.load_data()
# Reshape the images to have the channel dimension.
x_train = x_train.reshape(x_train.shape + (1,))
x_test = x_test.reshape(x_test.shape + (1,))
# One-hot encode the labels via identity-matrix row lookup.
eye = np.eye(10)
y_train = eye[y_train]
y_test = eye[y_test]
print(x_train.shape) # (60000, 28, 28, 1)
print(y_train.shape) # (60000, 10)
print(y_train[:3])
# array([[0., 0., 0., 0., 0., 1., 0., 0., 0., 0.],
# [1., 0., 0., 0., 0., 0., 0., 0., 0., 0.],
# [0., 0., 0., 0., 1., 0., 0., 0., 0., 0.]])
# + [markdown] colab_type="text" id="INI0U1YY57RR"
# We also support using tf.data.Dataset format for the training data. In this case, the
# images would have to be 3-dimensional. The labels have to be one-hot encoded for
# multi-class classification to be wrapped into tensorflow Dataset.
#
# + colab_type="code" id="_1WUIcew57RR" colab={}
# Note the single-element tuples: the Dataset yields ((inputs,), (targets,)) pairs.
train_set = tf.data.Dataset.from_tensor_slices(((x_train,), (y_train,)))
test_set = tf.data.Dataset.from_tensor_slices(((x_test,), (y_test,)))
clf = ak.ImageClassifier(
    overwrite=True,
    max_trials=1)
# Feed the tensorflow Dataset to the classifier.
clf.fit(train_set, epochs=10)
# Predict with the best model.
predicted_y = clf.predict(test_set)
# Evaluate the best model with testing data.
print(clf.evaluate(test_set))
# + [markdown] colab_type="text" id="rByHsfyZ57RT"
# ## Reference
# [ImageClassifier](/image_classifier),
# [AutoModel](/auto_model/#automodel-class),
# [ImageBlock](/block/#imageblock-class),
# [Normalization](/block/#normalization-class),
# [ImageAugmentation](/block/#image-augmentation-class),
# [ResNetBlock](/block/#resnetblock-class),
# [ImageInput](/node/#imageinput-class),
# [ClassificationHead](/block/#classificationhead-class).
#
| AutoML_AutoKeras_MNIST.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python [conda env:py35]
# language: python
# name: conda-env-py35-py
# ---
# +
import sys
# Make the repository root importable when running from this subdirectory.
if "../" not in sys.path:
    sys.path.append("../")
import matplotlib
# %matplotlib inline
matplotlib.style.use('ggplot')
from envs.cliff_walking import CliffWalkingEnv
from utils import plotting
from double_q_learning import double_q_learning
# for auto-reloading external modules
# see http://stackoverflow.com/questions/1907993/autoreload-of-modules-in-ipython
# %load_ext autoreload
# %autoreload 2
# -
# Run Double Q-Learning on the Cliff Walking gridworld for 500 episodes and
# plot the per-episode statistics. Q1/Q2 are the two action-value tables.
env = CliffWalkingEnv()
Q1, Q2, stats = double_q_learning(env, 500)
plotting.plot_episode_stats(stats)
| reinforcement/double_q_learning.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# language: python
# name: python3
# ---
# # Watershed Distance Transform for 3D Data
# ---
# Implementation of papers:
#
# [Deep Watershed Transform for Instance Segmentation](http://openaccess.thecvf.com/content_cvpr_2017/papers/Bai_Deep_Watershed_Transform_CVPR_2017_paper.pdf)
#
# [Learn to segment single cells with deep distance estimator and deep cell detector](https://arxiv.org/abs/1803.10829)
# +
import os
import errno
import numpy as np
import deepcell
# -
# ## Load the data
#
# ### Download the data from `deepcell.datasets`
#
# `deepcell.datasets` provides access to a set of annotated live-cell imaging datasets which can be used for training cell segmentation and tracking models.
# All dataset objects share the `load_data()` method, which allows the user to specify the name of the file (`path`), the fraction of data reserved for testing (`test_size`) and a `seed` which is used to generate the random train-test split.
# Metadata associated with the dataset can be accessed through the `metadata` attribute.
# +
# Download the data (saves to ~/.keras/datasets)
filename = 'mousebrain.npz'
test_size = 0.1 # % of data saved as test
seed = 0 # seed for random train-test split
# NOTE(review): later code indexes X as (batch, frames, x, y, channels) —
# confirm this layout against the deepcell.datasets documentation.
(X_train, y_train), (X_test, y_test) = deepcell.datasets.mousebrain.load_data(filename, test_size=test_size, seed=seed)
print('X.shape: {}\ny.shape: {}'.format(X_train.shape, y_train.shape))
# -
# ### Set up filepath constants
# +
# the path to the data file is currently required for `train_model_()` functions
# change DATA_DIR if you are not using `deepcell.datasets`
DATA_DIR = os.path.expanduser(os.path.join('~', '.keras', 'datasets'))
# DATA_FILE should be a npz file, preferably from `make_training_data`
DATA_FILE = os.path.join(DATA_DIR, filename)
# confirm the data file is available
assert os.path.isfile(DATA_FILE)
# +
# Set up other required filepaths
# If the data file is in a subdirectory, mirror it in MODEL_DIR and LOG_DIR
PREFIX = os.path.relpath(os.path.dirname(DATA_FILE), DATA_DIR)
ROOT_DIR = '/data' # TODO: Change this! Usually a mounted volume
MODEL_DIR = os.path.abspath(os.path.join(ROOT_DIR, 'models', PREFIX))
LOG_DIR = os.path.abspath(os.path.join(ROOT_DIR, 'logs', PREFIX))
# create directories if they do not exist
for d in (MODEL_DIR, LOG_DIR):
    try:
        os.makedirs(d)
    except OSError as exc: # Guard against race condition
        if exc.errno != errno.EEXIST:
            raise
# -
# ## Create the Foreground/Background FeatureNet Model
#
# Here we instantiate two `FeatureNet` models from `deepcell.model_zoo` for foreground/background separation as well as the interior/edge segmentation.
frames_per_batch = 3
norm_method = 'whole_image' # data normalization - `whole_image` for 3d conv
receptive_field = 61 # should be adjusted for the scale of the data
# transform settings
distance_bins = 4 # number of distance "classes"
erosion_width = 1 # erode edges, improves segmentation when cells are close
watershed_kwargs = {
    'distance_bins': distance_bins,
    'erosion_width': erosion_width,
}
# +
from deepcell import model_zoo
# Binary classifier: n_features=2 output classes (foreground vs background).
fgbg_model = model_zoo.bn_feature_net_3D(
    receptive_field=receptive_field,
    n_features=2,
    norm_method=norm_method,
    n_frames=frames_per_batch,
    n_channels=X_train.shape[-1])
# -
# ## Prepare for training
#
# ### Set up training parameters.
#
# There are a number of tunable hyper parameters necessary for training deep learning models:
#
# **model_name**: Incorporated into any files generated during the training process.
#
# **n_epoch**: The number of complete passes through the training dataset.
#
# **lr**: The learning rate determines the speed at which the model learns. Specifically it controls the relative size of the updates to model values after each batch.
#
# **optimizer**: The TensorFlow module [tf.keras.optimizers](https://www.tensorflow.org/api_docs/python/tf/keras/optimizers) offers optimizers with a variety of algorithm implementations. DeepCell typically uses the Adam or the SGD optimizers.
#
# **lr_sched**: A learning rate scheduler allows the learning rate to adapt over the course of model training. Typically a larger learning rate is preferred during the start of the training process, while a small learning rate allows for fine-tuning during the end of training.
#
# **batch_size**: The batch size determines the number of samples that are processed before the model is updated. The value must be greater than one and less than or equal to the number of samples in the training dataset.
# +
from tensorflow.keras.optimizers import SGD
from deepcell.utils.train_utils import rate_scheduler
fgbg_model_name = 'sample_fgbg_3d_model'
watershed_model_name = 'sample_watershed_3d_model'
n_epoch = 1 # Number of training epochs
lr = 0.01
lr_sched = rate_scheduler(lr=lr, decay=0.99)
# Sample mode settings
batch_size = 64 # number of images per batch (should be 2 ^ n)
win = (receptive_field - 1) // 2 # sample window size
win_z = (frames_per_batch - 1) // 2 # z window size
balance_classes = True # sample each class equally
max_class_samples = 1e7 # max number of samples per class.
# -
# ### Create the DataGenerators
#
# The `SampleMovieDataGenerator` generates many image patches of size `(2*win+1, 2*win+1, win_z)` to perform an image classification task for every pixel.
# +
from deepcell.image_generators import SampleMovieDataGenerator
# Augment training patches with rotation, zoom and flips; validation data
# uses an un-augmented generator.
datagen = SampleMovieDataGenerator(
    rotation_range=180,
    zoom_range=(.8, 1.2),
    horizontal_flip=True,
    vertical_flip=True)
datagen_val = SampleMovieDataGenerator()
# +
# Foreground/background patch streams ('fgbg' transform yields binary labels).
fgbg_train_data = datagen.flow(
    {'X': X_train, 'y': y_train},
    seed=seed,
    batch_size=batch_size,
    transform='fgbg',
    window_size=(win, win, win_z),
    balance_classes=balance_classes,
    max_class_samples=max_class_samples)
fgbg_val_data = datagen_val.flow(
    {'X': X_test, 'y': y_test},
    seed=seed,
    batch_size=batch_size,
    transform='fgbg',
    window_size=(win, win, win_z),
    balance_classes=False,
    max_class_samples=max_class_samples)
# +
# Distance-transform ("watershed") patch streams: labels are binned
# distance-to-edge classes, configured via `watershed_kwargs`.
watershed_train_data = datagen.flow(
    {'X': X_train, 'y': y_train},
    seed=seed,
    batch_size=batch_size,
    transform='watershed',
    transform_kwargs=watershed_kwargs,
    window_size=(win, win, win_z),
    balance_classes=balance_classes,
    max_class_samples=max_class_samples)
watershed_val_data = datagen_val.flow(
    {'X': X_test, 'y': y_test},
    seed=seed,
    batch_size=batch_size,
    transform='watershed',
    transform_kwargs=watershed_kwargs,
    window_size=(win, win, win_z),
    balance_classes=False,
    max_class_samples=max_class_samples)
# -
# ### Compile the model with a loss function
#
# Each model is trained with its own loss function. `weighted_categorical_crossentropy` is often used for classification models.
# +
from deepcell import losses
def loss_function(y_true, y_pred):
    """Weighted categorical cross-entropy for the 2-class fg/bg model."""
    return losses.weighted_categorical_crossentropy(
        y_true, y_pred,
        n_classes=2)
fgbg_model.compile(
    loss=loss_function,
    optimizer=SGD(learning_rate=lr, decay=1e-6, momentum=0.9, nesterov=True),
    metrics=['accuracy'])
# -
# ## Train the foreground/background model
#
# Call `fit()` on the compiled model, along with a default set of callbacks.
# +
from deepcell.utils.train_utils import get_callbacks
from deepcell.utils.train_utils import count_gpus
model_path = os.path.join(MODEL_DIR, '{}.h5'.format(fgbg_model_name))
loss_path = os.path.join(MODEL_DIR, '{}.npz'.format(fgbg_model_name))
num_gpus = count_gpus()
print('Training on', num_gpus, 'GPUs.')
train_callbacks = get_callbacks(
    model_path,
    lr_sched=lr_sched,
    save_weights_only=num_gpus >= 2,
    monitor='val_loss',
    verbose=1)
# NOTE(review): steps are hard-coded here (40000 / 10000) while the watershed
# model below derives them from the data size — confirm which is intended.
loss_history = fgbg_model.fit(
    fgbg_train_data,
    # steps_per_epoch=fgbg_train_data.y.shape[0] // batch_size,
    steps_per_epoch=40000,
    epochs=n_epoch,
    validation_data=fgbg_val_data,
    validation_steps=10000,
    # validation_steps=fgbg_val_data.y.shape[0] // batch_size,
    callbacks=train_callbacks)
# -
# ## Create the `watershed` FeatureNet Model
#
# Here we instantiate two `FeatureNet` models from `deepcell.model_zoo` for foreground/background separation as well as the interior/edge segmentation.
# +
from deepcell import model_zoo
# Distance model: one output class per distance bin.
watershed_model = model_zoo.bn_feature_net_3D(
    receptive_field=receptive_field,
    n_features=distance_bins,
    norm_method=norm_method,
    n_frames=frames_per_batch,
    n_channels=X_train.shape[-1])
# -
# ### Compile the model with a loss function
#
# Just like the foreground/background model, the `watershed` model is compiled with the `weighted_categorical_crossentropy` loss function.
# +
from deepcell import losses
def loss_function(y_true, y_pred):
    """Weighted categorical cross-entropy over the `distance_bins` classes."""
    return losses.weighted_categorical_crossentropy(
        y_true, y_pred,
        n_classes=distance_bins,
        from_logits=False)
watershed_model.compile(
    loss=loss_function,
    optimizer=SGD(learning_rate=lr, decay=1e-6, momentum=0.9, nesterov=True),
    metrics=['accuracy'])
# -
# ## Train the `watershed` model
#
# Call `fit()` on the compiled model, along with a default set of callbacks.
# +
from deepcell.utils.train_utils import get_callbacks
from deepcell.utils.train_utils import count_gpus
model_path = os.path.join(MODEL_DIR, '{}.h5'.format(watershed_model_name))
loss_path = os.path.join(MODEL_DIR, '{}.npz'.format(watershed_model_name))
num_gpus = count_gpus()
print('Training on', num_gpus, 'GPUs.')
train_callbacks = get_callbacks(
    model_path,
    lr_sched=lr_sched,
    save_weights_only=num_gpus >= 2,
    monitor='val_loss',
    verbose=1)
# Steps here are derived from the sampled data size (one pass per epoch).
loss_history = watershed_model.fit(
    watershed_train_data,
    steps_per_epoch=watershed_train_data.y.shape[0] // batch_size,
    epochs=n_epoch,
    validation_data=watershed_val_data,
    validation_steps=watershed_val_data.y.shape[0] // batch_size,
    callbacks=train_callbacks)
# -
# ## Predict on test data
#
# The model was trained on small samples of data of shape `(frames_per_batch, receptive_field, receptive_field)`.
# in order to process full-sized images, the trained weights will be saved and loaded into a new model with `dilated=True` and proper `input_shape`.
#
# #### Save weights of trained models
# +
fgbg_weights_file = os.path.join(MODEL_DIR, '{}.h5'.format(fgbg_model_name))
fgbg_model.save_weights(fgbg_weights_file)
watershed_weights_file = os.path.join(MODEL_DIR, '{}.h5'.format(watershed_model_name))
watershed_model.save_weights(watershed_weights_file)
# -
# #### Initialize dilated models and load the weights
# +
from deepcell import model_zoo
# Rebuild the networks with `dilated=True` and a full-image input shape so the
# patch-trained weights can run on whole frames.
run_fgbg_model = model_zoo.bn_feature_net_3D(
    receptive_field=receptive_field,
    dilated=True,
    n_features=2,
    n_frames=frames_per_batch,
    input_shape=tuple(X_test.shape[1:]))
run_fgbg_model.load_weights(fgbg_weights_file)
run_watershed_model = model_zoo.bn_feature_net_3D(
    receptive_field=receptive_field,
    dilated=True,
    n_features=distance_bins,
    n_frames=frames_per_batch,
    input_shape=tuple(X_test.shape[1:]))
run_watershed_model.load_weights(watershed_weights_file)
# -
# #### Make predictions on test data
# +
# Only the first 4 test movies are segmented here to keep the demo fast.
test_images = run_watershed_model.predict(X_test[:4])
test_images_fgbg = run_fgbg_model.predict(X_test[:4])
print('watershed transform shape:', test_images.shape)
print('segmentation mask shape:', test_images_fgbg.shape)
# -
# #### Watershed post-processing
# +
# Collapse per-pixel distance-class probabilities to the most likely bin.
argmax_images = []
for i in range(test_images.shape[0]):
    max_image = np.argmax(test_images[i], axis=-1)
    argmax_images.append(max_image)
argmax_images = np.array(argmax_images)
argmax_images = np.expand_dims(argmax_images, axis=-1)
print('watershed argmax shape:', argmax_images.shape)
# +
# threshold the foreground/background
# and remove back ground from watershed transform
threshold = 0.8
fg_thresh = test_images_fgbg[..., 1] > threshold
fg_thresh = np.expand_dims(fg_thresh, axis=-1)
argmax_images_post_fgbg = argmax_images * fg_thresh
# +
# Apply watershed method with the distance transform as seed
from skimage.measure import label
# NOTE(review): `watershed` moved to `skimage.segmentation` in scikit-image
# >= 0.19; this import path only works on older versions — confirm the pin.
from skimage.morphology import watershed
from skimage.feature import peak_local_max
watershed_images = []
for i in range(argmax_images_post_fgbg.shape[0]):
    image = fg_thresh[i, ..., 0]
    distance = argmax_images_post_fgbg[i, ..., 0]
    # Seeds: local maxima of the farthest-distance-bin probability map.
    # NOTE(review): `indices=False` was removed from peak_local_max in newer
    # scikit-image releases — confirm version compatibility.
    local_maxi = peak_local_max(test_images[i, ..., -1],
                                min_distance=15,
                                exclude_border=False,
                                indices=False,
                                labels=image)
    markers = label(local_maxi)
    segments = watershed(-distance, markers, mask=image)
    watershed_images.append(segments)
watershed_images = np.array(watershed_images)
watershed_images = np.expand_dims(watershed_images, axis=-1)
# +
# Plot the results
import matplotlib.pyplot as plt
# Pick a random movie and frame to visualise.
index = np.random.randint(low=0, high=watershed_images.shape[0])
frame = np.random.randint(low=0, high=watershed_images.shape[1])
print('Image:', index)
print('Frame:', frame)
fig, axes = plt.subplots(ncols=3, nrows=2, figsize=(15, 15), sharex=True, sharey=True)
ax = axes.ravel()
ax[0].imshow(X_test[index, frame, ..., 0])
ax[0].set_title('Source Image')
ax[1].imshow(test_images_fgbg[index, frame, ..., 1])
ax[1].set_title('Segmentation Prediction')
ax[2].imshow(fg_thresh[index, frame, ..., 0], cmap='jet')
ax[2].set_title('Thresholded Segmentation')
ax[3].imshow(argmax_images[index, frame, ..., 0], cmap='jet')
ax[3].set_title('Watershed Transform')
ax[4].imshow(argmax_images_post_fgbg[index, frame, ..., 0], cmap='jet')
ax[4].set_title('Watershed Transform w/o Background')
ax[5].imshow(watershed_images[index, frame, ..., 0], cmap='jet')
ax[5].set_title('Watershed Segmentation')
fig.tight_layout()
plt.show()
# +
# Render the segmented movie inline as a JS video.
from deepcell.utils.plot_utils import get_js_video
from IPython.display import HTML
HTML(get_js_video(watershed_images, batch=0, channel=0))
# -
| notebooks/training/featurenets/Watershed Transform 3D Sample Based.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# language: python
# name: python3
# ---
# Show the available GPU(s) (notebook shell magic).
# !nvidia-smi
import sys
# Install dependencies when running on Google Colab.
# NOTE(review): in the plain-.py rendering the `if` body contains only
# comments (the `!pip` magics), which is a SyntaxError outside Jupyter —
# this file is intended to be executed as a notebook via jupytext.
if 'google.colab' in sys.modules:
    # !pip install -Uqq fastcore onnx onnxruntime sentencepiece seqeval rouge-score
    # !pip install -Uqq --no-deps fastai ohmeow-blurr
    # !pip install -Uqq transformers datasets wandb
import gc
import wandb
from fastai.text.all import *
from fastai.callback.wandb import *
def read_text(fn):
    """Return the full text content of file `fn`.

    Uses a context manager so the file handle is closed deterministically;
    the original `open(fn).read()` leaked the handle until garbage collection.
    """
    with open(fn) as f:
        return f.read()
# Download (if necessary) and extract the IMDB sentiment dataset; `path` is its root.
path = untar_data(URLs.IMDB)
# ## Setup
# +
model_name = 'distilbert-base-uncased' # HuggingFace checkpoint to fine-tune
max_len = 512 # maximum token length for the tokenizer
bs = 8 # training batch size
val_bs = 16 # validation batch size
# -
# ## Training
def _to_device(e, device):
if hasattr(e, 'to'): return e.to(device)
elif isinstance(e, dict):
for _, v in e.items():
if hasattr(v, 'to'): v.to(device)
return {k:(v.to(device) if hasattr(v, 'to') else v) for k, v in e.items()}
@patch
def one_batch(self:Learner, i, b):
    "Patched `Learner.one_batch` that also moves dict-valued batch elements to the DataLoader device before splitting."
    self.iter = i
    # Move every element of the batch to `self.dls.device` via `_to_device`,
    # which (unlike the stock fastai path) also handles dict elements such as
    # HuggingFace tokenizer output.
    b_on_device = tuple(_to_device(e, self.dls.device) for e in b) if self.dls.device is not None else b
    self._split(b_on_device)
    self._with_events(self._do_one_batch, 'batch', CancelBatchException)
# +
from transformers import *
from blurr.data.all import *
from blurr.modeling.all import *
# -
# Build the HuggingFace architecture/config/tokenizer/model via blurr's helper.
hf_arch, hf_config, hf_tokenizer, hf_model = BLURR_MODEL_HELPER.get_hf_objects(model_name, model_cls=AutoModelForSequenceClassification,
                                                                               tokenizer_cls=AutoTokenizer, tokenizer_kwargs={'max_len':512})
# +
# DataBlock: text files read from disk, labelled by parent folder name, with
# the train/test grandparent folders providing the split.
blocks = (HF_TextBlock(hf_arch, hf_config, hf_tokenizer, hf_model), CategoryBlock)
dblock = DataBlock(blocks=blocks,
                   get_items=get_text_files,
                   get_x = read_text,
                   get_y=parent_label,
                   splitter=GrandparentSplitter(valid_name='test'))
dls = dblock.dataloaders(path, bs=bs, val_bs=val_bs)
# -
# ### vat finetuning
# +
import torch.nn.functional as F
from torch import linalg as LA
def KL(input, target, reduction="sum"):
    """KL divergence between the softmax distributions of two logit tensors.

    `input` is passed through log-softmax and `target` through softmax
    (both computed in float32) before `F.kl_div` combines them using the
    requested reduction ("sum" by default).
    """
    log_probs = F.log_softmax(input.float(), dim=-1, dtype=torch.float32)
    probs = F.softmax(target.float(), dim=-1, dtype=torch.float32)
    return F.kl_div(log_probs, probs, reduction=reduction)
# +
from fastai.callback.all import Hook
def hook_out(m, inp, out):
    "Forward-hook function that simply returns the module's output (used with `Hook` to capture embeddings)."
    return out
# -
def adv_project(grad, norm_type='inf', eps=1e-6):
    """Normalise `grad` into an adversarial perturbation direction.

    'l1' keeps only the element-wise sign; 'l2' divides by the last-dim L2
    norm; any other value (default 'inf') divides by the last-dim max-abs
    value. `eps` guards against division by zero.
    """
    if norm_type == 'l1':
        return grad.sign()
    if norm_type == 'l2':
        denom = torch.norm(grad, dim=-1, keepdim=True) + eps
        return grad / denom
    denom = grad.abs().max(-1, keepdim=True)[0] + eps
    return grad / denom
def compute_adversarial_loss(model:nn.Module, embed:Tensor, logits:Tensor,
                             noise_var:float=1e-5, step_size:float=1e-3, k:int=1,
                             noise_gamma:float=1e-6):
    "Symmetric ALUM-style adversarial loss: KL between the model's clean predictions and its predictions on noise-perturbed embeddings."
    # Start from small Gaussian noise on the captured embeddings.
    noise = embed.data.new(embed.size()).normal_(0, noise_var)
    noise.requires_grad_();
    # k ascent steps: move the noise in the direction that maximally
    # increases the KL between perturbed and clean predictions.
    for _ in range(k):
        newembed = embed + noise
        adv_logits = model(inputs_embeds=newembed).logits
        adv_loss = KL(adv_logits, logits.detach(), reduction="batchmean")
        delta_grad, = torch.autograd.grad(adv_loss, noise, only_inputs=True)
        norm = LA.norm(delta_grad)
        # Abort the ascent if the gradient norm is NaN/inf.
        if (torch.isnan(norm) or torch.isinf(norm)):
            break
        noise = noise + delta_grad * step_size
        # NOTE(review): "fro" is not a recognised norm_type in `adv_project`
        # ('l2'/'l1'), so this falls through to the inf-norm branch — confirm
        # that is intended.
        noise = adv_project(noise, norm_type="fro", eps=noise_gamma)
    # Symmetric KL between clean and adversarial predictions.
    newembed = embed + noise
    adv_logits = model(inputs_embeds=newembed).logits
    adv_loss_f = KL(adv_logits, logits.detach())
    adv_loss_b = KL(logits, adv_logits.detach())
    return adv_loss_f + adv_loss_b
class ALUMCallback(Callback):
    "fastai callback that adds the ALUM virtual-adversarial loss to the training loss from `start_epoch` onwards."
    run_valid = False
    # Run just before GradientAccumulation so the extra loss is accumulated too.
    order = GradientAccumulation.order-1
    @delegates(compute_adversarial_loss)
    def __init__(self, m:nn.Module, alpha:float=1., start_epoch:int=1, **kwargs):
        # `m` is the embedding module to hook; `alpha` scales the adversarial
        # loss; extra kwargs are forwarded to `compute_adversarial_loss`.
        self.hook = None
        self.adv_loss_func = partial(compute_adversarial_loss, **kwargs) if kwargs else compute_adversarial_loss
        store_attr()
    def before_batch(self):
        # Lazily install the forward hook once training reaches `start_epoch`.
        if (self.hook is None) and (self.epoch >= self.start_epoch):
            self.hook = Hook(self.m, hook_out)
            print(f'Starting virtual adversarial training at epoch {self.epoch}')
    def after_loss(self):
        if self.epoch >= self.start_epoch:
            # The hook stored this batch's embeddings; `self.pred` holds the logits.
            embed, logits = self.hook.stored, self.pred
            adv_loss = self.adv_loss_func(self.model.hf_model, embed, logits)
            self.learn.loss_grad += adv_loss * self.alpha
    def after_fit(self):
        # Remove the forward hook so the model can be reused or pickled.
        if self.hook is not None: self.hook.remove()
# # Run a HyperParameter Sweep
wandb.login()
def train():
    """Run a single sweep trial: build the model and Learner, attach
    ALUMCallback configured from the wandb run config, train for 4 epochs,
    then release the Learner and GPU memory for the next trial.

    NOTE(review): relies on notebook globals defined elsewhere —
    `model_name`, `dls`, `hf_splitter` — confirm they are set before the sweep.
    """
    run = wandb.init();
    hf_arch, hf_config, hf_tokenizer, hf_model = BLURR_MODEL_HELPER.get_hf_objects(
        model_name, model_cls=AutoModelForSequenceClassification,
        tokenizer_cls=AutoTokenizer, tokenizer_kwargs={'max_len':512})
    model = HF_BaseModelWrapper(hf_model)
    learn = Learner(dls,
                model,
                opt_func=RAdam,
                metrics=[accuracy],
                cbs=[HF_BaseModelCallback, GradientAccumulation(8)],
                splitter=hf_splitter).to_fp16()
    # ALUM hyperparameters come from this run's sweep configuration.
    learn.add_cb(ALUMCallback(learn.model.hf_model.base_model.embeddings,
                          start_epoch = run.config.start_epoch,
                          alpha=run.config.alpha,
                          noise_var=run.config.noise_var,
                          noise_gamma =run.config.noise_gamma,
                          step_size=run.config.step_size
                          ));
    learn.fit_one_cycle(4, 2e-5, cbs=[WandbCallback(log_preds=False, log_model=False)])
    # Free memory so the next sweep trial starts from a clean GPU state.
    del learn
    gc.collect()
    torch.cuda.empty_cache()
    torch.cuda.synchronize()
# Random-search sweep over the ALUM hyperparameters.
sweep_config = {
    "name": "ALUM test sweep",
    "method": "random",
    "parameters": {
        "start_epoch": {"values": [0, 1]},
        "alpha": {"values": [0.0, 0.25, 0.5, 1, 2, 4, 8, 10, 20]},
        "noise_var": {"values": [1e-6, 1e-5, 1e-4, 1e-3]},
        "noise_gamma": {"values": [1e-7, 1e-6, 1e-5, 1e-4, 1e-3]},
        "step_size": {"values": [1e-5, 1e-4, 1e-3, 1e-2]},
    },
    # Fix: wandb only recognizes "maximize"/"minimize"; the original
    # "maximise" spelling is not a valid goal value.
    "metric": {"goal": "maximize", "name": "accuracy"},
    "early_terminate": {"type": "hyperband", "s": 2, "eta": 3, "max_iter": 60}
}
sweep_id = wandb.sweep(sweep_config, project="vat", entity="fastai_community")
wandb.agent(sweep_id, function=train)
| nbs/_alum_sweep_lm_finetuning_fastai_blurr_imdb.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# language: python
# name: python3
# ---
# + [markdown] id="lD6tWdWK2nVR" colab_type="text"
# # Print
# Printing can be done by using **print** followed by the entity to print.
# + id="TKlR-wy52nVX" colab_type="code" colab={"autoexec": {"startup": false, "wait_interval": 0}, "base_uri": "https://localhost:8080/", "height": 119} outputId="921e3177-1a55-4b7e-f645-5d8af7b1dede" executionInfo={"elapsed": 7832, "status": "ok", "timestamp": 1525235538285, "user": {"displayName": "<NAME>", "photoUrl": "//lh3.googleusercontent.com/-bs8EIri6whU/AAAAAAAAAAI/AAAAAAAAFOA/2worzW5Poi0/s50-c-k-no/photo.jpg", "userId": "103954825905713429993"}, "user_tz": -345}
print(5)
print('Hello World')
print(True)
# + [markdown] id="apfayBtMHtXO" colab_type="text"
# <br>
# If you want to print multiple items in same line, you can use a comma **,**
# + id="5l6y_GA-HtXS" colab_type="code" colab={"autoexec": {"startup": false, "wait_interval": 0}} outputId="75efdcad-a8f5-4c8b-d690-2ca0e1e28c61"
print(5, 7, 15, "a")
# + [markdown] id="PS9GKsLq2nV2" colab_type="text"
# # Variable Naming
# Variables are entities or locations used to store values of any kind. They:
# * Can begin with alphabets a-z or A-Z or _
# * Can contain numbers but not as the first letter
# * Are case-sensitive
#
# *There are some **keywords** reserved in python which should not be used such as **print**, **type**, **in**, **sum**, **len**. If they are used as variable names, the program might not function properly.*
# + id="hCbnOL172nV3" colab_type="code" colab={"autoexec": {"startup": false, "wait_interval": 0}}
a = 1
A = 13
_a1 = 5
x_y = 10
b = 'apples'
# + id="VoiS9mWXHtXk" colab_type="code" colab={"autoexec": {"startup": false, "wait_interval": 0}} outputId="91af0eda-e0af-446b-90c4-f8fa5dfaac15"
print(a)
print(A)
# + [markdown] id="odwWu6a8HtXs" colab_type="text"
# # Variable Types
# In python, there are many variable types. They include:
# * int
# * *7*
# * str
# * *"Hello"*
# * *'World'*
# * float
# * *3.123*
# * boolean
#     * *False*
# * list
# * *[2, 5, 10, 15]*
# * tuple
# * *(10, 2, 8)*
# * dictionary
# * *{ "fruit" : "apple",
# "qty" : 1}*
#
# The type is auto assigned when assigning values to the variable. To know the type of variable use **type** function.
# + id="sqTbuFDxHtXu" colab_type="code" colab={"autoexec": {"startup": false, "wait_interval": 0}} outputId="4c1e0af0-3121-4f78-8683-f4a3ee4d6aaf"
a = 5
type(a)
# + id="0juK1bG_HtX4" colab_type="code" colab={"autoexec": {"startup": false, "wait_interval": 0}} outputId="5a5c1e60-feda-4344-c3b4-3c24fd3c0f4b"
b = 1.5
type(b)
# + id="t5hPGXiSHtYC" colab_type="code" colab={"autoexec": {"startup": false, "wait_interval": 0}} outputId="b2af7cf0-ba6e-4030-9ac7-f4c6fc87e9ad"
type("Write anything!")
# + id="72FmnXWhHtYQ" colab_type="code" colab={"autoexec": {"startup": false, "wait_interval": 0}} outputId="0b364c5e-2836-4ba6-8005-cb72f348078e"
c = True
type(c)
# + id="EZ1b85htHtYY" colab_type="code" colab={"autoexec": {"startup": false, "wait_interval": 0}} outputId="593a41ad-b981-4728-ef4c-81fb5dab9125"
type([1, 2])
# + id="OyfQ6Z2ZHtYk" colab_type="code" colab={"autoexec": {"startup": false, "wait_interval": 0}} outputId="4f2e1f72-5760-44a8-f89f-96eb7eb78b72"
type((1, 2, 3))
# + id="NdBua_aMHtYu" colab_type="code" colab={"autoexec": {"startup": false, "wait_interval": 0}} outputId="0cad2f31-4b00-4531-8af9-f1f300240190"
type({"hello": 1,
"hi": "a"})
# + [markdown] id="P_oza_1sHtY4" colab_type="text"
# # String Operations
# * Strings can be added with strings
# * Strings can be multiplied by integers
# * Strings can be formatted using curly brackets **{}** and its function **format**
# + id="VfFcPsY0HtY6" colab_type="code" colab={"autoexec": {"startup": false, "wait_interval": 0}} outputId="82393119-b56d-4244-ec23-3f9c9cbf6c0e"
"abc" + "def"
# + id="hrR4v9VZHtZU" colab_type="code" colab={"autoexec": {"startup": false, "wait_interval": 0}} outputId="768636a7-f4b3-4e40-bebd-9ccaec3eae7b"
"abc "*3
# + id="y3mV_Xwr2nWb" colab_type="code" colab={"autoexec": {"startup": false, "wait_interval": 0}, "base_uri": "https://localhost:8080/", "height": 34} outputId="186c5d75-d578-4cd7-beff-7a7662a5a02b" executionInfo={"elapsed": 880, "status": "ok", "timestamp": 1525227386686, "user": {"displayName": "<NAME>", "photoUrl": "https://lh3.googleusercontent.com/a/default-user=s128", "userId": "100399137249051217878"}, "user_tz": -345}
print('{} is {} {}'.format("smoking", 2, "bad"))
# + [markdown] id="CT4pRnFk2nXJ" colab_type="text"
# # Lists
# Lists can contain multiple values, which makes it easier to write programs that handle large amounts of data. The values in the list do not need to be of same data type.
# To create a list we use square brackets **[ ]** with comma **,** to separate the elements.
# + id="e7_rxO_g2nXK" colab_type="code" colab={"autoexec": {"startup": false, "wait_interval": 0}} outputId="21da8bf8-8244-442a-adbd-78464362389f"
my_list = ['hello', 3.1415, True, None, 42]
my_list
# + id="CM6jmUk5HtaM" colab_type="code" colab={"autoexec": {"startup": false, "wait_interval": 0}} outputId="f6f02b39-612e-44e2-fb05-3e73cbfde659"
pl = ['i', 'you', 'we', 'he', 'she', 'they']
type(pl)
# + [markdown] id="0U4p00a4HtaY" colab_type="text"
# ## Length of lists
# The **length** of the list can be found using the **len** keyword.
# + id="H6y8B59KHtae" colab_type="code" colab={"autoexec": {"startup": false, "wait_interval": 0}} outputId="fb6f8dd2-abb0-455d-fc17-9b9775ace953"
len(pl)
# + [markdown] id="NA2LZI_uHtak" colab_type="text"
# ## Indexing
# Elements of lists can be selected using their index i.e. position from the beginning in the list. Unlike functions, indexing requires square brackets **[ ]**.
# *In python, the first index starts with 0.*
# + id="QIngQ3pRHtam" colab_type="code" colab={"autoexec": {"startup": false, "wait_interval": 0}} outputId="f16a4b73-8d3e-4d43-a4ff-b713ebc40740"
pl
# + id="8TeYH0zOHtas" colab_type="code" colab={"autoexec": {"startup": false, "wait_interval": 0}} outputId="d0bb084e-ee27-48d5-ec4f-2904ab4b863d"
pl[0]
# + id="7coBEWsV2nXM" colab_type="code" colab={"autoexec": {"startup": false, "wait_interval": 0}} outputId="76a881c5-1187-47dd-b310-7f8dd8582696"
pl[1]
# + [markdown] id="O-NUZp-wHta6" colab_type="text"
# <br>
# Similarly, indexing can be done from the last as well. In this case, the last index is -1, the second last index -2 and so on.
# + id="cHA7b7Q_2nXN" colab_type="code" colab={"autoexec": {"startup": false, "wait_interval": 0}} outputId="7203967d-a799-4ab4-d477-48de6b8bf069"
pl[-1]
# + [markdown] id="PFijxB3k2nXW" colab_type="text"
# ## Slicing
# Slicing can be done using a colon **:** with the starting index in front of it and the ending index after the colon.
# *Note : The ending index is not included in the output.*
# + id="Q0-imRc2HtbE" colab_type="code" colab={"autoexec": {"startup": false, "wait_interval": 0}} outputId="55c294a1-0aa0-4f51-fb67-fe43bf7afd99"
pl
# + [markdown] id="ReGIW3HgHtbM" colab_type="text"
# Taking elements from 0 index to 3-1=2 index.
# + id="6avqbM3b2nXX" colab_type="code" colab={"autoexec": {"startup": false, "wait_interval": 0}} outputId="1b8df06f-bf0b-40df-9e14-66b9503f9c2e"
pl[0:3]
# + [markdown] id="zqaSjvZIHtbQ" colab_type="text"
# We can take the list in reverse in the following way.
# + id="hN8HWQWf2nXc" colab_type="code" colab={"autoexec": {"startup": false, "wait_interval": 0}} outputId="d006e52c-9c2f-46c7-f00c-7867346e55be"
pl[::-1]
# + [markdown] id="pZVnUHTAHtbU" colab_type="text"
# We can also take indices with specific gap similar to in range.
# *Again, the last index is not included in the output.*
# + id="sXEm1Q7nHtbU" colab_type="code" colab={"autoexec": {"startup": false, "wait_interval": 0}} outputId="63e6a114-5187-4f4c-f554-2fb6e2333021"
pl[0:6:2]
# + id="Rgitxaiv2nXd" colab_type="code" colab={"autoexec": {"startup": false, "wait_interval": 0}} outputId="da3b3bc3-5b3d-4981-be99-8338efd982e2"
pl[1] = "new"
pl
# + [markdown] id="c5MgZttoHtbc" colab_type="text"
# ## Append
# An item can be appended at the end of a list using the **append** method.
# + id="xgKHICya2nXn" colab_type="code" colab={"autoexec": {"startup": false, "wait_interval": 0}} outputId="2235cd15-accc-45b0-ec9e-c40d4c6caa4f"
pl.append('naya')
pl
# + [markdown] id="2o4IDuadHtbg" colab_type="text"
# # Tuple
# To create a tuple we use round brackets **( )** with comma **,** to separate the elements.
# The indexing/slicing is exactly same in tuple as in list.
#
# **Append** is not supported.
# + id="5Z84DmDNHtbk" colab_type="code" colab={"autoexec": {"startup": false, "wait_interval": 0}} outputId="3ff7bc60-31e4-468c-873e-23ec961988d7"
tpl = (1, 2, 3)
tpl
# + id="e8RkKrdlHtbq" colab_type="code" colab={"autoexec": {"startup": false, "wait_interval": 0}} outputId="4b0b3ea0-3915-4a51-ef9a-e6ef9c607ffc"
tpl = tuple(pl)
tpl
# + id="ja7A6QWyHtb0" colab_type="code" colab={"autoexec": {"startup": false, "wait_interval": 0}} outputId="d25caf48-627f-4a81-99bd-417c7f3c0dd2"
tpl[0]
# + id="MO-z2UYpHtb6" colab_type="code" colab={"autoexec": {"startup": false, "wait_interval": 0}} outputId="04b1ecaf-873e-4303-8fca-d02a57741d02"
tpl[0:6:2]
# + [markdown] id="DROkpZQ72nXk" colab_type="text"
# ## Concatenate
# We can add two or more lists.
# We can also add two or more tuples.
# Tuples and lists, however, cannot be added together.
# + id="hRKM0Qdh2nXk" colab_type="code" colab={"autoexec": {"startup": false, "wait_interval": 0}} outputId="7fa78358-1ba7-42c6-e7b1-95aea5e48be1"
pl_2 = ['my', 'your', 'our']
print(pl+pl_2)
# + id="XHplX08fHtcG" colab_type="code" colab={"autoexec": {"startup": false, "wait_interval": 0}} outputId="811e7ee9-cd94-432f-e9de-4475e8dbb7a6"
tpl_2 = tuple(pl_2)
print(tpl+tpl_2)
# + id="MrKfliVQHtcI" colab_type="code" colab={"autoexec": {"startup": false, "wait_interval": 0}} outputId="6f105d04-3d88-4c83-d784-88acddf292d2"
print(tpl+pl)
# + [markdown] id="DKutFRtf2nX2" colab_type="text"
# # Dictionary
# Dictionaries are variables with **key** and **value** pairs.
# The values can be any variables. But the keys must be immutable.
# + id="hwPnB8GG2nX3" colab_type="code" colab={"autoexec": {"startup": false, "wait_interval": 0}, "base_uri": "https://localhost:8080/", "height": 34} outputId="a7916ba0-14a8-4a79-f15b-c640db24eabd" executionInfo={"elapsed": 23193, "status": "ok", "timestamp": 1525275724155, "user": {"displayName": "<NAME>", "photoUrl": "//lh5.googleusercontent.com/-YYw8RFy1RK4/AAAAAAAAAAI/AAAAAAAAABQ/51JmToxRtUM/s50-c-k-no/photo.jpg", "userId": "110573915273242734570"}, "user_tz": -345}
cat = {'body': 'fat',
'color': 'white',
(1, 2) : 'this is his id',
"meals": [3, 1, 2]}
type(cat)
# + [markdown] id="LnXturFgHtcQ" colab_type="text"
# ## Indexing in Dictionary
# We use the key instead of index for accessing dictionary elements.
# + id="2pLVt-ha2nX4" colab_type="code" colab={"autoexec": {"startup": false, "wait_interval": 0}} outputId="ae92db46-d4ea-4e60-fc3c-109f3f922fd5"
'Cat has ' + cat['color'] + ' fur.'
# + [markdown] id="4h7PcLNXHtcS" colab_type="text"
# ### Keys and values
# + id="UdZwNlOW2nX8" colab_type="code" colab={"autoexec": {"startup": false, "wait_interval": 0}, "base_uri": "https://localhost:8080/", "height": 68} outputId="1c8834ca-f44c-4c19-e7e8-821018a60e08" executionInfo={"elapsed": 925, "status": "ok", "timestamp": 1525275749661, "user": {"displayName": "<NAME>", "photoUrl": "//lh5.googleusercontent.com/-YYw8RFy1RK4/AAAAAAAAAAI/AAAAAAAAABQ/51JmToxRtUM/s50-c-k-no/photo.jpg", "userId": "110573915273242734570"}, "user_tz": -345}
print(cat.keys())
print(cat.values())
# + [markdown] id="x9oZFZ9WHtcW" colab_type="text"
# ### Updating dictionary
# Dictionary can be updated either using the **update** keyword. We can also assign new elements in the same way we do in lists but we need to use **keys** instead of indices.
# + id="2f8gH73A2nYD" colab_type="code" colab={"autoexec": {"startup": false, "wait_interval": 0}} outputId="c847cabd-72ff-4ab9-bba9-f06e5a4c1073"
cat.update({'eyes': 'blue'})
# cat
# + id="oXCL8NydHtcc" colab_type="code" colab={"autoexec": {"startup": false, "wait_interval": 0}} outputId="db1914cd-e21e-466a-f6f4-67377f008b51"
cat['weight'] = 4
# cat
# + [markdown] id="1fr-NQejHtcs" colab_type="text"
# # Type Casting
# Variables can be converted to another type if they are compatible. This can be done using the keyword for respective types. Some keywords are:
# * String
# * str
# * Integer
# * int
# * Float
# * float
# * List
# * list
#
# You can try doing this yourself later for more understanding.
# + id="evZ_jideHtcu" colab_type="code" colab={"autoexec": {"startup": false, "wait_interval": 0}} outputId="710df7ad-cf45-4670-ccd3-722b30aae62d"
type(5)
# + id="8W7CLPY_Htcy" colab_type="code" colab={"autoexec": {"startup": false, "wait_interval": 0}} outputId="07415420-2d60-4b48-f9a8-d58d57f863dd"
type(str(5))
# + id="bItnoJ15Htc4" colab_type="code" colab={"autoexec": {"startup": false, "wait_interval": 0}} outputId="7e57d7d1-1c55-4d33-f772-4b5139e13502"
"abc" + 1
# + id="VW8Cqq_KHtc8" colab_type="code" colab={"autoexec": {"startup": false, "wait_interval": 0}} outputId="69873757-5522-46ad-d77d-2cb676df474c"
"abc" + str(1)
# + [markdown] id="H_dkA_fvHtdA" colab_type="text"
# # Comparators
# * Equal to
# * ==
# * Not equal to
# * !=
# * Less than
# * <
# * Greater than
# * >
# * Less than or equal to
# * <=
# * Greater than or equal to
# * \>=
# They return boolean values.
# + id="zSsJ3L7k2nWT" colab_type="code" colab={"autoexec": {"startup": false, "wait_interval": 0}, "base_uri": "https://localhost:8080/", "height": 68} outputId="0f8641b1-29a4-4196-a36d-5c4ef404ce08" executionInfo={"elapsed": 898, "status": "ok", "timestamp": 1525227380322, "user": {"displayName": "<NAME>", "photoUrl": "https://lh3.googleusercontent.com/a/default-user=s128", "userId": "100399137249051217878"}, "user_tz": -345}
2 != 3
# + id="cshnEo7nHtdU" colab_type="code" colab={"autoexec": {"startup": false, "wait_interval": 0}} outputId="ddd8632c-a452-43b8-8aed-af4320282d95"
2 == 3
# + id="HJnEXGP3Htdg" colab_type="code" colab={"autoexec": {"startup": false, "wait_interval": 0}} outputId="f67fc68e-2de3-4309-81ca-4c64e7747e03"
7 >= 5
# + id="GxUp5yrfHtdo" colab_type="code" colab={"autoexec": {"startup": false, "wait_interval": 0}} outputId="bd072573-10ed-45aa-d794-a9b68ed68611"
7 >= 7
# + [markdown] id="suvygt9lHtes" colab_type="text"
# # Functions
# Properties of Functions in python:
# * Named entities
# * Perform specific tasks
# * Operate on 0, 1 or more values/variables
# * Can be called using their name followed by small brackets **()**
# * Follow the same naming convention as variables
# * Are defined using the keyword **def**
# * May or may not return 1 or more values
# * Use keyword **return** to return values
# * Do not use curly brackets or semicolon like in other languages
# * Use indentation to separate blocks
#
# *P.S. Do not forget the colon **:** in the end of the line in which def keyword is used.*
# + [markdown] id="ba9vZxTdHtes" colab_type="text"
# Function with no parameters
# + id="CV4z4ff_Hteu" colab_type="code" colab={"autoexec": {"startup": false, "wait_interval": 0}}
def abc():
    """Print the number 10; demonstrates a function with no parameters."""
    print(10)
# + id="vBAOWWPCHtey" colab_type="code" colab={"autoexec": {"startup": false, "wait_interval": 0}} outputId="a707368c-fce0-4c56-aacf-7b3eae29ad3e"
abc()
# + [markdown] id="QG8AVRRnHte8" colab_type="text"
# Function with 2 parameters
# + id="qsaWKuisHte-" colab_type="code" colab={"autoexec": {"startup": false, "wait_interval": 0}}
def f(a, b):
    """Print the sum of a and b; demonstrates a function with two parameters."""
    x = a + b
    print(x)
# + id="WcRjphDQHte-" colab_type="code" colab={"autoexec": {"startup": false, "wait_interval": 0}} outputId="3f8c91fe-99ce-4baa-885a-ea04bc232f01"
f(10, 5)
# + [markdown] id="Qej0JOlUHtfA" colab_type="text"
# Function with 1 parameter that returns 3 values
# + id="Bc67mKDgHtfA" colab_type="code" colab={"autoexec": {"startup": false, "wait_interval": 0}}
def g(x):
    """Return the tuple (x, 2, 3); demonstrates returning multiple values."""
    return x, 2, 3
# + id="tp613Y4uHtfC" colab_type="code" colab={"autoexec": {"startup": false, "wait_interval": 0}} outputId="4022c21e-72ab-4014-86ca-b2d3a18dbc19"
g(8)
# + id="nxGH4X802nXI" colab_type="code" colab={"autoexec": {"startup": false, "wait_interval": 0}} outputId="72759464-f0aa-4056-ee71-ae0bd88797a5"
def add_numbers(a, b):
    """Return the sum of a and b."""
    return a + b

total = add_numbers(1, 2)
print(total)
# + [markdown] id="ne5g1xX8HtfG" colab_type="text"
# The variables used inside the functions are specific to that function and cannot be accessed outside without special cases.
# + id="gFFaz9XQHtfG" colab_type="code" colab={"autoexec": {"startup": false, "wait_interval": 0}} outputId="f23ffff0-cc48-4c6b-fb2d-5cc8d55106d3"
x
# + [markdown] id="ToVPyDZ12nWk" colab_type="text"
# # Flow Control
# *Do not forget the colon **:** at the end of the conditional lines.*
# + [markdown] id="RWfjEpu72nWl" colab_type="text"
# ## Conditions
# * if
# * if ... else
# * if ... elif
# * if ... elif ... else
# + id="fkAnBNj62nWn" colab_type="code" colab={"autoexec": {"startup": false, "wait_interval": 0}}
def rel(a, c):
    """Print how a relates to c: "Equal", "Greater", or "Less"."""
    if a == c:
        print("Equal")
        return
    # Not equal: decide between the two remaining orderings.
    print("Greater" if a > c else "Less")
# + id="M8nRxTFuHtdw" colab_type="code" colab={"autoexec": {"startup": false, "wait_interval": 0}} outputId="5dfdb0f6-2842-45d7-fccc-ebd4356eb52c"
rel(1, 2)
# + id="bM15ud73Htd0" colab_type="code" colab={"autoexec": {"startup": false, "wait_interval": 0}} outputId="6342b71e-6911-4f22-ec93-25ae6da1936d"
rel(1, 1)
# + id="5CkgjO6pHtd4" colab_type="code" colab={"autoexec": {"startup": false, "wait_interval": 0}} outputId="5eee5bf5-8f44-45b3-e483-ec5f419c88ea"
rel(2, 1)
# + [markdown] id="tDxwu71W2nW1" colab_type="text"
# ## while loop
# + id="IzThOM_O2nW4" colab_type="code" colab={"autoexec": {"startup": false, "wait_interval": 0}, "base_uri": "https://localhost:8080/", "height": 102} outputId="e3b769d4-08ef-418b-a7fb-b2ebdab85031" executionInfo={"elapsed": 983, "status": "ok", "timestamp": 1525235570472, "user": {"displayName": "<NAME>", "photoUrl": "//lh3.googleusercontent.com/-bs8EIri6whU/AAAAAAAAAAI/AAAAAAAAFOA/2worzW5Poi0/s50-c-k-no/photo.jpg", "userId": "103954825905713429993"}, "user_tz": -345}
w = 0
while w != 10:
print(w)
w = w + 1
# + [markdown] id="iTZDBibA2nW5" colab_type="text"
# ## for loop
# Iterate over the specified iterator types of items such as lists and dictionaries.
# + id="yxjR4_UjHteE" colab_type="code" colab={"autoexec": {"startup": false, "wait_interval": 0}} outputId="7226bbac-b4ee-4880-8ca8-4962d4196be8"
for i in [1, 2, 3]:
print(i)
# + [markdown] id="WWtYuhJYHteG" colab_type="text"
# *Note: range(x) gives an iterator from 0 to x-1*
# + id="UnXsFbTZ2nW6" colab_type="code" colab={"autoexec": {"startup": false, "wait_interval": 0}} outputId="5fdd462a-6e45-493c-e8a5-3b5250793d01"
for i in range(5):
print(i)
# + id="V9DL0IbB2nW8" colab_type="code" colab={"autoexec": {"startup": false, "wait_interval": 0}} outputId="33d796e1-eede-4aa9-b0b3-bae8faf96b2f"
for i in range(10, 20, 2):
print(i)
# + id="Cw3wursJ2nXB" colab_type="code" colab={"autoexec": {"startup": false, "wait_interval": 0}} outputId="b113bc0a-bb36-4b3a-83da-fa8f15a63ce5"
total = 0
for num in range(4):
total = total + num
print(total)
# + [markdown] id="xBEgdRuxHtec" colab_type="text"
# # Mutable vs Immutable
# The difference between lists and tuples is the fact that lists are mutable while tuples are immutable.
# To understand it clearly, at a higher level, we can say that mutable objects are the ones that can be changed while immutable objects are the ones that cannot be changed after assignment.
#
# You can read more about it [here](https://medium.com/@meghamohan/mutable-and-immutable-side-of-python-c2145cf72747).
# + [markdown] id="QC9iKz9uHtec" colab_type="text"
# <br>
# Lists --> Mutable
# + id="-DcAr_RUHtee" colab_type="code" colab={"autoexec": {"startup": false, "wait_interval": 0}} outputId="46a67c19-a4fd-4138-9087-44edc42e4cf5"
pl
# + id="FYqIp0kCHtei" colab_type="code" colab={"autoexec": {"startup": false, "wait_interval": 0}} outputId="e24dbd19-33f2-4388-a461-cb9631134855"
pl[0] = "new"
pl
# + [markdown] id="TxOLcokxHtek" colab_type="text"
# <br>
# Tuples --> Immutable
# + id="97rUmlHJHtem" colab_type="code" colab={"autoexec": {"startup": false, "wait_interval": 0}} outputId="6baf4ea7-91d4-44be-f5dc-ecb4464e222d"
tpl
# + id="MPBotOo3Hteq" colab_type="code" colab={"autoexec": {"startup": false, "wait_interval": 0}} outputId="7caf3fa6-75df-45e1-c37c-70fddb13e726"
tpl[0] = "new"
# + [markdown] id="P-sLL1r_HtfI" colab_type="text"
# # Importing modules
# + id="uq61ZY-CHtfI" colab_type="code" colab={"autoexec": {"startup": false, "wait_interval": 0}}
import numpy
# + id="RoSIXQppHtfI" colab_type="code" colab={"autoexec": {"startup": false, "wait_interval": 0}} outputId="918301db-b010-4060-c3de-8b6cfa6cbc0d"
numpy.array([1, 2])
# + [markdown] id="aLPOWMFkHtfK" colab_type="text"
# ## Alias
# Short words used instead of longer library names.
# + id="jrtrsbKNHtfK" colab_type="code" colab={"autoexec": {"startup": false, "wait_interval": 0}}
import numpy as np
# + id="V4BnV6tKHtfM" colab_type="code" colab={"autoexec": {"startup": false, "wait_interval": 0}} outputId="89dd6a39-5bde-4915-d24d-dbfde7f129e2"
np.array([1, 2])
# + id="vqyqOTbgHtfO" colab_type="code" colab={"autoexec": {"startup": false, "wait_interval": 0}}
| AIDevNepal/Python-2.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# name: python3
# ---
# + [markdown] id="view-in-github" colab_type="text"
# <a href="https://colab.research.google.com/github/https-deeplearning-ai/GANs-Public/blob/master/ProteinGAN.ipynb" target="_parent"><img src="https://colab.research.google.com/assets/colab-badge.svg" alt="Open In Colab"/></a>
# + [markdown] id="ViSpF4I70O4b"
# # ProteinGAN: Generative Adversarial Network for Functional Protein Generation
# *Please note that this is an optional notebook that is meant to introduce more advanced concepts, if you're up for a challenge. So, don't worry if you don't completely follow every step! We provide external resources for extra base knowledge required to grasp some components of the advanced material.*
#
# [ProteinGAN](https://www.biorxiv.org/content/10.1101/789719v2) was developed by [Biomatters Designs](https://www.biomatterdesigns.com/) and [Zelezniak lab at Chalmers University of Technology](https://twitter.com/AZelezniak).
#
# ## Goal
# The goal of this notebook is to demonstrate that core GAN ideas can be applied outside of the image domain. In this notebook, you will be able to play around with a pre-trained ProteinGAN model to see how it can be used in bioinformatics to generate functional molecules.
#
# + [markdown] id="q04P9icA8xIK"
# ## Background
#
#
# ### Proteins
#
# Proteins are large, complex molecules that play many critical roles in living organisms, including humans. You can think of them as very tiny, programmable robots used by nature to perform various functions, e.g. building, modifying or breaking down other molecules, aiding in cell replication and division, and transporting other proteins inside of cells. Apart from the crucial cellular functions, proteins are used virtually everywhere in our daily life, starting from animal nutrition and washing powders down to costly drugs and therapeutic antibodies. Using synthetic biology, protein engineering, and adaptive evolution experimental techniques, researchers enhance proteins' properties, making them more active or "sticky" towards a particular drug target or resistant to harsh environmental conditions. However, it is challenging to randomly modify proteins in a "biochemically meaningful" way such that the protein remains functional, which leads to very costly, time-consuming experiments. Thus generating natural-like diverse proteins that remain functional is of outstanding importance for biotechnology and biomedical applications.
#
# + colab={"base_uri": "https://localhost:8080/", "height": 621} id="6BRDjqgjZgkg" outputId="5a8d46cb-bdd1-4d79-86a0-5af3523822f1"
from IPython.display import YouTubeVideo
YouTubeVideo('wJyUtbn0O5Y', start=75, end=80, autoplay=1, controls=0, loop=1, width=800, height=600)
# + [markdown] id="rLPKgWGCZdL7"
# *Above, animation of motor protein responsible for transporting objects in cells*
#
# Source: https://www.youtube.com/watch?v=wJyUtbn0O5Y
# + [markdown] id="l2PDRSD4kDTR"
#
# Proteins, like images, can be represented in various ways on the computer. Images are represented as integers from 0 to 256 that indicate the intensity of red, green, or blue (RGB) color. Proteins, similarly, use letters to represent 20 unique amino acids, like the one below:
#
# > MKYATLLEYAFQALKNSYAPYSRFRVGAALLSDDGEVVTGCNVENASYGLSMCAERTAVFRAVAQGVKKFDAIAVVSGKVNPVYPCGACRQVLREFNPRLTVVVAGPGKKPLTTSLDKLLPKSFGKESLRRR
#
# Raw pixel RGB values are easy for computers to work with, though they are not very meaningful to the human eye, which is why they are displayed as images on the screen. Similarly, the sequence of amino acids is a compact, convenient representation of the actual molecule, while the more meaningful view of the protein molecule is its 3D structure. For an example, see [Cytidine deaminase](https://colab.research.google.com/drive/1O0_wyl3i-9F-5mDTlShaMfR2uOWHKwwE#scrollTo=Q277ab8R9WEU).
#
# For you to appreciate and reason about the outputs, you want your models (GANs) to ultimately produce meaningful structures. There are two important common features that make images and proteins both suitable candidates for GANs:
#
# * A random combination of building blocks, whether amino acids or pixels, will not produce a realistic outcomes. This means the GAN cannot simply guess! There are meaningful, realistic patterns of pixels and amino acids that it must model and generate.
# * The mathematical formula for how to evaluate the correctness of the generated item is unknown. For images, correctness is "realism" -- how realistic does a generated image of a dog look? There's no math formula for that, so instead you have another model (the discriminator!) learn to assess that. The same goes for proteins.
#
#
# | | Image | Protein |
# | ------- |:----------:| --------:|
# | Data type | integers from 0 to 256 | vocab of 20 amino acids |
# | Dimension| 2D | 1D|
# | Number of possible variants | $3*256^{size}$ | $20^{length}$ |
#
#
#
# + [markdown] id="8huQi0yQ8qla"
# ### ProteinGAN
#
# ProteinGAN is a generative adversarial network adapted to generate functional protein sequences. At its core, it consists of common building blocks: a discriminator and generator, spectral normalization (as in the [SN-GAN optional notebook](https://www.coursera.org/learn/build-basic-generative-adversarial-networks-gans/ungradedLab/c2FPs/optional-sn-gan)), and a loss function based on earth mover's distance (as in the [WGAN-GP assignment](https://www.coursera.org/learn/build-basic-generative-adversarial-networks-gans/programming/mTm3U/wgan)), etc.
#
# To make the GAN concept work in the field of synthetic biology, the generator and discriminator architectures have been modified to handle sequences of categorical values, capture long-distance relationships, as well as discriminate between various areas in the sequences. This is a major difference from pixel values in images and helps specifically with this type of long, categorical, and sequential data. One question to mull over: could this data processing and understanding help with generating text?
#
#
# **Data pre-processing.** The explored protein space is very unevenly distributed. Some proteins and their close variants are widely studied while others are just recorded in public databases. Without the balancing, the neural network mainly focuses on big clusters of similar well-studied sequences while treating unrepresented cluster members as anomalies. ProteinGAN has in-built upsampling capability to balance the dataset based on the size of the cluster in order to preserve the diversity of sequences.
#
# **Discrete values.** One of the biggest differences between images and proteins is the data type: while images consist of continuous values, proteins are built from discrete building blocks. To address this challenge for backpropagation, ProteinGAN employs the [Gumbel-Softmax trick with temperature](https://arxiv.org/abs/1611.01144), which serves as a differentiable approximation to sampling discrete data. This allows to end-to-end training of the discriminator and generator while operating in discrete input space.
#
# **Convergence.** GANs are known to be difficult to train due to stability issues. The discrete nature of the input further aggravates this problem. Despite the implementation of spectral normalization and WGAN loss, the optimization of ProteinGAN did not lead to convergence. However, as demonstrated in [this paper](https://arxiv.org/abs/1801.04406), training with zero-centered gradient penalties leads to improved training and guarantees local convergence even if data and generator distributions are not continuous. Adapting the implementation of [non-saturating loss with R1 regularization](https://arxiv.org/abs/1801.04406) greatly improves the performance of the GAN as demonstrated in the figure below.
#
#
# 
#
# > *GAN performance in the first 35k steps using different losses. Model performances were measured using [BLOSUM45 scores](https://en.wikipedia.org/wiki/BLOSUM) (in the nutshell, similarity score which takes into account substitution probabilities of amino acids in known seuqences) against training sequences for the first 35,000 steps (average of 3 runs with different random seeds).*
#
#
# For more information please refer [ProteinGAN paper](https://www.biorxiv.org/content/10.1101/789719v2)
# + [markdown] id="scLQLcKcIeSS"
# ## Setup
# + colab={"base_uri": "https://localhost:8080/"} id="ogtiZDjSjiOJ" outputId="ffe7ea1c-91c8-49ec-98c2-71a58d81202b"
# Installing dependencies
# ! pip install biopython
# ! pip install py3Dmol
# ! apt-get install -y clustalo
# + colab={"base_uri": "https://localhost:8080/"} id="kEbt5Aq9YSyL" outputId="df25a09e-b727-4c6e-af68-89fa2e4e7778"
# Downloading pre-trained ProteinGAN model
# !gdown https://drive.google.com/uc?id=1BfDNgn3Hj2khPfkbjE8azY_yj19igb_n
# !unzip pre_trained_protein_gan.zip
# + id="7F9vYRgXV2uf"
# Helper methods
import shutil
from Bio.Blast import NCBIWWW
from Bio.Blast import NCBIXML
import numpy as np
import pandas as pd
import py3Dmol
# A mapping between amino acid ids and their corresponding one-letter codes.
# Id 0 is the padding symbol '0', which gets stripped from decoded sequences.
ID_TO_AMINO_ACID = {0: '0', 1: 'A', 2: 'C', 3: 'D', 4: 'E', 5: 'F', 6: 'G', 7: 'H', 8: 'I', 9: 'K', 10: 'L', 11: 'M', 12: 'N', 13: 'P', 14: 'Q', 15: 'R', 16: 'S', 17: 'T', 18: 'V', 19: 'W', 20: 'Y'}
def to_seqs(model_output):
    """Decode ProteinGAN output into a list of protein sequences.

    Args:
        model_output: dict with a "prediction" key holding a batch of
            tensor-like items of amino-acid ids (each exposing .numpy()).

    Returns:
        List of strings, one amino-acid sequence per batch element, with
        the padding symbol ('0') removed.
    """
    human_readable_seqs = []
    # Iterate the batch directly instead of indexing with range(len(...)).
    for seq in model_output["prediction"]:
        decoded = "".join(ID_TO_AMINO_ACID[a] for a in seq.numpy())
        human_readable_seqs.append(decoded.replace("0", ""))
    return human_readable_seqs
def get_blast_results(seq):
    """Query the NCBI BLAST server with a protein sequence.

    Returns a DataFrame of the top hits with columns:
    name (hit description), identity (matching residues), subject
    (aligned subject sequence).
    """
    print("Calling BLAST server. This might take a while")
    handle = NCBIWWW.qblast("blastp", "nr", seq, hitlist_size=5, expect=0.5,
                            word_size=6, matrix_name="BLOSUM62")
    record = NCBIXML.read(handle)
    rows = [
        {
            "name": alignment.hit_def,
            "identity": alignment.hsps[0].identities,
            "subject": alignment.hsps[0].sbjct,
        }
        for alignment in record.alignments
    ]
    return pd.DataFrame(rows)
def append_to_fasta(path, seqs, prefix):
    """Append sequences to a file in FASTA format.

    Args:
        path: file to append to (created if it does not exist).
        seqs: iterable of sequence strings.
        prefix: record-name prefix; records are named "<prefix>_<index>".
    """
    records = [f">{prefix}_{i}\n{seq}\n" for i, seq in enumerate(seqs)]
    # Use a context manager so the handle is closed deterministically;
    # the original opened the file inside print() and leaked the handle.
    with open(path, 'a') as f:
        # The trailing newline matches print()'s behavior in the original.
        f.write("".join(records) + "\n")
def interpolate(starting, ending, steps):
    """Linearly interpolate between two latent points.

    Returns `steps + 1` points: the starting point followed by `steps`
    equal increments toward (and ending at) the ending point.
    """
    delta = (ending - starting) / steps
    points = [starting]
    current = starting
    for _ in range(steps):
        # Accumulate rather than recompute, matching the incremental scheme.
        current = current + delta
        points.append(current)
    return np.asanyarray(points)
# + [markdown] id="Q277ab8R9WEU"
# ## Cytidine deaminase
# This demonstration will use a relatively small protein called *cytidine deaminase* for simplicity. Its function in organisms is essential to DNA and RNA degradation. **Our aim is to be able to create variants of this protein that exhibit different properties.**
#
# Below is an example of cytidine deaminase 3D structure.
#
# + colab={"base_uri": "https://localhost:8080/", "height": 531} id="J7EFUwbbWy9X" outputId="f60ba6c1-5c64-4c73-e223-4388029743b4"
# Fetch the cytidine deaminase structure (PDB id 1UX1) and render it in 3D.
view = py3Dmol.view(query='pdb:1UX1')
view.setStyle({'cartoon':{'color':'spectrum'}})
print("Cytidine deaminase")
view  # displaying the view object renders the interactive widget in the notebook
# + [markdown] id="hy4cQXYSqe7X"
# ## Random amino acid sequence
#
# Let's consider a very naive approach first: choosing amino acids at random. As mentioned before, only a very tiny portion of amino acids can make up a protein with a desired function. So... what are the odds?
#
# There are around 17k annotated sequences that are categorized as cytidine deaminase: [see here](https://www.uniprot.org/uniprot/?query=ec%3A3.5.4.5+taxonomy%3A%22Bacteria+%5B2%5D%22+length%3A%5B64+TO+256%5D&sort=score)
#
# The protein length varies depending on the organism, but let's say you want to generate 131 length cytidine deaminase. So there are: $20^{131}$ possible combinations (just for comparison: there are ~ $10^{80}$ atoms in the observable universe!)
#
# It's safe to say that random sequences are unlikely to work. Even brute forcing all combinations is not an option. Nevertheless, let's try to generate a sequence to see what happens. :)
#
# + colab={"base_uri": "https://localhost:8080/", "height": 35} id="5X5tLlgIucAR" outputId="34bac72f-2d9a-455c-fda6-99e17def84f4"
np.random.seed(42)  # fix the RNG so the "random" protein is reproducible
# Sample 131 amino-acid letters uniformly at random (index 0 is the padding
# symbol '0', hence the [1:] slice) and join them into one sequence string.
random_seq = "".join(np.random.choice(list(ID_TO_AMINO_ACID.values())[1:], 131))
random_seq
# + [markdown] id="iwpDnHa7vLCv"
# Here, you see a 131 letter long amino acid sequence. It is hard to tell anything about this sequence only by looking. So instead, you can use a bioinformatics tool called Blast [(Basic Local Alignment Search Tool)](https://blast.ncbi.nlm.nih.gov/Blast.cgi) that searches a large database of known proteins to find the most similar matches. In most cases, a random sequence should not return any high-similarity results.
#
# If you do get anything returned, it should have a small _identity value_, which is the percentage of the sequence that matches. When the identity value is small, this means that only a small fragment of the sequence could be identified as a part of some random protein.
# + colab={"base_uri": "https://localhost:8080/", "height": 49} id="nBoKiboYvG73" outputId="04aca09f-bac8-43e9-be8f-8d0ddc0174ff"
# BLAST the random sequence: expect few or no hits, and only low-identity ones.
get_blast_results(random_seq)
# + [markdown] id="_P-wnHQYjvqZ"
# ## ProteinGAN sequences
#
# What if, instead, you train a GAN to generate desirable (realistic, reasonable, non-random) protein sequences?
#
# + id="o6G2mA7vPVlc"
import tensorflow as tf
tf.random.set_seed(42)  # reproducible latent-space sampling
from absl import logging
# Silence TF/absl info and warning spam in the notebook output.
logging.set_verbosity("ERROR")
tf.get_logger().setLevel("ERROR")
# Loading pre-trained model. `model` is the default serving signature:
# a callable mapping latent vectors to amino-acid id predictions.
model = tf.saved_model.load("pre_trained_protein_gan/").signatures["serving_default"]
# + colab={"base_uri": "https://localhost:8080/", "height": 35} id="EbC5FmhhD1vI" outputId="1daf4fc2-f98a-4bcc-b289-e8bcbf6d78f4"
# Choosing random points from latent space.
# 64 samples from the 128-dimensional latent space, truncated normal.
noise = tf.random.truncated_normal([64, 128], stddev=0.5, dtype=tf.float32)
# Feeding noise to generator to get an output.
model_output = model(noise)
# Model returns indices of amino acids. Here we convert them to actual letters.
seqs = to_seqs(model_output)
seqs[0]  # display the first generated sequence
# + [markdown] id="fk_9Nn7FxnG0"
# Again, not much can be said about the sequence just by looking at it (unless you're a protein savant). Time to run BLAST again!
# + colab={"base_uri": "https://localhost:8080/", "height": 221} id="TLjZJMXmpaZx" outputId="805df8ea-cf9f-4070-f5b6-7e520d44529f"
# BLAST the generated sequence; expect high-identity deaminase hits this time.
get_blast_results(seqs[0])
# + [markdown] id="fUKadK0Vx05y"
# Nice! This time, you got some matches that are either cytidine deaminase or other types of deaminase with a high identity. This is a good indication that the GAN works well in generating realistic protein sequences.
# + [markdown] id="JpAlWkRgvSJs"
# ## Latent space
#
# As you already know, GANs learn to map points in the latent space to generated items. You can explore this latent space and perform meaningful modifications to a generated item by moving in different directions. On generated faces, that might be changing hair color or adding sunglasses. Here, it's also possible to change something semantically meaningful, but for protein sequences.
#
# To start off, you can play with the diversity of generated sequences by changing how widely you sample the latent space. This can be achieved by modifying the standard deviation of the distribution. Let's try 0.1 and 1.0 to start!
# + id="Ls7MUljzvRwG"
# Generating sequences from points which are close to each other
# (small stddev => samples concentrated near the latent-space origin).
model_output = model(tf.random.truncated_normal([64, 128], stddev=0.1, dtype=tf.float32))
small_var_seqs = to_seqs(model_output)
# Generating sequences from more widely distributed points.
model_output = model(tf.random.truncated_normal([64, 128], stddev=1.0, dtype=tf.float32))
large_var_seqs = to_seqs(model_output)
# + id="J-Ns4dAgRYXh"
# Creating fasta files which will be used for clustalo to calculate distances
# pre_trained_protein_gan/train_rep.fasta - contains some representative sequences of training dataset
shutil.copy("pre_trained_protein_gan/train_rep.fasta","sequences.fasta")
# Appending generated sequences to training sequences
append_to_fasta("sequences.fasta", small_var_seqs, "small_var")
append_to_fasta("sequences.fasta", large_var_seqs, "large_var")
# + [markdown] id="xiykPRgYh4OC"
# [Clustalo](http://www.clustal.org/omega/) is a bioinformatics tool for biological sequence alignment and comparison that calculates the edit distances between multiple strings, taking into account that some letters are more similar than others biologically. You can use it to calculate all-to-all distances from different protein sequence sets - training representatives, sequences generated using low and high standard deviation.
# + colab={"base_uri": "https://localhost:8080/"} id="7D-4DqB_FcV1" outputId="8e1ecd11-2459-41fa-f7d4-9744bb603cc4"
# ! clustalo -i sequences.fasta -o fasta.aln --threads=2 -v --full --distmat-out=dist_out.dist --force
# + id="NIm7_vnPcKe4"
from sklearn.manifold import TSNE
# Loading calculated distances. clustalo writes a whitespace-separated matrix
# whose first row is the sequence count, hence skiprows=[0].
distance_matrix = pd.read_csv("dist_out.dist", delimiter='\s+', skiprows=[0],header=None,index_col=0)
# The matrix is symmetric, so reuse the row labels as column labels.
distance_matrix.columns = distance_matrix.index.values
#Using TSNE to compress all pair wise distances between sequences into two components which then could be plotted.
tsne = TSNE(n_components=2, metric='precomputed')
coordinates_2d = tsne.fit_transform(distance_matrix.values)
# + colab={"base_uri": "https://localhost:8080/", "height": 483} id="a9gIgVbNJrBW" outputId="fabbe86e-324b-4881-83d6-e645587820fe"
from matplotlib import pyplot as plt
# Plotting train representatives and generated sequences with different diversity.
# The generated sequences are the last 128 rows (64 small_var + 64 large_var),
# so everything before that is a training representative.
plt.figure(figsize=(12, 8))
plt.scatter(coordinates_2d[:-128,0], coordinates_2d[:-128,1], c="green", label="Train representative sequences", alpha=0.5, s=30)
# Select generated points by their FASTA name prefixes assigned earlier.
small_var_el = distance_matrix.index.str.contains("small_var")
plt.scatter(coordinates_2d[small_var_el,0], coordinates_2d[small_var_el,1], c="orange", label="Generated sequences with 0.1 standard deviation")
large_var_el = distance_matrix.index.str.contains("large_var")
plt.scatter(coordinates_2d[large_var_el,0], coordinates_2d[large_var_el,1], c="red", label="Generated sequences with 1.0 standard deviation ")
plt.legend()
plt.show()
# + [markdown] id="nSFhBtY3NhQR"
# As expected, the orange sequences are more similar to each other than the red ones.
# + [markdown] id="Y62ZHV1SUqYA"
# ### Controlling biological properties
#
# After generating realistic sequences, you want to be able to control their properties. As with images, it's possible to find a direction in the latent space that will change a specific property of the generated outcome. Here, you can vary values of the 100th dimension and measure the molecular weight of generated sequences. You'll use the [biopython](https://biopython.org/) library to calculate the molecule's weight.
# + id="i3osQT59e-JT"
from scipy.stats import pearsonr
from Bio.SeqUtils.ProtParam import ProteinAnalysis
# Changing the values of 100th dimension from -1.0 to 1.0
d = 99  # 0-based index of the latent dimension being swept
starting = np.zeros([128])
starting[d] = -1.0
ending = np.zeros([128])
ending[d] = 1.0
points = interpolate(starting, ending, 1023)  # 1024 points along the sweep
seqs = []
# Generate in batches of 64, the generator's expected batch size.
for i in range(0, 1024, 64):
    model_output = model(tf.constant(points[i:i+64], tf.float32))
    seqs.extend(to_seqs(model_output))
# + [markdown] id="dNvRNj2vVTFh"
# Then, you can calculate the molecular weight of each sequence and calculate the correlation with latent space direction.
# + colab={"base_uri": "https://localhost:8080/"} id="mmxAP-WwUZ8q" outputId="e7803654-673c-44c6-d40b-fb8a2f3cdf9e"
# Molecular weight of each generated sequence, then its Pearson correlation
# with the value of the swept latent dimension.
w = [ProteinAnalysis(s).molecular_weight() for s in seqs]
pearsonr(w, points[:,d])
# + colab={"base_uri": "https://localhost:8080/", "height": 502} id="JHeiy1y9hFfp" outputId="d898af5d-2f2d-4c79-d5ac-fb4e6686d5ae"
# Scatter plot: latent dimension value vs. molecular weight of the
# corresponding generated protein.
plt.figure(figsize=(16, 8))
plt.scatter(points[:,d], w, c = 'b', s = 20, label = 'Molecule weight')
plt.xlabel("Latent dimension value", fontsize = 15)
plt.ylabel("Molecular weight", fontsize = 15)
plt.legend(fontsize = 14)
plt.grid(True)
plt.show()
# + [markdown] id="937OnQVxVjKh"
# Of course, this is a very simplistic case, but it is a good illustration of how the latent space can be explored.
#
#
# ## Summary
#
# In summary, you have learned about:
#
# * Proteins as non-random sequences of 20 amino acids (aa) that nature has tweaked over billions of years of evolution to drive essential life processes;
#
# * ProteinGAN and its technical features outlining the challenges of learning long-biological sequences such as proteins;
#
# * Generating random protein sequences from a family of cytidine deaminases using a generator from a pre-trained ProteinGAN model;
#
# * Visualizing biological sequences using sequence alignments and dimensionality reduction;
#
# * Exploring latent space dimensions and connecting it with physicochemical properties of generated proteins.
#
#
# + [markdown] id="_kH5E53bgFGC"
# ## Author's Contribution
#
# [<NAME>](https://www.linkedin.com/in/donatasrep/) (Biomatter Designs) was responsible for the notebook's content and design;
#
# [<NAME>](https://twitter.com/AZelezniak) (Zelezniak lab at Chalmers University of Technology) gave input into summarizing and editing the text.
#
# ## Acknowledgment
# The authors would like to thank [Biomatter Designs](https://www.biomatterdesigns.com/) and [DeepLearning.AI](https://www.deeplearning.ai/) teams for their comments and insightful suggestions:
#
# * [<NAME>](https://www.linkedin.com/in/vykintas-jauniskis/) (Biomatter Designs);
# * [<NAME>](https://www.linkedin.com/in/laurynaskarpus/) (Biomatter Designs);
# * [<NAME>](https://www.linkedin.com/in/audrius-lauryn%C4%97nas-307687b2/) (Biomatter Designs);
# * [<NAME>](https://www.linkedin.com/in/aurimas-repe%C4%8Dka-23064ab2/) (Biomatter Designs);
# * [<NAME>](https://www.linkedin.com/in/irmantas-rokaitis-52336b18b/) (Biomatter Designs);
# * [<NAME>](https://www.linkedin.com/in/audron%C4%97-valan%C4%8Di%C5%ABt%C4%97-730785158/) (Biomatter Designs);
# * [<NAME>](https://www.linkedin.com/in/zilakauskis95/) (Biomatter Designs).
#
| ProteinGAN.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# language: python
# name: python3
# ---
# + [markdown] colab_type="text" id="9CagYlhclDR4"
# # Ransac and Outlier Removal
#
# ## Notebook Setup
# The following cell will install Drake, checkout the manipulation repository, and set up the path (only if necessary).
# - On Google's Colaboratory, this **will take approximately two minutes** on the first time it runs (to provision the machine), but should only need to reinstall once every 12 hours.
#
# More details are available [here](http://manipulation.mit.edu/drake.html).
# + colab={} colab_type="code" id="3kqzbo_AlDR6"
import importlib
import os, sys
from urllib.request import urlretrieve
# On Colab, fetch and run the course setup script (installs Drake and the
# `manipulation` package) unless the package is already importable.
if 'google.colab' in sys.modules and importlib.util.find_spec('manipulation') is None:
    urlretrieve(f"http://manipulation.csail.mit.edu/setup/setup_manipulation_colab.py",
                "setup_manipulation_colab.py")
    from setup_manipulation_colab import setup_manipulation
    # Pin the repo commit and Drake version so the notebook is reproducible.
    setup_manipulation(manipulation_sha='c1bdae733682f8a390f848bc6cb0dbbf9ea98602', drake_version='0.25.0', drake_build='releases')
# python libraries
import numpy as np
from manipulation import running_as_notebook
# Start a single meshcat server instance to use for the remainder of this notebook.
from meshcat.servers.zmqserver import start_zmq_server_as_subprocess
server_args = []
if 'google.colab' in sys.modules:
server_args = ['--ngrok_http_tunnel']
proc, zmq_url, web_url = start_zmq_server_as_subprocess(server_args=server_args)
# TODO(russt): upstream this to drake
import meshcat.geometry as g
import meshcat.transformations as tf
from pydrake.all import RigidTransform, RotationMatrix, RollPitchYaw
import open3d as o3d
import meshcat
from IPython.display import clear_output
clear_output()
from manipulation import FindResource
# Visualize Stanford Bunny
# Load the bunny mesh vertices as a 3xN point matrix (points are rows in
# open3d, hence the transpose).
pcd = o3d.io.read_point_cloud(FindResource("models/bunny/bun_zipper_res2.ply"))
pointcloud_model = np.asarray(pcd.points).transpose()
# First, clean the origin a bit to define nominal pose.
# Rotate 90 degrees about x and translate -5 cm along z...
X = np.array([[1., 0., 0., 0.0],
              [0., np.cos(np.pi/2), -np.sin(np.pi/2), 0.],
              [0., np.sin(np.pi/2), np.cos(np.pi/2), -0.05]])
Xtemp = RigidTransform(X)
# ...then rotate 90 degrees about z, composing the two transforms.
X = np.array([[np.cos(np.pi/2), -np.sin(np.pi/2), 0, 0.],
              [np.sin(np.pi/2), np.cos(np.pi/2), 0., 0.],
              [0., 0., 1., 0.]])
X = RigidTransform(X).multiply(Xtemp)
pointcloud_model = X.multiply(pointcloud_model)
# point clouds of planar surface
import numpy as np  # NOTE(review): redundant, numpy is already imported above
grid_spec = 50  # tabletop grid resolution: 50x50 points over a 1 m square
xy_axis = np.linspace(-0.5, 0.5, grid_spec)
plane_x, plane_y = np.meshgrid(xy_axis, xy_axis)
# Flatten the grid into (grid_spec^2, 3) points lying on the z=0 plane.
points_plane_xy = np.c_[plane_x.flatten(), plane_y.flatten(), np.zeros(grid_spec**2)]
# Scene = tabletop plane plus bunny, concatenated as one 3xM point matrix.
bunny_w_plane = np.c_[points_plane_xy.T, pointcloud_model]
def fit_plane(xyzs):
    """Least-squares plane fit via SVD.

    Args:
        xyzs: (N, 3) numpy array of points.

    Returns:
        (4,) numpy array [a, b, c, d] such that a*x + b*y + c*z + d = 0.
    """
    centroid = np.mean(xyzs, axis=0)
    # The plane normal is the singular direction of the centered points
    # with the smallest singular value, i.e. the last row of V^T.
    _, _, vt = np.linalg.svd(xyzs - centroid)
    normal = vt[-1]
    offset = -centroid.dot(normal)
    return np.hstack([normal, offset])
# visualize a facet
def DrawFacet(vis, abcd, name, center=None,
prefix='facets', radius=0.02, thickness=0.001, color=0xffffff, opacity=0.6):
normal = np.array(abcd[:3]).astype(float)
normal /= np.linalg.norm(normal)
d = -abcd[3] / np.linalg.norm(normal)
R = np.eye(3)
R[:, 2] = normal
z = normal
if abs(z[0]) < 1e-8:
x = np.array([0, -normal[2], normal[1]])
else:
x = np.array([-normal[1], normal[0], 0])
x /= np.linalg.norm(x)
R[:, 0] = x
R[:, 1] = np.cross(z, x)
X = np.eye(4)
Rz = RollPitchYaw(np.pi/2, 0, 0).ToRotationMatrix().matrix()
X[:3, :3] = R.dot(Rz)
if center is None:
X[:3, 3] = d * normal
else:
X[:3, 3] = center
X_normal = X.copy()
X_normal[:3, :3] = R
material = meshcat.geometry.MeshLambertMaterial(
color=color, opacity=opacity)
vis[prefix][name]["plane"].set_object(
meshcat.geometry.Cylinder(thickness, radius), material)
normal_vertices = np.array([[0, 0, 0], [0, 0, radius]]).astype(float)
vis[prefix][name]["normal"].set_object(
meshcat.geometry.Line(meshcat.geometry.PointsGeometry(normal_vertices.T)))
vis[prefix][name]["plane"].set_transform(X)
vis[prefix][name]["normal"].set_transform(X_normal)
def generate_color_mat(color_vec, shape):
    """Broadcast an RGB triple into a (3, N) float32 color matrix, where
    N = shape[1] (one color column per point in the cloud)."""
    column = np.array(color_vec).astype(np.float32).reshape(3, 1)
    return np.tile(column, (1, shape[1]))
# Connect a visualizer to the meshcat server started above.
vis = meshcat.Visualizer(zmq_url)
# vis = meshcat.Visualizer(zmq_url)
def visualize_point_clouds(pc_A, vis=None):
    """Render a 3xN point cloud in meshcat as red points; returns the visualizer."""
    if vis is None:
        vis = meshcat.Visualizer(zmq_url)
    vis["/Background"].set_property('visible', False)
    #vis["/Cameras/default/"].set_transform(tf.translation_matrix([0, 0, 1]))
    vis["/Cameras/default/rotated/<object>"].set_property("zoom", 10.5)
    vis["red_bunny"].set_object(g.PointCloud(pc_A, generate_color_mat([1, 0, 0], pc_A.shape), size=0.01))
    return vis
# + [markdown] colab_type="text" id="Bva0aj0GlDSI"
# # Problem Description
# In the lecture, we learned about the RANSAC algorithm. In this exercise, you will implement the RANSAC algorithm to separate the Stanford bunny from its environment!
#
# **These are the main steps of the exercise:**
# 1. Implement the `ransac` method.
# 2. Implement the `remove_plane` method to remove the points that belong to the planar surface.
#
# Let's first visualize the point clouds of Stanford bunny in meshcat!
# + colab={} colab_type="code" id="XtPLYaZhlDSJ"
# Render the bunny-plus-tabletop scene in meshcat.
vis = visualize_point_clouds(bunny_w_plane, vis)
# + [markdown] colab_type="text" id="zF54ocy-lDSR"
# You should notice that now there is a planar surface underneath the bunny. You may assume the bunny is currently placed on a table, where the planar surface is the tabletop. In this exercise, your objective is to remove the planar surface.
# + [markdown] colab_type="text" id="ki2f7sxZlDSS"
# A straightforward way to achieve a better fit is to remove the planar surface underneath the bunny. To do so, we provide you a function to fit a planar surface.
#
# Recall that a plane equation is of the form
# $$a x + b y + c z + d = 0$$
# where $[a,b,c]^T$ is a vector normal to the plane and (if it's a unit normal) $d$ is the negative of the distance from the origin to the plane in the direction of the normal. We'll represent a plane by the vector $[a,b,c,d]$.
#
# The fitted planes are shown as translucent disks of radius $r$ centered at the points. The gray lines represent the planes' normals.
# + colab={} colab_type="code" id="g2tMdp5PlDSS"
# Naive fit over the whole scene: the bunny's points bias the plane upward.
plane_equation = fit_plane(bunny_w_plane.T)
print(plane_equation)
DrawFacet(vis, plane_equation, 'naive_plane', center=[0,0,-plane_equation[-1]], thickness=0.005, radius=0.1)
# + [markdown] colab_type="text" id="HQs47D9SlDSX"
# You should notice that the planar surface cannot be fitted exactly either. This is because it takes account of all points in the scene to fit the plane. Since a significant portion of the point cloud belongs to the bunny, the fitted plane is noticeably elevated above the ground.
#
# To improve the result of the fitted plane, you will use RANSAC!
# + [markdown] colab_type="text" id="8VB5_NrqlDSX"
# ## RANSAC
# With the presence of outliers (bunny), we can use RANSAC to get more reliable estimates. The idea is to fit a plane using many random choices of a minimal set of points (3), fit a plane for each one, count how many points are within a distance tolerance to that plane (the inliers), and return the estimate with the most inliers.
#
# **Complete the function `ransac`. It takes a data matrix, a tolerance, a value of iterations, and a model regressor. It returns an equation constructed by the model regressor and a count of inliers.**
# + colab={} colab_type="code" id="8PZI0rU_lDSY"
def ransac(point_cloud, model_fit_func, tolerance=1e-3, max_iterations=500):
    '''
    Fit a plane with RANSAC: repeatedly fit a model to a random minimal
    sample (3 points), count inliers within `tolerance` of that model,
    and keep the model with the most inliers.

    Args:
        point_cloud: (N, 3) numpy array
        model_fit_func: callable mapping an (M, 3) array to a (4,) plane
            equation [a, b, c, d]
        tolerance: max point-to-plane distance for an inlier (float)
        max_iterations: number of random samples to try (a (small) integer)
    Returns:
        (best inlier count, (4,) numpy array plane equation)
    '''
    best_ic = 0  # inlier count
    best_model = np.ones(4)  # plane equation ((4,) array)
    num_points = point_cloud.shape[0]
    for _ in range(max_iterations):
        # Minimal sample: 3 points determine a plane.
        sample = point_cloud[np.random.choice(num_points, 3, replace=False)]
        candidate = model_fit_func(sample)
        normal = candidate[:3]
        norm = np.linalg.norm(normal)
        if norm == 0:
            continue  # degenerate (collinear) sample
        # Point-to-plane distances |ax + by + cz + d| / ||(a, b, c)||.
        distances = np.abs(point_cloud.dot(normal) + candidate[3]) / norm
        ic = np.count_nonzero(distances < tolerance)
        if ic > best_ic:
            best_ic = ic
            best_model = candidate
    return best_ic, best_model
# + [markdown] colab_type="text" id="ubTmMUnelDSi"
# Now you should have a lot better estimate of the planar surface with the use of RANSAC! Let's visualize the plane now!
# + colab={} colab_type="code" id="OST0NACZlDSi"
# Fit the tabletop with RANSAC; the bunny points are treated as outliers.
inlier_count, ransac_plane = ransac(bunny_w_plane.T, fit_plane, 0.001, 500)
print(ransac_plane)
DrawFacet(vis, ransac_plane, 'ransac_plane', center=[0,0,-ransac_plane[-1]], thickness=0.005, radius=0.1)
# + [markdown] colab_type="text" id="IjB0y8hAlDSm"
# ## Remove Planar Surface
#
# Now all you need to do is to remove the points that belong to the planar surface. You may do so by rejecting all points that are
# \begin{equation}
# || a x + b y + c z + d || > tol
# \end{equation}
#
# Note that since you are fitting a plane, the bunny in this case is the "outlier". Your job, however, is to keep the bunny and remove the planar surface.
#
# **Complete the function below to remove the points that belong to the planar surface**.
# + colab={} colab_type="code" id="M7Bt0qYqlDSn"
def remove_plane(point_cloud, ransac, tol=1e-4):
    """
    Remove the points that lie on the dominant plane of the point cloud.

    The plane is estimated with RANSAC; every point whose distance to it,
    |a x + b y + c z + d|, is at most `tol` is rejected (the planar
    surface), and the remaining points (the bunny) are returned.

    Args:
        point_cloud: Nx3 numpy array of points
        ransac: RANSAC routine with the signature
            ransac(point_cloud, model_fit_func, tolerance, max_iterations)
            returning (inlier_count, plane_equation)
        tol: max point-to-plane distance for a point to be removed
    Returns:
        point_cloud_wo_plane: Mx3 numpy array of the off-plane points
    """
    def _fit_plane(xyzs):
        # SVD plane fit: the normal is the direction of least variance.
        center = np.mean(xyzs, axis=0)
        _, _, vt = np.linalg.svd(xyzs - center)
        normal = vt[-1]
        return np.hstack([normal, -center.dot(normal)])

    _, plane_equation = ransac(point_cloud, _fit_plane, tol, 500)
    normal = plane_equation[:3]
    distances = np.abs(point_cloud.dot(normal) + plane_equation[3])
    distances = distances / np.linalg.norm(normal)
    # Keep only points farther than `tol` from the fitted plane.
    point_cloud_wo_plane = point_cloud[distances > tol]
    return point_cloud_wo_plane
# + colab={} colab_type="code" id="oizYYZ1KlDSw"
# Strip the tabletop from the scene and re-render only the bunny.
bunny_wo_plane = remove_plane(bunny_w_plane.T, ransac)
vis = visualize_point_clouds(bunny_wo_plane.T, vis)
# + [markdown] colab_type="text" id="MwE8yNg58VQN"
# ## How will this notebook be Graded?##
#
# If you are enrolled in the class, this notebook will be graded using [Gradescope](https://www.gradescope.com). You should have gotten the enrollment code in our announcement on Piazza.
#
# For submission of this assignment, you must do two things.
# - Download and submit the notebook `ransac.ipynb` to Gradescope's notebook submission section, along with your notebook for the other problems.
# - Copy and Paste your answer to the kinematic singularity problem to Gradescope's written submission section.
#
# We will evaluate the local functions in the notebook to see if the function behaves as we have expected. For this exercise, the rubric is as follows:
# - [4 pts] `ransac` must be implemented correctly.
# - [2 pts] `remove_plane` must be implemented correctly.
# + colab={} colab_type="code" id="xj5nAh4g8VQO"
# Run the course autograder locally against the implementations above.
from manipulation.exercises.pose.test_ransac import TestRANSAC
from manipulation.exercises.grader import Grader
Grader.grade_output([TestRANSAC], [locals()], 'results.json')
Grader.print_test_results('results.json')
| exercises/pose/ransac.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# language: python
# name: python3
# ---
# <img align="left" src="https://lever-client-logos.s3.amazonaws.com/864372b1-534c-480e-acd5-9711f850815c-1524247202159.png" width=200>
# <br></br>
# <br></br>
#
# ## *Data Science Unit 4 Sprint 3 Assignment 1*
#
# # Recurrent Neural Networks and Long Short Term Memory (LSTM)
#
# 
#
# It is said that [infinite monkeys typing for an infinite amount of time](https://en.wikipedia.org/wiki/Infinite_monkey_theorem) will eventually type, among other things, the complete works of William Shakespeare. Let's see if we can get there a bit faster, with the power of Recurrent Neural Networks and LSTM.
#
# This text file contains the complete works of Shakespeare: https://www.gutenberg.org/files/100/100-0.txt
#
# Use it as training data for an RNN - you can keep it simple and train character level, and that is suggested as an initial approach.
#
# Then, use that trained RNN to generate Shakespearean-ish text. Your goal - a function that can take, as an argument, the size of text (e.g. number of characters or lines) to generate, and returns generated text of that size.
#
# Note - Shakespeare wrote an awful lot. It's OK, especially initially, to sample/use smaller data and parameters, so you can have a tighter feedback loop when you're trying to get things running. Then, once you've got a proof of concept - start pushing it more!
from tensorflow.keras.callbacks import LambdaCallback
from tensorflow.keras.models import Sequential
from tensorflow.keras.layers import Dense, LSTM
from tensorflow.keras.optimizers import RMSprop
# + colab={} colab_type="code" id="Ltj1je1fp5rO"
import sys
import numpy as np
def prepare_to_shakespeare(url, number_of_chars):
    """Download a text corpus and return it as a list of cleaned characters.

    Fetches `url`, keeps the first `number_of_chars` characters of the
    decoded text, and strips unwanted characters (BOM, stray mojibake
    bytes, line breaks, a few digits) from each single-character item.
    """
    import urllib.request
    req = urllib.request.Request(url)
    with urllib.request.urlopen(req) as response:
        data = response.read().decode("utf8")[:number_of_chars]
    # NOTE(review): str.strip takes a *set* of characters, not a regex, so
    # the '|' separators below are themselves stripped too — presumably
    # unintended; verify pipes never occur in the corpus before relying on it.
    data_stripped = [item.strip("\ufeff|\x80|\x99|\n|\r|ï|¿|#|»|9|8|0|â|ï") for item in data]
    return data_stripped
# -
# Apply our function
# Download the first 100k characters of the complete works of Shakespeare.
works = prepare_to_shakespeare('https://www.gutenberg.org/files/100/100-0.txt', 100000)
type(works)
# Turn works into a long string
text = ''.join(works)
type(text), text[:1000]
''' text is a long string. We just want the unique characters
as a list'''
unique_chars = list(set(text))
print(unique_chars)
# Assign each unique character to a number. Easy way is by enumerating
# (char->id for encoding, id->char for decoding generated text).
char_int = {c:i for i,c in enumerate(unique_chars)}
int_char = {i:c for i,c in enumerate(unique_chars)}
# +
## Create Sequence Data
# Scan text variable by 40 character chunks to create first sequence
maxlen = 40  # input window length (characters)
step = 1     # stride between consecutive windows
# Encode each character of the text as its integer id.
# NOTE(review): the comprehension variable reuses the name `unique_chars`,
# shadowing the module-level list inside the comprehension — confusing but
# harmless in Python 3 (comprehension scope does not leak).
encoded = [char_int[unique_chars] for unique_chars in text]
batch_size = 32
sequences = [] # Each element is 40 characters long
next_chars = [] # One element for each sequence
# Slide a maxlen-wide window over the encoded text; each window is an input
# sequence and the character immediately after it is the prediction target.
for i in range(0, len(encoded) - maxlen, step):
    sequences.append(encoded[i : i + maxlen])
    next_chars.append(encoded[i + maxlen])
print('sequences:', len(sequences))
# +
# Specify x & y
# One-hot encode: x has shape (num_sequences, maxlen, vocab) and
# y has shape (num_sequences, vocab).
# NOTE: the deprecated np.bool alias was removed in NumPy 1.24;
# the builtin bool dtype is the supported equivalent.
x = np.zeros((len(sequences), maxlen, len(unique_chars)), dtype=bool)
y = np.zeros((len(sequences), len(unique_chars)), dtype=bool)
for i, sequence in enumerate(sequences):
    for t, char in enumerate(sequence):
        x[i,t,char] = 1
    y[i, next_chars[i]] = 1
x.shape, y.shape
# -
# build the model: a single LSTM
model = Sequential()
# the input_shape of a singular observation: (maxlen timesteps, vocab size)
# 40 chars, so multiclass, therefore softmax over the character vocabulary
model.add(LSTM(128, input_shape=(maxlen, len(unique_chars))))
model.add(Dense(len(unique_chars), activation='softmax'))
# Categorical cross-entropy matches the one-hot targets built above.
model.compile(loss='categorical_crossentropy', optimizer='adam')
# Sample a character index from a predicted probability array.
def sample(preds, temperature=1.0):
    # Temperature reshapes the distribution: values < 1 sharpen it (more
    # greedy), values > 1 flatten it (more random); 1.0 leaves it unchanged.
    logits = np.log(np.asarray(preds).astype('float64')) / temperature
    exp_preds = np.exp(logits)
    reweighted = exp_preds / np.sum(exp_preds)
    # Draw a single multinomial sample and return the chosen index.
    return np.argmax(np.random.multinomial(1, reweighted, 1))
# +
def on_epoch_end(epoch, _):
    # Function invoked at end of each epoch. Prints generated text.
    # Seeds generation with a random 40-char slice of the corpus, then
    # autoregressively samples 400 characters at several temperatures.
    print()
    print('----- Generating text after Epoch: %d' % epoch)
    start_index = np.random.randint(0, len(text) - maxlen - 1)
    for diversity in [0.2, 0.5, 1.0, 1.2]:
        print('----- diversity:', diversity)
        generated = ''
        sentence = text[start_index: start_index + maxlen]
        generated += sentence
        print('----- Generating with seed: "' + sentence + '"')
        sys.stdout.write(generated)
        for i in range(400):
            # One-hot encode the current window as a single-example batch.
            x_pred = np.zeros((1, maxlen, len(unique_chars)))
            for t, char in enumerate(sentence):
                x_pred[0, t, char_int[char]] = 1.
            preds = model.predict(x_pred, verbose=0)[0]
            next_index = sample(preds, diversity)
            next_char = int_char[next_index]
            # Slide the window forward by one generated character.
            sentence = sentence[1:] + next_char
            sys.stdout.write(next_char)
            sys.stdout.flush()
        print()

# Hook the generator into Keras training so samples print after each epoch.
print_callback = LambdaCallback(on_epoch_end=on_epoch_end)
# -
# Train for 5 epochs; the callback prints generated samples after each epoch.
model.fit(x, y,
          batch_size=128,
          epochs=5,
          callbacks=[print_callback])
| module1-rnn-and-lstm/Assignment1.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# language: python
# name: python3
# ---
import sys, os
sys.path.append(os.pardir)
import numpy as np
from dataset.mnist import load_mnist
from PIL import Image
# +
def img_show(img):
    """Display a (H, W) uint8 array as an image in the OS image viewer."""
    pil_img = Image.fromarray(np.uint8(img))
    pil_img.show()

# Load MNIST as flat 784-vectors with raw 0-255 pixel values.
(x_train, t_train), (x_test, t_test) = load_mnist(flatten=True, normalize=False)
# -
# Show the first training example and its label.
img = x_train[0]
label = t_train[0]
print(label)
print(img.shape)  # (784,) — flattened
img = img.reshape(28, 28)  # restore the 2-D image shape for display
print(img.shape)
img_show(img)
def get_data():
    """Return the normalized, flattened MNIST test images and labels."""
    (x_train, t_train), (x_test, t_test) = load_mnist(normalize=True, flatten=True, one_hot_label=False)
    return x_test, t_test
def init_network():
    """Load the pre-trained 3-layer network parameters (weights 'W1'-'W3'
    and biases 'b1'-'b3') from "mnist.pkl" in the current directory."""
    import pickle  # was missing: pickle is never imported in this notebook
    with open("mnist.pkl", 'rb') as f:
        network = pickle.load(f)
    return network
def _sigmoid(x):
    """Elementwise logistic sigmoid."""
    return 1.0 / (1.0 + np.exp(-x))


def _softmax(x):
    """Numerically stable softmax (shift by the max before exponentiating)."""
    shifted = x - np.max(x)
    exp_x = np.exp(shifted)
    return exp_x / np.sum(exp_x)


def predict(network, x):
    """Forward pass of the 3-layer MLP classifier.

    Args:
        network: dict with weight matrices 'W1'..'W3' and biases 'b1'..'b3'.
        x: input vector (flattened 28x28 image).

    Returns:
        Softmax probability vector over the output classes.
    """
    # sigmoid/softmax were referenced but never defined or imported in this
    # notebook (NameError at runtime); private helpers above supply them.
    W1, W2, W3 = network['W1'], network['W2'], network['W3']
    b1, b2, b3 = network['b1'], network['b2'], network['b3']
    a1 = np.dot(x, W1) + b1
    z1 = _sigmoid(a1)
    a2 = np.dot(z1, W2) + b2
    z2 = _sigmoid(a2)
    a3 = np.dot(z2, W3) + b3
    y = _softmax(a3)
    return y
# +
# Evaluate classification accuracy of the pre-trained network on the test set.
x, t = get_data()
network = init_network()
accuracy_cnt = 0
for i in range(len(x)):
    y = predict(network, x[i])
    p = np.argmax(y)  # predicted class = index of the highest probability
    if p == t[i]:
        accuracy_cnt += 1
print("Accuracy: " + str(float(accuracy_cnt / len(x))))
# -
| ch03/Untitled.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# language: python
# name: python3
# ---
# # Урок 2
#
# ## Дискретные случайные величины. Закон распределения вероятностей. Биномиальный закон распределения. Распределение Пуассона
#
# ### Случайные величины
#
# __Случайная величина__ — величина, которая в результате опыта принимает некоторое значение, неизвестное заранее.
#
# __Дискретные__ случайные величины принимают конечное или счётное множество значений (например, _натуральные_ или _рациональные_ числа). __Непрерывные__ случайные величины принимают несчётное множество значений (например, _вещественные_ числа).
#
# Примеры дискретных случайных величин:
#
# 1. Сумма очков при $100$-кратном подбрасывании игрального кубика.
# 2. Число метеоритов, упавших на Землю за год.
# 3. Количество машин, которые успевают проехать через данный светофор за один цикл.
#
# __Пример 1__
#
# Смоделируем стократное подбрасывание монетки. Рассмотрим случайную величину, равную числу выпаданий орла.
# + pycharm={"is_executing": false}
import numpy as np
# + pycharm={"is_executing": false}
# Heads count over 100 fair-coin flips, simulated by summing 0/1 draws.
np.random.randint(0, 2, size=100).sum()
# + colab={} colab_type="code" id="KPHPGWZyzdn5" outputId="1a5eb150-bf24-4214-925e-b4a1f7c8fddf" pycharm={"is_executing": false}
# The same experiment drawn directly from the binomial distribution.
np.random.binomial(n=100, p=0.5)
# -
# Можно повторить этот эксперимент большее количество раз (например, $50$ раз) и посмотреть, какие получаются значения.
# + pycharm={"is_executing": false}
print(np.random.binomial(n=100, p=0.5, size=50))
# -
# Пусть $X$ — дискретная случайная величина. __Закон распределения__ этой случайной величины — это соответствие между значениями, которые принимает эта величина, и вероятностями, с которыми она их принимает.
#
# __Пример 2__
#
# Пусть $X$ — сумма значений двух подбрасываемых игральных кубиков. Вот её закон распределения:
#
# <table border="3">
# <tr>
# <th>$x$</th>
# <td>2</td>
# <td>3</td>
# <td>4</td>
# <td>5</td>
# <td>6</td>
# <td>7</td>
# <td>8</td>
# <td>9</td>
# <td>10</td>
# <td>11</td>
# <td>12</td>
# </tr>
# <tr>
# <th>$P(X=x)$</th>
# <td>0.028</td>
# <td>0.056</td>
# <td>0.083</td>
# <td>0.111</td>
# <td>0.139</td>
# <td>0.167</td>
# <td>0.139</td>
# <td>0.111</td>
# <td>0.083</td>
# <td>0.056</td>
# <td>0.028</td>
# </tr>
# </table>
#
# __Пример 3__
#
# В урне $8$ шаров, из которых $5$ белых, остальные — чёрные. Наудачу вынимают $3$ шара. Найти закон распределения количества белых шаров в выборке.
#
# Принимаемые значения данной случайной величины будут: $x_1 = 0$, $x_2 = 1$, $x_3 = 2$, $x_4 = 3$.
#
# Посчитаем вероятность того, что $X = 0$, т.е. что среди трёх вытянутых шаров нет ни одного белого. Значит, вытянуты в точности все чёрные шары. Такое возможно лишь в одном случае.
#
# Кстати, из скольки? Общее число способов вытянуть $3$ шара из урны с $8$ шарами — число сочетаний:
# $C_8^3 = \dfrac{8!}{3! \cdot 5!} = \dfrac{8 \cdot 7 \cdot 6}{6} = 56.$
#
# Итак, $P(X = 0) = \dfrac{1}{56}$.
#
# Событие $X = 1$ означает, что среди вытянутых шаров один белый и два чёрных: $P(X = 1) = \dfrac{C_5^1 \cdot C_3^2}{56} = \dfrac{15}{56}$. Аналогично,
# $P(X = 2) = \dfrac{C_5^2 \cdot C_3^1}{56} = \dfrac{30}{56}, \:\:$
# $P(X = 3) = \dfrac{C_5^3}{56} = \dfrac{10}{56}.$
#
# Заметим, что сумма этих четырёх вероятностей равна $1$.
#
# Пусть $X$, $Y$ — дискретные случайные величины, причём $X$ принимает значения $x_j$ с вероятностями $P(X = x_j)$, $j = 1,2,\dots$, а $Y$ принимает значения $y_k$ с вероятностями $P(Y = y_k)$, $k = 1,2,\dots$.
#
# * Их __сумма__ $Z = X + Y$ — случайная величина, которая принимает значения $z_{jk} = x_j + y_k$ с вероятностями $P(X = x_j, Y = y_k)$.
# * Аналогично считаются __разность__ и __произведение__ случайных величин, надо лишь заменить соответствующие символы операций.
# * __Квадрат__ $Z = X^2$ — случайная величина, которая принимает значения $z_j = x_j^2$ по тому же закону распределения, что и $X$.
#
# __Пример 4__
#
# Разберёмся, почему квадрат случайной величины считается именно так.
#
# По определению произведения случайных величин, квадрат величины $X$ должен принимать все возможные значения $x_j x_k$ с вероятностями $P(X = x_j, X = x_k)$.
#
# Но случайная величина не может принимать несколько значений одновременно. Отсюда следует, что если $j \neq k$, то $P(X = x_j, X = x_k) = 0$.
#
# Поэтому остаются значения $x_j^2$ с вероятностями $P(X = x_j, X = x_j) = P(X = x_j)$.
#
# __Математическим ожиданием__ случайной величины $X$ называется среднее значение величины $X$ при стремлении количества испытаний к бесконечности. Обозначается $M(X)$.
#
# Если $X$ — дискретная случайная величина, принимающая значения $x_j$ с вероятностями $p_j = P(X = x_j)$,
# $j = 1,2, \dots$, то
# $$M(X) = \displaystyle\sum_j p_j x_j = p_1 x_1 + p_2 x_2 + \dots$$
#
# __Пример 5__
#
# Посчитаем математическое ожидание случайной величины из примера 2.
# + pycharm={"is_executing": false}
x_values = np.arange(2, 13)  # possible sums of two dice: 2..12
print(x_values)
# + pycharm={"is_executing": false}
# Outcome counts for each sum out of the 36 equally likely dice pairs.
x_probabilities = np.array([1, 2, 3, 4, 5, 6, 5, 4, 3, 2, 1]) / 36
print(x_probabilities)
# -
# Чтобы посчитать математическое ожидание, нужно поэлементно перемножить значения этих двух массивов (первый с первым, второй со вторым и т.д.), затем сложить результаты. С помощью библиотеки `numpy` это можно сделать так:
# + pycharm={"is_executing": false}
m = x_values.dot(x_probabilities)  # expected value: sum of value * probability
print(m)
# -
# Подробнее про умножение матриц [здесь](https://ru.wikipedia.org/wiki/%D0%A3%D0%BC%D0%BD%D0%BE%D0%B6%D0%B5%D0%BD%D0%B8%D0%B5_%D0%BC%D0%B0%D1%82%D1%80%D0%B8%D1%86) и [здесь](https://docs.scipy.org/doc/numpy/reference/generated/numpy.dot.html).
#
# __Дисперсией__ случайной величины $X$ называется математическое ожидание квадрата отклонения случайной величины от её математического ожидания:
# $$D(X) = M \left( (X - M(X))^2 \right).$$
#
# Дисперсия является мерой разброса случайной величины относительно её среднего значения.
#
# __Пример 6__
#
# Посчитаем дисперсию случайной величины из примера 2. Её математическое ожидание мы уже считали, оно равно:
# + pycharm={"is_executing": false}
m = 7  # M(X) of the two-dice sum, computed in Example 5
m
# -
# Далее, определим случайную величину $Y = X - M(X)$. Она будет принимать значения:
# + pycharm={"is_executing": false}
y_values = x_values - m  # deviations from the mean
print(y_values)
# -
# Отметим, что распределения вероятностей у величины $Y$ такое же, что и у $X$.
#
# Далее, возведём эту величину в квадрат, тем самым получим новую случайную величину $Z$ со всё еще таким же распределением вероятностей:
# + pycharm={"is_executing": false}
z_values = y_values ** 2  # squared deviations
print(z_values)
# -
# Итак, дисперсия случайной величины $X$ равна:
# + pycharm={"is_executing": false}
d = z_values.dot(x_probabilities)  # variance = E[(X - M(X))^2]
print(d)
# -
# ### Законы распределения случайных величин
#
# Пусть имеется некоторое событие $A$, которое наступает с вероятностью $p$. __Биномиальный закон__ описывает распределение случайной величины $X$, задающей число наступлений события $A$ в ходе проведения $n$ независимых опытов.
#
# Биномиальный закон распределения описывается __формулой Бернулли__:
# $$P(X = k) = C_n^k p^k (1 - p)^{n - k}$$
#
# __Пример 7__
#
# Доказать формулу Бернулли довольно легко, используя уже известные нам правила комбинаторики. То, что событие $A$ в результате проведения $n$ независимых опытов наступило $k$ раз означает, что $n - k$ раз наступило событие $\overline{A}$. Вероятность наступления такого события равна $1 - p$.
#
# При этом наступить событие $A$ могло в любые $k$ «моментов» из $n$, поэтому число вариантов таких «моментов» равно $C_n^k$. Наконец, эксперименты проводились независимо, поэтому итоговая вероятность равна $C_n^k p^k (1 - p)^{n - k}$.
#
# Математическое ожидание и дисперсию для биномиально распределённой дискретной величины можно вычислить по формулам:
# $$M(X) = np, \: D(X) = np(1 - p).$$
#
# __Пример 8__
#
# Посчитаем распределение числа выпаданий орла при трёхкратном подбрасывании монеты. Возможные значения такой случайной величины: $x_1 = 0$, $x_2 = 1$, $x_3 = 2$, $x_4 = 3$.
#
# Посчитаем вероятность каждого значения:
# $$P(X = 0) = C_n^k p^k (1 - p)^{n - k} = C_3^0 \cdot 0.5^0 \cdot 0.5^3 = \frac{3!}{0! \: (3-0)!} \cdot 0.5^0 \cdot 0.5^3 = 1 \cdot 1 \cdot 0.125 = 0.125.$$
# $$P(X = 1) = C_3^1 \cdot 0.5^1 \cdot 0.5^2 = \frac{3!}{1! \: 2!} \cdot 0.5^1 \cdot 0.5^2 = 3 \cdot 0.5 \cdot 0.25 = 0.375.$$
# $$P(X = 2) = C_3^2 \cdot 0.5^2 \cdot 0.5^1 = \frac{3!}{2! \: 1!} \cdot 0.5^2 \cdot 0.5^1 = 0.375.$$
# $$P(X = 3) = C_3^3 \cdot 0.5^3 \cdot 0.5^0 = \frac{3!}{3! \: 0!} \cdot 0.5^3 \cdot 0.5^0 = 0.125.$$
#
# Итак, закон распределения такой случайной величины:
#
# <table border="3">
# <tr>
# <th>$x$</th>
# <td>0</td>
# <td>1</td>
# <td>2</td>
# <td>3</td>
# </tr>
# <tr>
# <th>$P(X = x)$</th>
# <td>0.125</td>
# <td>0.375</td>
# <td>0.375</td>
# <td>0.125</td>
# </tr>
# </table>
#
# __Пример 9__
#
# Посчитаем математическое ожидание распределения из предыдущего примера. Поскольку $n = 3$ и $p = 0.5$, получаем:
# $$M(X) = 3 \cdot 0.5 = 1.5.$$
#
# Результат можно понимать так: в среднем при трёхкратном подбрасывании монеты орёл выпадает $1.5$ раза.
#
# Дисперсия такой величины:
# $$D(X) = 3 \cdot 0.5 \cdot (1 - 0.5) = 0.75.$$
#
# __Распределение Пуассона__
#
# Допустим теперь, что имеется некоторый поток событий, такой, что в среднем за единицу времени событие наступает $\lambda$ раз (т.е. с _интенсивностью_ $\lambda$). Тогда случайная величина $X$, равная количеству наступлений события за единицу времени, имеет __распределение Пуассона__ с параметром $\lambda$.
#
# Случайная величина $X$ принимает значения $0, 1, 2, \dots$ (счётное множество значений), а соответствующие вероятности выражаются __формулой Пуассона__:
# $$P(X = k) = \dfrac{\lambda^k e^{-\lambda}}{k!}$$
#
# Здесь $\lambda$ — положительное вещественное число.
#
# Как мы уже отметили, распределение Пуассона описывает счётчики событий, наступивших за единицу времени. Например, распределение Пуассона описывает:
# * число бракованных деталей в партии фиксированного размера,
# * число опечаток в тексте фиксированного размера,
# * число автобусов, проехавших за фиксированное время мимо автобусной остановки.
#
# Математическое ожидание и дисперсия распределения Пуассона равны:
# $$M(X) = D(X) = \lambda$$
#
# Распределение Пуассона является предельным случаем биномиального. Если в последнем имеется очень большое число экспериментов ($n \rightarrow \infty$), а вероятность наступления события $A$ достаточно мала (можно считать, что $p \approx \lambda/n$), то такое распределение становится очень похоже на распределение Пуассона с параметром $\lambda = np$.
#
# __Пример 10__
#
# В среднем за час мимо автобусной остановки проезжают 30 автобусов. Какова вероятность, что за час мимо остановки проедут: а) 30 автобусов? б) не более 15 автобусов? в) более 50 автобусов?
#
# Для решения первого пункта достаточно прямо применить формулу Пуассона:
# $$P(X = 30) = \dfrac{30^{30} e^{-30}}{30!}$$
# + pycharm={"is_executing": false}
def poisson_proba(k: int, lambda_: float) -> float:
"""Формула Пуассона.
"""
return (lambda_ ** k) * (np.exp(-lambda_)) / np.math.factorial(k)
# + pycharm={"is_executing": false}
lambda_ = 30  # average number of buses per hour
# + colab={} colab_type="code" id="91FItk5GzdqS" outputId="8a82347b-b529-4f62-d0c1-a95ca84dedbb" pycharm={"is_executing": false}
poisson_proba(k=30, lambda_=lambda_)  # P(X = 30)
# -
# Для решения второго пункта нужно сложить несколько вероятностей. Вероятность получить не более 15 автобусов предполагает получение от 0 до 15 автобусов:
# $$P(X \leq 15) = \displaystyle\sum_{i = 0}^{15} \dfrac{30^{i} e^{-30}}{i!}$$
# + pycharm={"is_executing": false}
sum(poisson_proba(k=i, lambda_=lambda_) for i in range(16))  # P(X <= 15): k = 0..15
# -
# Последний пункт наиболее хитрый. Вероятность получить более 50 автобусов предполагает получение от 51... до скольки? В принципе число автобусов, проезжающих мимо автобусной остановки за час, не ограничено. Так что суммировать придётся до бесконечности, что невозможно.
#
# Пойдём другим способом. Мы знаем, что сумма вероятностей случайной величины всегда равна $1$. Значит, вероятность получить более 50 автобусов равна:
# $$P(X > 50) = 1 - P(X \leq 50)$$
# + pycharm={"is_executing": false}
# "More than 50" means X > 50, i.e. 1 - P(X <= 50): the sum must include
# k = 50, so it runs over 0..50 (range(51)), not 0..49.
1 - sum(poisson_proba(k=i, lambda_=lambda_) for i in range(51))
# -
# __Другие дискретные распределения__
#
# * __Дискретное равномерное__: случайная величина $X$ принимает $n$ различных значений с одинаковой вероятностью $1 / n$. Не путать с _непрерывным равномерным_.
# * __Геометрическое__: число независимых испытаний до первого наступления события $A$, где событие $A$ наступает в однократном испытании с вероятностью $p$. В этом случае
# $$P(X = k) = (1 - p)^k \cdot p$$
# ### Упражнения
# __Задача 1__
#
# Контрольная работа состоит из пяти вопросов. На каждый вопрос приведено четыре варианта ответа, один из которых правильный. Составьте закон распределения числа правильных ответов при простом угадывании. Найдите `M(X)`, `D(X)`.
#
# __Задача 2__
#
# Пользователь получает в среднем 10 писем со спамом на свой почтовый ящик за сутки. Найти число `N`, такое, что с вероятностью 0.95 пользователь получит не более `N` писем со спамом за текущий день.
#
# __Задача 3__
#
# Производятся выстрелы по мишени. Вероятность попадания в мишень при одном выстреле равна 0.01. Сколько выстрелов нужно сделать чтобы быть уверенным с вероятностью 0.9, что хотя бы 6 раз будет совершено попадание?
#
# _Подсказка_. Здесь предстоит немножко покодить.
# + pycharm={"is_executing": false}
| practice2.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# language: python
# name: python3
# ---
# # Inferential statistics:
# - ## Refers to methods that rely on _probability theory_ and _distributions_
# - ## Often used to _predict population values_ based on sample data
#
# - # What is a _distribution_?
# - ## A distribution is a function that shows the possible values for a variable and the frequency of their occurrence.
# - ## The distribution of an event consists not only of the input values that can be observed but is made up of all the possible values
# - ## The distribution is defined by the underlying probabilities, _not_ its graphical/visual representation
# - ## In inferences, _continuous_ distributions are more common than _discrete_ distributions
# - ## In statistics, generally a _"distribution"_ refers to a _"probability distribution"_
# - ## Distribution = _Probability distribution_
# - ## _Normal_ (Gaussian) distribution
# - ## $N$~$(\mu,\sigma^2)$
# - ### _Controlling for the mean_ moves the curve along the x-axis left/right
# - ### _Controlling for the standard deviation_ decreases/increases the spread and tightens/flattens the curve
# - ### Resembles a bell curve
# - ### Symmetrical
# - ### Mean = median = mode
# - ### Approximate a wide variety of random variables
# - ### Distributions of sample means with large enough sample sizes could be approximated to normal
# - ### All computable statistics are elegant
# - ### Decisions based on normal distribution insights have a good track record
#
# - ## _Standard normal distribution (z)_ is a particular case of the Normal distribution
# - ## Every normal distribution can be _'standardized'_
# - ## If you take a dataset, subtract its mean from each data point and then calculate the mean once again, you will get 0.
# - ## $N$~$(\mu,\sigma^2)$ -> standardizing function -> $N$~$(0,1)$
# - ## $StandardizedVariable = \frac{OrigVariable-Mean}{Stdev}$
# - ## $zScore = \frac{x-\mu}{\sigma}$
# - ## _Binomial_ distribution - skewed left/right
# - ## _Uniform_ distribution - values are all the same
#
#
# - # _Point estimates_
#
# - # _Confidence intervals_
#
# - # _The Central Limit Theorem_
# (One of the most important concepts in statistics)
# - ## Given a dataset:
# - ### No matter the distribution of the entire dataset,
# - ### The more samples you extract ($k\rightarrow\infty$),
# - ### The bigger the samples ($n\rightarrow\infty$),
# - ### The means of the samples you took from the entire dataset will approximate a normal distribution
#
#
# - # _The Standard error_
# (widely used and VERY important)
# - ## The _standard deviation_ of the _distribution_ formed by the _sample means_
# - ## $Stdev = \sqrt{\frac{\sigma^2}{n}} = \frac{\sigma}{\sqrt{n}}$
# - ## Standard error decreases $\frac{\sigma}{\sqrt{n}}\downarrow$ when sample size $n\uparrow$
# - ## Bigger samples give a better approximation of the population
| udemy/statistics-for-data-science-and-business-analysis/my_progress/notes/Section 6 - Introduction to Inferential Statistics.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# language: python
# name: python3
# ---
# +
# %load_ext autoreload
# %autoreload 2
import numpy as np
import os
import glob
import sys
sys.path.insert(0,'../../..')
from sed_endtoend.pcen.model import SMel_PCEN, concatenate_stft
from sed_endtoend.cnn.model import build_custom_cnn
from params import *
os.environ["CUDA_VISIBLE_DEVICES"]="1"  # pin computation to GPU 1
# files parameters
Nfiles = None  # None appears to mean "use all files" -- confirm against loader
resume = False
load_subset = Nfiles
# +
sequence_frames = 44  # number of frames per input sequence
model_mel = SMel_PCEN(mel_bands,sequence_frames,audio_win,audio_hop)
model_cnn = build_custom_cnn(n_freq_cnn=mel_bands, n_frames_cnn=sequence_frames,large_cnn=large_cnn)
# 513 presumably equals n_fft/2 + 1 STFT bins -- TODO confirm
model = concatenate_stft(sequence_frames,513,model_cnn,model_mel)
model.load_weights("../03_train_SMel_CNN/weights_best.hdf5")  # restore trained weights
model_mel.summary()
# Pull the learned parameters out of the mel/PCEN front-end layers.
filters = model_mel.layers[1].get_weights()[0]
print(filters.shape)
b = model_mel.layers[2].get_weights()[0]
print(b.shape)
# alpha/delta/r look like PCEN parameters -- verify layer order against SMel_PCEN
[alpha, delta, r] = model_mel.layers[3].get_weights()
print(alpha.shape)
print(delta.shape)
print(r.shape)
# Persist everything for the visualization notebook.
np.save('filters.npy',filters)
np.save('b.npy',b)
np.save('alpha.npy',alpha)
np.save('delta.npy', delta)
np.save('r.npy',r)
| exps/08_PCEN/04_visualize/extract_weights.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# language: python
# name: python3
# ---
# # Compute Velocity along Z
# > Compute Velocity along Z
#
# - toc: true
# - badges: true
# - comments: true
# - categories: [openFoam]
# - image: images/chart-preview.png
# +
import os
import glob
import numpy as np
from math import sqrt, log
import matplotlib.pyplot as plt
import matplotlib.patches as mpatches
from collections import OrderedDict
# Named dash patterns in matplotlib's (offset, on-off-sequence) linestyle format.
linestyles = OrderedDict(
    [('solid', (0, ())),
     ('loosely dotted', (0, (1, 10))),
     ('dotted', (0, (1, 5))),
     ('densely dotted', (0, (1, 1))),
     ('loosely dashed', (0, (5, 10))),
     ('dashed', (0, (5, 5))),
     ('densely dashed', (0, (5, 1))),
     ('loosely dashdotted', (0, (3, 10, 1, 10))),
     ('dashdotted', (0, (3, 5, 1, 5))),
     ('densely dashdotted', (0, (3, 1, 1, 1))),
     ('loosely dashdotdotted', (0, (3, 10, 1, 10, 1, 10))),
     ('dashdotdotted', (0, (3, 5, 1, 5, 1, 5))),
     ('densely dashdotdotted', (0, (3, 1, 1, 1, 1, 1)))])
def ustar(uref, zref, z0):
    """Friction velocity for a neutral ABL logarithmic wind profile."""
    log_term = log((zref + z0) / z0)
    return KARMAN * uref / log_term
def u_z(z, z0, zref, uref):
    """Log-law wind speed at heights `z`, returned as an (n, 1) column vector.

    NOTE(review): `ustar` uses log((zref + z0) / z0) while this profile uses
    log(z / z0); the two conventions differ slightly, so u(zref) is only
    approximately uref -- confirm which form is intended.
    """
    frac = ustar(uref, zref, z0)/ KARMAN
    _ = frac * np.log(z/ z0)
    return _.reshape(-1, 1)
# +
UREF = 3  # reference wind speed [m/s] at height ZREF
ZREF = 5  # reference height [m]
KARMAN = 0.4  # von Karman constant
Cmu = 0.09  # turbulence-model constant; appears unused in this notebook
# aerodynamic roughness lengths z0 [m] to compare
roughnessList = [0.001, 0.03,0.10, 0.25, 0.50, 1, 2]
Z = np.arange(1, 100, 2)  # heights above ground [m]
U_Z = np.zeros((Z.shape[0],len(roughnessList)))  # one speed profile per roughness
for i, Z0 in enumerate(roughnessList):
    U_Z[:, i] = u_z(Z, Z0, ZREF, UREF)[:, 0]
U_Z.shape, Z.shape
# -
linestyles.keys()
fig, (ax1) = plt.subplots(1, 1, figsize=(10,10))
ax1.grid(True, which = "major", axis = "both")
# one named dash style per roughness value so curves stay distinguishable in b/w
ln_styles = ['solid', 'loosely dotted', 'dotted', 'densely dotted', 'loosely dashed',\
             'dashed', 'densely dashed','loosely dashdotted', 'dashdotted', 'densely dashdotted',\
             'loosely dashdotdotted', 'dashdotdotted','densely dashdotdotted']
for j in range(U_Z.shape[1]):
    ax1.plot(U_Z[:, j], Z, label='z0 = '+str(roughnessList[j])+' m', color = "k", ls = linestyles[ln_styles[j]])
# add a red rectangle highlighting where the profiles go negative
# NOTE(review): the variable is named `ellipse` but is actually a Rectangle
patches = []
ellipse = mpatches.Rectangle((-2, 0), 2, 4,color = 'r')
#patches.append(ellipse)
ax1.add_patch(ellipse)
ax1.legend(loc='upper left', shadow=True)
ax1.set_ylabel('Height above ground [m]')
ax1.set_title("Wind speed [m/s]")
ax1.set_ylim((0,100))
ax1.set_xlim((-2,10))
ax1.annotate('Model breaks for high roughness', xy=(-1.0, 3.5), xytext=(-1.0, 15),
             arrowprops=dict(facecolor='red', edgecolor='red', shrink=0.05), color = 'red')
#plt.show()
plt.savefig("vel_Uz.png", dpi = 300)
| _notebooks/2021-03-04_vel_Uz.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# language: python
# name: python3
# ---
import nltk
# +
paragraph = """Thank you all so very much. Thank you to the Academy.
Thank you to all of you in this room. I have to congratulate
the other incredible nominees this year. The Revenant was
the product of the tireless efforts of an unbelievable cast
and crew. First off, to my brother in this endeavor, Mr. Tom
Hardy. Tom, your talent on screen can only be surpassed by
your friendship off screen … thank you for creating a t
ranscendent cinematic experience. Thank you to everybody at
Fox and New Regency … my entire team. I have to thank
everyone from the very onset of my career … To my parents;
none of this would be possible without you. And to my
friends, I love you dearly; you know who you are. And lastly,
I just want to say this: Making The Revenant was about
man's relationship to the natural world. A world that we
collectively felt in 2015 as the hottest year in recorded
history. Our production needed to move to the southern
tip of this planet just to be able to find snow. Climate
change is real, it is happening right now. It is the most
urgent threat facing our entire species, and we need to work
collectively together and stop procrastinating. We need to
support leaders around the world who do not speak for the
big polluters, but who speak for all of humanity, for the
indigenous people of the world, for the billions and
billions of underprivileged people out there who would be
most affected by this. For our children’s children, and
for those people out there whose voices have been drowned
out by the politics of greed. I thank you all for this
amazing award tonight. Let us not take this planet for
granted. I do not take tonight for granted. Thank you so very much."""
paragraph
# -
#first tokenize the paragraph into sentences, then preprocess (remove impurities)
import re
#regular expressions are used for the cleanup below
dataset = nltk.sent_tokenize(paragraph)
dataset
for i in range(len(dataset)):
    dataset[i] = dataset[i].lower() #convert everything to lower case
    dataset[i] = re.sub(r'\W',' ',dataset[i]) #replace every non-word character with a space
    dataset[i] = re.sub(r'\s+',' ',dataset[i]) #collapse one or more whitespace characters
    #into a single space
dataset
#now this data is ready to create the BOW model
# +
#Creating the histogram
#build a dictionary mapping each distinct word to the number of times it appears
word2count = {} #this is how you declare a dictionary in python
for data in dataset:
    words = nltk.word_tokenize(data) #tokenize every sentence into words
    for word in words: #nested loop
        if word not in word2count.keys():
            word2count[word] = 1 #first occurrence of this word in the corpus
        else:
            word2count[word] += 1 #word already seen: increase its count
#so in this way we get every distinct word mapped to its frequency across the whole corpus
print(word2count)
# -
#now we will filter the words down to the most frequent ones
import heapq
#heapq provides nlargest to pick the n most frequent words from the dictionary
freq_words = heapq.nlargest(100, word2count, key=word2count.get)
#returns the 100 most frequent words; the key parameter tells nlargest to rank
#each word by its count (word2count.get)
print(freq_words)
# +
#Now we will build the BOW model
X = [] #this will contain the whole BOW model
#in a BOW model each document is represented as a vector of 0's and 1's
for data in dataset:
    # Tokenize each document ONCE, instead of re-tokenizing inside the inner
    # loop (the original called nltk.word_tokenize per frequent word, i.e.
    # 100 times per document). A set also makes membership tests O(1).
    tokens = set(nltk.word_tokenize(data))
    # 1 if the frequent word occurs in this document, else 0
    vector = [1 if word in tokens else 0 for word in freq_words]
    X.append(vector)
#X is list of lists
print(X)
#here X contains 21 elements i.e., 21 documents and each of those elements is a list and the size of the list is 100.
# -
#Now convert the list of lists into a 2d numpy array for easy visualization
import numpy as np
#we will store the 2d array in the same variable i.e., X
X = np.asarray(X)
X
print(X)
| bow.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: 'Python 3.6.10 64-bit (''PythonDataNew'': conda)'
# name: python_defaultSpec_1604377063745
# ---
# +
# %matplotlib inline
#Import dependecies
import pandas as pd
import seaborn as sns
import numpy as np
import random
import matplotlib.pyplot as plt
import os
import requests
import json
# -
# import api_keys
from api_keys import weather_api_key,g_key
#load the world-cities reference data into a DataFrame
path = "Resources/data/worldcities.csv"
cities_data = pd.read_csv(path)
cities_data.head()
# + tags=[]
#Random choice of cities, stratified by latitude band
bins = np.arange(-60,90,10)
labels = ["<-50","-50:-41","-40:-31","-30:-21","-20:-11","-10:-0",
        "1:10","11:20","21:30","31:40","41:50","51:60","61:70","70>"]
cities_data["Cuts"] = pd.cut(cities_data["Latitude"],bins = bins, labels = labels, include_lowest = True)
#sample cities from every latitude band
cities_random = []
random.seed(15)
for cut in labels:
    grouped = cities_data[cities_data["Cuts"] == cut]
    band_cities = list(grouped["City"])
    if len(band_cities) <= 45:
        # small band: keep every city
        cities_random.extend(band_cities)
    else:
        # BUG FIX: random.sample raises ValueError when the requested sample
        # size exceeds the population (bands holding 46-49 cities); cap the
        # sample size at the band size so those bands are kept whole.
        cities_random.extend(random.sample(band_cities, min(50, len(band_cities))))
print(f"There are {len(cities_random)} random chosen cities")
# + tags=["outputPrepend"]
#use the OpenWeatherMap API to collect weather data for the chosen cities
import concurrent.futures
import time
#base url for the current-weather endpoint (imperial units)
base_url = f"http://api.openweathermap.org/data/2.5/weather?appid={weather_api_key}&units=imperial&q="
#dictionary accumulating one record per city, filled by the worker threads below
cities_dic = {}
print("Beginning Data Retrieval")
print("-"*15)
# define a function to retrieve the data
def download_data(city):
    """Fetch current weather for `city` and record it in `cities_dic`.

    Cities the API does not know (or responses missing the expected fields)
    are reported and skipped.
    """
    time.sleep(1)  # crude rate limiting for the API
    response = requests.get(base_url + str(city)).json()
    try:
        cities_dic[city] = response["id"],response["sys"]["country"],\
        response["name"],response["dt"],response["coord"]["lat"],\
        response["coord"]["lon"],response["clouds"]["all"],\
        response["main"]["humidity"],response["wind"]["speed"],\
        response["main"]["temp_max"]
        print(f"Processing Record of {city} \n")
    except (KeyError, TypeError):
        # A bare `except:` also swallows KeyboardInterrupt/SystemExit; only the
        # lookup errors raised by an error payload are expected here.
        print(f"{city} not found, Skipping .. \n")
# fan the downloads out over a thread pool to make retrieval faster; leaving
# the `with` block waits for all submitted tasks to finish
with concurrent.futures.ThreadPoolExecutor() as executor:
    executor.map(download_data,cities_random)
print("-"*15)
print("Data Retrieve Complete")
print("-"*15)
# + tags=[]
# Build the DataFrame from the dict: records come in as columns, so transpose
# and rename the positional columns to meaningful names
cities_df = pd.DataFrame(cities_dic).T.rename(
    {0 : "City_ID", 1 : "Country", 2:"City",3: "Date",\
    4:"Lat",5:"Long",6: "Cloudiness",7:"Humidity",\
    8:"Wind_Speed",9:"Temp_Max" },axis=1).\
    reset_index(drop = True)
# Report how many fully duplicated rows exist, then drop them in place
print(f'{(cities_df[cities_df.duplicated()==True]).count().count()} duplicates')
cities_df.drop_duplicates(inplace=True)
# -
#create the output files and export the df
# NOTE(review): pre-creating empty files is unnecessary -- to_csv/to_html
# create the files themselves
output_csv = 'Resources/data/cities_data.csv'
output_html = 'Resources/cities_data.html'
outputs = [output_csv,output_html]
for file in outputs:
    f = open(file,'w')
    f.close()
# export the df to csv and html
cities_df.to_csv(output_csv,encoding = "UTF-8",index = False, header=True)
cities_df.to_html(output_html,columns = cities_df.columns,index = False, header=True)
# + tags=[]
# Report (not remove) the cities with >100% humidity -- nothing is dropped here
print(f'There are {cities_df[cities_df["Humidity"]>100]["City"].count()} city with a humidity over 100%')
# -
#plot each weather metric against latitude and save the figures
from datetime import datetime as dt
date = dt.strftime(dt.fromtimestamp(cities_df['Date'][0]),"%d-%m-%Y")

def plot_vs_latitude(column, ylabel, title_metric, filename):
    """Scatter-plot one cities_df column against latitude and save the PNG.

    Parameters
    ----------
    column: str
        Column of cities_df plotted on the y-axis.
    ylabel: str
        Y-axis label (including units).
    title_metric: str
        Metric name used in the plot title.
    filename: str
        Base name of the PNG written under Resources/assets/.
    """
    plt.figure(figsize=(8,6))
    sns.set_theme(style="darkgrid",palette="muted")
    sns.scatterplot(x=cities_df['Lat'], y=cities_df[column])
    sns.despine(offset=10, trim=True)
    plt.ylabel(ylabel)
    plt.title(f'{title_metric} vs latitude on the {date}', fontsize=15, pad=20)
    plt.tight_layout()
    # savefig creates the file itself; no need to pre-open/truncate it
    plt.savefig(f"Resources/assets/{filename}.png",facecolor=(1,1,1,0))

# The four plots previously duplicated this code cell-by-cell.
plot_vs_latitude('Temp_Max', 'Max Temperature (°F)', 'Temperature', 'temperature')
plot_vs_latitude('Humidity', 'Humidity (%)', 'Humidity', 'humidity')
plot_vs_latitude('Cloudiness', 'Cloudiness (%)', 'Cloudiness', 'cloudiness')
plot_vs_latitude('Wind_Speed', 'Wind Speed (mph)', 'Wind speed', 'wind_speed')
| mainanalysis.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# language: python
# name: python3
# ---
# %load_ext autoreload
# %autoreload 2
# +
import matplotlib.pyplot as plt
import numpy as np
from pathlib import Path
import torch
import torch.nn as nn
import torch.nn.functional as F
from htools import *
from incendio.layers import mish
from incendio.utils import get_einops_image, plot_img_batch
# -
cd_root()
# Toy dimensions for walking through scaled dot-product attention.
bs = 2  # batch size
n_seq = 4  # sequence length
n_emb = 8  # embedding dimension
x = torch.randn(bs, n_seq, n_emb)
x.shape
# A single linear layer projects the input into queries, keys and values at once.
lin = nn.Linear(n_emb, n_emb*3)
qkv = lin(x)
qkv.shape
q, k, v = qkv.chunk(3, dim=-1)
smap(q, k, v)
qk = q @ k.transpose(-2, -1)  # raw attention scores: (bs, n_seq, n_seq)
qk.shape
# Scale by sqrt(d_k) before the softmax, per "Attention Is All You Need".
w = F.softmax(qk/np.sqrt(k.shape[-1]), dim=-1)
w.shape
w.shape, v.shape
w
w.sum(-1)  # each row of attention weights sums to 1
v
res = w @ v  # attention output: per-position weighted combination of values
res.shape
# + run_control={"marked": false}
class Projector(nn.Module):
    """Project input into multiple spaces. Used in DotProductAttention to
    generate queries/keys/values.
    """

    def __init__(self, n_in, n_out_single=None, spaces=3):
        """
        Parameters
        ----------
        n_in: int
            Size of input feature dimension, where input is (bs, n_in) or
            (bs, seq_len, n_in). If the latter, this ONLY transforms the last
            dimension. If you want to take multiple dimensions of information
            into account simultaneously, you can flatten the input prior to
            passing it in.
        n_out_single: int or None
            This determines the size of the feature dimension in each new
            space. By default, this will be the same as n_in.
        spaces: int
            Number of spaces to project the input into. Default is 3 because
            we commonly use this to generate queries, keys, and values for
            attention computations.
        """
        super().__init__()
        self.n_in = n_in
        self.n_out_single = n_out_single or n_in
        # Fixed: `self.spaces` was redundantly assigned twice in the original.
        self.spaces = spaces
        # A single linear layer produces all projections at once; forward()
        # chunks its output back into `spaces` separate tensors.
        self.n_out = self.n_out_single * self.spaces
        self.fc = nn.Linear(self.n_in, self.n_out)

    def forward(self, x):
        """
        Parameters
        ----------
        x: torch.Tensor
            Shape (bs, n_in) or (bs, seq_len, n_in).

        Returns
        -------
        tuple[torch.Tensor]: Tuple of `spaces` tensors where each tensor has
        shape (bs, n_out_single) or (bs, seq_len, n_out_single), depending on
        the input shape.
        """
        return self.fc(x).chunk(self.spaces, dim=-1)
# -
# ## Detour: testing fc projector vs conv projector
#
# NOTE: nn.Linear only acts on last dimension so if we pass in input of shape (bs, seq_len, emb_dim), we're not really using the seq dimension here. Maybe refresh memory of 1d conv to see if that would take both into account in the way I want. Otherwise we could do some reshaping before feeding to linear layer.
#
# UPDATE: Now that I think about this, maybe this behavior is desirable. At this stage we're not looking to take info from other time steps into account: we're just looking to get 3 different representations of EACH time step separately, and attention will be used to blend this information.
# Load sample images (project helpers from incendio.utils).
img = get_einops_image()
img_bw = get_einops_image(color=False)
img.shape, img_bw.shape
plot_img_batch(img)
plot_img_batch(img_bw)
# +
# Fill the projector's weights with a constant so q/k/v become comparable.
proj = Projector(*img_bw.shape[1:], 3)
with torch.no_grad():
    proj.fc.weight.data.fill_(2)
qkv = torch.cat(proj(img_bw), dim=0)
qkv.shape
# -
# Because we filled weights with constant, q/k/v are all equivalent for a
# single sample. Precision is low for some reason (e.g. tolerance of 1e-3
# finds differences).
for i in range(6):
    print(i)
    assert torch.isclose(qkv[i], qkv[i+6], 1e-2, 1e-2).float().mean() == 1
    assert torch.isclose(qkv[i], qkv[i+12], 1e-2, 1e-2).float().mean() == 1
plot_img_batch(qkv)
# Compare with a 1d-conv projector (constant weights again).
conv = nn.Conv1d(img_bw.shape[-1], img_bw.shape[-1]*2,
                 kernel_size=3, padding=1)
with torch.no_grad():
    conv.weight.data.fill_(2)
# bw_out = conv(img_bw.permute(0, 2, 1))
bw_out = conv(img_bw)
bw_out.shape
plot_img_batch(bw_out)
bw_out.shape[1]
# And with a fully-connected projector over the flattened image.
fc = nn.Linear(img_bw.shape[-1] * img_bw.shape[-2],
               img_bw.shape[-1] * img_bw.shape[-2] // 4)
with torch.no_grad():
    fc.weight.data.fill_(2)
bw_out = fc(img_bw.view(img_bw.shape[0], -1))
plot_img_batch(
    bw_out.view(bw_out.shape[0], int(np.sqrt(bw_out.shape[1])), -1)
)
class DotProductAttention(nn.Module):
    """GPT2-style attention block. This was mostly an intuition-building
    exercise - in practice, Huggingface provides layers that should probably
    be used instead.
    """
    def __init__(self, n_in, n_out=None, nf=None, n_heads=12,
                 temperature='auto', p1=0.1, p2=0.1, return_attn=False):
        """
        Parameters
        ----------
        n_in: int
            Last dimension of input, usually embedding dimension.
        n_out: int or None
            Size of output vectors. By default, this will be the same as the
            input.
        nf: int or None
            Size ("nf = number of features") of queries/keys/values.
            By default, this will be the same as n_in. Must be divisible by
            n_heads.
        n_heads: int
            Number of attention heads to use. nf must be divisible
            by this as each projected vector will be divided evenly among
            each head.
        temperature: str or float
            If str, must be "auto", meaning softmax inputs will be scaled by
            sqrt(n_proj_single). You can also specify a float, where values
            <1 sharpen the distribution (usually not what we want here) and
            values greater than one soften it (allowing attention head to
            route more information from multiple neurons rather than almost
            all from one).
        p1: float
            Value in (0.0, 1.0) setting the dropout probability on the
            attention weights.
        p2: float
            Value in (0.0, 1.0) setting dropout probability following the
            output layer.
        return_attn: bool
            If True, the `forward` method will return a tuple of
            (output, attention_weights) tensors. If False (the default), just
            return the output tensor.
        """
        super().__init__()
        nf = nf or n_in
        n_out = n_out or n_in
        assert nf % n_heads == 0, \
            'n_proj_single must be divisible by n_heads'
        # Single projection produces q, k, v in one matmul (see Projector).
        self.proj_in = Projector(n_in, nf, spaces=3)
        # Reshape so hidden dimension is split equally between each head.
        self.head_splitter = Rearrange('bs seq (heads f) -> bs heads seq f',
                                       heads=n_heads)
        # NOTE(review): SmoothSoftmax is a project layer; presumably it
        # applies the `temperature` scaling described above - confirm.
        self.soft = SmoothSoftmax(temperature)
        self.drop_attn = nn.Dropout(p1)
        # Concatenate output of each head.
        self.head_merger = Rearrange('bs heads seq f -> bs seq (heads f)')
        self.fc_out = nn.Linear(nf, n_out)
        self.drop_out = nn.Dropout(p2)
        # Non-layer attributes.
        self.n_heads = n_heads
        self.temperature = temperature
        self.p1 = p1
        self.p2 = p2
        self.return_attn = return_attn
    def forward(self, x):
        """
        Parameters
        ----------
        x: torch.Tensor
            Shape (bs, seq_len, n_in). n_in will usually be the sum of
            embedding dimensions (word and positional). For other problems
            (e.g. web browsing sequence classificaiton), this might include
            other features about the page at time step T.
        """
        # Project to q/k/v, then split features evenly across heads.
        q, k, v = map(self.head_splitter, self.proj_in(x))
        # Raw scores: (bs, heads, seq, seq).
        scores = q @ k.transpose(-2, -1)
        weights = self.drop_attn(self.soft(scores))
        # Blend values with attention weights, then merge heads back.
        x = weights @ v
        x = self.head_merger(x)
        x = self.drop_out(self.fc_out(x))
        return (x, weights) if self.return_attn else x
# NOTE(review): this cell references `attn` before the cell below defines
# it - relies on out-of-order notebook execution.
print(x.shape)
z = attn.head_splitter(x)
print(z.shape)
merger = Rearrange('bs heads seq f -> bs seq (heads f)')
merger(z).shape
# +
# GPT2-sized smoke test: 768-dim embeddings, 12 heads.
bs = 2
seq_len = 6
n_emb = 768
n_heads = 12
x = torch.randn(bs, seq_len, n_emb)
print(x.shape)
attn = DotProductAttention(n_emb, n_heads=n_heads)
res = attn(x)
print(res.shape)
# -
# A head count that still divides n_emb (768 / 3) works too.
attn = DotProductAttention(n_emb, n_heads=3)
res = attn(x)
res.shape
class FanForward(nn.Module):
    """Fan out to a larger dimension and back in to the original space.
    Not fully clear on why this is necessary or useful but we apparently need
    it for our GPT2-esque decoder.
    """

    def __init__(self, n_in, n_hid=None, scalar=4, p=0.1, act=mish):
        """
        Parameters
        ----------
        n_in: int
            Number of input features. x will typically have shape
            (bs, seq_len, n_in).
        n_hid: int or None
            If int, the output size of the first linear layer. If None, it
            is derived as n_in * scalar.
        scalar: int
            Multiplier used to compute the hidden size from n_in when n_hid
            is not given explicitly (hidden layers of size n_in*4 are common
            in practice).
        p: float
            Dropout probability in (0.0, 1.0). Applied after the output
            layer but not after the input layer for some reason.
        act: nn.Module or function
            Callable activation function, applied after both the first and
            second linear layers.
        """
        super().__init__()
        hidden = ifnone(n_hid, n_in * scalar)
        self.fc1 = nn.Linear(n_in, hidden)
        self.fc2 = nn.Linear(hidden, n_in)
        self.act = act
        self.drop = nn.Dropout(p)

    def forward(self, x):
        """
        Parameters
        ----------
        x: torch.Tensor
            Shape (bs, n_in) or (bs, seq_len, n_in).

        Output
        ------
        torch.Tensor: Same shape as the input tensor.
        """
        fanned_out = self.act(self.fc1(x))
        fanned_in = self.act(self.fc2(fanned_out))
        return self.drop(fanned_in)
# NOTE(review): FanForward(24, 2) expects inputs whose last dim is 24, but
# `res` from the attention cell has last dim n_emb - this line presumably
# only ran against stale session state; verify before rerunning.
ff = FanForward(24, 2)
ff(res).shape
class TransformerDecoder(nn.Module):
    """GPT2-esque decoder network. This was mostly an intuition-building
    exercise - in practice, it's probably best to just use something from
    Huggingface.
    """

    def __init__(self, n_in=768, n_heads=12):
        """
        Parameters
        ----------
        n_in: int
            Embedding dimension of the input (and output).
        n_heads: int
            Number of attention heads; must divide n_in.
        """
        super().__init__()
        self.attn = DotProductAttention(n_in, n_heads=n_heads)
        self.ff = FanForward(n_in)
        # Pre-norm residual blocks: one LayerNorm per sub-layer.
        self.norm_1 = nn.LayerNorm(n_in)
        self.norm_2 = nn.LayerNorm(n_in)

    def forward(self, x):
        # Fix: use out-of-place addition for the residual connections.
        # `x += ...` mutated the caller's tensor in place, which clobbers a
        # value the caller may still need and raises autograd errors when
        # x requires grad.
        x = x + self.attn(self.norm_1(x))
        return x + self.ff(self.norm_2(x))
FanForward(5, 6)
ff = FanForward(n_emb, 4)
ff(x).shape
# End-to-end shape check of the homemade decoder block.
decoder = TransformerDecoder()
decoder
x.shape
decoder(x).shape
# Compare against Huggingface's GPT2 attention implementation.
from transformers.modeling_gpt2 import Attention
config = Args(n_head=1, attn_pdrop=.1, resid_pdrop=.1, n_ctx=n_emb*2,
              n_layer=1, n_embd=n_emb, layer_norm_epsilon=1e-3,
              output_attentions=False)
attn_gpt = Attention(n_emb, n_emb*2, config)
res_gpt = attn_gpt(x)
len(res_gpt)
res_gpt[-2].shape
attn_gpt.c_attn(x).shape
# +
# FROM BLOG POST
class Conv1D(nn.Module):
    """GPT-2's 'Conv1D' layer (from the blog post): despite the name, this
    is just a linear projection of the last dimension, with the weight
    stored transposed relative to nn.Linear.
    """

    def __init__(self, nx, nf):
        """nx: input feature size; nf: output feature size."""
        super().__init__()
        self.nf = nf
        weight = torch.empty(nx, nf)
        nn.init.normal_(weight, std=0.02)
        self.weight = nn.Parameter(weight)
        self.bias = nn.Parameter(torch.zeros(nf))

    def forward(self, x):
        """Project the last dimension of x from nx to nf features."""
        out_shape = x.size()[:-1] + (self.nf,)
        flat = x.view(-1, x.size(-1))
        projected = torch.addmm(self.bias, flat, self.weight)
        return projected.view(*out_shape)
class FeedForward(nn.Module):
    """GPT-2 position-wise feed-forward block (from the blog post): project
    up to `nx` features, apply GELU, project back down to `d_model`, then
    dropout.
    """

    def __init__(self, dropout, d_model=768, nx=768*4):
        """
        Parameters
        ----------
        dropout: float
            Dropout probability applied after the output projection.
        d_model: int
            Input/output feature size.
        nx: int
            Hidden (fanned-out) feature size, conventionally 4 * d_model.
        """
        super().__init__()
        self.c_fc = Conv1D(d_model, nx)
        self.c_proj = Conv1D(nx, d_model)
        self.act = F.gelu
        self.dropout = nn.Dropout(dropout)

    def forward(self, x):
        # Fix: removed a leftover debug print(x.shape) from the hot path.
        x = self.act(self.c_fc(x))
        return self.dropout(self.c_proj(x))
# -
FeedForward(0, n_emb, n_emb*4)(x).shape
w.shape, v.shape
# nn.Linear only transforms the last dimension, even for 4-d input.
z = torch.randn(bs, n_seq, n_emb, n_emb)
z.shape
# NOTE(review): lin2 expects last dim 3, so lin2(z) only works if n_emb == 3
# in the current session - confirm before rerunning top to bottom.
lin2 = nn.Linear(3, 5)
lin2(z).shape
lin2.weight
z.dim()
| scratch_notebooks/annotated-gpt2-blogpost.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3 (ipykernel)
# language: python
# name: python3
# ---
# Read an image from disk and display it in a window until a key is pressed.
import cv2
cheetha_img = cv2.imread('pic1.jpg', 1)  # 1 = load as a color image
cv2.imshow('cheetha', cheetha_img)
cv2.waitKey(0)
# Fix: destroyAllWindows lives in the cv2 module; the bare call raised
# NameError.
cv2.destroyAllWindows()
# Write a copy of the image to disk.
import cv2
cheetha_img = cv2.imread('pic1.jpg',1)
cv2.imwrite('new image.jpg',cheetha_img)
# Inspect the image dimensions: (height, width, channels).
import cv2
cheetha_img = cv2.imread('pic1.jpg',1)
print(cheetha_img.shape)
import random
# Overwrite the first 100 rows of the image with random BGR noise, then
# display the result until a key is pressed.
for i in range(100):
    for j in range(cheetha_img.shape[1]):
        cheetha_img[i][j] = [random.randint(0,255), random.randint(0,255), random.randint(0,255)]
cv2.imshow('cheetha', cheetha_img)
cv2.waitKey(0)
# Fix: destroyAllWindows lives in the cv2 module; the bare call raised
# NameError.
cv2.destroyAllWindows()
# Copy a 100x100 patch from one region of the image onto another.
import cv2
# Fix: the image was loaded into `panda_image` but every subsequent line
# used `cheetha_image`, raising NameError. Use one consistent name.
cheetha_image = cv2.imread('pic1.jpg', -1)  # -1 = load unchanged (incl. alpha)
tag = cheetha_image[300:400, 300:400]
cheetha_image[50:150, 50:150] = tag
cv2.imshow('cheetha', cheetha_image)
cv2.waitKey(0)
# Fix: was a bare destroyAllWindows() -> NameError.
cv2.destroyAllWindows()
| Exp_1_DIP.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 2
# language: python
# name: python2
# ---
# Imports
# %matplotlib inline
import quandl
import math
import numpy as np
import pandas as pd
import matplotlib.pyplot as plt
import seaborn as sns
import collections
from collections import defaultdict
# Get price data from Quandl
bitcoin = quandl.get("BCHAIN/MKPRU")
bitcoin = bitcoin.shift(-1) # data set has daily open, we want daily close
bitcoin = bitcoin.loc['2011-01-01':] # Remove the 0's
bitcoin.columns = ['Last']
# Forward-looking 365-day return, in percent.
bitcoin['RollingRet'] = (bitcoin['Last'].shift(-365) / bitcoin['Last'] - 1) * 100
ax = bitcoin.loc['2011':'2018','RollingRet'].plot(figsize=(14,10))
ax.set_ylabel("% return over next 365 days")
plt.axhline(y=0, color='r', linestyle='-');
# Count days each year where return over next 365 days would have been negative
neg_return = bitcoin[bitcoin['RollingRet'] < 0 ]
neg_return['Last'].groupby(neg_return.index.year).count().to_frame().rename(columns={'Last':'Count'})
# +
# Get start and end dates and see what was happening with price at those times
print neg_return['2011'].index[0], neg_return.loc['2011','Last'].iloc[0]
print neg_return['2011'].index[-1], neg_return.loc['2011','Last'].iloc[-1]
print neg_return['2013'].index[0], neg_return.loc['2013','Last'].iloc[0]
print neg_return['2014'].index[-1], neg_return.loc['2014','Last'].iloc[-1]
print neg_return['2017'].index[0], neg_return.loc['2013','Last'].iloc[0]
# -
# 2011 drawdown: highlight (in red) the span where the forward 365-day
# return was negative.
fig, ax = plt.subplots(figsize=(14,10))
ax.plot(bitcoin.loc['2011':'2012','Last'].index, bitcoin.loc['2011':'2012','Last'])
#ax.plot([neg_return['2011'].index[0],neg_return['2011'].index[-1]], [neg_return.loc['2011','Last'].iloc[0],neg_return.loc['2011','Last'].iloc[-1]], 'x', color='Red', markersize=8)
ax.plot(bitcoin.loc[neg_return['2011'].index[0]:neg_return['2011'].index[-1]].index, bitcoin.loc[neg_return['2011'].index[0]:neg_return['2011'].index[-1],'Last'], color='#CB4335');
# 2013-2014 drawdown.
fig, ax = plt.subplots(figsize=(14,10))
ax.plot(bitcoin.loc['2013':'2016-05','Last'].index, bitcoin.loc['2013':'2016-05','Last'])
#ax.plot([neg_return['2013'].index[0],neg_return['2014'].index[-1]], [neg_return.loc['2013','Last'].iloc[0],neg_return.loc['2014','Last'].iloc[-1]], 'x', color='Red', markersize=8)
ax.plot(bitcoin.loc[neg_return['2013'].index[0]:neg_return['2014'].index[-1]].index, bitcoin.loc[neg_return['2013'].index[0]:neg_return['2014'].index[-1],'Last'], color='#CB4335');
# 2017-2018 drawdown (open-ended as of the data's last date).
fig, ax = plt.subplots(figsize=(14,10))
ax.plot(bitcoin.loc['2017':'2018','Last'].index, bitcoin.loc['2017':'2018','Last'])
#ax.plot([neg_return['2013'].index[0],neg_return['2014'].index[-1]], [neg_return.loc['2013','Last'].iloc[0],neg_return.loc['2014','Last'].iloc[-1]], 'x', color='Red', markersize=8)
ax.plot(bitcoin.loc[neg_return['2017'].index[0]:'2018-11-13'].index, bitcoin.loc[neg_return['2017'].index[0]:'2018-11-13','Last'], color='#CB4335');
| bitcoin_rolling_returns.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# language: python
# name: python3
# ---
# Header banner for the calculator.
print('\n********** Python Calculator **********')
# +
# Funções
def add(x, y):
    """Return the sum of x and y."""
    total = x + y
    return total


def subtract(x, y):
    """Return x minus y."""
    difference = x - y
    return difference


def multiply(x, y):
    """Return the product of x and y."""
    product = x * y
    return product


def divide(x, y):
    """Return x divided by y (true division).

    Raises ZeroDivisionError if y == 0.
    """
    quotient = x / y
    return quotient
# -
# Calculator menu options.
print('\n Selecione o número com a operação desejada: \n')
print('1 - Soma')
print('2 - Subtração')
print('3 - Multiplicação')
print('4 - Divisão')
# +
# Read the operation choice and the two integer operands, then dispatch to
# the matching arithmetic function. (User-facing strings are Portuguese.)
escolha = input('\nDigite sua opção (1/2/3/4): ')
num1 = int(input('\nDigite o primeiro número: '))
num2 = int(input('\nDigite o segundo número: '))
if escolha == '1':
    print('\n')
    print(num1, '+', num2, '=', add(num1, num2))
    print('\n')
elif escolha == '2':
    print('\n')
    print(num1, '-', num2, '=', subtract(num1, num2))
    print('\n')
elif escolha == '3':
    print('\n')
    print(num1, '*', num2, '=', multiply(num1, num2))
    print('\n')
elif escolha == '4':
    print('\n')
    print(num1, '/', num2, '=', divide(num1, num2))
    print('\n')
else:
    # Anything other than '1'-'4' is rejected.
    print('\n Opção inválida!')
# -
| CAP-03/Calculadora.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# language: python
# name: python3
# ---
# + language="html"
# <h1>NumPy</h1>
# -
from pathlib import Path
from IPython.display import HTML
import numpy as np
from numpy.random import randn
data = [[0.9526, -0.246, -0.8856], [0.5639, 0.2379, 0.9104]]
data = np.array(data)
print(data)
# Arithmetic on ndarrays is element-wise.
data * 10
data + data
data.shape
data.dtype
data1 = [6, 7.5, 8, 0, 1]
arr1 = np.array(data1)
arr1
# Nested lists become multidimensional arrays.
data2 = [[1, 2, 3, 4], [5, 6, 7, 8]]
arr2 = np.array(data2)
arr2
arr2.ndim
arr2.shape
arr1.dtype
arr2.dtype
# Array factories: zeros, empty (uninitialized memory!), arange.
np.zeros(10)
np.zeros((3, 6))
np.empty((2, 3, 2))
np.arange(15)
# + language="html"
# <h2>Array Creation Functions</h2>
# <table width="100%">
# <tr><td width="20%"><code>array</code></td>
# <td>Convert input data (<code>list</code>, <code>tuple</code>, <code>array</code>, or other
# sequence type) to an <code>ndarray</code> either by inferring a <code>dtype</code> or
# explicitly specifying a <code>dtype</code>. Copies the input data by default.</td></tr>
# <tr><td width="20%"><code>asarray</code></td>
# <td>Convert input to <code>ndarray</code>, but do not copy if the input is already an
# <code>ndarray</code>.</td></tr>
# <tr><td width="20%"><code>arange</code></td>
# <td>Like the built-in <code>range</code> but returns an <code>ndarray</code> instead of a
# <code>list</code>.</td></tr>
# <tr><td><code>ones, ones_like</code></td>
# <td>Produce an array of all <code>1</code>'s with the given shape and <code>dtype</code>.
# <code>ones_like</code> takes another array and produce a ones array of the same shape and
# <code>dtype</code>.</td></tr>
# <tr><td><code>zeros, zeros_like</code></td>
# <td>Like <code>ones</code> and <code>ones_like</code> but producing arrays of
# <code>0</code>'s instead.</td></tr>
# <tr><td width="20%"><code>empty, empty_like</code></td>
# <td>Create new arrays by allocating new memory, but do not populate with any values like
# <code>ones</code> and <code>zeros</code>.</td></tr>
# <tr><td><code>eye, identity</code></td>
# <td>Create a square <code>NxN</code> identity matrix (<code>1</code>'s on the diagonal and
# <code>0</code>'s elsewhere).</td></tr>
# </table>
# + language="html"
# <h2>Data Types</h2>
# -
# dtype can be specified explicitly at construction time.
arr1 = np.array([1, 2, 3], dtype=np.float64)
arr2 = np.array([1, 2, 3], dtype=np.int32)
print("Array 1 data type:", arr1.dtype)
print("Array 2 data type:", arr2.dtype)
# + language="html"
# <table width="100%">
# <thead><th width="25%">Type</th><th width="15%">Type Code</th><th>Description</th></thead>
# <tr><td><code>int8, uint8</code></td><td><code>i1, u1</code></td>
# <td>Signed and unsigned 8-bit (1 byte) integers</td></tr>
# <tr><td><code>int16, uint16</code></td><td><code>i2, u2</code></td>
# <td>Signed and unsigned 16-bit integers</td></tr>
# <tr><td><code>int32, uint32</code></td><td><code>i4, u4</code></td>
# <td>Signed and unsigned 32-bit integers</td></tr>
# <tr><td><code>int64, uint64</code></td><td><code>i8, u8</code></td>
# <td>Signed and unsigned 64-bit integers</td></tr>
# <tr><td><code>float16</code></td><td><code>f2</code></td>
# <td>Half-precision floating point</td></tr>
# <tr><td><code>float32</code></td><td><code>f4 or f</code></td>
# <td>Standard single-precision floating point. Compatible with C <code>float</code>.</td></tr>
# <tr><td><code>float64</code></td><td><code>f8 or d</code></td>
# <td>Standard double-precision floating point. Compatible with C <code>double</code> and
# Python <code>float</code> object.</td></tr>
# <tr><td><code>float128</code></td><td><code>f16 or g</code></td>
# <td>Extended-precision floating point</td></tr>
# <tr><td><code>complex64, complex128, complex256</code></td><td><code>c32</code></td>
# <td>Complex numbers represented by two 32, 64 or 128 floats, respectively</td></tr>
# <tr><td><code>bool</code></td><td><code>?</code></td>
# <td>Boolean type storing <code>True</code> and <code>False</code> values.</td></tr>
# <tr><td><code>object</code></td><td><code>0</code></td>
# <td>Python <code>object</code> type</td></tr>
# <tr><td><code>string_</code></td><td><code>S</code></td>
# <td>Fixed-length string type (1 byte per character). To create a string <code>dtype</code>
# with length 10, use <code>'S10'.</td></tr>
# <tr><td><code>unicode_</code></td><td><code>U</code></td>
# <td>Fixed-length Unicode type (number of bytes is platform-specific). Same specification
# semantics as <code>string_</code> (e.g. <code>'U10'</code>).</td></tr>
# </table>
# -
arr = np.array([1, 2, 3, 4, 5])
arr.dtype
# astype always creates a new array (a copy), even for the same dtype.
float_arr = arr.astype(np.float64)
float_arr.dtype
arr = np.array([3.7, -1.2, -2.6, 0.5, 12.9, 10.1])
arr
# Casting float -> int truncates toward zero.
arr.astype(np.int32)
# Fix: np.string_ was removed in NumPy 2.0; np.bytes_ is the replacement
# (same fixed-length bytestring dtype).
numeric_strings = np.array(['1.25', '-9.6', '42'], dtype=np.bytes_)
numeric_strings.astype(float)
int_array = np.arange(10)
calibers = np.array([.22, .270, .357, .380, .44, .50], dtype=np.float64)
# Cast using another array's dtype object.
int_array.astype(calibers.dtype)
# Type-code shorthand: 'u4' == uint32.
empty_uint32 = np.empty(8, dtype='u4')
empty_uint32
# + language="html"
# <p>Calling <code>astype</code> <em>always</em> creates a new array (a copy of the data),
# even if the new <code>dtype</code> is the same as the old <code>dtype</code>.</p>
# -
# NumPy scalar constructors.
np.float64(42)
np.int8(42.0)
# Fix: the np.bool and np.float aliases were deprecated in NumPy 1.20 and
# removed in 1.24; use the NumPy scalar types np.bool_ / np.float64.
np.bool_(42)
np.bool_(0)
np.bool_(42.0)
np.float64(True)
np.float64(False)
# + language="html"
# <h2>Operations Between Arrays and Scalars</h2>
# + language="html"
# <p>Any arithmetic operations between equal-size arrays applies the operation element-wise.
# </p>
# -
arr = np.array([[1., 2., 3.], [4., 5., 6.]])
arr
# Element-wise multiply/subtract between equal-shaped arrays.
arr * arr
arr - arr
# + language="html"
# <p>Arithmetic operations with arrays and scalars propagate to the value of each element:</p>
# -
1 / arr
arr ** 0.5
# + language="html"
# <h2>Basic Indexing and Slicing</h2>
# -
arr = np.arange(10)
arr
arr[5]
arr[5:8]
# Assigning a scalar to a slice broadcasts it into the selection.
arr[5:8] = 12
arr
# Slices are views: mutating the slice mutates the original array.
arr_slice = arr[5:8]
arr_slice[1] = 12345
arr
arr_slice[:] = 64
arr
# + language="html"
# <p>If you want a copy of a slice of an <code>ndarray</code> instead of a view, you will need
# to explicitly copy the array; for example <code>arr[5:8].copy()</code>.</p>
# -
arr2d = np.array([[1, 2, 3], [4, 5, 6], [7, 8, 9]])
arr2d[2]
# Equivalent element access: chained indexing vs a single comma tuple.
arr2d[0][2]
arr2d[0, 2]
# + language="html"
# <p>In multidimensional arrays, if you omit later indices, the returned object will be a
# lower-dimensional <code>ndarray</code> consisting of all the data along the higher
# dimensions. So in the 2 x 2 x 3 array <code>array3d</p>
# -
arr3d = np.array([[[1, 2, 3], [4, 5, 6]], [[7, 8, 9], [10, 11, 12]]])
arr3d
arr3d[0]
# copy() detaches the data so it survives the overwrite below.
old_values = arr3d[0].copy()
arr3d[0] = 42
arr3d
arr3d[0] = old_values
arr3d
arr3d[1, 0]
# Bytes per element for this dtype.
arr3d.dtype.itemsize
np.dtype(float)
# + language="html"
# <h3>Indexing with Slices</h3>
# -
arr[1:6]
arr2d
arr2d[:2]
# + language="html"
# <p>A slice selects a range of elements along an axis. You can pass multiple slices.</p>
# -
arr2d[:2, 1:]
arr2d[1, :2]
arr2d[2, :1]
# A slice (rather than an integer) keeps the dimension: shape (3, 1).
arr2d[:, :1]
# Assignment to a sliced region broadcasts the scalar.
arr2d[:2, 1:] = 0
arr2d
# + language="html"
# <h3>Boolean Indexing</h3>
# -
names = np.array(['Bob', 'Joe', 'Will', 'Bob', 'Will', 'Joe', 'Joe'])
from numpy.random import randn
data = randn(7,4)
names
data
# Comparison yields a boolean mask usable as a row index.
names == 'Bob'
data[names == 'Bob']
# Boolean mask combined with column slices/indices.
data[names == 'Bob', 2:]
data[names == 'Bob', 3]
names != 'Bob'
data[~(names == 'Bob')]
# Combine masks with & and | (not the Python `and`/`or` keywords).
mask = (names == 'Bob') | (names == 'Will')
mask
data[mask]
# + language="html"
# <p>The Python keywords <code>and</code> and <code>or</code> do not work with Boolean arrays.
# </p>
# -
data[data < 0] = 0 # Set all the negative values to 0
data
data[names != 'Joe'] = 7
data
# + language="html"
# <h3>Fancy Indexing</h3>
# -
arr = np.empty((8, 4))
# Fill each row with its row index.
for i in range(8):
    arr[i] = i
arr
# Fancy indexing with an integer list selects whole rows in that order.
arr[[4, 3, 0, 6]]
arr[[-3, -5, -7]]
arr = np.arange(32).reshape((8, 4))
arr
arr[[1, 5, 7, 2], [0, 3, 1, 2]] # Returns an array of [(a[1,0], a[5,3], a[7,1], a[2,2])]
# + active=""
# arr[np.loc([1, 5, 7, 2], [0, 3, 1, 2])] # This doesn't work right, apparently
# + language="html"
# <h2>Transposing Arrays and Swapping Axes</h2>
# -
arr = np.arange(15).reshape((3, 5))
arr
arr.T
arr = randn(6, 3)
# X^T X via the transpose attribute.
np.dot(arr.T, arr)
# + language="html"
# <p>For higher-dimensional arrays, <code>transpose</code> will accept a tuple of axis numbers
# to permute the axes:
# -
arr = np.arange(16).reshape((2, 2, 4))
arr
arr.transpose((1, 0, 2))
arr
# swapaxes exchanges a single pair of axes (also returns a view).
arr.swapaxes(1, 2)
# + language="html"
# <h2>Universal Functions</h2>
# -
arr = np.arange(10)
np.sqrt(arr)
np.exp(arr)
x = randn(8)
y = randn(8)
x
y
np.maximum(x, y) # Element-wise maximum
# modf splits values into fractional and integral parts (two arrays).
np.modf(arr)
# + language="html"
# <h3>Unary ufuncs</h3>
# <table width="100%">
# <thead><th width="20%">Function</th><th>Description</th></thead>
# <tr><td><code>abs, fabs</code></td>
# <td>Compute the absolute value element-wise for <code>int</code>, <code>float</code> or
# <code>complex</code> values. <code>fabs</code> is faster for non-complex data.</td></tr>
# <tr><td><code>sqrt</code></td>
# <td>Compute the square root of each element. Equivalent to <code>arr ** 0.5</code>.</td>
# </tr>
# <tr><td><code>square</code></td>
# <td>Compute the square of each element. Equivalent to <code>arr ** 2</code>.</td></tr>
# <tr><td><code>exp</code></td>
# <td>Compute the exponent $e^x$ of each element.</td></tr>
# <tr><td><code>log, log10, log2, log1p</code></td>
# <td>Natural logarithm (base-$e$), log base 10, log base 2 and log(1+x), respectively</td>
# </tr>
# <tr><td><code>sign</code></td>
# <td>Compute the sign of each element: 1 (positive), 0 (zero), or -1
# (negative)</td></tr>
# <tr><td><code>ceil</code></td>
# <td>Compute the ceiling of each element, i.e. the smallest integer greater than or equal to each
# element.</td></tr>
# <tr><td><code>floor</code></td>
# <td>Compute the floor of each element, i.e. the largest integer less than or equal to each
# element</td></tr>
# <tr><td><code>rint</code></td>
# <td>Round elements tot he nearest integer, preserving the <code>dtype</code></td></tr>
# <tr><td><code>modf</code></td>
# <td>Return fractional and integral parts of array as separate arrays</td></tr>
# <tr><td><code>isfinite, isinf</code></td>
# <td>Return Boolean array indicating whether each element is finite (non-inf, non-NaN) or
# infinite, respectively</td></tr>
# <tr><td><code>cos, cosh, sin, sinh, tan, tanh</code></td>
# <td>Regular and hyperbolic trigonometric functions</td></tr>
# <tr><td><code>arccos, arccosh, arcsin, arcsinh, arctan, arctanh</code></td>
# <td>Inverse trigonometric functions</td></tr>
# <tr><td><code>logical_not</code></td>
# <td>Compute truth value of not <code>x</code> element-wise. Equivalent to <code>~arr</code>
# </td></tr>
# </table>
# + language="html"
# <h3>Binary ufuncs</h3>
# <table>
# <thead><th width="20%">Function</th><th>Description</th>
# <tr><td><code>add</code></td>
# <td>Add corresponding elements in arrays</td></tr>
# <tr><td><code>subtract</code></td>
# <td>Subtract elements in second array from first array</td></tr>
# <tr><td><code>multiply</code></td>
# <td>Multiply array elements</td></tr>
# <tr><td><code>divide, floor_divide</code></td>
# <td>Divide or floor divide (truncating the remainder)</td></tr>
# <tr><td><code>power</code></td>
# <td>Raise elements in first array to powers indicated in second array</td></tr>
# <tr><td><code>maximum, fmax</code></td>
# <td>Element-wise maximum, <code>fmax</code> ignores <code>NaN</code></td></tr>
# <tr><td><code>minimum, fmin</code></td>
# <td>Element-wise minimum, <code>fmin</code> ignores <code>NaN</code></td></tr>
# <tr><td><code>mod</code></td>
# <td>Element-wise modulus (remainder of division)</td></tr>
# <tr><td><code>copysign</code></td>
# <td>Copy sign of values in second argument to values in first argument</td></tr>
# <tr><td><code>greater, greater_equal, less, less_equal, equal, not_equal</code></td>
# <td>Perform element-wise comparison, yielding Boolean array. Equivalent to infix operators
# <code>>, >=, <, <=, ==, !=</code></td></tr>
# <tr><td><code>logical_and, logical_or, logical_xor</code></td>
# <td>Compute element-wise truth value of logical operation. Equivalent to infix operators
# <code>&, |, ^</code></td></tr>
# </table>
# + language="html"
# <h2>Data Processing Using Arrays</h2>
# <p>The <code>np.meshgrid</code> function takes two 1D arrays and produces two 2d matrices
# corresponding to all pairs of <code>(x, y)</code> in the two arrays.</p>
# -
# Evaluate sqrt(x^2 + y^2) over a dense 2-d grid and render it.
points = np.arange(-5, 5, 0.01)
xs, ys = np.meshgrid(points, points)
xs
ys
z = np.sqrt(xs ** 2 + ys ** 2)
z
import matplotlib.pyplot as plt
plt.imshow(z, cmap=plt.cm.gray)
plt.colorbar()
# Fix: raw string for the LaTeX title - "\s" in a normal literal is an
# invalid escape sequence (SyntaxWarning on Python 3.12+).
plt.title(r"Image plot of $\sqrt{x^2 + y^2}$ for a grid of values")
plt.show()
# + language="html"
# <h3>Conditional Logic as Array Operations</h3>
# -
xarr = np.array([1.1, 1.2, 1.3, 1.4, 1.5])
yarr = np.array([2.1, 2.2, 2.3, 2.4, 2.5])
cond = np.array([True, False, True, True, False])
# Pure-Python version of the conditional select...
result = [(x if c else y) for x, y, c in zip(xarr, yarr, cond)]
result
# + active=""
# result = np.where(cond, xarr, yarr)
# print(result)
# -
arr = randn(4, 4)
arr
# ...and the vectorized equivalent with np.where.
np.where(arr > 0, 2, -2)
np.where(arr > 0, 2, arr) # set only positive values to 2
# + language="html"
# <h3>Mathematical and Statistical Methods</h3>
# -
arr = randn(5,4) # normally distributed data
# Method and top-level function forms are equivalent.
arr.mean()
np.mean(arr)
arr.sum()
# axis=1 aggregates across columns (one value per row); sum(0) down rows.
arr.mean(axis=1)
arr.sum(0)
arr = np.array([[0, 1, 2], [3, 4, 5], [6, 7, 8]])
arr.cumsum(0)
arr.cumprod(1)
# + language="html"
# <table width="100%">
# <thead><th width="20%">Method</th><th>Description</th></thead>
# <tr><td><code>sum</code></td>
# <td>Sum of all the elements in the array or along an axis.</td></tr>
# <tr><td><code>mean</code></td>
# <td>Arithmetic mean.</td></tr>
# <tr><td><code>std, var</code></td>
# <td>Standard deviation and variance, respectively, with optional degrees of freedom
# adjustment (default denominator <code>n</code>).</td></tr>
# <tr><td><code>min, max</code></td>
# <td>Minimum and maximum</td></tr>
# <tr><td><code>argmin, argmax</code></td>
# <td>Indices of minimum and maximum elements, respectively.</td></tr>
# <tr><td><code>cumsum</code></td>
# <td>Cumulative sum of elements starting from 0.</td></tr>
# <tr><td><code>cumprod</code></td>
# <td>Cumulative product of elements starting from 1</td></tr>
# <tr><td><code>any, all</code></td>
# <td>Evaluate to <code>True</code> if any or all of the elements in the array are
# <code>True</code>, respectively. Otherwise return <code>False</code>.</td></tr>
# </table>
# -
bools = np.array([False, False, True, False])
# any: True if at least one element is truthy; all: True only if every one is.
bools.any()
bools.all()
# + language="html"
# <h3>Sorting</h3>
# -
arr = randn(8)
arr
# ndarray.sort() sorts in place (returns None).
arr.sort()
arr
arr = randn(5, 3)
arr
# Sort each row independently along axis 1.
arr.sort(1)
arr
# + language="html"
# <p>The top-level method <code>np.sort</code> returns a sorted copy of an array instead of
# modifying it in place. A quick-and-dirty way to compute the quantiles of an array is to
# sort it and select the value at a particular rank:</p>
# -
large_arr = randn(1000)
large_arr.sort()
large_arr[int(0.05 * len(large_arr))] # 5% quantile
# + language="html"
# <h3>Unique and Other Set Logic</h3>
# -
np.unique(names)
ints = np.array([3, 3, 3, 2, 2, 1, 1, 4, 4])
np.unique(ints)
# Pure-Python equivalent of np.unique.
sorted(set(names))
values = np.array([6, 0, 0, 3, 2, 5, 6])
# Boolean membership of each element of `values` in [2, 3, 6].
np.in1d(values, [2, 3, 6])
# + language="html"
# <h2>Array Set Operations</h2>
# <table width="100%">
# <thead><th width="20%">Method</th><th>Description</th>
# <tr><td><code>unique(x)</code></td>
# <td>Compute the sorted, unique elements in <code>x</code></td></tr>
# <tr><td><code>intersect1d(x, y)</code></td>
# <td>Compute the sorted, common elements in <code>x</code> and <code>y</code></td></tr>
# <tr><td><code>union1d(x, y)</code></td>
# <td>Compute the sorted union of elements</td></tr>
# <tr><td><code>in1d(x, y)</code></td>
# <td>Compute a Boolean array indicating whether each element of <code>x</code> is contained
# in <code>y</code></td></tr>
# <tr><td><code>setdiff1d(x, y)</code></td>
# <td>Set difference; elements in <code>x</code> that are not in <code>y</code></td></tr>
# <tr><td><code>setxorid(x, y)</code></td>
# <td>Set symmetric differences; elements that are in either of the arrays but not both</td>
# </tr>
# + language="html"
# <h3>Storing Arrays on Disk</h3>
# <h4>Binary Format</h4>
# <p>Arrays are saved by default in an uncompressed raw binary format with extension
# <code>.npy</code>.</p>
# -
arr = np.arange(10)
ARRAY_FILE = 'data/some_array.npy'
# Round-trip a single array through the binary .npy format.
np.save(ARRAY_FILE, arr)
np.load(ARRAY_FILE)
# + language="html"
# <p><code>np.savez</code> can save multiple arrays (passed as kwargs) in a zip archive.</p>
# -
ARRAY_ZIP = 'data/array_archive.npz'
np.savez(ARRAY_ZIP, a=arr, b=arr)
# np.load on an .npz returns a lazy, dict-like archive keyed by kwarg name.
arch = np.load(ARRAY_ZIP)
arch['b']
# + language="html"
# <h4>Text Files</h4>
# -
from pathlib import Path
ARRAY_CSV = 'data/array_ex.csv'
print(Path(ARRAY_CSV).read_text())
# Parse the delimited text file straight into a 2-d float array.
arr = np.loadtxt(ARRAY_CSV, delimiter=',')
arr
# + language="html"
# <p><code>np.savetxt</code> performs the inverse operation, writing the array to a delimited
# text file.</p>
# <p><code>genfromtxt</code> is similar to <code>loadtxt</code> but is geared for structured
# arrays and missing data handling.</p>
# + language="html"
# <h2>Linear Algebra</h2>
# <p>Multiplication by the <code>*</code> operator of matrices is performed element-wise.</p>
# -
x = np.array([[1., 2., 3.], [4., 5., 6.]])
y = np.array([[6., 23.], [-1, 7], [8, 9]])
x
y
x.dot(y) # equivalently np.dot(x, y)
np.dot(x, np.ones(3))
from numpy.linalg import inv, qr
X = randn(5, 5)
# X^T X is symmetric, so it is a convenient matrix to invert/decompose.
mat = X.T.dot(X)
inv(mat)
# Should be (numerically close to) the identity matrix.
mat.dot(inv(mat))
# TODO: Have the output formatted with approximations
q, r = qr(mat)
r
# + language="html"
# <table width="100%">
# <caption>Commonly Used <code>numpy.linalg</code> Functions</caption>
# <thead><th width="20%">Function</th><th>Description</th></thead>
# <tr><td><code>diag</code></td>
# <td>Return the diagonal (or off-diagonal) elements of a square matrix as a 1D array, or
# convert a 1D arry into a square matrix with zeros on the off-diagonal</td></tr>
# <tr><td><code>dot</code></td>
# <td>Matrix multiplication</td></tr>
# <tr><td><code>trace</code></td>
# <td>Compute the sum of the diagonal elements</td></tr>
# <tr><td><code>det</code></td>
# <td>Compute the matrix determinant</td></tr>
# <tr><td><code>eig</code></td>
# <td>Compute the eigenvalues and eigenvectors of a square matrix</td></tr>
# <tr><td><code>inv</code></td>
# <td>Compute the inverse of a square matrix</td></tr>
# <tr><td><code>pinv</code></td>
# <td>Compute the Moore-Penrose pseudo-inverse of a matrix</td></tr>
# <tr><td><code>qr</code></td>
# <td>Compute the $QR$ decomposition</td></tr>
# <tr><td><code>svd</code></td>
# <td>Compute the singular value decomposition($SVD$)</td></tr>
# <tr><td><code>solve</code></td>
# <td>Solve the linear system $Ax=b$ for $x$, where $A$ is a square matrix</td></tr>
# <tr><td><code>lstsq</code></td>
# <td>Compute the least-squares solution to y=Xb</td></tr>
# </table>
# + language="html"
# <h2>Random Numbers</h2>
# -
samples = np.random.normal(size=(4, 4)) # 4 x 4 array of samples from the standard normal distribution
samples
from random import normalvariate
# Benchmark: vectorized np.random.normal vs a pure-Python loop.
N= 1000000
# %timeit samples = [normalvariate(0, 1) for _ in range(N)]
# %timeit np.random.normal(size=N)
# + language="html"
# <table width="100%">
# <thead><th width="20%">Function</th><th>Description</th></thead>
# <tr><td><code>seed</code></td>
# <td>Seed the random number generator</td></tr>
# <tr><td><code>permutation</code></td>
# <td>Return a random permutation of a sequence, or return a permuted range</td></tr>
# <tr><td><code>shuffle</code></td>
# <td>Randomly permute a sequence in place</td></tr>
# <tr><td><code>rand</code></td>
# <td>Draw samples from a uniform distribution</td></tr>
# <tr><td><code>randint</code></td>
# <td>Draw random integers from a given low-to-high range</td></tr>
# <tr><td><code>randn</code></td>
# <td>Draw samples from a normal distribution with mean $0$ and standard deviation $1$
# (MATLAB-like interface)</td></tr>
# <tr><td><code>binomial</code></td>
# <td>Draw samples from a binomial distribution</td></tr>
# <tr><td><code>normal</code></td>
# <td>Draw samples from a normal (Gaussian) distribution</td></tr>
# <tr><td><code>beta</code></td>
# <td>Draw samples from a beta distribution</td></tr>
# <tr><td><code>chisquare</code></td>
# <td>Draw samples from a chi-square distribution</td></tr>
# <tr><td><code>gamma</code></td>
# <td>Draw samples from a gamma distribution</td></tr>
# <tr><td><code>uniform</code></td>
# <td>Draw samples from a uniform $[0,1)$ distribution</td></tr>
# </table>
# + language="html"
# <h3>Random Walks</h3>
# -
import random
# Simulate a simple random walk: start at 0 and take 1000 unit steps,
# each +1 or -1 with equal probability.
position = 0
walk = [position]
steps = 1000
for _ in range(steps):  # a plain range() — no need for np.arange as a loop counter
    step = 1 if random.randint(0, 1) else -1
    position += step
    walk.append(position)
# Plot the first 100 positions (plt is imported in an earlier cell).
D = range(100)
X = list(D)        # list(range) instead of a copying comprehension
Y = walk[:100]     # a slice instead of an index-by-index comprehension
plt.plot(X, Y)
plt.title("Random walk with -1 | 1 steps")
# Vectorized version of the walk: draw all 1000 coin flips at once.
n = 1000
draws = np.random.randint(0, 2, size=n)  # each draw is 0 or 1
steps = np.where(draws > 0, 1, -1)       # map {0, 1} -> {-1, +1}
walk = steps.cumsum()                    # running sum = position over time
walk.min()
walk.max()
# First time step at which the walk is at least 10 steps from the origin.
(np.abs(walk) >= 10).argmax()
# + language="html"
# <h3>Simulating Many Random Walks at Once</h3>
# -
# Simulate 5000 independent walks of 1000 steps each in a single 2-D array.
nwalks = 5000
nsteps = 1000
draws = np.random.randint(0, 2, size=(nwalks, nsteps))  # each entry 0 or 1
steps = np.where(draws > 0, 1, -1)
walks = steps.cumsum(axis=1)  # cumulative sum along time, one row per walk
walks
walks.max()
walks.min()
# Which walks ever reach +/-30?
hits30 = (np.abs(walks) >= 30).any(axis=1)
hits30
hits30.sum() # Number that hit 30 or -30
# For those walks, the first time step at which +/-30 was reached.
crossing_times = (np.abs(walks[hits30]) >= 30).argmax(axis=1)
crossing_times.mean()
# Steps need not be +/-1: e.g. normally distributed step sizes.
steps = np.random.normal(loc=0, scale=0.25, size=(nwalks, nsteps))
steps
# Reshaping: the total number of elements must stay the same.
arr = np.arange(8)
arr
arr.reshape((4, 2))
arr.reshape((4, 2)).reshape((2, 4))
# Passing -1 lets numpy infer that dimension from the data length.
arr = np.arange(15)
arr.reshape((5, -1))
other_arr = np.ones((3, 5))
other_arr.shape
arr.reshape(other_arr.shape)  # a shape tuple can be passed directly
arr = np.arange(15).reshape((5, 3))
arr
arr.ravel() # Does not copy the data
arr.flatten() # Copies the data
# Concatenation along rows (axis=0) and columns (axis=1).
arr1 = np.array([[1, 2, 3], [4, 5, 6]])
arr2 = np.array([[7, 8, 9], [10, 11, 12]])
np.concatenate([arr1, arr2], axis=0)
np.concatenate([arr1, arr2], axis=1)
np.vstack((arr1, arr2))  # same as concatenate along axis=0
np.hstack((arr1, arr2))  # same as concatenate along axis=1
# Splitting: [1, 3] gives the row indices at which to cut.
arr = np.random.randn(5, 2)
arr
first, second, third = np.split(arr, [1, 3])
first
second
third
# HTML and Path come from earlier cells (IPython.display / pathlib).
HTML(Path('data/array_concat_funcs.html').read_text())
# np.r_ / np.c_ stack by rows / columns and also accept slice notation.
arr = np.arange(6)
arr1 = arr.reshape((3, 2))
arr2 = randn(3, 2)  # randn is imported in an earlier cell
np.r_[arr1, arr2]
np.c_[np.r_[arr1, arr2], arr]
np.c_[1:6, -10:-5]
| nb/numpy.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: conda_python3.6.3
# language: python
# name: conda_python3.6.3
# ---
# # chi-square test
#
#
# ## Case
#
# In the unified examination of the whole district, the variance of all students' scores is $18^2$, while the variance of the 51 students in one school is $12^2$. Is there any significant difference between the variance of the school's performance and that of the whole district? (take α = 0.05)
#
#
# n=51, $S^2$=$12^2$, ${\sigma^2}$=$18^2$
#
# ${X^2}$ = $\frac{(n-1) {S^2}}{\sigma^2}$
#
# ${X^2}$ = $\frac{(51-1) {12^2}}{18^2}$
#
import math  # kept for notebook users even though ** replaces math.pow below
# Chi-square statistic for the variance test: X^2 = (n - 1) * S^2 / sigma^2,
# with n = 51 students, sample SD S = 12 and district SD sigma = 18.
n, s, sigma = 51, 12, 18
x = (n - 1) * s**2 / sigma**2  # ** is clearer than math.pow for integer powers
print(x)
# ${X^2}$ = 22.22
#
# ${X_{0.025}^2(50)}$ = 71.4
#
# ${X_{0.975}^2(50)}$ = 32.4
#
# ${X^2}$ < ${X_{0.975}^2(50)}$
#
# Therefore, since ${X^2}$ = 22.22 lies below the lower critical value ${X_{0.975}^2(50)}$ = 32.4, the variance of the school's performance is significantly different from that of the whole district
| hypothesis-testing/chi-square-test/chi-square-test.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3 (ipykernel)
# language: python
# name: python3
# ---
# <img src="http://xarray.pydata.org/en/stable/_static/dataset-diagram-logo.png" align="right" width="30%">
#
# # Introdução
#
# Desejo boas-vindas ao tutorial **Xarray**.
#
# Xarray é um pacote Python de código aberto que visa tornar o trabalho com arranjos de dados catalogados uma tarefa simples, eficiente e até mesmo divertida!
#
# Xarray introduz *labels* (mapeamento, rótulo, catálogo) como forma de expressar dimensões, coordenadas e atributos construidos acima de arranjos brutos do tipo [NumPy](https://numpy.org/),
# o que permite um fluxo de trabalho e desenvolvimento mais intuitivo, conciso e a prova de erros.
# O pacote inclui uma biblioteca grande e crescente de funções aplicadas para análises e visualização com essas estruturas de dados.
#
# Xarray é inspirado e inclusive toma várias funcionalidades emprestadas do [pandas](https://pandas.pydata.org/), o popular pacote para manipulação de dados tabelados.
# Também é especialmente adaptado para funcionar com [arquivos netCDF](http://www.unidata.ucar.edu/software/netcdf), que foram a fonte do modelo de dados em Xarray, além de integrar-se perfeitamente com [Dask](http://dask.org/) para computação paralela e [hvPlot](https://hvplot.holoviz.org/user_guide/Gridded_Data.html) para visualização interativa dos seus dados.
# Mas antes de começarmos, recomendo que você assista essa incrível palestra que tivemos mais cedo essa semana na Python Brasil 2021:
#
# * **Fatos interessantes sobre arrays, matrizes e tensores: o número 3 vai te surpreender**, por <NAME>.
# Embed the recorded talk referenced above.
# NOTE(review): YouTubeVideo normally takes just the video id; the "?t=..."
# suffix here is intended to jump to a timestamp — confirm it renders as expected.
from IPython.display import YouTubeVideo
YouTubeVideo("s7B8Cd02MF8?t=14802")
# ## Configurando o Tutorial
#
# Esse tutorial foi projetado para rodar no [Binder](https://mybinder.org/).
# O serviço permite executar totalmente na nuvem, nenhuma instalação extra é necessária.
# Para tanto, basta clicar [aqui](https://mybinder.org/v2/gh/fschuch/xarray-tutorial-python-brasil/master?urlpath=lab):
# [](https://mybinder.org/v2/gh/fschuch/xarray-tutorial-python-brasil/master?urlpath=lab)
#
# Se você prefere instalar o tutorial localmente, siga os seguintes passos:
#
# 1. Clone o repositório:
#
# ```
# git clone https://github.com/fschuch/xarray-tutorial-python-brasil
# ```
#
# 1. Instale o ambiente. O repositório inclui um arquivo `environment.yaml` no subdiretório `.binder` que contém uma lista de todos os pacotes necessários para executar esse tutorial.
# Para instalá-los usando conda, use o comando:
#
# ```
# conda env create -f .binder/environment.yml
# conda activate xarray
# ```
#
# 1. Acesse o diretório da edição Python Brasil 2021:
#
# ```
# cd python-brasil-2021
# ```
#
# 1. Inicie uma seção do Jupyter Lab:
#
# ```
# jupyter lab
# ```
#
# ## Material complementar
#
# 1. Referências
#
# - [Documentação](http://xarray.pydata.org/en/stable/)
# - [Overview: Why xarray?](http://xarray.pydata.org/en/stable/why-xarray.html)
# - [Repositório do Xarray](https://github.com/pydata/xarray)
#
# 1. Peça ajuda:
#
# - Use a seção [python-xarray](https://stackoverflow.com/questions/tagged/python-xarray) no StackOverflow
# - [GitHub Discussions](https://github.com/pydata/xarray/discussions) para dúvidas e sugestões
# - [GitHub Issues](https://github.com/pydata/xarray/issues) para reportar bugs
#
#
# ## Estrutura do Tutorial
#
# O material é composto por múltiplos Jupyter Notebooks. Eles, por sua vez, são compostos por uma mistura de código, texto, visualizações e exercícios.
#
# Se essa é sua primeira experiência com JupyterLab, não se preocupe, ele é bastante simular com o Jupyter Notebook clássico. Se essa é a sua primeira vez com um Notebook, aqui vai uma introdução rápida:
#
# 1. Existem células em dois modos: comando e edição;
# 1. A partir do modo de comando, pressione `Enter` para editar uma célula (assim como essa célula em Markdown);
# 1. Do modo de edição, pressione `Esc` para retornar ao modo de comando;
# 1. Pressione `Shift + Enter` para executar a célula e mover o cursor para a célula seguinte;
# 1. A barra de ferramentas contém botões para executar, converter, criar, quebrar e mesclar células.
#
# O conteúdo abordado será o seguinte:
#
# 1. [Introdução + Estruturas para dados Multidimensionais](./01_estruturas_de_dados_e_io.ipynb)
# 1. [Trabalhando com dados mapeados](./02_trabalhando_com_dados_mapeados.ipynb)
# 1. [Computação com Xarray](03_calculos_com_xarray.ipynb)
# 1. [Gráficos e Visualização](04_graficos_e_visualizacao.ipynb)
# 1. [Introdução ao Dask](05_introducao_ao_dask.ipynb)
# 1. [Dask e Xarray para computação paralela](06_xarray_e_dask.ipynb)
#
# ## Exercício: Exibir *Olá, mundo!*
#
# Cada notebook terá exercícios para você resolver. Você receberá um bloco de código vazio ou parcialmente preenchido, seguido por uma célula oculta com a solução. Por exemplo:
#
# Imprima o texto "Olá, mundo!".
# +
# Seu código aqui
# -
# Em alguns casos, a próxima célula terá a solução. Clique nas elipses para
# expandir a solução, e sempre certifique-se de executar a célula de solução, no caso
# das seções posteriores do notebook dependerem da saída da solução.
#
# + tags=[]
# Solution to the "Olá, mundo!" exercise above.
greeting = "Olá, mundo!"
print(greeting)
# -
# ## Indo além
#
# Os notebooks acima foram projetados para abordar o básico do Xarray desde do início
# ao fim. Para lhe ajudar a ir além, também criamos uma lista de notebooks que
# demonstram aplicações do Xarray no mundo real em uma variedade de estudos de caso.
# Eles não precisam ser explorados em qualquer sequência particular, em vez disso, eles se destinam a
# fornecem uma amostra de como o Xarray pode ser usado.
#
# ### Xarray para manipulação de modelos meteorológicos
#
# 1. [Global Mean Surface Temperature from CMIP6](https://binder.pangeo.io/v2/gh/pangeo-gallery/cmip6/binder?urlpath=git-pull?repo=https://github.com/pangeo-gallery/cmip6%26amp%3Burlpath=lab/tree/cmip6):
# Start with `global_mean_surface_temp.ipynb` then feel free to explore the
# rest of the notebooks.
# <!-- 1. [Natural climate variability in the CESM Large Ensemble](https://aws-uswest2-binder.pangeo.io/v2/gh/NCAR/cesm-lens-aws/master?urlpath=lab) -->
# 1. [National Water Model Streamflow Analysis](https://aws-uswest2-binder.pangeo.io/v2/gh/rsignell-usgs/esip-gallery/binder?urlpath=git-pull?repo=https://github.com/rsignell-usgs/esip-gallery%26amp%3Burlpath=lab/tree/esip-gallery):
# Start with `02_National_Water_Model.ipynb` then feel free to explore the rest
# of the notebooks.
#
# ### Xarray e dados de satélites
#
# 1. [Landsat-8 on AWS](https://aws-uswest2-binder.pangeo.io/v2/gh/pangeo-data/landsat-8-tutorial-gallery/master/?urlpath=git-pull?repo=https://github.com/pangeo-data/landsat-8-tutorial-gallery%26amp%3Burlpath=lab/tree/landsat-8-tutorial-gallery/landsat8.ipynb%3Fautodecode)
#
# ### Xarray e a modelagem estatística bayesiana
#
# 1. [Xarray and PyMC3](https://mybinder.org/v2/gh/pymc-devs/pymc3/master?filepath=%2Fdocs%2Fsource%2Fnotebooks):
# Start with `multilevel_modeling.ipynb` then feel free to explore the rest of
# the notebooks. Also checkout [Arviz](https://arviz-devs.github.io/arviz/)
# which uses Xarray as its data model.
#
# ### Xarray para manipulação de dados de Fluidodinâmica Computacional
#
# 1. [Xarray and xcompact3d-toolbox: Computing and Plotting](https://xcompact3d-toolbox.readthedocs.io/en/stable/tutorial/computing_and_plotting.html)
#
| python-brasil-2021/00_visao_geral.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# language: python
# name: python3
# ---
# +
import json
import requests
# -
# # Routing with OTP API
# +
# Ask a locally running OpenTripPlanner instance for a door-to-door trip plan
# (transit + walking), then show the raw JSON response.
url = 'http://localhost:8080/otp/routers/default/plan'
query = {
    # Coordinates are 'lat,lon' strings (Auckland, NZ).
    "fromPlace": '-36.87927732520206,174.8173713684082',
    "toPlace": '-36.85183998798974,174.76100206375122',
    "time": '8:00am',
    "date": '07-20-2017',
    "mode": 'TRANSIT,WALK',
    "maxWalkDistance": '1000',
    "arriveBy": "false",
    "wheelchair": "false",
    "locale": "en",
}
r = requests.get(url, params=query)
r.json()
# -
# # Isochrone with OTP API
# +
# Request travel-time isochrones (5 and 10 minute contours) around one origin.
url = 'http://localhost:8080/otp/routers/default/isochrone'
query = {
    "fromPlace": '-36.87845344732706,174.81805801391602',
    "time": '8:00am',
    "date": '07-20-2017',
    "mode": 'TRANSIT,WALK',
    "cutoffSec": [300, 600],  # contour cutoffs in seconds
}
r = requests.get(url, params=query)
r.json()
# +
# Create a travel-time surface in batch mode via the OTP surfaces endpoint.
url = 'http://localhost:8080/otp/surfaces'
query = {
    "fromPlace": '-36.87845344732706,174.81805801391602',
    "toPlace": '-36.87845344732706,174.81805801391602',
    "time": '8:00am',
    "date": '07-20-2017',
    "mode": 'TRANSIT,WALK',
    "maxWalkDistance": '1000',
    "batch": 'true',
    "layers": 'traveltime',
    "styles": 'color30',
}
r = requests.get(url, params=query)
r
# -
r.json()
| ipynb/.ipynb_checkpoints/otp_api_only-checkpoint.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: hcad_pred
# language: python
# name: hcad_pred
# ---
# # Load previously exported files
#
# The files being imported below were constructed in the last six exploratory notebooks (1.0 to 1.5). Here, we join them all using the `acct` column as key.
# %load_ext autoreload
# %autoreload 2
# +
from pathlib import Path
import pickle
import pandas as pd
from src.definitions import ROOT_DIR
from src.data.utils import Table, save_pickle
# +
def _load_raw_pickle(name):
    """Load data/raw/2016/<name>.pickle, asserting the file exists first."""
    fn = ROOT_DIR / 'data/raw/2016/{}.pickle'.format(name)
    assert fn.exists()
    with open(fn, 'rb') as f:
        return pickle.load(f)

# One DataFrame per table exported by notebooks 1.0-1.5, all keyed by 'acct'.
# (Refactored from six copy-pasted load cells into a single helper.)
building_res_comps = _load_raw_pickle('building_res_comps')
fixtures_comps = _load_raw_pickle('fixtures_comps')
real_acct_comps = _load_raw_pickle('real_acct_comps')
extra_features_uts_grade_comps = _load_raw_pickle('extra_features_uts_grade_comps')
exterior_comps = _load_raw_pickle('exterior_comps')
structural_elem1_comps = _load_raw_pickle('structural_elem1_comps')
# -
# # Merge tables
# Left-join every table onto the residential-building table, always on the
# 'acct' key.  validate='one_to_one' raises MergeError if any table contains
# duplicate accounts, so a bad join fails loudly instead of silently
# multiplying rows.  (Refactored from five copy-pasted merge cells.)
comps = building_res_comps
for other in (fixtures_comps,
              real_acct_comps,
              extra_features_uts_grade_comps,
              exterior_comps,
              structural_elem1_comps):
    comps = comps.merge(other,
                        how='left',
                        on='acct',
                        validate='one_to_one')
comps.head()
comps.columns
comps.shape
# # Export comps
save_fn = ROOT_DIR / 'data/interim/2016/comps.pickle'
save_pickle(comps, save_fn)
| notebooks/01_Exploratory/1.6-rp-hcad-data-view-join-selected-data.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3.7.6 64-bit
# language: python
# name: python_defaultSpec_1598098675753
# ---
# + tags=[]
import numpy as np
import random
import json
import os
from PIL import Image, ImageOps, ImageFont, ImageDraw
import matplotlib.pyplot as plt
import pandas as pd
# + tags=[]
# summarize controller parameters
# Input = 32 + 13 = 45 values; output = 13 values.  Presumably a 32-dim VAE
# latent vector concatenated with a 13-dim action — confirm against
# models/controller.py.
from models.vae import VAE
from models.controller import Controller
from torchsummary import summary
input_size = 32 + 13
output_size = 13
model = Controller(input_size, output_size).cuda()  # requires a CUDA device
summary(model, (1, input_size))
# +
# Combine the ic and pc statistics into a single fitness score:
# pc = (pc0*pc1)^b * ((pc1-pc0)/pc1)^c, fitness = 100 + ic/pc.
pc0 = 0.0910
pc1 = 0.1467
b = 0.6
c = 0.3
ic = 0.0697
relative_growth = (pc1 - pc0) / pc1
pc = (pc0 * pc1) ** b * relative_growth ** c
fitness = 100.0 + ic / pc
print(f'{fitness}, {pc}')
# +
# open fitness files
# Switch between the wide- and narrow-perspective runs by (un)commenting the
# dirname / name / title triple.
# dirname = 'WIDE_20200820_003939'
# name = 'WIDE_VISION'
# title = 'Wide-perspective experiment'
dirname = 'NARROW_20200820_003627'
name = 'NARROW_VISION'
title = 'Narrow-perspective experiment'
results_folder = 'results/final'
optimizer = 'cma'      # optimizer tag — presumably CMA-ES; confirm with the training code
num_rollouts = 1       # rollouts per fitness evaluation — confirm with the training code
popsize = 32           # population size per generation — confirm with the training code
exp_results_folder = os.path.join(results_folder, dirname)
filename = os.path.join(results_folder, dirname, 'fitness.txt')      # per-generation summary
filename_ind = os.path.join(results_folder, dirname, 'ind_fitness.txt')  # per-individual stats
# + tags=[]
# plot fitness results
with open(filename, 'r') as f:
    data = np.loadtxt(f, delimiter='/', skiprows=1)
# Columns (after the skipped header): generation, mean, generation best, best
# overall — inferred from the legend below; confirm against fitness.txt.
plt.rcParams["font.family"] = "Arial"
fig = plt.figure(figsize=(8, 4), dpi=80, facecolor='w', edgecolor='k')
line_mean, = plt.plot(data[:, 0], data[:, 1])
line_best_gen, = plt.plot(data[:, 0], data[:, 2])
line_best_overall, = plt.plot(data[:, 0], data[:, 3])
plt.legend([line_mean, line_best_gen, line_best_overall], ['Generation mean', 'Generation best', 'Best overall'], loc=4)
plt.xlabel('Generation')
plt.ylabel('Fitness')
plt.title(title)
plt.savefig(os.path.join(results_folder, dirname, name) + ".svg")
# Histogram of the per-generation best fitness values.
fig = plt.figure(figsize=(8, 4), dpi=80, facecolor='w', edgecolor='k')
plt.title(title)
#plt.savefig(os.path.join(results_folder, dirname, name) + ".svg")
plt.hist(data[:, 2], density=False, bins=64)
plt.ylabel('Frequency')
plt.xlabel('Fitness');
plt.show()
# + tags=[]
# plot some individual statistics
with open(filename_ind, 'r') as f:
    data_ind = np.loadtxt(f, delimiter='/', skiprows=1)
df = pd.DataFrame(data=data_ind, columns=["gen", "id", "fitness", "coverage", "coverage_reward", "IC", "PC", "PCt0", "PCt1"])
df["PC"] = (df["PCt1"] - df["PCt0"])  # recompute PC as the t1 - t0 difference
df["coverage"] = df["coverage"]*1000.0  # rescale so coverage is visible next to fitness
# grouped_df = df.groupby('gen').agg({
#     "coverage": [np.mean],
#     "fitness": [np.mean],
#     "IC": [np.mean],
#     "PC": [np.mean]})
# Per-generation means of the two series plotted below.
grouped_df = df.groupby('gen').agg({
    "coverage": [np.mean],
    "fitness": [np.mean]
})
fig = plt.figure(figsize=(4, 2), dpi=80, facecolor='w', edgecolor='k', )
ax = plt.gca()
plt.gcf().subplots_adjust(bottom=0.25, left=0.15)
grouped_df.plot(kind='line', ax=ax)
ax.legend(["Mean coverage (x1000)", "Mean fitness"], loc=4);
plt.xlabel('Generation')
plt.ylabel('Value')
# NOTE(review): the title says "(Wide)" but dirname above selects the NARROW
# run and the file is saved as narrow_* — confirm which label is intended.
plt.title("Coverage and complexity (Wide)")
plt.savefig(os.path.join(results_folder, dirname, "narrow_cov_fitness_comp.svg"))
plt.show()
# Pearson correlation between fitness and (rescaled) coverage.
print("p: " , np.corrcoef(df["fitness"], df["coverage"])[1,0])
# +
impath = os.path.join(exp_results_folder, 'artifacts')
imlist = os.listdir(impath)
dim = 8
# Build an 8x8 grid of randomly sampled artifact images (sampling with
# replacement), each image getting a 1px black border.
list_im = [[ImageOps.expand(Image.open(os.path.join(impath, random.choice(imlist))), border=1, fill='black') for i in range(dim)] for i in range(dim)]
imgs_arr = [np.vstack(list_im[i]) for i in range(dim)]  # stack each column of images vertically
imgs_comb = [Image.fromarray(imgs_arr[i]) for i in range(dim)]
imgs_grid = Image.fromarray(np.hstack(imgs_comb))  # join the columns side by side
# NOTE(review): the commented-out save call below is missing its closing parenthesis.
#imgs_grid.save(os.path.join(results_folder, dirname, 'artifact_sample_grid.png')
# + tags=[]
with open(filename, 'r') as f:
    fitness_readout = np.loadtxt(f, delimiter='/', skiprows=1)
with open(filename_ind, 'r') as f:
    ind_fitness_readout = np.loadtxt(f, delimiter='/', skiprows=1)
# NOTE(review): this font is overwritten below before ever being used.
font = ImageFont.load("arial.pil")
# For each generation, locate the individual with the highest fitness
# (column 2 of ind_fitness.txt) and record its artifact file name.
ids = []
for row in fitness_readout:
    gen = row[0]
    lis = np.array([x for x in ind_fitness_readout if x[0] == gen])
    ids.append(f'{int(gen)}_{np.argmax(lis[:,2])}.jpg')
ids = np.array(ids)[:64]  # keep the first 64 generations for an 8x8 grid
# Load the 64 best-of-generation images, each with a 1px black border.
list_im = [[ImageOps.expand(Image.open(os.path.join(impath, ids[j+i*8])), border=1, fill='black') for i in range(8)] for j in range(8)]
for x in range(8):
    for y in range(8):
        # BUG FIX: list_im is a nested Python list, so it must be indexed as
        # list_im[x][y]; the original list_im[x, y] raised TypeError.
        # TODO: nothing is drawn yet — this Draw handle is currently unused.
        draw = ImageDraw.Draw(list_im[x][y])
img = Image.open("sample_in.jpg")
draw = ImageDraw.Draw(img)
# font = ImageFont.truetype(<font-file>, <font-size>)
font = ImageFont.truetype("sans-serif.ttf", 16)
# draw.text((x, y),"Sample Text",(r,g,b))
draw.text((0, 0),"Sample Text",(255,255,255),font=font)
img.save('sample-out.jpg')
# Stack each row of images vertically, then the rows horizontally.
imgs_arr = [np.vstack(i) for i in list_im]
imgs_comb = [Image.fromarray(i) for i in imgs_arr]
imgs_grid = Image.fromarray(np.hstack(imgs_comb))
imgs_grid.save(os.path.join(results_folder, dirname, 'bestgrid.png'))
| plot.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# language: python
# name: python3
# ---
# # Listen, Attend and Spell
#
# Implementation of the paper from W.Chan and al., submitted in [arXiv](https://arxiv.org/abs/1508.01211) on 5 Aug 2015.
# The final paper has been presented in ICASSP 2016 and is available on the [author site](http://williamchan.ca/papers/wchan-icassp-2016.pdf)
# ## Abstract
# > We present Listen, Attend and Spell (LAS), a neural network that learns to transcribe speech utterances to characters. Unlike traditional DNN-HMM models, this model learns all the components of a speech recognizer jointly. Our system has two components: a listener and a speller. The listener is a pyramidal recurrent network encoder that accepts filter bank spectra as inputs. The speller is an attention-based recurrent network decoder that emits characters as outputs. The network produces character sequences without making any independence assumptions between the characters. This is the key improvement of LAS over previous end-to-end CTC models. On a subset of the Google voice search task, LAS achieves a word error rate (WER) of 14.1% without a dictionary or a language model, and 10.3% with language model rescoring over the top 32 beams. By comparison, the state-of-the-art CLDNN-HMM model achieves a WER of 8.0%.
# ## 1. Introduction
#
# State-of-the-art speech recognizers of today are complicated systems comprising of various components - acoustic models, language models, pronunciation models and text normalization. Each of these components make assumptions about the underlying probability distributions they model. For example n-gram language models and Hidden Markov Models (HMMs) make strong Markovian independence assumptions between words/symbols in a sequence. Connectionist Temporal Classification (CTC) and DNN-HMM systems assume that neural networks make independent predictions at different times and use HMMs or language models (which make their own independence assumptions) to introduce dependencies between these predictions over time [[1], [2], [3]]. _End-to-end_ training of such models attempts to mitigate these problems by training the components jointly [4, 5, 6]. In these models, acoustic models are updated based on a WER proxy, while the pronunciation and language models are rarely updated [7], if at all.
#
# In this paper we introduce Listen, Attend and Spell (LAS), a neural network that learns to transcribe an audio sequence signal to a word sequence, one character at a time, without using explicit language models, pronunciation models, HMMs, etc. LAS does not make any independence assumptions about the nature of the probability distribution of the output character sequence, given the input acoustic sequence. This method is based on the sequence-to-sequence learning framework with attention [8, 9, 10, 11, 12, 13]. It consists of an encoder Recurrent Neural Network (RNN), which is named the _listener_, and a decoder RNN, which is named the _speller_. The listener is a pyramidal RNN that converts speech signals into high level features. The speller is an RNN that transduces these higher level features into output utterances by specifying a probability distribution over the next character, given all of the acoustics and the previous characters. At each step the RNN uses its internal state to guide an attention mechanism [10, 11, 12] to compute a "context" vector from the high level features of the listener. It uses this context vector, and its internal state to both update its internal state and to predict the next character in the sequence. The entire model is trained jointly, from scratch, by optimizing the probability of the output sequence using a chain rule decomposition. We call this an _end-to-end model_ because all the components of a traditional speech recognizer are integrated into its parameters, and optimized together during training, unlike _end-to-end training_ of conventional models that attempt to adjust acoustic models to work well with the other fixed components of a speech recognizer.
#
# Our model was inspired by [11, 12] that showed how end-to-end recognition could be performed on the TIMIT phone recognition task. We note a recent paper from the same group that describes an application of these ideas to WSJ [14]. Our paper independently explores the challenges associated with the application of these ideas to large scale conversational speech recognition on a Google voice search task. We defer a discussion of the relationship between these and other methods to section 5.
#
# [1]: https://arxiv.org/abs/1303.5778 "Speech Recognition with Deep Recurrent Neural Networks"
# [2]: https://arxiv.org/abs/1701.02720 "Towards End-to-End Speech Recognition with Recurrent Neural Networks"
# [3]: https://static.googleusercontent.com/media/research.google.com/en//pubs/archive/38131.pdf "Deep Neural Networks for Acoustic Modeling in Speech Recognition: The Shared Views of Four Research Groups"
# ## 2. Model
#
# In this section, we formally describe LAS. Let $\mathbf{x} = (x_1,...,x_T)$ be the input sequence of filter bank spectra features and $\mathbf{y} = (\langle \text{sos} \rangle,y_1,...,y_S,\langle \text{eos} \rangle)$, $y_i \in \{a,··· ,z,0,··· ,9,\langle \text{space} \rangle, \langle \text{comma} \rangle, \langle \text{period} \rangle, \langle \text{apostrophe} \rangle, \langle \text{unk} \rangle \}$ be the output sequence of characters. Here $\langle \text{sos} \rangle$ and $\langle \text{eos} \rangle$ are the special start-of-sentence token, and end-of-sentence tokens, respectively, and $\langle \text{unk} \rangle$ are unknown tokens such as accented characters.
#
# LAS models each character output $y_i$ as a conditional distribution over the previous characters $y_{<i}$ and the input signal $\mathbf x$ using the chain rule for probabilities:
#
# $$
# P(y \mid x) = \prod_{i}{P(y_i \mid \mathbf{x}, y_{<i})}
# \tag{1}\label{eq:1}
# $$
#
# This objective makes the model a discriminative, end-to-end model, because it directly predicts the conditional probability of character sequences, given the acoustic signal.
#
# LAS consists of two sub-modules: the listener and the speller. The listener is an acoustic model encoder that performs an operation called $\operatorname{Listen}$. The $\operatorname{Listen}$ operation transforms the original signal $\mathbf{x}$ into a high level representation $\mathbf{h} = (h_1,...,h_U)$ with $U \le T$. The speller is an attention-based character decoder that performs an operation we call $\operatorname{AttendAndSpell}$. The $\operatorname{AttendAndSpell}$ operation consumes $h$ and produces a probability distribution over character sequences:
#
# $$
# \mathbf{h} = \operatorname{Listen}(\mathbf{x})
# \tag{2}\label{eq:2}
# $$
# $$
# P(y_i \mid \mathbf{x}, y_{<i}) = \operatorname{AttendAndSpell}(y_{<i}, \mathbf{h})
# \tag{3}\label{eq:3}
# $$
#
# Figure 1 depicts these two components. We provide more details of these components in the following sections.
#
#  model: the listener is a pyramidal BLSTM encoding our input sequence x into high level features h, the speller is an attention-based decoder generating the y characters from h.")
#
# ### 2.1. Listen
#
# The $\operatorname{Listen}$ operation uses a Bidirectional Long Short Term Memory RNN (BLSTM) [15, 16, 2] with a pyramidal structure. This modification is required to reduce the length $U$ of $\mathbf{h}$, from $T$, the length of the input $\mathbf{x}$, because the input speech signals can be hundreds to thousands of frames long. A direct application of BLSTM for the operation $\operatorname{Listen}$ converged slowly and produced results inferior to those reported here, even after a month of training time. This is presumably because the operation $\operatorname{AttendAndSpell}$ has a hard time extracting the relevant information from a large number of input time steps.
#
# We circumvent this problem by using a pyramidal BLSTM (pBLSTM). In each successive stacked pBLSTM layer, we reduce the time resolution by a factor of 2. In a typical deep BLSTM architecture, the output at the $i$-th time step, from the $j$-th layer is computed as follows:
#
# $$
# h^j_i = \operatorname{BLSTM}(h^j_{i−1},h^{j−1}_i )
# \tag{4}\label{eq:4}
# $$
#
# In the pBLSTM model, we concatenate the outputs at consecutive steps of each layer before feeding it to the next layer, i.e.:
#
# $$
# h^j_i = \operatorname{pBLSTM}(h^j_{i−1}, \left[ h^{j−1}_{2i} , h^{j−1}_{2i+1} \right])
# \tag{5}\label{eq:5}
# $$
#
# In our model, we stack 3 pBLSTMs on top of the bottom BLSTM layer to reduce the time resolution $2^3 = 8$ times. This allows the attention model (described in the next section) to extract the relevant information from a smaller number of times steps. In addition to reducing the resolution, the deep architecture allows the model to learn nonlinear feature representations of the data. See Figure 1 for a visualization of the pBLSTM.
#
# The pyramidal structure also reduces the computational complexity. The attention mechanism in the speller $U$ has a computational complexity of $\mathcal O(US)$. Thus, reducing $U$ speeds up learning and inference significantly. Other neural network architectures have been described in literature with similar motivations, including the hierarchical RNN [17], clockwork RNN [18] and CNN [19].
# +
import numpy as np
from keras.layers import Bidirectional, LSTM, Reshape, Lambda
from keras.models import Sequential
def Listen(units, timesteps, features):
    """Build the pyramidal BLSTM listener (encoder) from the LAS paper.

    The bottom layer is a plain BLSTM; the three layers stacked on top act
    as pBLSTM layers: each ``Reshape((-1, units * 4))`` fuses two consecutive
    time steps (eq. 5), halving the time resolution, for an overall
    2^3 = 8x reduction.

    Parameters
    ----------
    units : int
        Number of LSTM units per direction in every layer.
        (The original signature named this ``unit`` while the body used
        ``units``, so every call raised NameError — fixed here.)
    timesteps : int
        Input sequence length; must be divisible by 2^3 = 8 so the three
        Reshape halvings divide evenly.
    features : int
        Dimensionality of each input frame (e.g. 40 log-mel features).

    Returns
    -------
    keras.models.Sequential
        Listener mapping (timesteps, features) -> (timesteps / 8, units * 2).
    """
    # Each Bidirectional(..., merge_mode='concat') emits units * 2 features;
    # the following Reshape packs two consecutive steps into one, giving
    # units * 4 features at half the time resolution.
    return Sequential([
        # Bottom BLSTM: operates at full input resolution.
        Bidirectional(LSTM(units, return_sequences=True), merge_mode='concat', input_shape=(timesteps, features)),
        Reshape((-1, units * 4)),
        Bidirectional(LSTM(units, return_sequences=True), merge_mode='concat'),
        Reshape((-1, units * 4)),
        Bidirectional(LSTM(units, return_sequences=True), merge_mode='concat'),
        Reshape((-1, units * 4)),
        Bidirectional(LSTM(units, return_sequences=True), merge_mode='concat')
    ])
# units = 256
# timesteps = 55*8 # =440, about 10ms of a 44100Hz sampled sound. Must be a multiple 2^L with L is the number of layers
# features = 40
# Build a listener: 256 units per direction, 440 (= 55*8) input frames —
# divisible by 2^3 so the three pyramid halvings divide evenly — and 40
# features per frame (log-mel bins).
l = Listen(256, 55*8, 40)
# Expected final output: 55 (= 440/8) time steps with 512 (= 256*2) features.
print(l.output)
l.summary()
# -
# ### 2.2. Attend and Spell
#
# The $\operatorname{AttendAndSpell}$ function is computed using an attention-based LSTM transducer [10, 12]. At every output step, the transducer produces a probability distribution over the next character conditioned on all the characters seen previously. The distribution for $y_i$ is a function of the decoder state $s_i$ and context $c_i$. The decoder state $s_i$ is a function of the previous state $s_{i−1}$, the previously emitted character $y_{i−1}$ and context $c_{i−1}$. The context vector $c_i$ is produced by an attention mechanism. Specifically:
#
# $$
# c_i = \operatorname{AttentionContext}(s_i, \mathbf{h})
# \tag{6}\label{eq:6}
# $$
# $$
# s_i = \operatorname{RNN}(s_{i−1}, y_{i−1}, c_{i−1})
# \tag{7}\label{eq:7}
# $$
# $$
# P(y_i| \mathbf{x}, y_{<i}) = \operatorname{CharacterDistribution}(s_i, c_i)
# \tag{8}\label{eq:8}
# $$
#
# where $\operatorname{CharacterDistribution}$ is an MLP with softmax outputs over characters, and where RNN is a 2 layer LSTM.
#
# At each time step, $i$, the attention mechanism, $\operatorname{AttentionContext}$ generates a context vector, $c_i$ encapsulating the information in the acoustic signal needed to generate the next character. The attention model is content based - the contents of the decoder state $s_i$ are matched to the contents of $h_u$ representing time step $u$ of $\mathbf{h}$, to generate an attention vector $\alpha_i$. The vectors $h_u$ are linearly blended using $\alpha_i$ to create $c_i$.
#
# Specifically, at each decoder timestep $i$, the $\operatorname{AttentionContext}$ function computes the scalar energy $e_{i,u}$ for each time step $u$, using vector $h_u \in \mathbf{h}$ and $s_i$. The scalar energy $e_{i,u}$ is converted into a probability distribution over times steps (or attention) $\alpha_i$ using a softmax function. The softmax probabilities are used as mixing weights for blending the listener features $h_u$ to the context vector $c_i$ for output time step $i$:
#
# $$
# e_{i,u} = \langle \phi(s_i), \psi(h_u) \rangle
# \tag{9}\label{eq:9}
# $$
# $$
# \alpha_{i,u} = \frac {\exp(e_{i,u})} {\sum_{u'} \exp(e_{i,u'})}
# \tag{10}\label{eq:10}
# $$
# $$
# c_i = \sum_u {\alpha_{i,u} h_u}
# \tag{11}\label{eq:11}
# $$
#
# where $\phi$ and $\psi$ are MLP networks. After training, the $\alpha_i$ distribution is typically very sharp and focuses on only a few frames of $\mathbf{h}$; $c_i$ can be seen as a continuous bag of weighted features of $\mathbf{h}$. Figure 1 shows the LAS architecture.
# ### 2.3. Learning
#
# We train the parameters of our model to maximize the log probability of the correct sequences. Specifically:
#
# $$
# \tilde{\theta} = \max_{\theta}{\sum_i {\log P(y_i \mid x, \tilde{y}_{<i};\theta)}}
# \tag{12}\label{eq:12}
# $$
#
# where $ \tilde {y}_{i−1}$ is the ground truth previous character or a character randomly sampled (with 10% probability) from the model, i.e. $\operatorname{CharacterDistribution}(s_{i−1}, c_{i−1})$ using the procedure from [20].
# ### 2.4. Decoding and Rescoring
#
# During inference we want to find the most likely character sequence given the input acoustics:
#
# $$
# \hat{y} = \underset{\mathbf{y}}{\operatorname {arg\,max}}\,\log P(\mathbf{y} \mid \mathbf{x})
# \tag{13}\label{eq:13}
# $$
#
# We use a simple left-to-right beam search similar to [8]. We can also apply language models trained on large external text corpora alone, similar to conventional speech systems [21]. We simply rescore our beams with the language model. We find that our model has a small bias for shorter utterances so we normalize our probabilities by the number of characters $\left| \mathbf{y} \right|_c$ in the hypothesis and combine it with a language model probability $P_\text{LM}(\mathbf{y})$:
#
# $$
# s(\mathbf{y} \mid \mathbf{x}) = \log P(\mathbf{y} \mid \mathbf{x}) \left| \mathbf{y} \right|_c + \lambda \log P_\text{LM}(\mathbf{y})
# \tag{14}\label{eq:14}
# $$
#
# where $\lambda$ is our language model weight and can be determined by a held-out validation set.
#
# ## 3. EXPERIMENTS
#
# We used a dataset with three million Google Voice Search utterances (representing 2000 hours of data) for our experiments. Approximately 10 hours of utterances were randomly selected as a held-out validation set. Data augmentation was performed using a room simulator, adding different types of noise and reverberations; the noise sources were obtained from YouTube and environmental recordings of daily events [22]. This increased the amount of audio data by 20 times with a SNR between 5dB and 30dB [22]. We used 40-dimensional log-mel filter bank features computed every 10ms as the acoustic inputs to the listener. A separate set of 22K utterances representing approximately 16 hours of data were used as the test data. A noisy test set was also created using the same corruption strategy that was applied to the training data. All training sets are anonymized and hand-transcribed, and are representative of Google’s speech traffic.
#
# The text was normalized by converting all characters to lower case English alphanumerics (including digits). The punctuations: space, comma, period and apostrophe were kept, while all other tokens were converted to the unknown $\langle \text{unk} \rangle$ token. As mentioned earlier, all utterances were padded with the start-of-sentence $\langle \text{sos} \rangle$ and the end-of-sentence $\langle \text{eos} \rangle$ tokens. The state-of-the-art model on this dataset is a CLDNN-HMM system that was described in [22]. The CLDNN system achieves a WER of 8.0% on the clean test set and 8.9% on the noisy test set. However, we note that the CLDNN uses unidirectional LSTMs and would certainly benefit from the use of a BLSTM architecture. Additionally, the LAS model does not use convolutional filters which have been reported to yield 5-7% WER relative improvement [22].
#
# For the $\operatorname{Listen}$ function we used 3 layers of 512 pBLSTM nodes (i.e., 256 nodes per direction) on top of a BLSTM that operates on the input. This reduced the time resolution by 8 = $2^3$ times. The $\operatorname{Spell}$ function used a two layer LSTM with 512 nodes each. The weights were initialized with a uniform distribution $\mathcal{U}(−0.1,0.1)$. Asynchronous Stochastic Gradient Descent (ASGD) was used for training our model [23]. A learning rate of 0.2 was used with a geometric decay of 0.98 per 3M utterances (i.e., 1/20-th of an epoch). We used the DistBelief framework [23] with 32 replicas, each with a minibatch of 32 utterances. In order to further speed up training, the sequences were grouped into buckets based on their frame length [8]. The model was trained until the results on the validation set stopped improving, taking approximately two weeks. The model was decoded using N-best list decoding with beam size of $N = 32$.
#
#
# ## References
#
# [1] <NAME>, <NAME>, and <NAME>, “Speech Recognition with Deep Recurrent Neural Networks,” in IEEE International Conference on Acoustics, Speech and Signal Processing, 2013.
#
# [2] <NAME> and <NAME>, “Towards End-to-End Speech Recognition with Recurrent Neural Networks,” in International Conference on Machine Learning, 2014.
#
# [3] <NAME>, <NAME>, <NAME>, <NAME>, <NAME>, <NAME>, <NAME>, <NAME>, <NAME>, <NAME>, and <NAME>, “Deep Neural Networks for Acoustic Modeling in Speech Recognition: The Shared Views of Four Research Groups,” IEEE Signal Processing Magazine, Nov. 2012.
#
# [4] <NAME>, <NAME>, <NAME>, and <NAME>, “Sequencediscriminative training of deep neural networks,” in INTERSPEECH, 2013.
#
# [5] H.Sak,O.Vinyals,G.Heigold,A.Senior,E.McDermott,<NAME>, and <NAME>, “Sequence Discriminative Distributed TrainingofLongShort-TermMemoryRecurrentNeuralNetworks,” in INTERSPEECH, 2014.
#
# [6] Y.Miao,M.Gowayyed,andF.Metze,“EESEN:End-to-End Speech Recognition using Deep RNN Models and WFSTbased Decoding,” in Http://arxiv.org/abs/1507.08240, 2015.
#
# [7] <NAME>, <NAME>, and <NAME>, “Integrating Deep Neural Networks into Structured Classification Approach based on Weighted Finite-State Transducers,” in INTERSPEECH, 2012.
#
# [8] <NAME>, <NAME>, and <NAME>, “Sequence to Sequence LearningwithNeuralNetworks,”inNeuralInformationProcessing Systems, 2014.
#
# [9] <NAME>, <NAME>, <NAME>, <NAME>, <NAME>,H.Schwen,andY.Bengio,“LearningPhraseRepresentationsusingRNNEncoder-DecoderforStatisticalMachine Translation,” in Conference on Empirical Methods in Natural Language Processing, 2014.
#
# [10] <NAME>, <NAME>, and <NAME>, “Neural Machine Translation by Jointly Learning to Align and Translate,” in International Conference on Learning Representations, 2015.
#
# [11] J.Chorowski,D.Bahdanau,K.Cho,andY.Bengio,“End-toend Continuous Speech Recognition using Attention-based RecurrentNN:FirstResults,”inNeuralInformationProcessing Systems: Workshop Deep Learning and Representation Learning Workshop, 2014.
#
# [12] J.Chorowski,D.Bahdanau,D.Serdyuk,K.Cho,andY.Bengio, “Attention-Based Models for Speech Recognition,” in Neural Information Processing Systems, 2015.
#
# [13] <NAME>, <NAME>, <NAME>, <NAME>, <NAME>, <NAME>, <NAME>, and <NAME>, “Show, Attend and Tell: Neural Image Caption Generation with Visual Attention,” in International Conference on Machine Learning, 2015.
#
# [14] <NAME>, <NAME>, <NAME>, <NAME>, and <NAME>, “End-to-end attention-based large vocabulary speech recognition,” in Http://arxiv.org/abs/1508.04395, 2015.
#
# [15] <NAME> and <NAME>, “Long Short-Term Memory,”NeuralComputation,vol.9,no.8,pp.1735–1780,Nov. 1997.
#
# [16] <NAME>, <NAME>, and <NAME>, “Hybrid Speech RecognitionwithBidirectionalLSTM,”inAutomaticSpeech Recognition and Understanding Workshop, 2013.
#
# [17] <NAME> and <NAME>, “Hierarchical Recurrent Neural Networks for Long-Term Dependencies,” in Neural Information Processing Systems, 1996.
#
# [18] <NAME>, <NAME>, <NAME>, and <NAME>, “A Clockwork RNN,” in International Conference on Machine Learning, 2014.
#
# [19] <NAME>, <NAME>, <NAME>, and <NAME>, “Gradientbased learning applied to document recognition,” Proceedings of the IEEE, vol. 86, pp. 2278–2324, 11 Nov. 1998.
#
# [20] <NAME>, <NAME>, <NAME>, and <NAME>, “Scheduled Sampling for Sequence Prediction with Recurrent Neural Networks,” in Neural Information Processing Systems, 2015.
#
# [21] D.Povey,A.Ghoshal,G.Boulianne,L.Burget,O.Glembek, <NAME>, <NAME>, <NAME>, <NAME>, <NAME>, <NAME>, <NAME>, and <NAME>, “The Kaldi Speech Recognition Toolkit,” in Automatic Speech Recognition and Understanding Workshop, 2011.
#
# [22] <NAME>, <NAME>, A. Senior, and <NAME>, “Convolutional, Long Short-Term Memory, Fully Connected Deep Neural Networks,” in IEEE International Conference on Acoustics, Speech and Signal Processing, 2015.
#
# [23] <NAME>, <NAME>, <NAME>, <NAME>, <NAME>, <NAME>, <NAME>, <NAME>, A. Senior, <NAME>, <NAME>, and <NAME>, “Large Scale Distributed Deep Networks,” in Neural Information Processing Systems, 2012.
#
# [24] <NAME>, <NAME>, <NAME>, <NAME>, <NAME>, <NAME>, <NAME>, <NAME>, <NAME>, <NAME>, and <NAME>, “Deep Speech: Scaling up end-to-end speech recognition,” in Http://arxiv.org/abs/1412.5567, 2014.
#
# [25] <NAME>, <NAME>, <NAME>, and <NAME>, “Lexicon-free conversational speech recognition with neural networks,” in North American Chapter of the Association for Computational Linguistics, 2015.
#
# [26] <NAME>, <NAME>, <NAME>, <NAME>, <NAME>, <NAME>, and <NAME>, “Learning acoustic frame labeling for speech recognition with recurrent neural networks,” in IEEE International Conference on Acoustics, Speech, and Signal Processing, 2015.
#
# [27] <NAME>, <NAME>, <NAME>, and <NAME>, “Fast and Accurate Recurrent Neural Network Acoustic Models for Speech Recognition,” in INTERSPEECH, 2015.
#
| notebooks/PapersImpl/las.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# language: python
# name: python3
# ---
# +
# %load_ext autoreload
# %autoreload 2
# %matplotlib inline
# -
import pandas as pd
import numpy as np
from sklearn.ensemble import RandomForestRegressor, RandomForestClassifier
from sklearn.linear_model import LogisticRegression
from sklearn.metrics import accuracy_score
import matplotlib.pyplot as plt
from sklearn.utils import shuffle
# ### Read the csv data
# Load the raw dataset. The first column is used below as the class label
# (raw_data.iloc[:, 0]); the remaining columns as numeric features.
raw_data = pd.read_csv('fish.csv')
raw_data
# __Check for number of null values in each column__
print('Number of null values in each column')
raw_data.isnull().sum()
# __Check for datatypes__
raw_data.info()
# * No preprocessing is required here, since there are no null values and the only column of object type is the label column we want to predict
def split_for_validation(a, n):
    """Split *a* into a (training, validation) pair.

    a : any sliceable collection with a .copy() method (DataFrame, ndarray,
        list, ...)
    n : fraction (0-1) of rows held out at the tail for validation

    Both parts are copies, so mutating one does not affect the source.
    """
    cutoff = len(a) - int(len(a) * n)
    train_part = a[:cutoff].copy()
    valid_part = a[cutoff:].copy()
    return train_part, valid_part
# ### Function for random forest classifier with final confusion matrix plot
# +
# function for random forest algorithm classifier
def rand_forest_classifier(raw_data, validation_split):
    """Train a RandomForestClassifier on *raw_data* and report accuracies.

    raw_data : DataFrame whose first column is the class label and whose
        remaining columns are numeric features.
    validation_split : fraction (0-1) of rows held out at the tail for
        validation (see split_for_validation).

    Prints training/validation accuracy and the confusion matrix, and shows
    a matshow plot of the confusion matrix. Returns None.
    """
    from sklearn.metrics import confusion_matrix

    feature_columns = raw_data.iloc[:, 1:].values
    dependent_column = raw_data.iloc[:, 0].values
    X_train, X_valid = split_for_validation(a=feature_columns,
                                            n=validation_split)
    y_train, y_valid = split_for_validation(a=dependent_column,
                                            n=validation_split)
    print('Training data: ', X_train.shape, y_train.shape)
    print('Validation data: ', X_valid.shape, y_valid.shape)

    m = RandomForestClassifier(n_jobs=-1)
    m.fit(X_train, y_train)
    predTree = m.predict(X_valid)
    print()
    print('Training accuracy: ', round((m.score(X_train, y_train)) * 100, 2), '%')
    print()
    print('Validation accuracy: ', round((m.score(X_valid, y_valid)) * 100, 2), '%')

    labels = np.unique(y_valid)
    cm = confusion_matrix(y_valid, predTree, labels=labels)
    print()
    print(cm)
    print()

    # A single figure is created here; the original also opened an unused
    # plt.figure(figsize=(10, 9)) beforehand, which left a blank figure.
    fig = plt.figure(figsize=(10, 6))
    ax = fig.add_subplot(111)
    cax = ax.matshow(cm)
    plt.title('Confusion matrix of the classifier')
    fig.colorbar(cax)
    # Convert the ndarray of labels to a plain list before prepending the
    # blank tick label: list + ndarray concatenation is not defined for
    # string dtypes and raised TypeError.
    ax.set_xticklabels([''] + labels.tolist())
    ax.set_yticklabels([''] + labels.tolist())
    plt.xlabel('Predicted')
    plt.ylabel('True')
    plt.show()
# -
# ### Result for Random Forest
raw_data = pd.read_csv('fish.csv')
# Train/evaluate on the data in file order (no shuffling yet).
rand_forest_classifier(raw_data = raw_data,
                       validation_split = 0.2)
# * The validation accuracy is very poor, it is as good as nothing.
# * The reason for such poor performance is the way we have split the data.
# * It may have happened that after splitting, since this is split manually, the last 20% of the data is sent to the validation dataset.
# * The validation dataset may just have one or two species of fish, however we have trained the model with around 7 species of fish.
# Inspect the labels on each side of the split to confirm the hypothesis
# above: the tail of the file contains only a couple of species.
validation_split = 0.2
feature_columns = raw_data.iloc[:,1:].values
dependent_column = raw_data.iloc[:,0].values
X_train, X_valid = split_for_validation(a = feature_columns,
                                        n = validation_split)
y_train, y_valid = split_for_validation(a = dependent_column,
                                        n = validation_split)
print(y_train)
print()
print(y_valid)
# ### Improving the accuracy of model
#
# * It is observed that training dataset has around 7 species of fishes, and validation dataset has only 2 species of fishes.
# * Hence to overcome this, we will shuffle the dataframe, so that we will have random 20% of the data as validation of dataset
# Shuffle the rows first so the tail 20% holdout contains a random mix of
# species instead of whatever happens to be last in the file.
raw_data = shuffle(raw_data)
rand_forest_classifier(raw_data = raw_data,
                       validation_split = 0.2)
# ### Function for logistic regression
def log_regression(raw_data, validation_split):
    """Train a LogisticRegression model on *raw_data* and report accuracies.

    Mirrors rand_forest_classifier: the first column of *raw_data* is the
    class label, the remaining columns are numeric features, and the last
    *validation_split* fraction of rows is held out for validation.

    Prints training/validation accuracy and the confusion matrix, and shows
    a matshow plot of the confusion matrix. Returns None.
    """
    from sklearn.metrics import confusion_matrix

    feature_columns = raw_data.iloc[:, 1:].values
    dependent_column = raw_data.iloc[:, 0].values
    X_train, X_valid = split_for_validation(a=feature_columns,
                                            n=validation_split)
    y_train, y_valid = split_for_validation(a=dependent_column,
                                            n=validation_split)
    print('Training data: ', X_train.shape, y_train.shape)
    print('Validation data: ', X_valid.shape, y_valid.shape)

    model = LogisticRegression()
    model.fit(X_train, y_train)
    predTree = model.predict(X_valid)
    print('Training accuracy: ', round((model.score(X_train, y_train)) * 100, 2), '%')
    print()
    print('Validation accuracy: ', round((model.score(X_valid, y_valid)) * 100, 2), '%')

    labels = np.unique(y_valid)
    cm = confusion_matrix(y_valid, predTree, labels=labels)
    print()
    print(cm)
    print()

    fig = plt.figure(figsize=(10, 6))
    ax = fig.add_subplot(111)
    cax = ax.matshow(cm)
    plt.title('Confusion matrix of the classifier')
    fig.colorbar(cax)
    # Convert the ndarray of labels to a plain list before prepending the
    # blank tick label: list + ndarray concatenation is not defined for
    # string dtypes and raised TypeError.
    ax.set_xticklabels([''] + labels.tolist())
    ax.set_yticklabels([''] + labels.tolist())
    plt.xlabel('Predicted')
    plt.ylabel('True')
    plt.show()
# Logistic regression on the unshuffled data (same flawed split as before)...
raw_data = pd.read_csv('fish.csv')
log_regression(raw_data = raw_data,
               validation_split = 0.2)
# ...and on shuffled data for a fair comparison.
raw_data = shuffle(raw_data)
log_regression(raw_data = raw_data,
               validation_split = 0.2)
# * Here one can observe that logistic regression has a higher validation accuracy than training accuracy; this suggests that, on this small dataset, logistic regression is less prone to overfitting
| fish_species_classifier/fish_species_classifier.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# language: python
# name: python3
# ---
import requests,pprint,json,datetime,time
from pymongo import MongoClient
from secrets import *
# Sign up for free API key at https://openweathermap.org/appid
# # 3.1 Acquiring data from an API
# Fetch the current weather for one city to sanity-check the API key.
city = 'london'
# `pprint` is the module (imported at the top); the callable is
# pprint.pprint — calling the module raised TypeError.
pprint.pprint(key)
requestString = u'https://api.openweathermap.org/data/2.5/weather?q=%s&APPID=%s' % (city, key)
res = requests.get(requestString)
res.reason  # HTTP reason phrase, e.g. 'OK' (echoed by the notebook)
pprint.pprint(res.json())
def getData(city):
    """Query OpenWeatherMap's current-weather endpoint for *city*.

    Uses the module-level `key` API key. Returns the raw requests.Response;
    callers check status_code / parse .json() themselves.
    """
    url = u'https://api.openweathermap.org/data/2.5/weather?q=%s&APPID=%s' % (city, key)
    return requests.get(url)
# The helper works for multi-word city names too (requests URL-encodes).
res = getData('La paz')
res
# # 3.2 Ingesting data into MongoDB
# ## Create DB and collection
# Connect to a local MongoDB; the 'packt' database and 'weather' collection
# are created lazily on first insert.
client = MongoClient('localhost')
db = client.packt
weatherCollection = db.weather
res = weatherCollection.insert_one(res.json())
# ## Get list of cities
# Download the full OpenWeatherMap city list (shell commands, run once):
# !wget http://bulk.openweathermap.org/sample/city.list.json.gz
# !gunzip city.list.json.gz
# !head city.list.json
# Parse the city list; the file holds a JSON array of city dicts.
with open('data/city.list.json', 'r') as inFile:
    citiesJson = json.loads(inFile.read())
citiesJson[0]
type(citiesJson)
# ## Limit to Chilean cities
# Keep only Chilean cities (country code 'CL'). Materialize as lists:
# Python 3 filter/map return one-shot iterators, so the original code's
# len(), double iteration and cities[0] indexing all failed.
citiesJsonCL = [c for c in citiesJson if c[u'country'] == u'CL']
len(citiesJsonCL)
cities = [c['name'] for c in citiesJsonCL]
ids = [c['id'] for c in citiesJsonCL]
res = getData(cities[0])
res
# ## Cycle through cities
# Cycle through every Chilean city and store its current weather document.
# (Original used Python 2 print statements, an invalid `except e:` clause,
# and a format string with no %s placeholder.)
for i, name in zip(ids, cities):
    res = getData(name)
    if not res.status_code == 200:
        print('Error grabbing data for %s' % name)
        print(res.reason)
    else:
        try:
            weatherCollection.insert_one(res.json())
        except Exception as e:
            # Log and keep going; one bad document shouldn't stop the crawl.
            print('Error inserting into DB: %s' % e)
            print('(City %s)' % name)
    time.sleep(1)
    # Sleep so we don't thrash the API
def getTimestamp(dt):
    """Convert a POSIX timestamp *dt* to a local-time datetime object.

    The original called pd.datetime.fromtimestamp, but pandas is never
    imported in this notebook (NameError) and pd.datetime was removed in
    pandas 2.0 anyway; the stdlib equivalent is used instead.
    """
    return datetime.datetime.fromtimestamp(dt)
def getDate(tstamp):
    """Format a POSIX timestamp as e.g. 'Jan 02 - 15:04' (local time).

    The original format string used %m (zero-padded month) after the hour;
    minutes are %M, so timestamps printed as hour:month — fixed here.
    """
    dt = datetime.datetime.fromtimestamp(tstamp)
    return dt.strftime('%b %d - %H:%M')
# # 3.3 Querying MongoDB for useful information
# ## Find the max recorded temperatures over time per station
# Max recorded temperature per station via the aggregation pipeline.
# (Original used Python 2 print statements — SyntaxError on the Py3 kernel.)
pipeline = []
pipeline.append({'$group': {'_id': '$name', 'maxTemp': {'$max': '$main.temp'}}})
# Group measurements by city name, extract maximum recorded temp for each
pipeline.append({'$limit': 10})
# Limit results to first 10 cities
cur = weatherCollection.aggregate(pipeline=pipeline)
for d in cur:
    print(d['_id'], d['maxTemp'])
# ## Get datetime of max temp per station
# Datetime of the max temperature per station.
# (Original used Python 2 print statements — SyntaxError on the Py3 kernel.)
pipeline = []
pipeline.append({'$match': {'name': {'$exists': True}}})
# Filter out dirty rows
pipeline.append({'$sort': {'name': 1, 'main.temp': -1}})
# Sort by name (asc) and temperature (desc)
pipeline.append({'$group': {'_id': '$name', 'maxTemp': {'$first': '$main.temp'}, 'date': {'$first': '$dt'}}})
# Group by name, grab maximum temperature and date of temperature
# Sorted by temp so grab first document from each group
pipeline.append({'$limit': 10})
# limit to first 10 cities
cur = weatherCollection.aggregate(pipeline=pipeline)
for d in cur:
    print(getDate(d['date']), d['_id'], d['maxTemp'])
    print('')
# ## Do sanity check for one city
# Sanity check: list every stored reading for a single city.
# (Original used a Python 2 print statement — SyntaxError on the Py3 kernel.)
for d in weatherCollection.find({'name': 'Caletones'}):
    print(getDate(d['dt']), '', d['main']['temp'])
| notebooks/ptyhonVideoForBigDataFiles/alexFiles/Video 3._.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# language: python
# name: python3
# ---
#
# # Export/Import assets with `ibm-watson-machine-learning`
#
# This notebook demonstrates an example for exporting/importing assets using Watson Machine Learning service. It contains steps and code to work with [ibm-watson-machine-learning](https://pypi.python.org/pypi/ibm-watson-machine-learning) library available in PyPI repository.
# ## Learning goals
#
# The learning goals of this notebook are:
#
# - Download an externally trained Keras model.
# - Persist an external model in Watson Machine Learning repository.
# - Export the model from the space
# - Import the model to another space and deploy
#
#
#
# ## Contents
#
# This notebook contains the following parts:
#
# 1. [Setup](#setup)
# 2. [Download externally created Keras model](#download)
# 3. [Persist externally created Keras model](#persistence)
# 4. [Export the model](#export)
# 5. [Import the model](#import)
# 6. [Deploy and score the imported model](#scoring)
# 7. [Clean up](#cleanup)
# 8. [Summary and next steps](#summary)
# <a id="setup"></a>
# ## 1. Set up the environment
#
# Before you use the sample code in this notebook, you must perform the following setup tasks:
#
# - Create a <a href="https://console.ng.bluemix.net/catalog/services/ibm-watson-machine-learning/" target="_blank" rel="noopener no referrer">Watson Machine Learning (WML) Service</a> instance (a free plan is offered and information about how to create the instance can be found <a href="https://dataplatform.cloud.ibm.com/docs/content/wsj/analyze-data/ml-service-instance.html?context=analytics" target="_blank" rel="noopener no referrer">here</a>).
# ### Connection to WML
#
# Authenticate the Watson Machine Learning service on IBM Cloud. You need to provide platform `api_key` and instance `location`.
#
# You can use [IBM Cloud CLI](https://cloud.ibm.com/docs/cli/index.html) to retrieve platform API Key and instance location.
#
# API Key can be generated in the following way:
# ```
# ibmcloud login
# ibmcloud iam api-key-create API_KEY_NAME
# ```
#
# In result, get the value of `api_key` from the output.
#
#
# Location of your WML instance can be retrieved in the following way:
# ```
# ibmcloud login --apikey API_KEY -a https://cloud.ibm.com
# ibmcloud resource service-instance WML_INSTANCE_NAME
# ```
#
# In result, get the value of `location` from the output.
# **Tip**: Your `Cloud API key` can be generated by going to the [**Users** section of the Cloud console](https://cloud.ibm.com/iam#/users). From that page, click your name, scroll down to the **API Keys** section, and click **Create an IBM Cloud API key**. Give your key a name and click **Create**, then copy the created key and paste it below. You can also get a service specific url by going to the [**Endpoint URLs** section of the Watson Machine Learning docs](https://cloud.ibm.com/apidocs/machine-learning). You can check your instance location in your <a href="https://console.ng.bluemix.net/catalog/services/ibm-watson-machine-learning/" target="_blank" rel="noopener no referrer">Watson Machine Learning (WML) Service</a> instance details.
#
# You can also get service specific apikey by going to the [**Service IDs** section of the Cloud Console](https://cloud.ibm.com/iam/serviceids). From that page, click **Create**, then copy the created key and paste it below.
#
# **Action**: Enter your `api_key` and `location` in the following cell.
# Fill these in with your own values before running the notebook.
api_key = 'PASTE YOUR PLATFORM API KEY HERE'
location = 'PASTE YOUR INSTANCE LOCATION HERE'
# The service endpoint is region-specific, derived from the instance location.
wml_credentials = {
    "apikey": api_key,
    "url": 'https://' + location + '.ml.cloud.ibm.com'
}
# ### Install and import the latest `ibm-watson-machine-learning` package
# **Note:** `ibm-watson-machine-learning` documentation can be found <a href="http://ibm-wml-api-pyclient.mybluemix.net/" target="_blank" rel="noopener no referrer">here</a>. Latest client can be found <a href="https://pypi.org/project/ibm-watson-machine-learning/" rel="noopener no referrer">here.</a>
# +
# Authenticate the WML client with the credentials built above.
from ibm_watson_machine_learning import APIClient

client = APIClient(wml_credentials)
# -
# ### Create two spaces. One for export and one for import
#
# **Tip**: You can refer to details and example for space management apis [here](https://github.com/IBM/watson-machine-learning-samples/blob/master/cloud/notebooks/python_sdk/instance-management/Space%20management.ipynb)
#
# Refer to the link in the Tip above to find the COS and instance CRNs
# Cloud Object Storage CRN, WML instance CRN and instance name; see the
# space-management notebook linked above for how to look these up.
cos_resource_crn = "PUT_YOUR_COS_CRN"
instance_crn = "PUT_YOUR_WML_INSTANCE_CRN"
name = "PUT_YOUR_WML_INSTANCE_NAME_HERE"
# +
import uuid

# Random suffix so repeated notebook runs create distinct spaces.
space_name = str(uuid.uuid4())


def make_space_metadata(prefix):
    """Build space metadata; both spaces share storage/compute and differ
    only in the name prefix. (Dedupes the original's two near-identical
    dicts, which also disagreed on the description — ' description' vs
    'description'.)"""
    return {
        client.spaces.ConfigurationMetaNames.NAME: prefix + space_name,
        client.spaces.ConfigurationMetaNames.DESCRIPTION: space_name + " description",
        client.spaces.ConfigurationMetaNames.STORAGE: {"resource_crn": cos_resource_crn},
        client.spaces.ConfigurationMetaNames.COMPUTE: {
            "name": name,
            "crn": instance_crn
        }
    }


# Source space: the model is stored and exported from here.
space = client.spaces.store(meta_props=make_space_metadata("client_space_export_"))
export_space_id = client.spaces.get_id(space)
print("{}export space_id: {}{}".format('\n', export_space_id, '\n'))

# Target space: the export archive is imported into here.
space = client.spaces.store(meta_props=make_space_metadata("client_space_import_"))
import_space_id = client.spaces.get_id(space)
print("{}import space_id: {}".format('\n', import_space_id))
# + [markdown] pycharm={"name": "#%% md\n"}
# <a id="download"></a>
# ## 2. Download externally created Keras model and data
# In this section, you will download externally created Keras models and data used for training it.
# + pycharm={"is_executing": false, "name": "#%%\n"}
# Download the pre-trained Keras MNIST model archive (skipped if present).
import os
import wget
import ssl  # NOTE(review): imported but never used in this cell

data_dir = 'MNIST_DATA'
if not os.path.isdir(data_dir):
    os.mkdir(data_dir)

model_path = os.path.join(data_dir, 'mnist_keras.h5.tgz')
if not os.path.isfile(model_path):
    wget.download("https://github.com/IBM/watson-machine-learning-samples/raw/master/cloud/models/keras/mnist_keras.h5.tgz", out=data_dir)
# +
# Download the MNIST dataset archive (skipped if present). os/wget are
# re-imported so this cell can run standalone.
import os
import wget

data_dir = 'MNIST_DATA'
if not os.path.isdir(data_dir):
    os.mkdir(data_dir)

filename = os.path.join(data_dir, 'mnist.npz')
if not os.path.isfile(filename):
    wget.download('https://s3.amazonaws.com/img-datasets/mnist.npz', out=data_dir)
# + pycharm={"is_executing": false, "name": "#%%\n"}
# Keep only the test images — presumably the scoring payload for section 6
# (the scoring cell itself is outside this excerpt).
import numpy as np

dataset = np.load(filename)
x_test = dataset['x_test']
# -
# <a id="persistence"></a>
# ## 3. Persist externally created Keras model
# In this section, you will learn how to store your model in Watson Machine Learning repository by using the Watson Machine Learning Client.
# ### 3.1: Publish model
# Define model name, type and software specification needed to deploy model later.
# + pycharm={"is_executing": false, "name": "#%%\n"}
# Software specification the deployed model will run on.
# NOTE(review): variable name has a typo ('sofware') — kept as-is because it
# is referenced just below.
sofware_spec_uid = client.software_specifications.get_id_by_name("default_py3.7")
# + pycharm={"is_executing": false, "name": "#%%\n"}
# Store the downloaded model archive in the repository of the export space.
client.set.default_space(export_space_id)
metadata = {
    client.repository.ModelMetaNames.NAME: 'External Keras model',
    client.repository.ModelMetaNames.TYPE: 'tensorflow_2.1',
    client.repository.ModelMetaNames.SOFTWARE_SPEC_UID: sofware_spec_uid
}
published_model = client.repository.store_model(
    model=model_path,
    meta_props=metadata)
# -
# ### 3.2: Get model details
# + pycharm={"is_executing": false, "name": "#%%\n"}
import json
published_model_uid = client.repository.get_model_uid(published_model)
model_details = client.repository.get_details(published_model_uid)
print(json.dumps(model_details, indent=2))
# -
# ### 3.3 Get all models in the space
# space_id is automatically picked up from client.set.default_space() api call before
# + pycharm={"is_executing": false, "name": "#%%\n"}
models_details = client.repository.list_models()
# -
# <a id="export"></a>
# ## 4. Export
# Inspect the export API's signature and docs before using it.
help(client.export_assets.start)
# client.export_assets has these apis. For any help on these apis, type 'help(api_name)' in your notebook
# Example: help(client.export_assets.start), help(client.export_assets.get_details)
#
# 1. client.export_assets.start: This starts the export job. export job is asynchronously executed
# 2. client.export_assets.get_details: Given export_id and corresponding space_id/project_id, this gives the export job details. Usually used for monitoring the export job submitted with start api
# 3. client.export_assets.list: Prints summary of all the export jobs
# 4. client.export_assets.get_exported_content: Downloads the exported content. This information will be used by the import process
# 5. client.export_assets.delete: Deletes the given export job
# 6. client.export_assets.cancel: Cancels the given export job if running
# ### 4.1: Start the export process
# Start the export process for the model created. Either ASSET_IDS or ASSET_TYPES or ALL_ASSETS can be provided.
# If you have more than one model ids, you need to provide them as array like client.export_assets.ConfigurationMetaNames.ASSET_IDS: [model_id1, model_id2]
# Refer to the help api above to see different usages and details
# +
# Start the export job for the stored model. The call is asynchronous and
# returns immediately with the job's metadata.
metadata = { client.export_assets.ConfigurationMetaNames.NAME: "export_model",
             client.export_assets.ConfigurationMetaNames.ASSET_IDS: [published_model_uid]
           }
details = client.export_assets.start(meta_props=metadata, space_id=export_space_id)
print(json.dumps(details, indent=2))
export_job_id = details[u'metadata'][u'id']  # used below to poll the job
# -
# ### 4.2: Monitor the export process
# +
import time

# Poll the export job every 3 s until it reaches a terminal state or
# 10 minutes have elapsed. (Original wrote `while True and cond` — the
# `True and` is a no-op — and initialized diff_time as
# `start_time - start_time`, a roundabout zero.)
start_time = time.time()
diff_time = 0
while diff_time < 10 * 60:
    time.sleep(3)
    response = client.export_assets.get_details(export_job_id, space_id=export_space_id)
    state = response[u'entity'][u'status'][u'state']
    print(state)
    if state in ('completed', 'error', 'failed'):
        break
    diff_time = time.time() - start_time

print(json.dumps(response, indent=2))
# -
# ### 4.3: Get the exported content
# +
# Download the exported content as a zip; a uuid in the file name avoids
# clobbering earlier exports.
export_dir = 'EXPORT_DATA'
if not os.path.isdir(export_dir):
    os.mkdir(export_dir)

export_file_name = 'exported_content_' + str(uuid.uuid4()) + '.zip'
export_file_path = os.path.join(export_dir, export_file_name)

details = client.export_assets.get_exported_content(export_job_id,
                                                    space_id = export_space_id,
                                                    file_path = export_file_path)
print(details)
# -
# <a id="import"></a>
# ## 5. Import
# client.import_assets has these apis. For any help on these apis, type 'help(api_name)' in your notebook
# Example: help(client.import_assets.start), help(client.import_assets.get_details)
#
# 1. client.import_assets.start: This starts the import job. import job is asynchronously executed
# 2. client.import_assets.get_details: Given import_id and corresponding space_id/project_id, this gives the import job details. Usually used for monitoring the import job submitted with start api
# 3. client.import_assets.list: Prints summary of all the import jobs
# 4. client.import_assets.delete: Deletes the given import job
# 5. client.import_assets.cancel: Cancels the given import job if running
# ### 5.1: Start the import process
# +
# Start the import job in the target space from the downloaded zip;
# asynchronous, like the export.
details = client.import_assets.start(file_path=export_file_path,
                                     space_id=import_space_id)
print(json.dumps(details, indent=2))
import_job_id = details[u'metadata'][u'id']  # used below to poll the job
# -
# ### 5.2: Monitor the import process
# +
import time

# Poll the import job every 3 seconds until it reaches a terminal state,
# giving up after a 10-minute timeout. Same cleanup as the export monitor:
# no `while True and ...`, no `start_time - start_time` zero-init.
start_time = time.time()
timeout_seconds = 10 * 60
while time.time() - start_time < timeout_seconds:
    time.sleep(3)
    response = client.import_assets.get_details(import_job_id,
                                                space_id=import_space_id)
    state = response[u'entity'][u'status'][u'state']
    print(state)
    # Stop polling once the job reaches a terminal state.
    if state in ('completed', 'error', 'failed'):
        break
print(json.dumps(response, indent=2))
# Switch the client to the import space and look up the imported model by name.
client.set.default_space(import_space_id)
print("{}List of models: {}".format('\n', '\n'))
client.repository.list_models()
details = client.repository.get_model_details()
# Grab the id of the imported Keras model so it can be deployed below.
for obj in details[u'resources']:
    if obj[u'metadata'][u'name'] == "External Keras model":
        model_id_for_deployment = obj[u'metadata'][u'id']
print("{}model id for deployment: {}".format('\n', model_id_for_deployment))
# -
# List the import and export jobs
print("Export jobs: \n")
client.export_assets.list(space_id=export_space_id)
print("\nImport jobs:")
client.import_assets.list(space_id=import_space_id)
# <a id="scoring"></a>
# ## 6. Deploy and score the imported model
# ### 6.1: Create model deployment
# #### Create online deployment for published model
# + pycharm={"is_executing": false, "name": "#%%\n"}
# Create an online (REST-scoring) deployment for the imported model.
metadata = {
    client.deployments.ConfigurationMetaNames.NAME: "Deployment of external Keras model",
    client.deployments.ConfigurationMetaNames.ONLINE: {}
}
created_deployment = client.deployments.create(model_id_for_deployment, meta_props=metadata)
# -
deployment_uid = client.deployments.get_uid(created_deployment)
# Now you can print an online scoring endpoint.
# + pycharm={"is_executing": false, "name": "#%%\n"}
scoring_endpoint = client.deployments.get_scoring_href(created_deployment)
print(scoring_endpoint)
# -
# You can also list existing deployments.
client.deployments.list()
# ### 6.2: Get deployment details
# + pycharm={"is_executing": false, "name": "#%%\n"}
details = client.deployments.get_details(deployment_uid)
print(json.dumps(details, indent=2))
# -
# ### 6.3: Score
# You can use below method to do test scoring request against deployed model.
# Let's first visualize two samples from dataset, we'll use for scoring. You must have matplotlib package installed
# %matplotlib inline
import matplotlib.pyplot as plt
# Show the two test samples that will be sent for scoring.
for i, image in enumerate([x_test[0], x_test[1]]):
    plt.subplot(2, 2, i + 1)
    plt.axis('off')
    plt.imshow(image, cmap=plt.cm.gray_r, interpolation='nearest')
# Prepare scoring payload with records to score.
# Flatten each image and divide by 255 (assumes 8-bit pixels -> values in [0, 1]).
score_0 = (x_test[0].ravel() / 255).tolist()
score_1 = (x_test[1].ravel() / 255).tolist()
# + pycharm={"is_executing": false, "name": "#%%\n"}
scoring_payload = {"input_data": [{"values": [score_0, score_1]}]}
# -
# Use ``client.deployments.score()`` method to run scoring.
# + pycharm={"is_executing": false, "name": "#%%\n"}
predictions = client.deployments.score(deployment_uid, scoring_payload)
# + pycharm={"is_executing": false, "name": "#%%\n"}
print(json.dumps(predictions, indent=2))
# -
# <a id="cleanup"></a>
# ## 7. Clean up
# +
# Remove both transfer jobs, then delete the temporary export/import spaces.
client.export_assets.delete(export_job_id, space_id=export_space_id)
client.import_assets.delete(import_job_id, space_id=import_space_id)
client.spaces.delete(export_space_id)
client.spaces.delete(import_space_id)
# -
# If you want to clean up all created assets:
# - experiments
# - trainings
# - pipelines
# - model definitions
# - models
# - functions
# - deployments
#
# please follow up this sample [notebook](https://github.com/IBM/watson-machine-learning-samples/blob/master/cloud/notebooks/python_sdk/instance-management/Machine%20Learning%20artifacts%20management.ipynb).
# <a id="summary"></a>
# ## 8. Summary and next steps
# You successfully completed this notebook! You learned how to use export/import assets client apis. Check out our _[Online Documentation](https://dataplatform.cloud.ibm.com/docs/content/wsj/getting-started/welcome-main.html?context=analytics?pos=2)_ for more samples, tutorials, documentation, how-tos, and blog posts.
# ### Authors
#
# *Mithun - *<EMAIL>**, Software Engineer
# Copyright © 2020 IBM. This notebook and its source code are released under the terms of the MIT License.
| cloud/notebooks/python_sdk/instance-management/Machine Learning artifacts export and import.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# language: python
# name: python3
# ---
# + [markdown] graffitiCellId="id_enq4fqk"
# # Build a queue using a linked list
#
# By now, you may be noticing a pattern. Earlier, we had you implement a stack using an array and a linked list. Here, we're doing the same thing with queues: In the previous notebook, you implemented a queue using an array, and in this notebook we'll implement one using a linked list.
#
# It's good to try implementing the same data structures in multiple ways. This helps you to better understand the abstract concepts behind the data structure, separate from the details of their implementation—and it also helps you develop a habit of comparing pros and cons of different implementations.
#
# With both stack and queues, we saw that trying to use arrays introduced some concerns regarding the time complexity, particularly when the initial array size isn't large enough and we need to expand the array in order to add more items.
#
# With our stack implementation, we saw that linked lists provided a way around this issue—and exactly the same thing is true with queues.
# + [markdown] graffitiCellId="id_mfgdb96"
# 
# + [markdown] graffitiCellId="id_1gfxpqm"
# <span class="graffiti-highlight graffiti-id_1gfxpqm-id_shwp6yi"><i></i><button>Walkthrough</button></span>
# + [markdown] graffitiCellId="id_12pcjsd"
# ## 1. Define a `Node` class
# Since we'll be implementing a linked list for this, we know that we'll need a `Node` class like we used earlier in this lesson.
#
# See if you can remember how to do this, and implement it in the cell below.
# + graffitiCellId="id_6myldsv"
class Node:
    """Singly linked list node holding a value and a reference to the next node."""

    def __init__(self, value):
        self.value = value
        self.next = None  # set later, when another node is linked behind this one
# + [markdown] graffitiCellId="id_gna1fui"
# <span class="graffiti-highlight graffiti-id_gna1fui-id_v0zlq1c"><i></i><button>Show Solution</button></span>
# + [markdown] graffitiCellId="id_hdbsl0t"
# ## 2. Create the `Queue` class and its `__init__` method
#
# In the cell below, see if you can write the `__init__` method for our `Queue` class. It will need three attributes:
# * A `head` attribute to keep track of the first node in the linked list
# * A `tail` attribute to keep track of the last node in the linked list
# * A `num_elements` attribute to keep track of how many items are in the stack
# + graffitiCellId="id_xfuidcp"
class Queue:
    """Queue skeleton: attributes only; methods are added in the later cells."""

    def __init__(self):
        self.head = None          # first node (front of the queue)
        self.tail = None          # last node (back of the queue)
        self.num_elements = 0     # item count, kept so size checks are O(1)
# + [markdown] graffitiCellId="id_rf0zi6d"
# <span class="graffiti-highlight graffiti-id_rf0zi6d-id_s7hiew4"><i></i><button>Show Solution</button></span>
# + [markdown] graffitiCellId="id_ba2hbei"
# ## 3. Add the `enqueue` method
#
# In the cell below, see if you can figure out how to write the `enqueue` method.
#
# Remember, the purpose of this method is to add a new item to the back of the queue. Since we're using a linked list, this is equivalent to creating a new node and adding it to the tail of the list.
#
# Some things to keep in mind:
# * If the queue is empty, then both the `head` and `tail` should refer to the new node (because when there's only one node, this node is both the head and the tail)
# * Otherwise (if the queue has items), add the new node to the tail (i.e., to the end of the queue)
# * Be sure to shift the `tail` reference so that it refers to the new node (because it is the new tail)
# + graffitiCellId="id_o0j1vfh"
class Queue:
    """Linked-list queue, now with enqueue support."""

    def __init__(self):
        self.head = None          # front of the queue
        self.tail = None          # back of the queue
        self.num_elements = 0     # item count, kept so size checks are O(1)

    def enqueue(self, value):
        """Add ``value`` at the back of the queue in O(1)."""
        new_node = Node(value)
        if self.head is None:
            # Empty queue: the single node is both the front and the back.
            self.head = new_node
            self.tail = self.head
        else:
            self.tail.next = new_node
            self.tail = self.tail.next
        self.num_elements += 1
# + [markdown] graffitiCellId="id_pcfy0pd"
# <span class="graffiti-highlight graffiti-id_pcfy0pd-id_3h8yswv"><i></i><button>Show Solution</button></span>
# + [markdown] graffitiCellId="id_pmyq05q"
# ## 4. Add the `size` and `is_empty` methods
#
# You've implemented these a couple of times now, and they'll work the same way here:
# * Add a `size` method that returns the current size of the stack
# * Add an `is_empty` method that returns `True` if the stack is empty and `False` otherwise
#
# We'll make use of these methods in a moment when we write the `dequeue` method.
# + graffitiCellId="id_ma7425n"
class Queue:
    """Linked-list queue with enqueue plus size/is_empty helpers."""

    def __init__(self):
        self.head = None          # front of the queue
        self.tail = None          # back of the queue
        self.num_elements = 0     # item count, kept so size checks are O(1)

    def enqueue(self, value):
        """Add ``value`` at the back of the queue in O(1)."""
        new_node = Node(value)
        if self.head is None:
            # Empty queue: the single node is both the front and the back.
            self.head = new_node
            self.tail = self.head
        else:
            self.tail.next = new_node # add data to the next attribute of the tail (i.e. the end of the queue)
            self.tail = self.tail.next # shift the tail (i.e., the back of the queue)
        self.num_elements += 1

    def size(self):
        """Return the number of items currently in the queue."""
        return self.num_elements

    def is_empty(self):
        """Return True when the queue holds no items."""
        return self.num_elements == 0
# + [markdown] graffitiCellId="id_xmkf0bu"
# <span class="graffiti-highlight graffiti-id_xmkf0bu-id_dv5h7su"><i></i><button>Show Solution</button></span>
# + [markdown] graffitiCellId="id_x0h23zd"
# ## 5. Add the `dequeue` method
#
# In the cell below, see if you can add the `dequeue` method.
#
# Here's what it should do:
# * If the queue is empty, it should simply return `None`. Otherwise...
# * Get the value from the front of the queue (i.e., the head of the linked list)
# * Shift the `head` over so that it refers to the next node
# * Update the `num_elements` attribute
# * Return the value that was dequeued
#
# + graffitiCellId="id_2uevha2"
class Queue:
    """FIFO queue backed by a singly linked list of ``Node`` objects.

    enqueue and dequeue both run in O(1); a running element count makes
    size() and is_empty() O(1) as well.
    """

    def __init__(self):
        self.head = None          # front of the queue (dequeue end)
        self.tail = None          # back of the queue (enqueue end)
        self.num_elements = 0     # running count of stored items

    def enqueue(self, value):
        """Append ``value`` at the back of the queue."""
        fresh = Node(value)
        if self.head is None:
            # Empty queue: the single node is both the front and the back.
            self.head = fresh
            self.tail = fresh
        else:
            self.tail.next = fresh
            self.tail = fresh
        self.num_elements += 1

    def dequeue(self):
        """Remove and return the front value, or None when the queue is empty."""
        if self.is_empty():
            return None
        front = self.head
        self.head = front.next
        self.num_elements -= 1
        return front.value

    def size(self):
        """Return the number of items currently stored."""
        return self.num_elements

    def is_empty(self):
        """Return True when the queue holds no items."""
        return self.num_elements == 0
# + [markdown] graffitiCellId="id_s4lyv17"
# <span class="graffiti-highlight graffiti-id_s4lyv17-id_n15vlij"><i></i><button>Show Solution</button></span>
# + [markdown] graffitiCellId="id_tz9b0bm"
# ## Test it!
#
# Here's some code you can use to check if your implementation works:
# + graffitiCellId="id_aljgt39"
# Setup
q = Queue()
q.enqueue(1)
q.enqueue(2)
q.enqueue(3)
# Test size
print ("Pass" if (q.size() == 3) else "Fail")
# Test dequeue: items must come back in FIFO order (1 was enqueued first)
print ("Pass" if (q.dequeue() == 1) else "Fail")
# Test enqueue after a dequeue; remaining items still drain in order
q.enqueue(4)
print ("Pass" if (q.dequeue() == 2) else "Fail")
print ("Pass" if (q.dequeue() == 3) else "Fail")
print ("Pass" if (q.dequeue() == 4) else "Fail")
q.enqueue(5)
print ("Pass" if (q.size() == 1) else "Fail")
# + [markdown] graffitiCellId="id_99n1nz4"
# ## Time Complexity
#
# So what's the time complexity of adding or removing things from our queue here?
#
# Well, when we use `enqueue`, we simply create a new node and add it to the tail of the list. And when we `dequeue` an item, we simply get the value from the head of the list and then shift the `head` variable so that it refers to the next node over.
#
# Both of these operations happen in constant time—that is, they have a time-complexity of O(1).
| Data Structures/Stacks and Queues/Build a queue using a linked list.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# language: python
# name: python3
# ---
# # Neo4j Map of SP
from py2neo import Graph, Node, Relationship
# Start your neo4j graph server
# - port 7687 is where the data stuff goes through
# - port 7474 is what you'll use to view the graph (localhost:7474)
#
# ### Note: The password is configured on your own but you can use the same password (<PASSWORD>) because why not
# Connect to the local Neo4j instance over the Bolt protocol (port 7687).
graph = Graph("bolt://localhost:7687", auth = ("neo4j",
              "ILikeYuri"))
from graph_nodes import graph_nodes, relation_nodes
def ResetNeo():
    """Wipe the graph: delete up to 50000 nodes (and their relationships) per call."""
    graph.run('''
    MATCH (n)
    OPTIONAL MATCH (n)-[r]-()
    WITH n,r LIMIT 50000
    DELETE n,r
    RETURN count(n) as deletedNodesCount
    ''')
# Rebuild the graph from scratch: clear it, then create every node and relationship.
ResetNeo()
for node_type in graph_nodes:
    # Only the node objects are needed here, so iterate values() instead of
    # items() (the dict key was previously unpacked into an unused variable).
    for node in node_type.values():
        graph.create(node)
for rel_type in relation_nodes:
    for rel in rel_type:
        graph.create(rel)
def get_node(name: str):
    """Return the first node whose ``name`` property matches, or None.

    Uses a Cypher query parameter instead of f-string interpolation so a
    name containing quotes cannot break (or inject into) the query.
    """
    query = "match (n) where n.name = $name return n"
    node = graph.evaluate(query, name=name)
    return node
# Get the label of that node
def get_node_label(node: Node):
    """Return the node's label string with the ':' prefix removed.

    Note: str.replace strips *every* colon, so a multi-label node such as
    ':Building:Room' would come back as 'BuildingRoom'.
    """
    label = str(node.labels).replace(':', '')
    return label
# returns list of all paths (as tuple of relationship objects)
def get_paths(node_a_name: str, node_b_name: str):
node_a = get_node(node_a_name)
node_a_label = get_node_label(node_a)
node_b = get_node(node_b_name)
node_b_label = get_node_label(node_b)
if node_a == None or node_b == None:
print("Node not found.")
param_a = '{' + f"name: '{node_a_name}'" + '}'
param_b = '{' + f"name: '{node_b_name}'" + '}'
query_string = f"match p=(:{node_a_label}{param_a})-[*]-(:{node_b_label}{param_b}) return p"
path_list = []
path_obj_list = [] # contains real path objects
result = graph.run(query_string).data()
for path in result:
for x, y in path.items():
# print(type(y), y)
path_list.append(y.relationships)
path_obj_list.append(y)
return path_list, path_obj_list
def get_path_dist(path: tuple):
    """Sum the 'distance' property over every relationship in the path."""
    return sum(rel['distance'] for rel in path)


def get_shortest_path_dist(path_list: list, real_paths_list):
    """Return the entry of ``real_paths_list`` whose relationship tuple in
    ``path_list`` has the smallest total distance (first one on ties)."""
    distances = [get_path_dist(path_tuple) for path_tuple in path_list]
    return real_paths_list[distances.index(min(distances))]
def walk_path(path):
    """Print step-by-step walking directions for a py2neo path object.

    Nodes and relationships are visited in parallel by index; the loop
    ends either when the final node is reached or when the relationship
    index runs off the end (the IndexError branch prints the total).
    """
    count = 0
    total_dist = 0
    # Upper bound on iterations only; the real exits are the checks below.
    path_size = len(path.nodes) + len(path.relationships)
    while count < path_size:
        try:
            node_name = path.nodes[count]['name']
            if count == len(path.nodes) - 1:
                # Final node: announce arrival and stop (no step follows it).
                print(f"You have reached {node_name} :)")
                return
            print(f"At <{node_name}>,")
            print(path.relationships[count]['description'])
            path_dist = path.relationships[count]['distance']
            # Zero-distance hops (e.g. same spot) are not worth announcing.
            if path_dist != 0:
                print(f"This path is around {path_dist} metres")
            #print(path.nodes[count]['name'])
            #print(path.relationships[count]['distance'])
            total_dist += path.relationships[count]['distance']
            print()
            count += 1
        except IndexError:
            # Ran past the last relationship: report the accumulated distance.
            print(f"Total Distance: {total_dist}")
            return
# main function
def walk_shortest_path(node_a_name: str, node_b_name: str):
print(f"Fetching directions from <{node_a_name}> to <{node_b_name}>\n")
paths, real_paths_list = get_paths(node_a_name, node_b_name)
shortest_path = get_shortest_path_dist(paths, real_paths_list)
walk_path(shortest_path)
walk_shortest_path('T19', "Hilltop Library")
# ### Getting Nodes
def get_building(name: str):
    """Print the school and description of the building node called ``name``."""
    b = get_node(name)
    print(f"Building: {name}, School: {b['school']}")
    print(b['description'])
get_building("T19")
def get_facility(name: str):
    """Print the type and description of the facility node called ``name``."""
    f = get_node(name)
    print(f"Facility: {name}, Type: {f['type']}")
    print(f['description'])
get_facility("Hilltop Library")
# ### Get relations
def get_all_node_rels(node_name: str, rel_name: str):
    """Return every relationship of the named node whose type equals ``rel_name``.

    NOTE: the node label is hard-coded to :building, so despite the generic
    name this only matches building nodes. The name is now passed as a
    Cypher query parameter instead of an f-string (quoting/injection safe),
    and the unused ``lots_available`` counter has been removed.
    """
    query = "match (:building {name: $name})-[r]-() return r"
    results = graph.run(query, name=node_name).data()
    filtered_rels = []
    for record in results:
        rel = record['r']
        # py2neo encodes the relationship type as the object's class name,
        # e.g. <class 'py2neo...HAS_PARKING_LOT'> -- recover it from repr(type).
        rel_label = str(type(rel)).split('.')[-1].replace("'>", '')
        if rel_label == rel_name:
            filtered_rels.append(rel)
    return filtered_rels
def get_lots_available(parking_lot_name: str):
    """Count HAS_PARKING_LOT relationships on the named node and report it."""
    lots = get_all_node_rels(parking_lot_name, "HAS_PARKING_LOT")
    return f"Available lots: {len(lots)}"
get_lots_available("Parking lot")
# +
# def get_relations(rel_name: str):
# query = f"match ()-[r:{rel_name.upper()}]-() return r"
# result = graph.run(query).data()
# print(result)
# print(type(result) + '\n')
# +
# get_relations("walk")
# -
# +
# t19_node = Node("Building", name = 'T19', heck = 'yeet')
# +
# graph.create(t19_node)
# +
# fc6 = Node("FoodCourt", name = "Food Court 6", meh = "heck")
# +
# graph.create(fc6)
# +
# connected_to_rel = Relationship(t19_node, "Connected_to", fc6, distance = 7, descrip = "VER LONG")
# graph.create(connected_to_rel)
| neo4j/.ipynb_checkpoints/smartpoly_neo4j_graph-checkpoint.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: 'Python 3.8.0 64-bit (''tf_2.4'': conda)'
# name: python3
# ---
# + [markdown] id="mdPl3kwuD2AM"
# # Imports
# + colab={"base_uri": "https://localhost:8080/", "height": 35} executionInfo={"elapsed": 9, "status": "ok", "timestamp": 1628842224033, "user": {"displayName": "<NAME>\u00e9", "photoUrl": "", "userId": "15597030600762340349"}, "user_tz": -120} id="wfcHq8nfBkL4" outputId="93885359-79ef-4c85-e17f-b21337cae9ef"
from configuration.paths import *
import numpy as np
import pandas as pd
import tensorflow as tf
from tensorflow.keras import models, layers
from src.datasets.pneumonia_detection_challenge import PneumoniaDetectionChallenge
from src.utils.schemes import Scheme
from src.utils.image import Image
# + [markdown] id="ohPUFDesDo2H"
# # Dataset loading for training
# -
# All images are resized to this resolution by the dataset loader.
IMAGE_SIZE = (256, 256)
pdc = PneumoniaDetectionChallenge(DATASET_PNEUMONIA_DETECTION_CHALLENGE_PATH, IMAGE_SIZE)
Scheme.dataset_info(pdc)
# +
# Divide by 255 to scale pixels (assumes 8-bit images -> values in [0, 1]).
x_train, y_train, _ = pdc.load_train_data()
x_train = x_train / 255.0
x_val, y_val, _ = pdc.load_val_data()
x_val = x_val / 255.0
# -
Scheme.labeled_images(x_train, y_train)
# # Model definition
# +
model = models.Sequential()
# Four Conv2D -> MaxPool stages; filter count grows 8 -> 8 -> 16 -> 32 while
# each 2x2 max-pooling halves the spatial resolution.
model.add(layers.Conv2D(filters=8, activation='relu', kernel_size=3, padding='same', input_shape=(IMAGE_SIZE[0], IMAGE_SIZE[1], 3)))
model.add(layers.MaxPool2D(pool_size=2, strides=2, padding='valid'))
model.add(layers.Conv2D(filters=8, activation="relu", kernel_size=3, padding="same"))
model.add(layers.MaxPool2D(pool_size=2, strides=2, padding='valid'))
model.add(layers.Conv2D(filters=16, activation="relu", kernel_size=3, padding="same"))
model.add(layers.MaxPool2D(pool_size=2, strides=2, padding='valid'))
model.add(layers.Conv2D(filters=32, activation="relu", kernel_size=3, padding="same"))
model.add(layers.MaxPool2D(pool_size=2, strides=2, padding='valid'))
model.summary()
# +
model.add(layers.Flatten())
model.add(layers.Dense(256, activation='relu'))
model.add(layers.Dropout(0.5))
# NOTE(review): two consecutive Dropout(0.5) layers compound to ~75% drop
# probability -- looks like a copy-paste duplicate; confirm it is intended.
model.add(layers.Dropout(0.5))
# Single sigmoid unit: binary pneumonia / no-pneumonia output.
model.add(layers.Dense(1, activation='sigmoid'))
model.summary()
# -
# Binary classification setup; precision/recall tracked alongside accuracy.
model.compile(optimizer='adam',
              loss="binary_crossentropy",
              metrics=['binary_accuracy',
                       tf.keras.metrics.Precision(name='precision'),
                       tf.keras.metrics.Recall(name='recall')])
# # Model training
history = model.fit(x=x_train,
                    y=y_train,
                    validation_data=(x_val, y_val),
                    epochs=35)
Scheme.training_graphs(history)
# Free the training arrays to reclaim memory before loading the test set.
del x_train
del y_train
# # Evaluating the model
x_test, y_test, images = pdc.load_test_data()
# Same 1/255 scaling as used for the training and validation data.
x_test = x_test / 255.0
test_loss, test_accuracy, _, _ = model.evaluate(x_test, y_test)
# Hand-picked test samples for the explainer; the parallel `labels` list
# marks the first five as class 1 and the last five as class 0.
images = [x_test[6], x_test[15], x_test[18], x_test[20], x_test[25],
          x_test[9], x_test[10], x_test[11], x_test[12], x_test[13]]
labels = [1, 1, 1, 1, 1,
          0, 0, 0, 0, 0]
images = Image.explainer(images, model, IMAGE_SIZE)
Scheme.labeled_images(images, labels)
predictions = model.predict(x_test)
# Round sigmoid outputs (threshold 0.5) to get hard class labels.
predictions_rounded = np.round(predictions).astype(int)
Scheme.confusion_matrix(predictions_rounded, pdc.get_test_df().diagnosis.to_numpy())
| notebooks/lab1/stage1/pdc_1_2.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python [conda env:cdod]
# language: python
# name: conda-env-cdod-py
# ---
# # AEGMM and VAEGMM outlier detection on KDD Cup '99 dataset
#
# ## Method
#
# The *AEGMM* method follows the [Deep Autoencoding Gaussian Mixture Model for Unsupervised Anomaly Detection](https://openreview.net/forum?id=BJJLHbb0-) ICLR 2018 paper. The encoder compresses the data while the reconstructed instances generated by the decoder are used to create additional features based on the reconstruction error between the input and the reconstructions. These features are combined with encodings and fed into a Gaussian Mixture Model (GMM). Training of the *AEGMM* model is unsupervised on *normal* (inlier) data. The sample energy of the GMM can then be used to determine whether an instance is an outlier (*high sample energy*) or not (*low sample energy*). *VAEGMM* on the other hand uses a [variational autoencoder](https://arxiv.org/abs/1312.6114) instead of a plain autoencoder.
#
# ## Dataset
#
# The outlier detector needs to detect computer network intrusions using TCP dump data for a local-area network (LAN) simulating a typical U.S. Air Force LAN. A connection is a sequence of TCP packets starting and ending at some well defined times, between which data flows to and from a source IP address to a target IP address under some well defined protocol. Each connection is labeled as either normal, or as an attack.
#
# There are 4 types of attacks in the dataset:
#
# - DOS: denial-of-service, e.g. syn flood;
# - R2L: unauthorized access from a remote machine, e.g. guessing password;
# - U2R: unauthorized access to local superuser (root) privileges;
# - probing: surveillance and other probing, e.g., port scanning.
#
# The dataset contains about 5 million connection records.
#
# There are 3 types of features:
#
# - basic features of individual connections, e.g. duration of connection
# - content features within a connection, e.g. number of failed log in attempts
# - traffic features within a 2 second window, e.g. number of connections to the same host as the current connection
# +
import logging
import os

import matplotlib.pyplot as plt
# %matplotlib inline
import numpy as np
import pandas as pd
import seaborn as sns
from sklearn.metrics import confusion_matrix, f1_score
import tensorflow as tf
tf.keras.backend.clear_session()
from tensorflow.keras.layers import Dense, InputLayer
from alibi_detect.datasets import fetch_kdd
from alibi_detect.models.autoencoder import eucl_cosim_features
from alibi_detect.od import OutlierAEGMM, OutlierVAEGMM
from alibi_detect.utils.data import create_outlier_batch
from alibi_detect.utils.fetching import fetch_detector
from alibi_detect.utils.saving import save_detector, load_detector
from alibi_detect.utils.visualize import plot_instance_score, plot_feature_outlier_tabular, plot_roc
logger = tf.get_logger()
logger.setLevel(logging.ERROR)
# -
# ## Load dataset
#
# We only keep a number of continuous (18 out of 41) features.
kddcup = fetch_kdd(percent10=True)  # only load 10% of the dataset
print(kddcup.data.shape, kddcup.target.shape)
# Assume that a model is trained on *normal* instances of the dataset (not outliers) and standardization is applied:
np.random.seed(0)
# perc_outlier=0 -> a batch of purely normal connections for training.
normal_batch = create_outlier_batch(kddcup.data, kddcup.target, n_samples=400000, perc_outlier=0)
X_train, y_train = normal_batch.data.astype('float32'), normal_batch.target
print(X_train.shape, y_train.shape)
print('{}% outliers'.format(100 * y_train.mean()))
# Per-feature statistics of the normal data; reused below for every batch.
mean, stdev = X_train.mean(axis=0), X_train.std(axis=0)
# Apply standardization:
X_train = (X_train - mean) / stdev
# ## Load or define AEGMM outlier detector
#
# The pretrained outlier and adversarial detectors used in the example notebooks can be found [here](https://console.cloud.google.com/storage/browser/seldon-models/alibi-detect). You can use the built-in ```fetch_detector``` function which saves the pre-trained models in a local directory ```filepath``` and loads the detector. Alternatively, you can train a detector from scratch:
# Either fetch the pretrained AEGMM detector or define/train one from scratch.
load_outlier_detector = True
filepath = 'my_path' # change to directory (absolute path) where model is downloaded
if load_outlier_detector: # load pretrained outlier detector
    detector_type = 'outlier'
    dataset = 'kddcup'
    detector_name = 'OutlierAEGMM'
    od = fetch_detector(filepath, detector_type, dataset, detector_name)
    # NOTE(review): os.path.join requires `os` to be imported; the import
    # cell at the top of this notebook does not import it.
    filepath = os.path.join(filepath, detector_name)
else: # define model, initialize, train and save outlier detector
    # the model defined here is similar to the one defined in the original paper
    n_features = X_train.shape[1]
    latent_dim = 1
    n_gmm = 2 # nb of components in GMM
    # Encoder compresses the features down to a latent_dim-dimensional code.
    encoder_net = tf.keras.Sequential(
        [
            InputLayer(input_shape=(n_features,)),
            Dense(60, activation=tf.nn.tanh),
            Dense(30, activation=tf.nn.tanh),
            Dense(10, activation=tf.nn.tanh),
            Dense(latent_dim, activation=None)
        ])
    # Decoder mirrors the encoder back up to the original feature count.
    decoder_net = tf.keras.Sequential(
        [
            InputLayer(input_shape=(latent_dim,)),
            Dense(10, activation=tf.nn.tanh),
            Dense(30, activation=tf.nn.tanh),
            Dense(60, activation=tf.nn.tanh),
            Dense(n_features, activation=None)
        ])
    # GMM input = latent code plus the 2 reconstruction features
    # (cosine similarity and Euclidean distance, cf. recon_features below).
    gmm_density_net = tf.keras.Sequential(
        [
            InputLayer(input_shape=(latent_dim + 2,)),
            Dense(10, activation=tf.nn.tanh),
            Dense(n_gmm, activation=tf.nn.softmax)
        ])
    # initialize outlier detector
    od = OutlierAEGMM(threshold=None, # threshold for outlier score
                      encoder_net=encoder_net, # can also pass AEGMM model instead
                      decoder_net=decoder_net, # of separate encoder, decoder
                      gmm_density_net=gmm_density_net, # and gmm density net
                      n_gmm=n_gmm,
                      recon_features=eucl_cosim_features) # fn used to derive features
                                                          # from the reconstructed
                                                          # instances based on cosine
                                                          # similarity and Eucl distance
    # train
    od.fit(X_train,
           epochs=50,
           batch_size=1024,
           save_path=filepath,
           verbose=True)
    # save the trained outlier detector
    save_detector(od, filepath)
# The warning tells us we still need to set the outlier threshold. This can be done with the `infer_threshold` method. We need to pass a batch of instances and specify what percentage of those we consider to be normal via `threshold_perc`. Let's assume we have some data which we know contains around 5% outliers. The percentage of outliers can be set with `perc_outlier` in the `create_outlier_batch` function.
np.random.seed(0)
# Calibration batch with a known 5% outlier fraction.
perc_outlier = 5
threshold_batch = create_outlier_batch(kddcup.data, kddcup.target, n_samples=1000, perc_outlier=perc_outlier)
X_threshold, y_threshold = threshold_batch.data.astype('float32'), threshold_batch.target
# Standardize with the statistics computed on the training data.
X_threshold = (X_threshold - mean) / stdev
print('{}% outliers'.format(100 * y_threshold.mean()))
# Pick the threshold so (100 - perc_outlier)% of this batch scores below it.
od.infer_threshold(X_threshold, threshold_perc=100-perc_outlier)
print('New threshold: {}'.format(od.threshold))
# Save outlier detector with updated threshold:
save_detector(od, filepath)
# ## Detect outliers
#
# We now generate a batch of data with 10% outliers and detect the outliers in the batch.
np.random.seed(1)
# Evaluation batch with a known 10% outlier fraction.
outlier_batch = create_outlier_batch(kddcup.data, kddcup.target, n_samples=1000, perc_outlier=10)
X_outlier, y_outlier = outlier_batch.data.astype('float32'), outlier_batch.target
# Standardize with the training-data statistics.
X_outlier = (X_outlier - mean) / stdev
print(X_outlier.shape, y_outlier.shape)
print('{}% outliers'.format(100 * y_outlier.mean()))
# Predict outliers:
od_preds = od.predict(X_outlier, return_instance_score=True)
# ## Display results
# F1 score and confusion matrix:
labels = outlier_batch.target_names
y_pred = od_preds['data']['is_outlier']
f1 = f1_score(y_outlier, y_pred)
print('F1 score: {:.4f}'.format(f1))
cm = confusion_matrix(y_outlier, y_pred)
df_cm = pd.DataFrame(cm, index=labels, columns=labels)
sns.heatmap(df_cm, annot=True, cbar=True, linewidths=.5)
plt.show()
# Plot instance level outlier scores vs. the outlier threshold:
plot_instance_score(od_preds, y_outlier, labels, od.threshold, ylim=(None, None))
# We can also plot the ROC curve for the outlier scores of the detector:
roc_data = {'AEGMM': {'scores': od_preds['data']['instance_score'], 'labels': y_outlier}}
plot_roc(roc_data)
# ## Investigate results
# We can visualize the encodings of the instances in the latent space and the features derived from the instance reconstructions by the decoder. The encodings and features are then fed into the GMM density network.
enc = od.aegmm.encoder(X_outlier)  # encoding
X_recon = od.aegmm.decoder(enc)  # reconstructed instances
recon_features = od.aegmm.recon_features(X_outlier, X_recon)  # reconstructed features
# +
# Scatter the 1-D encoding against each reconstruction feature, colored by label.
df = pd.DataFrame(dict(enc=enc[:, 0].numpy(),
                       cos=recon_features[:, 0].numpy(),
                       eucl=recon_features[:, 1].numpy(),
                       label=y_outlier))
groups = df.groupby('label')
fig, ax = plt.subplots()
for name, group in groups:
    ax.plot(group.enc, group.cos, marker='o',
            linestyle='', ms=6, label=labels[name])
plt.title('Encoding vs. Cosine Similarity')
plt.xlabel('Encoding')
plt.ylabel('Cosine Similarity')
ax.legend()
plt.show()
# -
# Same scatter, now against the relative Euclidean distance feature.
fig, ax = plt.subplots()
for name, group in groups:
    ax.plot(group.enc, group.eucl, marker='o',
            linestyle='', ms=6, label=labels[name])
plt.title('Encoding vs. Relative Euclidean Distance')
plt.xlabel('Encoding')
plt.ylabel('Relative Euclidean Distance')
ax.legend()
plt.show()
# A lot of the outliers are already separated well in the latent space.
# ## Use VAEGMM outlier detector
#
# We can again instantiate the pretrained VAEGMM detector from the [Google Cloud Bucket](https://console.cloud.google.com/storage/browser/seldon-models/alibi-detect). You can use the built-in ```fetch_detector``` function which saves the pre-trained models in a local directory ```filepath``` and loads the detector. Alternatively, you can train a detector from scratch:
# Either fetch the pretrained VAEGMM detector or define/train one from scratch.
load_outlier_detector = True
filepath = 'my_path' # change to directory (absolute path) where model is downloaded
if load_outlier_detector: # load pretrained outlier detector
    detector_type = 'outlier'
    dataset = 'kddcup'
    detector_name = 'OutlierVAEGMM'
    od = fetch_detector(filepath, detector_type, dataset, detector_name)
    # NOTE(review): os.path.join requires `os` to be imported; the import
    # cell at the top of this notebook does not import it.
    filepath = os.path.join(filepath, detector_name)
else: # define model, initialize, train and save outlier detector
    # the model defined here is similar to the one defined in
    # the OutlierVAE notebook
    n_features = X_train.shape[1]
    latent_dim = 2
    n_gmm = 2
    # Encoder network for the variational autoencoder.
    encoder_net = tf.keras.Sequential(
        [
            InputLayer(input_shape=(n_features,)),
            Dense(20, activation=tf.nn.relu),
            Dense(15, activation=tf.nn.relu),
            Dense(7, activation=tf.nn.relu)
        ])
    # Decoder mirrors the encoder back up to the original feature count.
    decoder_net = tf.keras.Sequential(
        [
            InputLayer(input_shape=(latent_dim,)),
            Dense(7, activation=tf.nn.relu),
            Dense(15, activation=tf.nn.relu),
            Dense(20, activation=tf.nn.relu),
            Dense(n_features, activation=None)
        ])
    # GMM input = latent code plus the 2 reconstruction features.
    gmm_density_net = tf.keras.Sequential(
        [
            InputLayer(input_shape=(latent_dim + 2,)),
            Dense(10, activation=tf.nn.relu),
            Dense(n_gmm, activation=tf.nn.softmax)
        ])
    # initialize outlier detector
    od = OutlierVAEGMM(threshold=None,
                       encoder_net=encoder_net,
                       decoder_net=decoder_net,
                       gmm_density_net=gmm_density_net,
                       n_gmm=n_gmm,
                       latent_dim=latent_dim,
                       samples=10,
                       recon_features=eucl_cosim_features)
    # train
    od.fit(X_train,
           epochs=50,
           batch_size=1024,
           cov_elbo=dict(sim=.0025), # standard deviation assumption
           verbose=True) # for elbo training
    # save the trained outlier detector
    save_detector(od, filepath)
# Need to infer the threshold again:
od.infer_threshold(X_threshold, threshold_perc=100-perc_outlier)
print('New threshold: {}'.format(od.threshold))
# Save outlier detector with updated threshold:
save_detector(od, filepath)
# ## Detect outliers and display results
#
# Predict:
# Score the same 10%-outlier batch with the VAEGMM detector.
od_preds = od.predict(X_outlier, return_instance_score=True)
# F1 score and confusion matrix:
labels = outlier_batch.target_names
y_pred = od_preds['data']['is_outlier']
f1 = f1_score(y_outlier, y_pred)
print('F1 score: {:.4f}'.format(f1))
cm = confusion_matrix(y_outlier, y_pred)
df_cm = pd.DataFrame(cm, index=labels, columns=labels)
sns.heatmap(df_cm, annot=True, cbar=True, linewidths=.5)
plt.show()
# Plot instance level outlier scores vs. the outlier threshold:
plot_instance_score(od_preds, y_outlier, labels, od.threshold, ylim=(None, None))
# You can zoom in by adjusting the min and max values in `ylim`. We can also compare the VAEGMM ROC curve with AEGMM:
# Add the VAEGMM scores next to the AEGMM entry for a side-by-side ROC plot.
roc_data['VAEGMM'] = {'scores': od_preds['data']['instance_score'], 'labels': y_outlier}
plot_roc(roc_data)
| examples/od_aegmm_kddcup.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Tf2_Py37
# language: python
# name: tf2_py37
# ---
# + [markdown] papermill={"duration": 0.037402, "end_time": "2020-10-01T01:21:25.713319", "exception": false, "start_time": "2020-10-01T01:21:25.675917", "status": "completed"} tags=[]
# # Python4DS Quick Review - 7
# Based on:
# - Kaggle Python Course, and
# - Free book: [A Whirlwind Tour of Python by Jake VanderPlas (O’Reilly)](https://jakevdp.github.io/WhirlwindTourOfPython/). Copyright 2016 O’Reilly Media, Inc., 978-1-491-96465-1.
#
# In this lesson, I'll be talking about **imports** in Python, giving some tips for working with unfamiliar libraries (and the objects they return), digging into the guts of Python just a bit to talk about **operator overloading**, and a quick preview of main libraries used with Data Science.
# + [markdown] papermill={"duration": 0.035485, "end_time": "2020-10-01T01:21:25.786614", "exception": false, "start_time": "2020-10-01T01:21:25.751129", "status": "completed"} tags=[]
# # Imports
#
# So far we've talked about types and functions which are built-in to the language.
#
# But one of the best things about Python (especially if you're a data scientist) is the vast number of high-quality custom libraries that have been written for it.
#
# Some of these libraries are in the "standard library", meaning you can find them anywhere you run Python. Other libraries can be easily added, even if they aren't always shipped with Python.
# -
# ## Importing from Third-Party Modules
#
# One of the things that makes Python useful, especially within the world of data science, is its ecosystem of third-party modules.
# These can be imported just as the built-in modules, but first the modules must be installed on your system.
# The standard registry for such modules is the Python Package Index (*PyPI* for short), found on the Web at http://pypi.python.org/.
# For convenience, Python comes with a program called ``pip`` (a recursive acronym meaning "pip installs packages"), which will automatically fetch packages released and listed on PyPI. For example, if you'd like to install ``numpy``, all that is required is to type the following at the command line in your Terminal:
# - `pip install numpy`
#
# The source code for the package will be automatically downloaded from the PyPI repository, and the package installed in the standard Python path (assuming you have permission to do so on the computer you're using).
#
# For more information about PyPI and the ``pip`` installer, refer to the documentation at http://pypi.python.org/.
# ## Importing from Python's Standard Library
#
# Python's standard library contains many useful built-in modules, which you can read about fully in [Python's documentation](https://docs.python.org/3/library/).
# Any of these can be imported with the ``import`` statement, and then explored using the help function seen in the previous section.
# Here is an extremely incomplete list of some of the modules you might wish to explore and learn about:
#
# - ``os`` and ``sys``: Tools for interfacing with the operating system, including navigating file directory structures and executing shell commands
# - ``math`` and ``cmath``: Mathematical functions and operations on real and complex numbers
# - ``itertools``: Tools for constructing and interacting with iterators and generators
# - ``functools``: Tools that assist with functional programming
# - ``random``: Tools for generating pseudorandom numbers
# - ``pickle``: Tools for object persistence: saving objects to and loading objects from disk
# - ``json`` and ``csv``: Tools for reading JSON-formatted and CSV-formatted files.
# - ``urllib``: Tools for doing HTTP and other web requests.
#
# You can find information on these, and many more, in the Python standard library documentation: https://docs.python.org/3/library/.
# + [markdown] papermill={"duration": 0.035485, "end_time": "2020-10-01T01:21:25.786614", "exception": false, "start_time": "2020-10-01T01:21:25.751129", "status": "completed"} tags=[]
# Either way, we'll access this code with **imports**.
#
# We'll start our example by importing `math` from the standard library.
# + papermill={"duration": 0.052163, "end_time": "2020-10-01T01:21:25.874838", "exception": false, "start_time": "2020-10-01T01:21:25.822675", "status": "completed"} tags=[]
import math
# A module is itself an object; printing its type shows <class 'module'>.
print("It's math! It has type {}".format(type(math)))
# + [markdown] papermill={"duration": 0.03646, "end_time": "2020-10-01T01:21:25.952390", "exception": false, "start_time": "2020-10-01T01:21:25.915930", "status": "completed"} tags=[]
# `math` is a module. A module is just a collection of variables (a *namespace*, if you like) defined by someone else. We can see all the names in `math` using the built-in function `dir()`.
# + papermill={"duration": 0.04715, "end_time": "2020-10-01T01:21:26.036206", "exception": false, "start_time": "2020-10-01T01:21:25.989056", "status": "completed"} tags=[]
print(dir(math))
# + [markdown] papermill={"duration": 0.037389, "end_time": "2020-10-01T01:21:26.112542", "exception": false, "start_time": "2020-10-01T01:21:26.075153", "status": "completed"} tags=[]
# We can access these variables using dot syntax. Some of them refer to simple values, like `math.pi`:
# + papermill={"duration": 0.046929, "end_time": "2020-10-01T01:21:26.197754", "exception": false, "start_time": "2020-10-01T01:21:26.150825", "status": "completed"} tags=[]
print("pi to 4 significant digits = {:.4}".format(math.pi))
# + [markdown] papermill={"duration": 0.037766, "end_time": "2020-10-01T01:21:26.274493", "exception": false, "start_time": "2020-10-01T01:21:26.236727", "status": "completed"} tags=[]
# But most of what we'll find in the module are functions, like `math.log`:
# + papermill={"duration": 0.050159, "end_time": "2020-10-01T01:21:26.362552", "exception": false, "start_time": "2020-10-01T01:21:26.312393", "status": "completed"} tags=[]
math.log(32, 2)
# + [markdown] papermill={"duration": 0.038349, "end_time": "2020-10-01T01:21:26.439884", "exception": false, "start_time": "2020-10-01T01:21:26.401535", "status": "completed"} tags=[]
# Of course, if we don't know what `math.log` does, we can call `help()` on it:
# + papermill={"duration": 0.048612, "end_time": "2020-10-01T01:21:26.528002", "exception": false, "start_time": "2020-10-01T01:21:26.479390", "status": "completed"} tags=[]
help(math.log)
# + [markdown] papermill={"duration": 0.038698, "end_time": "2020-10-01T01:21:26.606028", "exception": false, "start_time": "2020-10-01T01:21:26.567330", "status": "completed"} tags=[]
# We can also call `help()` on the module itself. This will give us the combined documentation for *all* the functions and values in the module (as well as a high-level description of the module). Click the "output" button to see the whole `math` help page.
# + _kg_hide-output=true papermill={"duration": 0.068024, "end_time": "2020-10-01T01:21:26.713177", "exception": false, "start_time": "2020-10-01T01:21:26.645153", "status": "completed"} tags=[]
help(math)
# + [markdown] papermill={"duration": 0.039339, "end_time": "2020-10-01T01:21:26.792508", "exception": false, "start_time": "2020-10-01T01:21:26.753169", "status": "completed"} tags=[]
# ### Other import syntax
#
# If we know we'll be using functions in `math` frequently we can import it under a shorter alias to save some typing (though in this case "math" is already pretty short).
# + papermill={"duration": 0.049948, "end_time": "2020-10-01T01:21:26.881982", "exception": false, "start_time": "2020-10-01T01:21:26.832034", "status": "completed"} tags=[]
# 'as' binds the module under a shorter alias; mt refers to the same module object.
import math as mt
mt.pi
# + [markdown] papermill={"duration": 0.03959, "end_time": "2020-10-01T01:21:26.961828", "exception": false, "start_time": "2020-10-01T01:21:26.922238", "status": "completed"} tags=[]
# > You may have seen code that does this with certain popular libraries like Pandas, Numpy, Tensorflow, or Matplotlib. For example, it's a common convention to `import numpy as np` and `import pandas as pd`.
# + [markdown] papermill={"duration": 0.039467, "end_time": "2020-10-01T01:21:27.041227", "exception": false, "start_time": "2020-10-01T01:21:27.001760", "status": "completed"} tags=[]
# The `as` simply renames the imported module. It's equivalent to doing something like:
# + papermill={"duration": 0.048029, "end_time": "2020-10-01T01:21:27.129754", "exception": false, "start_time": "2020-10-01T01:21:27.081725", "status": "completed"} tags=[]
# Importing with 'as' is just a rebinding, equivalent to these two lines:
import math
mt = math
# + [markdown] papermill={"duration": 0.039729, "end_time": "2020-10-01T01:21:27.209417", "exception": false, "start_time": "2020-10-01T01:21:27.169688", "status": "completed"} tags=[]
# Wouldn't it be great if we could refer to all the variables in the `math` module by themselves? i.e. if we could just refer to `pi` instead of `math.pi` or `mt.pi`? Good news: we can do that.
# + papermill={"duration": 0.049938, "end_time": "2020-10-01T01:21:27.299168", "exception": false, "start_time": "2020-10-01T01:21:27.249230", "status": "completed"} tags=[]
# Star import: copies every public name from math into this namespace.
from math import *
print(pi, log(32, 2))
# + [markdown] papermill={"duration": 0.039764, "end_time": "2020-10-01T01:21:27.379227", "exception": false, "start_time": "2020-10-01T01:21:27.339463", "status": "completed"} tags=[]
# `import *` makes all the module's variables directly accessible to you (without any dotted prefix).
#
# Bad news: some purists might grumble at you for doing this.
#
# Worse: they kind of have a point.
# + papermill={"duration": 0.087743, "end_time": "2020-10-01T01:21:27.507460", "exception": false, "start_time": "2020-10-01T01:21:27.419717", "status": "completed"} tags=["raises-exception"]
from math import *
from numpy import *
# numpy's log shadows math's log here; numpy.log treats the second positional
# argument as an output array, so this line raises an error on purpose
# (the cell is tagged raises-exception).
print(pi, log(32, 2))
# + [markdown] papermill={"duration": 0.040622, "end_time": "2020-10-01T01:21:27.589061", "exception": false, "start_time": "2020-10-01T01:21:27.548439", "status": "completed"} tags=[]
# What the what? But it worked before!
#
# These kinds of "star imports" can occasionally lead to weird, difficult-to-debug situations.
#
# The problem in this case is that the `math` and `numpy` modules both have functions called `log`, but they have different semantics. Because we import from `numpy` second, its `log` overwrites (or "shadows") the `log` variable we imported from `math`.
#
# A good compromise is to import only the specific things we'll need from each module:
# + papermill={"duration": 0.04982, "end_time": "2020-10-01T01:21:27.679612", "exception": false, "start_time": "2020-10-01T01:21:27.629792", "status": "completed"} tags=[]
# Importing only the specific names we need avoids shadowing surprises.
from math import log, pi
from numpy import asarray
# + [markdown] papermill={"duration": 0.040595, "end_time": "2020-10-01T01:21:27.762410", "exception": false, "start_time": "2020-10-01T01:21:27.721815", "status": "completed"} tags=[]
# ### Submodules
#
# We've seen that modules contain variables which can refer to functions or values. Something to be aware of is that they can also have variables referring to *other modules*.
# + papermill={"duration": 0.051131, "end_time": "2020-10-01T01:21:27.854529", "exception": false, "start_time": "2020-10-01T01:21:27.803398", "status": "completed"} tags=[]
import numpy
# Modules can contain other modules: numpy.random is a submodule.
print("numpy.random is a", type(numpy.random))
print("it contains names such as...",
      dir(numpy.random)[-15:]
     )
# + [markdown] papermill={"duration": 0.040868, "end_time": "2020-10-01T01:21:27.936932", "exception": false, "start_time": "2020-10-01T01:21:27.896064", "status": "completed"} tags=[]
# So if we import `numpy` as above, then calling a function in the `random` "submodule" will require *two* dots.
# + papermill={"duration": 0.054455, "end_time": "2020-10-01T01:21:28.032648", "exception": false, "start_time": "2020-10-01T01:21:27.978193", "status": "completed"} tags=[]
# Roll 10 dice.  numpy.random.randint draws from the half-open interval
# [low, high), i.e. the upper bound is *exclusive* -- so high=7 yields the
# faces 1-6 (the original high=6 could never roll a 6).
rolls = numpy.random.randint(low=1, high=7, size=10)
rolls
# + [markdown] papermill={"duration": 0.043149, "end_time": "2020-10-01T01:21:28.119071", "exception": false, "start_time": "2020-10-01T01:21:28.075922", "status": "completed"} tags=[]
# Oh the places you'll go, oh the objects you'll see
#
# So after 6 lessons, you're a pro with ints, floats, bools, lists, strings, and dicts (right?).
#
# Even if that were true, it doesn't end there. As you work with various libraries for specialized tasks, you'll find that they define their own types which you'll have to learn to work with. For example, if you work with the graphing library `matplotlib`, you'll be coming into contact with objects it defines which represent Subplots, Figures, TickMarks, and Annotations. `pandas` functions will give you DataFrames and Series.
#
# In this section, I want to share with you a quick survival guide for working with strange types.
#
# ### Three tools for understanding strange objects
#
# In the cell above, we saw that calling a `numpy` function gave us an "array". We've never seen anything like this before (not in this course anyways). But don't panic: we have three familiar builtin functions to help us here.
#
# **1: `type()`** (what is this thing?)
# + papermill={"duration": 0.052896, "end_time": "2020-10-01T01:21:28.216005", "exception": false, "start_time": "2020-10-01T01:21:28.163109", "status": "completed"} tags=[]
type(rolls)
# + [markdown] papermill={"duration": 0.042053, "end_time": "2020-10-01T01:21:28.300591", "exception": false, "start_time": "2020-10-01T01:21:28.258538", "status": "completed"} tags=[]
# **2: `dir()`** (what can I do with it?)
# + papermill={"duration": 0.051774, "end_time": "2020-10-01T01:21:28.394789", "exception": false, "start_time": "2020-10-01T01:21:28.343015", "status": "completed"} tags=[]
print(dir(rolls))
# + papermill={"duration": 0.054307, "end_time": "2020-10-01T01:21:28.492224", "exception": false, "start_time": "2020-10-01T01:21:28.437917", "status": "completed"} tags=[]
# What am I trying to do with this dice roll data? Maybe I want the average roll, in which case the "mean"
# method looks promising...
# ndarray.mean() averages every element and returns a numpy scalar.
rolls.mean()
# + papermill={"duration": 0.053466, "end_time": "2020-10-01T01:21:28.589190", "exception": false, "start_time": "2020-10-01T01:21:28.535724", "status": "completed"} tags=[]
# Or maybe I just want to get back on familiar ground, in which case I might want to check out "tolist"
# tolist() converts the array back into a plain Python list.
rolls.tolist()
# + [markdown] papermill={"duration": 0.043518, "end_time": "2020-10-01T01:21:28.677369", "exception": false, "start_time": "2020-10-01T01:21:28.633851", "status": "completed"} tags=[]
# **3: `help()`** (tell me more)
# + papermill={"duration": 0.052782, "end_time": "2020-10-01T01:21:28.773907", "exception": false, "start_time": "2020-10-01T01:21:28.721125", "status": "completed"} tags=[]
# That "ravel" attribute sounds interesting. I'm a big classical music fan.
# (help() works on bound methods too; ravel flattens an array to 1-D.)
help(rolls.ravel)
# + _kg_hide-output=true papermill={"duration": 0.098031, "end_time": "2020-10-01T01:21:28.916201", "exception": false, "start_time": "2020-10-01T01:21:28.818170", "status": "completed"} tags=[]
# Okay, just tell me everything there is to know about numpy.ndarray
# (Click the "output" button to see the novel-length output)
# 3) help(): full documentation for the object's class.
help(rolls)
# + [markdown] papermill={"duration": 0.047984, "end_time": "2020-10-01T01:21:29.016272", "exception": false, "start_time": "2020-10-01T01:21:28.968288", "status": "completed"} tags=[]
# (Of course, you might also prefer to check out [the online docs](https://docs.scipy.org/doc/numpy-1.14.0/reference/generated/numpy.ndarray.html))
# + [markdown] papermill={"duration": 0.048265, "end_time": "2020-10-01T01:21:29.114534", "exception": false, "start_time": "2020-10-01T01:21:29.066269", "status": "completed"} tags=[]
# ### Operator overloading
#
# What's the value of the below expression?
# + papermill={"duration": 0.067014, "end_time": "2020-10-01T01:21:29.233003", "exception": false, "start_time": "2020-10-01T01:21:29.165989", "status": "completed"} tags=["raises-exception"]
[3, 4, 1, 2, 2, 1] + 10
# + [markdown] papermill={"duration": 0.048799, "end_time": "2020-10-01T01:21:29.332154", "exception": false, "start_time": "2020-10-01T01:21:29.283355", "status": "completed"} tags=[]
# What a silly question. Of course it's an error.
#
# But what about...
# + papermill={"duration": 0.061793, "end_time": "2020-10-01T01:21:29.443122", "exception": false, "start_time": "2020-10-01T01:21:29.381329", "status": "completed"} tags=[]
rolls + 10
# + [markdown] papermill={"duration": 0.049692, "end_time": "2020-10-01T01:21:29.544381", "exception": false, "start_time": "2020-10-01T01:21:29.494689", "status": "completed"} tags=[]
# We might think that Python strictly polices how pieces of its core syntax behave such as `+`, `<`, `in`, `==`, or square brackets for indexing and slicing. But in fact, it takes a very hands-off approach. When you define a new type, you can choose how addition works for it, or what it means for an object of that type to be equal to something else.
#
# The designers of lists decided that adding them to numbers wasn't allowed. The designers of `numpy` arrays went a different way (adding the number to each element of the array).
#
# Here are a few more examples of how `numpy` arrays interact unexpectedly with Python operators (or at least differently from lists).
# + papermill={"duration": 0.060286, "end_time": "2020-10-01T01:21:29.654522", "exception": false, "start_time": "2020-10-01T01:21:29.594236", "status": "completed"} tags=[]
# At which indices are the dice less than or equal to 3?
# (elementwise comparison: returns a boolean ndarray, not a single bool)
rolls <= 3
# + papermill={"duration": 0.059574, "end_time": "2020-10-01T01:21:29.763696", "exception": false, "start_time": "2020-10-01T01:21:29.704122", "status": "completed"} tags=[]
# A nested 2x3 list of ints...
xlist = [[1,2,3],[2,4,6],]
# Create a 2-dimensional array
x = numpy.asarray(xlist)
print("xlist = {}\nx =\n{}".format(xlist, x))
# + papermill={"duration": 0.061778, "end_time": "2020-10-01T01:21:29.874998", "exception": false, "start_time": "2020-10-01T01:21:29.813220", "status": "completed"} tags=[]
# Get the last element of the second row of our numpy array
# (ndarrays accept a tuple index: one position per dimension)
x[1,-1]
# + papermill={"duration": 0.069748, "end_time": "2020-10-01T01:21:29.995747", "exception": false, "start_time": "2020-10-01T01:21:29.925999", "status": "completed"} tags=["raises-exception"]
# Get the last element of the second sublist of our nested list?
# No: plain lists don't support tuple indexing, so this raises a TypeError
# (intentionally; the cell is tagged raises-exception).
xlist[1,-1]
# + [markdown] papermill={"duration": 0.061051, "end_time": "2020-10-01T01:21:30.108353", "exception": false, "start_time": "2020-10-01T01:21:30.047302", "status": "completed"} tags=[]
# numpy's `ndarray` type is specialized for working with multi-dimensional data, so it defines its own logic for indexing, allowing us to index by a tuple to specify the index at each dimension.
# + [markdown] papermill={"duration": 0.061051, "end_time": "2020-10-01T01:21:30.108353", "exception": false, "start_time": "2020-10-01T01:21:30.047302", "status": "completed"} tags=[]
# **When does 1 + 1 not equal 2?**
#
# Things can get weirder than this. You may have heard of (or even used) tensorflow, a Python library popularly used for deep learning. It makes extensive use of operator overloading. You will cover it later when studying Deep Learning.
# + [markdown] papermill={"duration": 0.051601, "end_time": "2020-10-01T01:21:37.238557", "exception": false, "start_time": "2020-10-01T01:21:37.186956", "status": "completed"} tags=[]
# It's important just to be aware of the fact that this sort of thing is possible and that libraries will often use operator overloading in non-obvious or magical-seeming ways.
#
# Understanding how Python's operators work when applied to ints, strings, and lists is no guarantee that you'll be able to immediately understand what they do when applied to a tensorflow `Tensor`, or a numpy `ndarray`, or a pandas `DataFrame`.
#
# Once you've had a little taste of DataFrames, for example, an expression like the one below starts to look appealingly intuitive:
#
# ```python
# # Get the rows with population over 1m in South America
# df[(df['population'] > 10**6) & (df['continent'] == 'South America')]
# ```
#
# But why does it work? The example above features something like **5** different overloaded operators. What's each of those operations doing? It can help to know the answer when things start going wrong.
# -
# # A Preview of Data Science Tools
# If you would like to spring from here and go farther in using Python for scientific computing or data science, there are a few packages that will make your life much easier.
# This section will introduce and preview several of the more important ones, and give you an idea of the types of applications they are designed for.
# If you're using the *Anaconda* or *Miniconda* environment, you can install the relevant packages with the following command at your Terminal:
#
# - `conda install numpy scipy pandas matplotlib scikit-learn`
#
# Otherwise, use PIP
#
# Let's take a brief look at each of these in turn.
# ## NumPy: Numerical Python
#
# NumPy provides an efficient way to store and manipulate multi-dimensional dense arrays in Python.
# The important features of NumPy are:
#
# - It provides an ``ndarray`` structure, which allows efficient storage and manipulation of vectors, matrices, and higher-dimensional datasets.
# - It provides a readable and efficient syntax for operating on this data, from simple element-wise arithmetic to more complicated linear algebraic operations.
#
# In the simplest case, NumPy arrays look a lot like Python lists.
# For example, here is an array containing the range of numbers 1 to 9 (compare this with Python's built-in ``range()``):
import numpy as np
# arange(1, 10): integers 1..9 (the stop value is exclusive, like range()).
x = np.arange(1, 10)
x
# NumPy's arrays offer both efficient storage of data, as well as efficient element-wise operations on the data.
# For example, to square each element of the array, we can apply the "``**``" operator to the array directly:
x ** 2
# Compare this with the much more verbose Python-style list comprehension for the same result:
[val ** 2 for val in range(1, 10)]
# Unlike Python lists (which can be nested, but are fundamentally one-dimensional sequences), NumPy arrays can be multi-dimensional.
# For example, here we will reshape our ``x`` array into a 3x3 array:
M = x.reshape((3, 3))
M
# A two-dimensional array is one representation of a matrix, and NumPy knows how to efficiently do typical matrix operations. For example, you can compute the transpose using ``.T``:
M.T
# or a matrix-vector product using ``np.dot``:
np.dot(M, [5, 6, 7])
# and even more sophisticated operations like eigenvalue decomposition:
np.linalg.eigvals(M)
# Such linear algebraic manipulation underpins much of modern data analysis, particularly when it comes to the fields of machine learning and data mining.
#
# For more information on NumPy, see [NumPy.org](https://numpy.org).
# ## Pandas: Labeled Column-oriented Data
#
# Pandas is a much newer package than NumPy, and is in fact built on top of it.
# What Pandas provides is a labeled interface to multi-dimensional data, in the form of a DataFrame object that will feel very familiar to users of R and related languages.
# DataFrames in Pandas look something like this:
# +
import pandas as pd
# Two labeled columns; rows get the default integer index 0-5.
df = pd.DataFrame({'label': ['A', 'B', 'C', 'A', 'B', 'C'],
                   'value': [1, 2, 3, 4, 5, 6]})
df
# -
# The Pandas interface allows you to do things like select columns by name:
df['label']
# Apply string operations across string entries:
df['label'].str.lower()
# Apply aggregates across numerical entries:
df['value'].sum()
# And, perhaps most importantly, do efficient database-style joins and groupings:
# (groups the six rows by their label, then sums each group's values)
df.groupby('label').sum()
# Here in one line we have computed the sum of all objects sharing the same label, something that is much more verbose (and much less efficient) using tools provided in Numpy and core Python.
#
# For more information on using Pandas, see [Pandas](https://pandas.pydata.org) and the great tutorial: [10 minutes to pandas](https://pandas.pydata.org/docs/user_guide/10min.html#min).
# ## Matplotlib MatLab-style scientific visualization
#
# Matplotlib is currently the most popular scientific visualization packages in Python.
# Even proponents admit that its interface is sometimes overly verbose, but it is a powerful library for creating a large range of plots.
#
# To use Matplotlib, we can start by enabling the notebook mode (for use in the Jupyter notebook) and then importing the package as ``plt``:
# +
import matplotlib.pyplot as plt
plt.style.use('ggplot') # make graphs in the style of R's ggplot
# -
# Now let's create some data (as NumPy arrays, of course) and plot the results:
# (linspace defaults to 50 evenly spaced points over the interval)
x = np.linspace(0, 10) # range of values from 0 to 10
y = np.sin(x) # sine of these values
plt.plot(x, y); # plot as a line
# This is the simplest example of a Matplotlib plot; for ideas on the wide range of plot types available, see [Matplotlib's online gallery](https://matplotlib.org/stable/).
# ## SciPy: Scientific Python
#
# SciPy is a collection of scientific functionality that is built on NumPy.
# The package began as a set of Python wrappers to well-known Fortran libraries for numerical computing, and has grown from there.
# The package is arranged as a set of submodules, each implementing some class of numerical algorithms.
# Here is an incomplete sample of some of the more important ones for data science:
#
# - ``scipy.fftpack``: Fast Fourier transforms
# - ``scipy.integrate``: Numerical integration
# - ``scipy.interpolate``: Numerical interpolation
# - ``scipy.linalg``: Linear algebra routines
# - ``scipy.optimize``: Numerical optimization of functions
# - ``scipy.sparse``: Sparse matrix storage and linear algebra
# - ``scipy.stats``: Statistical analysis routines
# - ``scipy.signal``: Signal Processing (as filtering)
#
# For example, let's take a look at interpolating a smooth curve between some data
# +
from scipy import interpolate
# interp1d builds a callable that interpolates between the sample points.
# choose eight points between 0 and 10
x = np.linspace(0, 10, 8)
y = np.sin(x)
# create a cubic interpolation function
func = interpolate.interp1d(x, y, kind='cubic')
# interpolate on a grid of 1,000 points
x_interp = np.linspace(0, 10, 1000)
y_interp = func(x_interp)
# plot the results
plt.figure() # new figure
plt.plot(x, y, 'o')
plt.plot(x_interp, y_interp);
# -
# What we see is a smooth interpolation between the points.
# ## Other Data Science Packages
#
# Built on top of these tools are a host of other data science packages, including general tools like [Scikit-Learn](http://scikit-learn.org) for machine learning, [Scikit-Image](http://scikit-image.org) for image analysis, and [Statsmodels](http://statsmodels.sourceforge.net/) for statistical modeling, as well as more domain-specific packages like [AstroPy](http://astropy.org) for astronomy and astrophysics, [NiPy](http://nipy.org/) for neuro-imaging, and many, many more.
#
# No matter what type of scientific, numerical, or statistical problem you are facing, it's likely there is a Python package out there that can help you solve it.
# # Exercises
# ## 1.
#
# Create a Notebook (using Pandas) where you, starting from a dataset of your choice, analise it, cleaning it if necessary and finally add some visualizations with matPlotLib.
| 00_Curso_Folder/1_Fundamentals/Class_4/notebooks/7_working-with-external-libraries.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python [py3]
# language: python
# name: Python [py3]
# ---
import numpy as np
# Fix the RNG seed so the deck shuffle below is reproducible.
np.random.seed(19)
# I scanned my deck using the Delver Lens app https://delverlab.com/. I then exported it into a csv keeping card name, edition and quantity as the only entries. Here I show how to import the deck into python and how to retrieve card information using `mtgsdk`.
# +
from mtgsdk import Card
import csv

# Build a flat list of card names, one entry per physical copy in the deck.
cards = []
with open('decks/white_green.csv', 'r') as csvfile:
    reader = csv.reader(csvfile, delimiter=',')
    for row in reader:
        # Skip the header row exported by the Delver Lens app.
        if row[0] == 'Name':
            continue
        # The last column holds the quantity: add that many copies of the
        # card name (replaces the manual while-loop countdown).
        cards.extend([row[0]] * int(row[-1]))
# -
# shuffle deck
# NOTE: np.random.permutation returns a new shuffled numpy array (of strings);
# it does not shuffle the original list in place.
cards = np.random.permutation(cards)
print(cards)
# Table header; card names are right-aligned in a 20-character column.
print('%20s:'%'Name', "\tType\t\tPower\tTough\tCost")
for c in cards[:10]:
    # One API query per card name; .array() yields dict-like records
    # (the original code indexed cc['power'] and caught KeyError).
    cc = Card.where(name=c).array()[0]
    # Fields like power/toughness/manaCost are simply absent for some card
    # types; dict.get() returns None then, matching the old try/except
    # KeyError blocks with a third of the code.
    power = cc.get('power')
    tough = cc.get('toughness')
    cost = cc.get('manaCost')
    print('%20s:'%c, cc['types'], power, tough, cost, sep='\t')
# I haven't found a better way to retrieve all cards in the deck in one shot instead of querying the database for each name separately as I do in the above. There must be a way...
| load_csv_deck.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# language: python
# name: python3
# ---
# + [markdown] id="4fc265b0"
# # NLP_TP Transformers
#
# + [markdown] id="145be2de"
# ## 01- Sentiment analysis
# + colab={"base_uri": "https://localhost:8080/"} id="583ef981" outputId="9d9f6111-4c40-43c4-faa3-fae61c43bb0d"
# !pip install transformers
# + colab={"base_uri": "https://localhost:8080/"} id="10e24977" outputId="24112ed9-83d9-41cd-a742-e1a0b7332d43"
from transformers import pipeline
# Multilingual sentiment model that scores text as 1-5 "stars".
nlp = pipeline("sentiment-analysis", model="nlptown/bert-base-multilingual-uncased-sentiment")
result = nlp("bien dit")[0]
print(result)
print(f"label: {result['label']}, with score: {round(result['score']*100, 2)}%")
result = nlp("mauvais travail")[0]
print(f"label: {result['label']}, with score: {round(result['score']*100, 2)}%")
# + [markdown] id="80f031f4"
# ## 02- Text generation
# + id="4668bfb2"
from transformers import pipeline
# + colab={"base_uri": "https://localhost:8080/"} id="63bf964e" outputId="7d975737-cdc1-4560-a6ad-55aa7224ba4a"
# French
# do_sample=False -> greedy decoding: a deterministic continuation of the prompt.
text_generator_fr = pipeline('text-generation', model='dbddv01/gpt2-french-small')
print(text_generator_fr("je lis un", max_length=50, do_sample=False))
# + colab={"base_uri": "https://localhost:8080/"} id="bc8556ac" outputId="67a9e4d0-e6d2-441b-9b41-bd0a0e530d82"
# Arabic
# Small Arabic GPT-2; the generation call follows on the next line.
text_generator_Ar = pipeline('text-generation', model='akhooli/gpt2-small-arabic')
print(text_generator_Ar("في المغرب السياحة الجبلية ", max_length=50, do_sample=False))
# + [markdown] id="1af36c17"
# ## 03- Name entity recognition (NER)
# + colab={"base_uri": "https://localhost:8080/"} id="3a2364c9" outputId="4fef81d1-b475-412d-c977-8eb4595ce87d"
from transformers import AutoTokenizer, AutoModelForTokenClassification
from transformers import pipeline
# NOTE(review): "gilf/french-postag-model" is, per its name, a POS-tagging
# model rather than a true NER model -- confirm it is intended here.
ner_english_recognition = pipeline("ner", model="dslim/bert-base-NER", tokenizer="dslim/bert-base-NER")
ner_arabic_recognition = pipeline("ner", model="hatmimoha/arabic-ner", tokenizer="hatmimoha/arabic-ner")
ner_french_recognition = pipeline("ner", model="gilf/french-postag-model", tokenizer="gilf/french-postag-model")
print(ner_arabic_recognition("في المغرب السياحة الجبلية"))
print(ner_english_recognition("good time"))
print(ner_french_recognition("je lis un"))
# + [markdown] id="dff416e2"
# ## 04- Question answering
# + colab={"base_uri": "https://localhost:8080/"} id="511e712a" outputId="7c74ef42-77d4-493a-d166-98b633d117d3"
from transformers import pipeline
# No model name given: downloads the library's default extractive-QA
# checkpoint (the answer is a span copied out of the context).
question_answering = pipeline("question-answering")
context = """
Le ville de Salé se trouve dans le plateau côtier large de 10 à 50 km, formé de plaines douces inclinées vers l’Océan Atlantique qui s'étend de Rabat-Salé à Skhirate-Témara, et du littoral atlantique au barrage Sidi Mohammed ben Abdellah9. L'altitude de la ville de Salé et du plateau côtier tout entier ne dépasse pas les 100 m10. Le fleuve Bouregreg qui sépare Rabat et Salé, donne une vallée plus ou moins large selon les endroits, pénétrant d’une quinzaine de kilomètres en amont de l’embouchure, surplombée par les plateaux de Bettana, Sala Al Jadida et de la commune rurale de Shoul du côté de Salé, et par ceux des quartiers de Hassan, El Youssoufia, Nahda et Akkrach du côté de Rabat. L'« arrière-pays » de Rabat-Salé est plutôt vert loin de l'urbanisation de masse, notamment grâce à la présence des forêts de la Mamora et de Témara, à proximité.
"""
question = "Quelle est Sala Al Jadida?"
# The result dict also carries 'score', 'start' and 'end'; only the answer
# text is printed here.
result = question_answering(question=question, context=context)
print("Reponse:", result['answer'])
# + [markdown] id="c9a9c291"
# ## 05- Filling masked text
# + colab={"base_uri": "https://localhost:8080/"} id="3c913305" outputId="0b4dafe1-c4ab-4fee-fc0a-6bef5384d0b8"
from transformers import pipeline
# Default masked-language model (no model pinned).
nlp = pipeline("fill-mask")
from pprint import pprint
# Use the tokenizer's own mask token so the template matches whatever model
# the pipeline loaded.
pprint(nlp(f"Les coronavirus sont des {nlp.tokenizer.mask_token} de la famille des Coronaviridae."))
# + colab={"base_uri": "https://localhost:8080/"} id="cc8ba494" outputId="77ed09b5-e13d-4096-80ac-99140e943bc8"
#Arabic
# CAMeLBERT (classical Arabic); this model's mask token is the literal [MASK].
arabic_fill_mask = pipeline('fill-mask', model='CAMeL-Lab/bert-base-camelbert-ca')
pprint(arabic_fill_mask("جمعيات تدق ناقوس الخطر بشأن استنزاف الموارد[MASK] بالجنوب الشرقي ."))
# + [markdown] id="6f56d9cc"
# ## 06- Summarization
# + colab={"base_uri": "https://localhost:8080/"} id="6186b099" outputId="4ad638b3-9a4c-429e-de28-565f368c9993"
from transformers import pipeline
# Default summarization checkpoint; do_sample=False gives a deterministic
# summary between 30 and 130 tokens.
summarizer = pipeline("summarization")
ARTICLE = """
Le Maroc était connu sous le nom de royaume de Marrakech, sous les trois dynasties qui avaient cette ville comme capitale. Puis, sous le nom de royaume de Fès, sous les dynasties qui résidaient à Fès. Au xixe siècle, les cartographes européens mentionnaient toujours un « royaume de Maroc », en indiquant l'ancienne capitale « Maroc » (pour Marrakech). Sous la dynastie des Alaouites, toujours au pouvoir, le pays est passé de l'appellation d'« Empire chérifien » à celle de « royaume du Maroc » en 195725, le sultan <NAME> ben Youssef en devenant le roi, en tant que <NAME>. Il peut être aussi surnommé « Royaume chérifien », en référence au souverain alaouite, descendant du prophète de l'islam Mahomet, qualifié de « chérif ».
"""
print(summarizer(ARTICLE, max_length=130, min_length=30, do_sample=False))
# + [markdown] id="d3e5558b"
# ## 07- Translation
# + colab={"base_uri": "https://localhost:8080/"} id="5269606b" outputId="8cea9d0f-b82d-49c1-a27f-371f6af5d79c"
from transformers import pipeline
# English to french
# Default checkpoint for the en->fr translation task.
translator = pipeline("translation_en_to_fr")
# NOTE(review): AutoTokenizer/AutoModelForSeq2SeqLM are imported but never
# used in this cell.
from transformers import AutoTokenizer, AutoModelForSeq2SeqLM
pprint(translator("This allows people to understand complex terms or phrases.", max_length=40))
# + colab={"base_uri": "https://localhost:8080/"} id="a7451bc5" outputId="c4901140-8429-444c-873f-7fd97b12285d"
# english to Arabic
from transformers import MarianTokenizer, MarianMTModel
# English -> Arabic with a MarianMT checkpoint.
tokenizer = MarianTokenizer.from_pretrained("marefa-nlp/marefa-mt-en-ar")
model = MarianMTModel.from_pretrained("marefa-nlp/marefa-mt-en-ar")
text = "Mountain tourism in Morocco"
# `prepare_seq2seq_batch` was deprecated in transformers 4.x and later removed;
# calling the tokenizer directly produces the same input_ids/attention_mask.
batch = tokenizer(text, return_tensors="pt")
translated_tokens = model.generate(**batch)
# Decode each generated sequence back to a string, dropping pad/eos tokens.
Output_text = [tokenizer.decode(t, skip_special_tokens=True) for t in translated_tokens]
print(Output_text)
# + colab={"base_uri": "https://localhost:8080/", "height": 181, "referenced_widgets": ["<KEY>", "<KEY>", "8ffc79499f63482b82122e66a9811d7a", "14fff933a0b24375804d082133a4dd59", "0beead98f5b1460d976ed506a1123129", "<KEY>", "98267d9b90f441508572dac91e2755e8", "<KEY>", "<KEY>", "c561e407f6754b68b093e96e0a93486c", "<KEY>", "<KEY>", "<KEY>", "<KEY>", "<KEY>", "24338d0496b645a1a025f19037f18aa5", "09fabcb54c0e42568124f91f56a22288", "<KEY>", "2e36e37457d4439a91bf9ac538356ee5", "23ec9403ed1e4ca1848ffa302bffc0d9", "<KEY>", "<KEY>", "3fae572982f14e7094d947507f8de719", "<KEY>"]} id="e032ce8d" outputId="5076fd6e-f965-436f-c65d-58ace5647822"
# Arabic to English
from transformers import MBartForConditionalGeneration, MBart50TokenizerFast
text_ar = "السياحة الجبلية في المغرب"
# mBART-50 many-to-many: one model covers all 50x50 language directions.
model = MBartForConditionalGeneration.from_pretrained("facebook/mbart-large-50-many-to-many-mmt")
tokenizer = MBart50TokenizerFast.from_pretrained("facebook/mbart-large-50-many-to-many-mmt")
# Tell the tokenizer which source language the input is in...
tokenizer.src_lang = "ar_AR"
encoded_ar = tokenizer(text_ar, return_tensors="pt")
# ...and force the decoder to start with the English language token to pick
# the target language.
generated_tokens = model.generate(**encoded_ar, forced_bos_token_id=tokenizer.lang_code_to_id["en_XX"])
pprint(tokenizer.batch_decode(generated_tokens, skip_special_tokens=True))
# + [markdown] id="7531300a"
# ## 08- Feature extraction
# + colab={"base_uri": "https://localhost:8080/"} id="f397f772" outputId="65ce663d-7f02-4a76-cbd3-2deac6026905"
from sklearn.feature_extraction.text import CountVectorizer
# sentences.
sentences = [
    "This is a sample sentence",
    "I am interested in politics",
    "You are a very good software engineer, engineer.",]
# Bag-of-words features; English stop words ("is", "a", "in", ...) are dropped.
vectorizer = CountVectorizer(stop_words='english')
vectorizer.fit(sentences)
# `get_feature_names` was deprecated in scikit-learn 1.0 and removed in 1.2.
# Prefer `get_feature_names_out`, falling back for older scikit-learn versions.
if hasattr(vectorizer, "get_feature_names_out"):
    feature_names = vectorizer.get_feature_names_out()
else:
    feature_names = vectorizer.get_feature_names()
feature_names
| NLP_TP-Transformers.ipynb |
# ---
# jupyter:
# jupytext:
# formats: ipynb,md
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: nanomesh
# language: python
# name: nanomesh
# ---
# %config InlineBackend.rc = {'figure.figsize': (10,6)}
# %matplotlib inline
# ## Poisson equation with getfem
#
# This example solves the Poisson problem with [getfem](https://getfem.org/), using data generated by Nanomesh. We solve the Poisson problem $-\Delta u = 1$ with the boundary condition $u=0$.
#
# This is an adaptation of the [Python getfem demo](https://getfem-examples.readthedocs.io/en/latest/demo_unit_disk.html).
#
# ### Setup getfem
#
# First, we must setup the path to the python module ([link](https://getfem.org/python/pygf.html#introduction)), so that getfem can be used in our Nanomesh environment.
#
# We import getfem and generate a mesh to test if it works.
# +
import sys
# Make the locally-built getfem Python interface importable from this env.
sys.path.append('../../../getfem/interface/src/python/')
import getfem
# Smoke test: build a tiny 2x2 Cartesian mesh to verify the binding works.
m = getfem.Mesh('cartesian', range(0, 3), range(0,3))
print(m)
# -
# ### Generate some data
#
# We use the 2D binary blobs data to generate a triangle mesh. The triangles that belong to the blobs are removed.
# +
# NOTE(review): `disk` is imported but not used in this cell.
from skimage.morphology import disk
from nanomesh.data import binary_blobs2d
# Deterministic 100x100 binary blob image (fixed seed).
data = binary_blobs2d(length=100, seed=96)
from nanomesh import Mesher
mesher = Mesher(data)
mesher.generate_contour(max_edge_dist=3, precision=1)
# 'q30a25' are Triangle-style switches (quality / max-area constraints).
mesh = mesher.triangulate(opts='q30a25')
triangles = mesh.get('triangle')
# Drop the triangles belonging to the blobs so only the background is meshed.
triangles.remove_cells(label=2, key='physical')
triangles.plot()
# -
# ### Convert to getfem mesh type
#
# We use the *2D triangulation* [mesh type](https://getfem-examples.readthedocs.io/en/latest/ball_eigen.html?highlight=mesh#Mesh-generation) described by passing the argument `pt2D`. Note that the points and cells arrays must be transposed.
#
# +
import getfem as gf
# getfem's 'pt2D' constructor expects points and cells column-wise,
# hence the transposes.
p = triangles.points.T
t = triangles.cells.T
mesh = gf.Mesh('pt2D', p, t)
mesh
# -
# ### Poisson's equation
#
# The next cell shows how to solve the Poisson equation. This code was re-used from [here](https://getfem-examples.readthedocs.io/en/latest/demo_unit_disk.html).
# +
import getfem as gf
import numpy as np
# Tag the whole outer boundary as region 1 so u = 0 can be imposed there.
OUTER_BOUND = 1
outer_faces = mesh.outer_faces()
mesh.set_region(OUTER_BOUND, outer_faces)
# Slice of the full mesh, used later to export the solution to VTK.
sl = gf.Slice(("none",), mesh, 1)
elements_degree = 2
# Scalar finite-element space with classical (Lagrange) elements of degree 2.
mfu = gf.MeshFem(mesh, 1)
mfu.set_classical_fem(elements_degree)
# Integration method; pow(elements_degree, 2) sets its order (here 4).
mim = gf.MeshIm(mesh, pow(elements_degree, 2))
# Constant right-hand side f = 1 of the Poisson problem -Δu = f.
F = 1.0
md = gf.Model("real")
md.add_fem_variable("u", mfu)
md.add_Laplacian_brick(mim, "u")
# F is stored as FEM data: one copy of the constant per degree of freedom.
md.add_fem_data("F", mfu)
md.set_variable("F", np.repeat(F, mfu.nbdof()))
md.add_source_term_brick(mim, "u", "F")
# Homogeneous Dirichlet condition u = 0 on the outer boundary, enforced with
# Lagrange multipliers of degree elements_degree - 1.
md.add_Dirichlet_condition_with_multipliers(mim, "u", elements_degree - 1, OUTER_BOUND)
md.solve()
# -
# ### Display result using PyVista
#
# The data can be visualized by saving to a vtk file, and loading that with [PyVista](https://docs.pyvista.org/).
# +
import pyvista as pv
U = md.variable("u")
# Export the computed field to a VTK file, then visualize it with PyVista.
sl.export_to_vtk("u.vtk", "ascii", mfu, U, "U")
m = pv.read("u.vtk")
contours = m.contour()
p = pv.Plotter()
p.add_mesh(m, show_edges=False)
# Overlay iso-contour lines of the solution.
p.add_mesh(contours, color="black", line_width=1)
# Semi-transparent 8-level contour of the largest connected component.
p.add_mesh(m.contour(8).extract_largest(), opacity=0.1)
p.show(cpos="xy", jupyter_backend='static')
| notebooks/finite_elements/how_to_poisson_with_getfem.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: tf_gpu_kernel
# language: python
# name: tf_gpu
# ---
# %%time
import numpy as np
# from tempfile import TemporaryFile
import os
import pickle
import random
import operator
import math
import numpy as np
import pandas as pd
import matplotlib.pyplot as plt
import IPython.display as ipd
import time
# %%time
import librosa
import librosa.display
import tensorflow as tf
# +
# # %%time
# directory = "../data/"
# data = []
# classes = []
# for folder in os.listdir(directory):
# if folder=="mf_files":
# continue
# for file in os.listdir(directory+folder):
# sig, rate = librosa.load(directory+folder+"/"+file)
# # MFCC
# mfcc_feat = librosa.feature.mfcc(y =sig, sr= rate)
# mfcc_mean = mfcc_feat.mean(1)
# mfcc_var = mfcc_feat.var(1)
# # Spectral Centroid
# spec_centroid = librosa.feature.spectral_centroid(y= sig, sr = rate)
# spec_centroid_mean = spec_centroid.mean(1)
# spec_centroid_var = spec_centroid.var(1)
# # Spectral-Bandwidth
# spec_band = librosa.feature.spectral_bandwidth(y=sig, sr= rate)
# spec_band_mean = spec_band.mean(1)
# spec_band_var = spec_band.var(1)
# # Zero-Crossing Rate
# zero_cr = librosa.feature.zero_crossing_rate(sig)
# zero_cr_mean = zero_cr.mean(1)
# zero_cr_var = zero_cr.var(1)
# # Spectral-Rolloff
# rolloff = librosa.feature.spectral_rolloff(y = sig, sr = rate)
# rolloff_mean = rolloff.mean(1)
# rolloff_var = rolloff.var(1)
# # Tempo
# onset_env = librosa.onset.onset_strength(y=sig, sr=rate)
# tempo = librosa.beat.tempo(onset_envelope=onset_env, sr=rate)
# # Chroma
# chroma = librosa.feature.chroma_stft(y=sig, sr = rate)
# chroma_mean = chroma.mean(1)
# chroma_var = chroma.var(1)
# # RMS
# S, phase = librosa.magphase(librosa.stft(sig))
# rms = librosa.feature.rms(S=S)
# rms_mean = rms.mean(1)
# rms_var = rms.var(1)
# data.append(np.array([mfcc_mean, mfcc_var,
# spec_centroid_mean, spec_centroid_var,
# spec_band_mean, spec_band_var,
# zero_cr_mean, zero_cr_var,
# rolloff_mean, rolloff_var,
# tempo,
# chroma_mean, chroma_var,
# rms_mean, rms_var]).flatten())
# classes.append(folder)
# df = np.array([np.array(np.hstack(row)) for row in data])
# pd.DataFrame(df).to_csv("all_features.csv")
# -
# %%time
# Build the dataset: load every audio clip under ../data/<genre>/ and convert
# it to a (time, mel) log-mel spectrogram scaled to [0, 1]; `classes` records
# the genre folder name for each clip.
directory = "../data/"
data = []
classes = []
for folder in os.listdir(directory):
    # Skip the non-genre helper folder.
    if folder=="mf_files":
        continue
    for file in os.listdir(directory+folder):
        sig, rate = librosa.load(directory+folder+"/"+file)
        mel = librosa.feature.melspectrogram(y=sig, sr=rate)
        # scale to between 0 and 1
        # power_to_db with ref=np.max yields values in [-80, 0] dB (default
        # top_db is 80), so (x + 80) / 80 maps them into [0, 1].
        mel_db = (librosa.power_to_db(mel, ref= np.max)+80)/80
        # Transpose so time is the first axis (sequence dimension for RNNs).
        data.append(mel_db.T)
        classes.append(folder)
# Collect the indices of clips whose spectrogram does not have the expected
# (1293, 128) shape (shorter or longer recordings); they are excluded later.
poor_data = []
for idx, spec in enumerate(data):
    if spec.shape != (1293, 128):
        print(idx, "\t", spec.shape)
        poor_data.append(idx)
print(len(poor_data))
# +
# Keep only the clips with the expected spectrogram shape.
usable_data1 = np.stack([element for i,element in enumerate(data) if i not in poor_data])
usable_classes = np.array([element for i,element in enumerate(classes) if i not in poor_data])
# Add a trailing channel axis: (clips, time, mel) -> (clips, time, mel, 1).
usable_data = usable_data1[:,:,:, np.newaxis]
print(usable_data.shape)
print(usable_classes.shape)
# -
# Map each genre name to an integer id, e.g. {'blues': 0, ...}.
label_dict = {}
for i, element in enumerate(np.unique(usable_classes)):
    label_dict[element] = i
print(label_dict)
# convert from string to int
usable_labels = np.array([label_dict[element] for element in list(usable_classes)])
# Reverse mapping (id -> genre name), used to label the confusion matrices.
inv_labels = {v: k for k, v in label_dict.items()}
from sklearn.model_selection import train_test_split
# Stratified 75/25 split keeps the genre distribution equal in train and test.
trainX, testX, trainY, testY = train_test_split(usable_data,
                                                usable_labels,
                                                test_size = 0.25,
                                                random_state=29,
                                                stratify = usable_labels)
trainX.shape
# ### MFCC: No Scaling
# Overfitting:
# * Regularization
# * Batch Normalization
# * Dropout Layers:
# * tf.keras.layers.Dropout(rate=0.2)
# * 0.20-0.3 in RNN and 0.4-.5 in CNN
#
# Try with ReLU layers
# or Elu or SELU layers:
# * tf.keras.layers.Dense(64, kernel_initializer='lecun_normal', activation='selu')
# * tf.keras.layers.Dense(64, kernel_initializer='he_normal',
# activation= 'elu')
#
#
def plot_results(fit):
    """Plot training/validation accuracy and loss curves from a Keras History.

    Parameters:
        fit: the `History` object returned by `model.fit`; its `history` dict
            must contain 'accuracy', 'val_accuracy', 'loss' and 'val_loss'.
    """
    # Create both subplots on one figure.  The original code called
    # plt.figure(figsize=(20,15)) and then plt.subplots(2), which opened a
    # second figure and silently discarded the requested figsize.
    fig, ax = plt.subplots(2, figsize=(20, 15))
    # accuracy subplot ("test" here is the validation split)
    ax[0].plot(fit.history["accuracy"], label="train")
    ax[0].plot(fit.history["val_accuracy"], label="test")
    ax[0].set_ylabel("Accuracy")
    ax[0].legend(loc="lower right")
    ax[0].set_title("Accuracy")
    # Error subplot
    ax[1].plot(fit.history["loss"], label="train error")
    ax[1].plot(fit.history["val_loss"], label="test error")
    ax[1].set_ylabel("Error")
    ax[1].set_xlabel("Epoch")
    ax[1].legend(loc="upper right")
    ax[1].set_title("Error")
    plt.tight_layout()
    plt.show()
train_times = []
trainX.shape
# ### Baseline Metrics
# ---
# Following Book
#
# +
# %%time
# Baseline: flatten each (time, mel, 1) spectrogram and feed it to a single
# Dense layer — effectively multinomial logistic regression over 10 genres.
model = tf.keras.Sequential([
    tf.keras.layers.Flatten(input_shape = [trainX.shape[1], trainX.shape[2], trainX.shape[3]]),
    tf.keras.layers.Dense(10)
])
## get output layer as logits for each class
##(we can use softmax to get probabilities, but doing so makes model unstable:
# https://www.tensorflow.org/tutorials/quickstart/beginner
model.summary()
# Stop once the *training* loss has not improved for 50 epochs.
callback = tf.keras.callbacks.EarlyStopping(monitor='loss', patience=50)
model.compile(optimizer='adam',
              loss=tf.keras.losses.SparseCategoricalCrossentropy(from_logits=True),
              metrics=['accuracy'])
# Time the fit so runtimes of the different architectures can be compared.
start = time.time()
fit = model.fit(trainX, trainY,
                validation_data = (testX,testY),
                epochs = 250,
                callbacks = [callback])
stop = time.time()
train_times.append(stop-start)
plot_results(fit)
# Append a Softmax head to turn the logits into class probabilities.
probability_model = tf.keras.Sequential([model,
                                         tf.keras.layers.Softmax()])
predictions = probability_model.predict(testX)
pred_labels = [np.argmax(prediction) for i,prediction in enumerate(predictions)]
from sklearn.metrics import ConfusionMatrixDisplay
from sklearn.metrics import accuracy_score
# Confusion matrix with human-readable genre names on the axes.
ConfusionMatrixDisplay.from_predictions(
    y_true = [inv_labels[e] for i,e in enumerate(testY)],
    y_pred = [inv_labels[e] for i,e in enumerate(pred_labels)],
    xticks_rotation= "vertical")
print(accuracy_score(testY,pred_labels))
# +
# %%time
# NOTE(review): this cell is identical to the baseline cell above (presumably
# a re-run to check run-to-run variance); consider deleting one copy.
model = tf.keras.Sequential([
    tf.keras.layers.Flatten(input_shape = [trainX.shape[1], trainX.shape[2], trainX.shape[3]]),
    tf.keras.layers.Dense(10)
])
## get output layer as logits for each class
##(we can use softmax to get probabilities, but doing so makes model unstable:
# https://www.tensorflow.org/tutorials/quickstart/beginner
model.summary()
callback = tf.keras.callbacks.EarlyStopping(monitor='loss', patience=50)
model.compile(optimizer='adam',
              loss=tf.keras.losses.SparseCategoricalCrossentropy(from_logits=True),
              metrics=['accuracy'])
start = time.time()
fit = model.fit(trainX, trainY,
                validation_data = (testX,testY),
                epochs = 250,
                callbacks = [callback])
stop = time.time()
train_times.append(stop-start)
plot_results(fit)
probability_model = tf.keras.Sequential([model,
                                         tf.keras.layers.Softmax()])
predictions = probability_model.predict(testX)
pred_labels = [np.argmax(prediction) for i,prediction in enumerate(predictions)]
from sklearn.metrics import ConfusionMatrixDisplay
from sklearn.metrics import accuracy_score
ConfusionMatrixDisplay.from_predictions(
    y_true = [inv_labels[e] for i,e in enumerate(testY)],
    y_pred = [inv_labels[e] for i,e in enumerate(pred_labels)],
    xticks_rotation= "vertical")
print(accuracy_score(testY,pred_labels))
# -
# ### SimpleRNN
# ---
#
trainX.shape
trainY.shape
type(trainX)
# +
# %%time
# Two stacked SimpleRNN layers over the (time, mel) sequence, then a Dense
# layer producing 10 genre logits.  Note the channel axis is dropped:
# input_shape uses trainX.shape[1:3].
model = tf.keras.Sequential([
    tf.keras.layers.SimpleRNN(64, input_shape=trainX.shape[1:3],
                              return_sequences=True),
    tf.keras.layers.SimpleRNN(64),
    tf.keras.layers.Dense(10)])
model.summary()
callback = tf.keras.callbacks.EarlyStopping(monitor='loss', patience=10)
model.compile(optimizer='adam',
              loss=tf.keras.losses.SparseCategoricalCrossentropy(from_logits=True),
              metrics=['accuracy'])
start = time.time()
fit = model.fit(trainX, trainY,
                validation_data = (testX,testY),
                epochs = 250,
                callbacks = [callback])
stop = time.time()
train_times.append(stop-start)
plot_results(fit)
probability_model = tf.keras.Sequential([model,
                                         tf.keras.layers.Softmax()])
predictions = probability_model.predict(testX)
pred_labels = [np.argmax(prediction) for i,prediction in enumerate(predictions)]
from sklearn.metrics import ConfusionMatrixDisplay
from sklearn.metrics import accuracy_score
ConfusionMatrixDisplay.from_predictions(
    y_true = [inv_labels[e] for i,e in enumerate(testY)],
    y_pred = [inv_labels[e] for i,e in enumerate(pred_labels)],
    xticks_rotation= "vertical")
print(accuracy_score(testY,pred_labels))
# +
# %%time
# Same SimpleRNN architecture; only the training schedule differs
# (100 epochs with batch_size=10 instead of 250 epochs, default batch size).
model = tf.keras.Sequential([
    tf.keras.layers.SimpleRNN(64, input_shape=trainX.shape[1:3],
                              return_sequences=True),
    tf.keras.layers.SimpleRNN(64),
    tf.keras.layers.Dense(10)])
model.summary()
callback = tf.keras.callbacks.EarlyStopping(monitor='loss', patience=10)
model.compile(optimizer='adam',
              loss=tf.keras.losses.SparseCategoricalCrossentropy(from_logits=True),
              metrics=['accuracy'])
start = time.time()
fit = model.fit(trainX, trainY,
                validation_data = (testX,testY),
                epochs = 100, batch_size =10,
                callbacks = [callback])
stop = time.time()
train_times.append(stop-start)
plot_results(fit)
probability_model = tf.keras.Sequential([model,
                                         tf.keras.layers.Softmax()])
predictions = probability_model.predict(testX)
pred_labels = [np.argmax(prediction) for i,prediction in enumerate(predictions)]
from sklearn.metrics import ConfusionMatrixDisplay
from sklearn.metrics import accuracy_score
ConfusionMatrixDisplay.from_predictions(
    y_true = [inv_labels[e] for i,e in enumerate(testY)],
    y_pred = [inv_labels[e] for i,e in enumerate(pred_labels)],
    xticks_rotation= "vertical")
print(accuracy_score(testY,pred_labels))
# -
# ### LSTM
# +
# %%time
# Two stacked LSTMs, then a ReLU hidden layer before the 10-way logit output.
model = tf.keras.Sequential([
    tf.keras.layers.LSTM(64, input_shape=trainX.shape[1:3],
                         return_sequences=True),
    tf.keras.layers.LSTM(64),
    tf.keras.layers.Dense(64, activation = "relu"),
    tf.keras.layers.Dense(10),
])
model.summary()
callback = tf.keras.callbacks.EarlyStopping(monitor='loss', patience=10)
model.compile(optimizer=tf.keras.optimizers.Adam(learning_rate = 0.001),
              loss=tf.keras.losses.SparseCategoricalCrossentropy(from_logits=True),
              metrics=['accuracy'])
start = time.time()
fit = model.fit(trainX, trainY,
                validation_data = (testX,testY),
                epochs = 100, batch_size = 32,
                callbacks = [callback])
stop = time.time()
train_times.append(stop-start)
plot_results(fit)
probability_model = tf.keras.Sequential([model,
                                         tf.keras.layers.Softmax()])
predictions = probability_model.predict(testX)
pred_labels = [np.argmax(prediction) for i,prediction in enumerate(predictions)]
from sklearn.metrics import ConfusionMatrixDisplay
from sklearn.metrics import accuracy_score
ConfusionMatrixDisplay.from_predictions(
    y_true = [inv_labels[e] for i,e in enumerate(testY)],
    y_pred = [inv_labels[e] for i,e in enumerate(pred_labels)],
    xticks_rotation= "vertical")
print(accuracy_score(testY,pred_labels))
# +
# %%time
# Regularized variant: dropout inside the second LSTM, plus two L2-regularized
# Dense layers each followed by Dropout, to fight the overfitting seen above.
model = tf.keras.Sequential([
    tf.keras.layers.LSTM(64, input_shape=trainX.shape[1:3],
                         return_sequences=True),
    tf.keras.layers.LSTM(64, dropout=0.05, recurrent_dropout=0.1),
    tf.keras.layers.Dense(64, activation = "relu",
                          kernel_regularizer = tf.keras.regularizers.l2(0.001)),
    tf.keras.layers.Dropout(rate=0.01),
    tf.keras.layers.Dense(32, activation = "relu",
                          kernel_regularizer = tf.keras.regularizers.l2(0.001)),
    tf.keras.layers.Dropout(rate=0.01),
    tf.keras.layers.Dense(10, activation = "linear")
])
model.summary()
callback = tf.keras.callbacks.EarlyStopping(monitor='loss', patience=10)
model.compile(optimizer=tf.keras.optimizers.Adam(learning_rate = 0.001),
              loss=tf.keras.losses.SparseCategoricalCrossentropy(from_logits=True),
              metrics=['accuracy'])
start = time.time()
fit = model.fit(trainX, trainY,
                validation_data = (testX,testY),
                epochs = 100, batch_size = 32,
                callbacks = [callback])
stop = time.time()
train_times.append(stop-start)
plot_results(fit)
probability_model = tf.keras.Sequential([model,
                                         tf.keras.layers.Softmax()])
predictions = probability_model.predict(testX)
pred_labels = [np.argmax(prediction) for i,prediction in enumerate(predictions)]
from sklearn.metrics import ConfusionMatrixDisplay
from sklearn.metrics import accuracy_score
ConfusionMatrixDisplay.from_predictions(
    y_true = [inv_labels[e] for i,e in enumerate(testY)],
    y_pred = [inv_labels[e] for i,e in enumerate(pred_labels)],
    xticks_rotation= "vertical")
print(accuracy_score(testY,pred_labels))
# +
# %%time
# As above but with dropout / recurrent dropout on *both* LSTM layers.
model = tf.keras.Sequential([
    tf.keras.layers.LSTM(64, dropout=0.05, recurrent_dropout=0.1,
                         input_shape=trainX.shape[1:3],
                         return_sequences=True),
    tf.keras.layers.LSTM(64, dropout=0.05, recurrent_dropout=0.1),
    tf.keras.layers.Dense(64, activation = "relu",
                          kernel_regularizer = tf.keras.regularizers.l2(0.001)),
    tf.keras.layers.Dropout(rate=0.01),
    tf.keras.layers.Dense(32, activation = "relu",
                          kernel_regularizer = tf.keras.regularizers.l2(0.001)),
    tf.keras.layers.Dropout(rate=0.01),
    tf.keras.layers.Dense(10, activation = "linear")
])
model.summary()
callback = tf.keras.callbacks.EarlyStopping(monitor='loss', patience=10)
model.compile(optimizer=tf.keras.optimizers.Adam(learning_rate = 0.001),
              loss=tf.keras.losses.SparseCategoricalCrossentropy(from_logits=True),
              metrics=['accuracy'])
start = time.time()
fit = model.fit(trainX, trainY,
                validation_data = (testX,testY),
                epochs = 100, batch_size = 32,
                callbacks = [callback])
stop = time.time()
train_times.append(stop-start)
plot_results(fit)
probability_model = tf.keras.Sequential([model,
                                         tf.keras.layers.Softmax()])
predictions = probability_model.predict(testX)
pred_labels = [np.argmax(prediction) for i,prediction in enumerate(predictions)]
from sklearn.metrics import ConfusionMatrixDisplay
from sklearn.metrics import accuracy_score
ConfusionMatrixDisplay.from_predictions(
    y_true = [inv_labels[e] for i,e in enumerate(testY)],
    y_pred = [inv_labels[e] for i,e in enumerate(pred_labels)],
    xticks_rotation= "vertical")
print(accuracy_score(testY,pred_labels))
# -
| models/LSTM_MelSpectrogram.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 2
# language: python
# name: python2
# ---
# ## 9.3 Foundations of Data-Flow Analysis
# ### 9.3.1
#
# > How is your lattice diagram related to that in Fig. 9.22?
# Same structure.
| 09/9.3.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# language: python
# name: python3
# ---
# +
# default_exp wrangle
# -
# # wrangle
#
# > Some helper functions to '[wrangle](https://en.wikipedia.org/wiki/Data_wrangling)' the data once in pandas.
#hide
from nbdev.showdoc import *
#hide
#export
import pandas as pd
# +
#export
def drop_low_uniqueness_cols(df: pd.DataFrame, nunique_thold=0.05) -> pd.DataFrame:
    """Drop columns with a low number of unique values.

    ##### Parameters:
    - **df** `pd.DataFrame` A pandas dataframe.
    - **nunique_thold** `float` or `int` A float drops columns whose uniqueness
      *rate* (unique values / rows) is at or below the threshold; an int
      compares raw unique-value counts instead. Any other type (or a float
      >= 1.0) leaves the dataframe untouched.

    ##### Returns:
    - **df** `pd.DataFrame` A pandas dataframe.
    """
    unique_counts = df.nunique()
    if isinstance(nunique_thold, int):
        # Integer threshold: keep columns with strictly more unique values.
        df = df.loc[:, unique_counts > nunique_thold]
    elif isinstance(nunique_thold, float) and nunique_thold < 1.0:
        # Float threshold: keep columns whose uniqueness rate exceeds it.
        df = df.loc[:, unique_counts / len(df) > nunique_thold]
    return df
# +
# tests
df = pd.DataFrame([
[1,2,3,4],
[1,20,30,4],
[1,200,300,40],
[1,2000,3000,40],
[1,20000,30000,400],
[10,200000,300000,400],
], columns=['col0','col1','col2','col3'])
# check that col0 is removed as it only has 2 unique values
assert 'col0' not in drop_low_uniqueness_cols(df, nunique_thold=2).columns
# check that col3 is removed as it only has 50% unique values
assert 'col3' not in drop_low_uniqueness_cols(df, nunique_thold=0.5).columns
# +
#export
def drop_low_std_cols(df: pd.DataFrame, std_thold=0.05) -> pd.DataFrame:
    """Drop columns with a low standard deviation value.

    ##### Parameters:
    - **df** `pd.DataFrame` A pandas dataframe.
    - **std_thold** `float` Columns whose standard deviation is at or below
      this threshold are dropped.

    ##### Returns:
    - **df** `pd.DataFrame` A pandas dataframe.
    """
    # Boolean mask of the columns with enough spread to keep.
    high_variance = df.std() > std_thold
    return df.loc[:, high_variance]
# +
#tests
df = pd.DataFrame([
[1,2,3,4],
[1,20,30,4],
[1,200,300,40],
[1,2000,3000,40],
[1,20000,30000,400],
[1.1,200000,300000,400],
], columns=['col0','col1','col2','col3'])
# check that col0 is removed as it only has 2 unique values and a low std value (0.040825)
assert 'col0' not in drop_low_std_cols(df, std_thold=0.05).columns
# -
| 01_wrangle.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# language: python
# name: python3
# ---
# 
#
# <a href="https://hub.callysto.ca/jupyter/hub/user-redirect/git-pull?repo=https%3A%2F%2Fgithub.com%2Fcallysto%2Fcurriculum-notebooks&branch=master&subPath=SocialStudies/BubonicPlague/bubonic-plague-and-SIR-model.ipynb&depth=1" target="_parent"><img src="https://raw.githubusercontent.com/callysto/curriculum-notebooks/master/open-in-callysto-button.svg?sanitize=true" width="123" height="24" alt="Open in Callysto"/></a>
# # Bubonic Plague - SIR Model
#
# ### Grade 11 Social Studies
#
# We are interested in modelling a bubonic plague outbreak. We start from the assumption that the total population can be subdivided into a set of classes, each of which depends on the state of the infection. The [**SIR Model**](https://en.wikipedia.org/wiki/Compartmental_models_in_epidemiology) is the simplest one and, as its name suggests, it divides the population into three classes.
# **Outcomes:**
# * Examine and visualize concepts and examples related to the bubonic plague.
# * Examine the timeline/map of the Black Death.
# * Visualize mathematical model that shows the recovery, infection, and removal rates.
# ## The SIR Outbreak Model
# ### Population Parameters
#
# In this model, the total population is divided into three groups:
#
# * Susceptible: individuals that can become infected but haven't been yet
# * Infected: individuals that are infected
# * Removed: individuals that are either dead or recovered
#
# We are looking at the changes, over the course of an outbreak, of the numbers of these individuals, represented by $S$, $I$, and $R$. In other words we want to understand how, as time passes, the number of individuals in each group changes.
#
# Having a realistic model might be useful for predicting the long-term outcome of an outbreak and informing public health interventions.
#
# If we can predict that the number of removed people will stay low and the number of infected people will quickly go down to zero, then there is no need to intervene and we can let the outbreak end by itself while only providing medical attention to the infected people.
#
# Conversely if we predict a large increase of the numbers of infected and removed individuals, then the outbreak needs a quick intervention before it results in a large number of casualties. In a plague outbreak this intervention would, for example, be to make sure there is no contact between infected and susceptible people.
#
# We now describe the SIR (Susceptible, Infected, Removed) mathematical model of an outbreak over time (for example every week). We write $S_t, I_t, R_t$ to denote the number of susceptible, infected, and removed individuals at time point $t$. $t=1$ is the first recorded time point, $t=2$ is the second and so on. We call *time unit* the time elapsed between two time points, for example a day or a week.
#
# In this model, we assume that the **total population is constant** (so births and deaths are ignored) for the duration of the model simulation. We represent the total population size by $N$, and so at any time point $t$ we have $$N=S_t + I_t + R_t$$
# ### Modelling the disease progression
#
# We assume that transmission requires contact between an infected individual and a susceptible individual. We also assume that the disease takes a constant amount of time to progress within an infected individual until they are removed (die or recover). We need to define these two processes (infection and removal) and model how they impact the transition from the time $t = (S_t,I_t,R_t)$ to the next state $t + 1 = (S_{t+1},I_{t+1},R_{t+1})$.
#
# 
#
# The occurrence of new infections is modelled using a parameter $\beta$ that gives the proportion of contacts between susceptible people and infected people, during one time unit, that result in infection. Then we can describe the number of newly infected people as $\dfrac{\beta S_t I_t}{N}$, where the term $S_t I_t$ represents the set of all possible contacts between susceptible and infected individuals. We discuss this term later.
#
# The occurrence of removals of infected people is modelled using a parameter denoted by $\gamma$. It is defined to be the proportion of infected individuals that die or recover between two time points. If we are given that the duration of an infection is $T$ (i.e. how many time points elapse for an individual between infection and removal), then $\gamma = \dfrac{1}{T}$.
#
# 
#
# Taking into account the rate of contact $\beta$ and rate of removal $\gamma$, then each group population changes within one unit of time as follows
#
# $$
# \begin{align}
# S_{t+1} &= S_t - \dfrac{{\beta} S_t I_t}{N}\\
# I_{t+1} &= I_t + \dfrac{{\beta} S_t I_t}{N} - \gamma I_t \\
# R_{t+1} &= R_t + \gamma I_t\\
# N&=S_t + I_t + R_t
# \end{align}
# $$
#
# These equations form the SIR model. They allow, from knowing the parameters of the model ($\beta$ and $\gamma$) and the current state ($S_t,I_t,R_t$) of a population to predict the next states of the population for later time points. Such models are critical in our days for monitoring and controlling infectious diseases outbreaks.
# ##### Technical remarks.
# First, note that the SIR model does not enforce that the values $S_t,I_t,R_t$ at a given time point are integers. As $\beta$ and $\gamma$ are actually floating numbers, these values are actually most of the time not integers. This is fine as the SIR model is an approximate model and aims mostly at predicting the general dynamics of an outbreak, not the precise values for the number of susceptible, infected, and removed individuals.
#
# Next, one can ask how to find the values of the parameters $\beta$ and $\gamma$ that are necessary to have a full SIR model.
#
# As discussed above, the parameter $\gamma$ is relatively easy to find from knowing how the disease progresses in a patient, as it is mostly the inverse of the average time a patient is sick.
#
# The parameter $\beta$ is less easy to obtain. Reading the equations we can see that during a time point, out of the $S_t$ susceptible individuals, the number that get infected is $(\dfrac{{\beta}}{N}) S_t I_t$. As mentioned above, the product $S_t I_t$ can be interpreted as the set of all possible contacts between the $S_t$ susceptible individuals and the $I_t$ infected individuals and is often a large number, much larger than $S_t$ and in the order of $N^2$. The division by $N$ aims to lower this number, mostly to normalize it by the total population, to make sure it is in order of $N$ and not quadratic in $N$. So in order for the number of newly infected individuals during a time unit to be reasonable, $\beta$ is generally a small number between $0$ and $1$. But formally, if we pick a value for $\beta$ that is too large, then the SIR model will predict value for $S_t$ that can be negative, which is inconsistent with the modelled phenomenon. So choosing the value of $\beta$ is the crucial step in modelling an outbreak.
# +
# This function takes as input a vector y holding all initial values,
# t: the number of time points (e.g. days)
# beta: proportion of contacts that result in infections
# gamma: proportion of infected that are removed
# S1,I1,R1 = initial population sizes
def discrete_SIR(S1,I1,R1,t,beta,gamma):
    """Simulate a discrete-time SIR epidemic.

    Parameters
    ----------
    S1, I1, R1 : initial sizes of the susceptible, infected and removed classes
    t          : number of time points (e.g. days) to simulate, including day 1
    beta       : infection rate (proportion of contacts resulting in infection)
    gamma      : removal rate (proportion of infected removed per time unit)

    Returns
    -------
    (S, I, R) : three lists of length t with the class sizes at each time point.
    Values are floats, not integers: the SIR model approximates the outbreak
    dynamics rather than exact head counts.
    """
    N = S1 + I1 + R1  # total population, constant throughout the simulation
    S, I, R = [S1], [I1], [R1]
    for _ in range(t - 1):
        s, i, r = S[-1], I[-1], R[-1]
        # (beta/N)*S*I: new infections; gamma*I: newly removed individuals
        new_infections = (beta / N) * (s * i)
        removals = gamma * i
        S.append(s - new_infections)
        I.append(i + new_infections - removals)
        R.append(r + removals)
    return (S, I, R)
# -
# ## Modelling an outbreak related to the Great Plague of London
#
# The last major epidemic of the bubonic plague in England occurred between 1665 and 1666 ([click here for further reading](https://www.britannica.com/event/Great-Plague-of-London)). This epidemic did not kill as many people as the Black Death (1347 - 1351), however it is remembered as the "Great Plague of London" as it was the last widespread outbreak that affected England.
#
# "City records indicate that some 68,596 people died during the epidemic, though the actual number of deaths is suspected to have exceeded 100,000 out of a total population estimated at 460,000. " [Great Plague of London"; Encyclopædia Britannica; Encyclopædia Britannica, inc.; September 08, 2016](https://www.britannica.com/event/Great-Plague-of-London)
#
# When the bubonic plague outbreak hit London, people started to leave the city and go to the countryside, hoping to avoid the disease. But as can be expected, some of these people were already infected when they left London, and so carried the disease to start other outbreaks in some nearby villages. This happened in the village of Eyam.
#
# When Eyam authorities realized a plague outbreak had started, they took the difficult decision to close the village in order to avoid to spread the disease further. So nobody was allowed to enter or leave the village and people stayed there hoping the outbreak would end by itself without too many casualties; note that from a mathematical point of view, that implies that the assumption that the sum of the numbers of susceptible, infected and removed individuals, the population, is constant.
#
# Also the village authorities recorded regularly the number of infected and dead people; these data are described in the table below, for the period from June 19 1665 to October 19 1665, with data being recorded every 2 weeks. Obviously these data are imperfect (some people did not declare they were sick by fear of being ostracized, some people died too fast for the plague to be diagnosed, etc.), but nevertheless, they provide us with interesting data to see if the SIR model is an appropriate model for such a plague outbreak.
#
#
# | Date |Day Number |Susceptible | Infected |
# |-------|-----------|------------|----------|
# |June 19 1665|0|254|7|
# |July 3 1665|14|235|14|
# |July 19 1665|28|201|22|
# |Aug 3 1665|42|153|29|
# |Aug 19 1665|56|121| 21|
# |Sept 3 1665|70|108|8|
# |Sept 19 1665|84|121|21|
# |Oct 3 1665| 98|NA | NA|
# |Oct 19 1665|112| 83 | 0|
#
# The average time an infected individual remains infected by the bubonic plague is 11 days.
#
# With the information above, we will be able to get the parameters of the SIR model for this outbreak and observe if indeed what this model predicts generates results corresponding to what happened in reality.
# ### Question 1:
#
# Assuming that on June 19 no individuals had died, i.e. no one was in the Removed class, what is the value of $N$, i.e. the number of individuals in the total population?
# ### Question 2:
#
# We know that the average time an individual remained infected is 11 days. What is the rate of removal ($\gamma$)?
# ### Question 3:
#
# We are now trying something more difficult but more interesting. We introduced a mathematical model for outbreaks, but nothing so far shows that this SIR model is appropriate to model an outbreak such as the Eyam plague outbreak. We want to answer this question now.
#
# From questions 1 and 2 above we know the values of $N$ and $\gamma$ (check your answers at the bottom of this notebook). From the data table we also know $S_1,I_1,R_1$, the state of the population at the start of the outbreak. So if we want to apply the SIR model we need to find a value for the parameter $\beta$, which controls the number of susceptible people becoming infected during a time unit. We consider here that a time unit is 1 day; the Eyam outbreak spanned 112 days, so 112 time units, even if data were only recorded every 2 weeks.
#
# A standard scientific approach for the problem of finding $\beta$ is to try various values and see if there is one that leads to predicted values for $S_n,I_n,R_n$ that match the observed data. In order to evaluate this match, we focus on the number of infected people, the most important element of an outbreak.
#
# The code below allows you to do this: you can choose a value of $\beta$, click on the "Run interact" button and it will show on the same graph a set of 8 blue dots (the observed number of infected people from the data table) and a set of 112 red dots, corresponding to the predicted number of infected individuals for the chosen value of $\beta$.
#
# While there are several mathematical ways to define what would be the *best fit*, here we are not getting into this and you are just asked to try to find a value of $\beta$ that generated blue dots being approximately on the graph defined by the red dots. Pay particular attention to the first four blue dots.
#
# Note that in this case $0 < \beta < 1$.
#
# ##### Warning:
# The SIR model is a very simple approximation of the dynamics of a true outbreak, so don't expect to find a value of $\beta$ that generates a graph that contains exactly all observed data points (blue dots).
#
# In particular note that the data from September 3 and 19 seem to be somewhat of an anomaly as we observe a sharp decrease in the number of infected followed by a surge. This could be due to many reasons, for example poor statistics recording (we are considering a group of people under heavy stress likely more motivated by trying to stay alive than to record accurate vital statistics).
#
# So here we are interested in finding a parameter $\beta$ that captures the general dynamics (increase followed by a post-peak decrease) of the outbreak. You can expect to find a reasonable value for $\beta$ but be aware that many values, especially too high, will result in a very poor match between observed data and model predictions.
# +
from ipywidgets import interact_manual, interact,widgets
import matplotlib.pyplot as plt
# set style
# Widget style so that long descriptions are not truncated
s = {'description_width': 'initial'}
# Interactive control: the user types a candidate beta and the plot refreshes
@interact(answer=widgets.FloatText(value=0.50, description='Enter beta ',
                                   disabled=False, style=s, step=0.01
                                  ))
# define function to find the appropriate value of beta
# this function takes as input a floating value and outputs a plot with the best fit curve
def find_beta(answer):
    """Compare the observed Eyam infection counts against SIR predictions for a trial beta."""
    # Initial compartment sizes for the Eyam outbreak (June 19 1665)
    initial_S, initial_I, initial_R = 254, 7, 0
    # Observed number of infected from the table above (recorded biweekly)
    observed_infected = [7, 14, 22, 29, 21, 8, 21, 0]
    observed_days = [1, 14, 28, 42, 56, 70, 84, 112]
    # Total number of simulated days: last recorded day minus first, inclusive
    total_days = observed_days[-1] - observed_days[0] + 1
    # Candidate infection rate chosen by the user (coerced to float)
    trial_beta = float(answer)
    # Removal rate: patients stay infected ~11 days on average
    removal_rate = 1 / 11
    # Run the discrete SIR simulation with the chosen parameters
    S, I, R = discrete_SIR(initial_S, initial_I, initial_R,
                           total_days, trial_beta, removal_rate)
    fig = plt.figure(facecolor='w', figsize=(17, 5))
    ax = fig.add_subplot(111, facecolor='#ffffff')
    # Blue dots: observed infections; red dots: model predictions (one per day)
    plt.scatter(observed_days, observed_infected, c="blue", label="Original Data")
    plt.scatter(range(total_days), I, c="red", label="SIR Model Predictions")
    plt.xlabel('Time (days)')
    plt.ylabel('Infected Individuals')
    plt.title('Real Data vs Model')
    plt.show()
# -
# ## Simulating a Disease Outbreak
#
# To conclude we will use the widgets below to simulate a disease outbreak using the SIR model.
# You can choose the values of all the elements of the model (sizes of the compartments of the population at the beginning of the outbreak, parameters $\gamma$ and $\beta$, and duration in time units (days) of the outbreak. The default parameters are the ones from the Eyam plague outbreak.
#
# The result is a series of three graphs that shows how the three components of the population change during the outbreak. It allows you to see the impact of changes in the parameters $\gamma$ and $\beta$, such as increasing $\beta$ (making the outbreak progress faster) or reducing $\gamma$ (decreasing the removal rate).
#
# You can use this interactive tool to try to fit the SIR model to match the observed data.
# +
import matplotlib.pyplot as plt
import numpy as np
from math import ceil
# This function takes as input initial values of susceptible, infected and removed, number of days, beta and gamma
# it plots the SIR model with the above conditions
def plot_SIR(S1,I1,R1,n,beta,gamma):
    """Simulate an outbreak with the discrete SIR model and plot all three classes.

    S1, I1, R1 : initial susceptible / infected / removed population sizes
    n          : number of days to simulate
    beta, gamma: infection and removal rates of the SIR model
    """
    fig = plt.figure(facecolor='w', figsize=(17, 5))
    ax = fig.add_subplot(111, facecolor='#ffffff')
    # Run the simulation for the requested parameters
    susceptible, infected, removed = discrete_SIR(S1, I1, R1, n, beta, gamma)
    days = [day for day in range(n)]
    # One scatter series per compartment, one point per simulated day
    plt.scatter(days, susceptible, c='b', label='Susceptible')
    plt.scatter(days, infected, c='r', label='Infected')
    plt.scatter(days, removed, c='g', label='Removed')
    plt.xlabel('Time (days)')
    plt.ylabel('Number of individuals')
    plt.title('Simulation of a Disease Outbreak Using the SIR Model')
    legend = ax.legend()
    plt.show()
    # Summarize the simulation so the plot is easy to interpret
    # (ceil rounds the fractional SIR values up to whole individuals)
    print("SIMULATION DATA\n")
    print(f"Beta: {beta}")
    print(f"Gamma: {gamma}")
    print("\n")
    print("Initial Conditions:")
    print(f"Total number of Susceptible: {ceil(susceptible[0])}")
    print(f"Total number of Infected: {ceil(infected[0])}")
    print(f"Total number of Removed: {ceil(removed[0])}")
    print("\n")
    print(f"After {n} days:")
    print(f"Total number of Susceptible: {ceil(susceptible[n-1])}")
    print(f"Total number of Infected: {ceil(infected[n-1])}")
    print(f"Total number of Removed: {ceil(removed[n-1])}")
# Tweaking initial Values
from ipywidgets import widgets, interact, interact_manual
# Let the user set all parameters of the SIR model and run the simulation.
# Defaults reproduce the Eyam plague outbreak described above:
# S1=254, I1=7, R1=0, 112 days, beta ~ 0.15, gamma = 1/11 ~ 0.09.
# (Previously beta and gamma defaulted to 1.50, contradicting the text and
# producing a nonsensical removal rate greater than 1.)
s = {'description_width': 'initial'}
interact(plot_SIR,
         S1 = widgets.IntSlider(value=254, min=200, max=1000, step=1, style=s, description="Susceptible Initial",
                                disabled=False, orientation='horizontal', readout=True),
         I1 = widgets.IntSlider(value=7, min=0, max=500, step=1, style=s, description="Infected Initial",
                                disabled=False, orientation='horizontal', readout=True),
         R1 = widgets.IntSlider(value=0, min=0, max=500, step=1, style=s, description="Removed Initial",
                                disabled=False, orientation='horizontal', readout=True),
         n = widgets.IntSlider(value=112, min=0, max=500, step=1, style=s, description="Time (days)",
                               disabled=False, orientation='horizontal', readout=True),
         beta = widgets.FloatText(value=0.15, description=r'$ \beta$ parameter',
                                  disabled=False, style = s, step=0.01),
         gamma = widgets.FloatText(value=0.09, description= r'$ \gamma$ parameter',
                                   disabled=False, style=s, step=0.01)
        );
# -
# ### Answer 1
# Since we are assuming the population is constant, and since $S_1 = 254, I_1 = 7, R_1 = 0$, then $S_1 + I_1 + R_1 = 254 + 7 + 0 = 261$.
#
# ### Answer 2
# We know that, on average, an individual will remain infected for approximately 11 days. This means that one individual moves to the removed class for every 11 days, and the rate of removal is $\gamma = \frac{1}{11} = 0.0909...$.
#
# ### Answer 3
# The best value is approximately $\beta = 0.14909440503418078$.
# <h2 align='center'>Conclusion</h2>
#
# In this notebook we learned about the SIR discrete model to model an outbreak. We learned that this model is one of the simplest ones and that it separates the total population $N$ (a constant) into three categories: Infected, Susceptible and Removed. We learned about rates of infection and removal and how this affects the number of individuals in each class.
#
# We also ran a basic but realistic simulation of a bubonic plague outbreak of the Great Plague of London that took place in the village Eyam in 1665 and learned about the devastating effect this had on the population.
# [](https://github.com/callysto/curriculum-notebooks/blob/master/LICENSE.md)
| _build/jupyter_execute/curriculum-notebooks/SocialStudies/BubonicPlague/bubonic-plague-and-SIR-model.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# language: python
# name: python3
# ---
# # Plotting Using Matplotlib
# Often text is the best way to communicate information, but sometimes there is a
# lot of truth to the Chinese proverb,
#
# **图片的意义可以表达近万字**
#
# >A picture's meaning can express ten thousand words
#
# **Matplotlib**
#
# http://matplotlib.org/
#
# Matplotlib is a Python 2D plotting library which produces publication quality figures in a variety of hardcopy formats and interactive environments across platforms. Matplotlib can be used in Python scripts, the Python and IPython shells, the Jupyter notebook, web application servers, and four graphical user interface toolkits.
#
# Matplotlib Developers on Github: https://github.com/matplotlib
#
# User's Guide: http://matplotlib.org/users/index.html
#
# ## 1 Matplotlib.pyplot
#
#
# [Matplotlib.pyplot](https://matplotlib.org/2.0.2/api/pyplot_api.html) provides a `MATLAB`-like plotting framework.
#
# ### 1.1 The Simple Example
#
# Let’s start with a simple example that uses `pyplot.plot` to produce the plot.
# +
# %%file ./code/python/plt111.py
import matplotlib.pyplot as plt
plt.figure() # create a new figure (numbered 1, since none exists yet)
plt.plot([1,2,3,4], [1,7,3,5]) # draw the line through the <x,y> points given as two lists
plt.show() # display the figure on screen
# -
# ```
# >python plt111.py
# ```
# 
#
# +
import matplotlib.pyplot as plt
plt.figure() # create a new figure
x=[1,2,3,4]
y=[1,7,3,5]
plt.plot(x,y) # plot x and y using the default line style and color
plt.show() # display the figure on screen
# -
# ### 1.2 The Basic Method of PyPlot
#
# * pyplot.figure()
#
# * pyplot.plot(x,y)
#
# * pyplot.show()
#
# #### 1.2.1 [pyplot.figure ](https://matplotlib.org/api/_as_gen/matplotlib.pyplot.figure.html#matplotlib.pyplot.figure)
#
# Create a new figure.
#
# ```python
# matplotlib.pyplot.figure(num=None)
# ```
#
# **num** : integer or string, optional, default: ```None```
#
# The example,the num is not provided, a new figure will be created,
#
# #### 1.2.2 [pyplot.plot](https://matplotlib.org/api/_as_gen/matplotlib.pyplot.plot.html#matplotlib.pyplot.plot)
#
# Plot <x,y>(y versus x) as lines and/or markers
# ```python
# matplotlib.pyplot.plot(x, y)
# ```
# 
#
#
# #### 1.2.3 [pyplot.show](https://matplotlib.org/devdocs/api/_as_gen/matplotlib.pyplot.show.html)
#
# **Display a figure**.
#
#
plt.figure(1) # create (or activate) the figure numbered 1
x=[1,2,3,4]
y=[1,7,3,5]
plt.plot(x,y) # plot x and y using the default line style and color (solid blue line)
plt.show() # display the figure on screen
# ### 1.3 Multiple figures & write them to files
#
# #### 1.3.1 Multiple figures
#
# Create a new figure.
#
# ```python
# matplotlib.pyplot.figure(num=None)
# ```
#
# **num** : integer or string, optional, default: ```None```
#
# * If not provided, a new figure will be created, and the figure number will be incremented. The figure objects holds this number in a number attribute.
#
# * If num is provided,
# * If this figure does not exists, create it and returns it.
# * If a figure with this id already exists, make it active, and returns a reference to it.
# * If num is a string, the window title will be set to this figure's num
#
# It is possible to produce **multiple figures**
#
# The next example produces two figures: **1, 2**
# +
import matplotlib.pyplot as plt
# create figure 1
plt.figure(1)
plt.plot([1,2,3,4], [1,2,3,4]) # plot on figure 1
# create figure 2
plt.figure(2)
plt.plot([1,4,2,3], [5,6,7,8]) # plot on figure 2
# figure 1's id already exists, so this makes figure 1 the active figure
# and returns a reference to it
# Go back to figure 1 and plot on it again
plt.figure(1)
# Plot again on figure 1: with only y given, x defaults to the indices 0..N-1
plt.plot([5,6,10,3]) # plot(y) on figure 1
plt.show()
# -
# 1. create figure 1: ```plt.figure(1)```
#
# 2. create figure 2: ```plt.figure(2)```
#
# 3. Go back and plotting on figure 1 ```plt.figure(1)```
#
# ```python
# plot(y)
# ```
# ##### pyplot.plot(y)
#
# plot $y$ using $x$ as index array $0..N-1$,using default line style and color
#
# * `pyplot.plot([5,6,10,3]) # plot again on figure 1`
#
# The corresponding $x$ values default to `range(len([5, 6, 10, 3]))`( 0 to 3 in this case plot $y$ using $x$ as index array$ 0..N-1$
#
# **Figure 1**
#
# Two lines:
# ```python
# plt.plot([1,2,3,4], [1,2,3,4])
#
# # Go back and plotting on figure 1
# plt.plot([5,6,10,3])
# ```
# 
#
# **Figure 2**
#
# One line:
#
# ```python
# plt.plot([1,4,2,3], [5,6,7,8])
# ```
#
# 
# #### 1.3.2 Write figure to files
#
# ```python
# plt.savefig(figurefilename)
# ```
#
# These files can have any name you like.
#
# They will all have the file extension` .png` in the **default**.
#
# * `.png` indicates that the file is in the `Portable Networks Graphics` format. This is a public domain standard for representing images
#
# You can set the figure file format,for example,
#
# **To save the plot as an SVG**
#
# [Scalable Vector Graphics (SVG)](https://en.wikipedia.org/wiki/Scalable_Vector_Graphics) is an XML-based vector image format for two-dimensional graphics with support for interactivity and animation. The SVG specification is an open standard developed by the World Wide Web Consortium (W3C) since 1999.
#
# All major modern web browsers—including Mozilla Firefox, Internet Explorer, Google Chrome, Opera, Safari, and Microsoft Edge—have SVG rendering support.
# +
import matplotlib.pyplot as plt
plt.figure(1) # create figure 1
plt.plot([1,2,3,4], [1,2,3,4]) # plot on figure 1
plt.figure(2) # create figure 2
plt.plot([1,4,2,3], [5,6,7,8]) # plot on figure 2
# save figure 2 without an extension: matplotlib falls back to the default format (.png)
plt.savefig('./img/Figure2')
# make figure 1 the active figure again
plt.figure(1)
# plot again on figure 1
plt.plot([5,6,10,3]) # plot y using x as index array 0..N-1, default line style and color
# save figure 1 as an SVG (format inferred from the .svg extension)
plt.savefig('./img/Figure11.svg')
# -
# !dir .\img\Figure*
# ### 1.4 title,xlabel,ylabel
#
# Let’s look at the example:
#
# * the growth of an initial investment of $10,000 at an annually 5%
#
#
# +
import matplotlib.pyplot as plt
principal = 10000 # initial investment
interestRate = 0.05 # 5% annual interest
years = 20
values = [] # value of the investment at the end of each year
for i in range(years + 1):
    values.append(principal)
    principal += principal*interestRate # compound once per year
plt.plot(values) # plot y using x as index array 0..N-1 (years), default line style and color
plt.show()
# -
# If we **look at the code**,
#
# **The growth of an initial investment of $10,000 at an annually**
#
# * cannot be easily inferred by looking **only at the plot `itself`**. That’s a bad thing.
#
# **All plots should have**
#
# * `informative` **titles**
#
# * all **axes** should be `labeled`.
#
# If we add to the end of our the code the lines
# ```
# plt.title('5% Growth, Compounded Annually')
# plt.xlabel('Years of Compounding')
# plt.ylabel('Value of Principal ($)')
# ```
# +
principal = 10000 # initial investment
interestRate = 0.05 # 5% annual interest
years = 20
values = [] # value of the investment at the end of each year
for i in range(years + 1):
    values.append(principal)
    principal += principal*interestRate # compound once per year
plt.plot(values) # plot y using x as index array 0..N-1 (years), default line style and color
# add an informative title and axis labels so the plot is self-explanatory
plt.title('5% Growth, Compounded Annually')
plt.xlabel('Years of Compounding')
plt.ylabel('Value of Principal ($)')
plt.show()
# -
# ### 1.5 Formating plotted curve
#
# #### 1.5.1 Line and marker
#
# ##### 1.5.1.1 The color, type, marker symbols
# For every plotted curve, there is an optional argument that is **a format string** indicating
#
# **the `color`,line `type` and marker `symbols` of the plot**
#
# 1. The first character is <b style="color:red">color</b>: example: <b style="color:blue"> b </b> blue
#
# 2. The second characters are <b style="color:red">line type</b>: example: <b style="color:blue"> - </b> solid line
#
# 3. The third characters are <b style="color:red">marker symbols</b>: example:<b style="color:blue"> + </b> symbol
#
# The **default format** string is <b style="color:blue">'b-'</b>, which produces a <b style="color:blue">blue solid line</b>(蓝色实线).
#
# ```python
# pyplot.plot(values)
# ```
#
# If you want to plot the above with <b style="color:green">green</b>, **dashed** line with **circle** marker symbol(绿色虚线圆点). one would replace the call by
#
# ```python
# pyplot.plot(values, 'g--o')
# ```
#
# 
#
#
# ##### 1.5.1.2 line: width
#
# To change the line width, we can use the `linewidth` or `lw` keyword argument.
#
# ```python
# plt.plot(values, linewidth =2)
# ```
# +
principal = 10000 # initial investment
interestRate = 0.05 # 5% annual interest
years = 20
values = [] # value of the investment at the end of each year
for i in range(years + 1):
    values.append(principal)
    principal += principal*interestRate # compound once per year
# format string 'g--o': green dashed line with circle markers; linewidth = 2
plt.plot(values,'g--o',linewidth = 2)
# title and axis labels
plt.title('5% Growth, Compounded Annually')
plt.xlabel('Years of Compounding')
plt.ylabel('Value of Principal ($)')
plt.show()
# -
# #### 1.5.2 type size
#
# It’s also possible to change the type `size` used in plots.
#
# For example,set `fontsize`
# ```python
# plt.xlabel('Years of Compounding', fontsize = 'x-small')
# ```
#
# +
principal = 10000 # initial investment
interestRate = 0.05 # 5% annual interest
years = 20
values = [] # value of the investment at the end of each year
for i in range(years + 1):
    values.append(principal)
    principal += principal*interestRate # compound once per year
# 'b--': blue dashed line; lw is shorthand for linewidth
plt.plot(values,'b--', lw = 3)
# larger font for the title
plt.title('5% Growth, Compounded Annually', fontsize = 'x-large')
# smaller font for the x-axis label
plt.xlabel('Years of Compounding', fontsize = 'x-small')
plt.ylabel('Value of Principal ($)')
plt.show()
# -
# ## Further Reading
#
# **Matplotlib Tutorials:** https://matplotlib.org/tutorials/index.html
#
# **Pyplot tutorial** https://matplotlib.org/tutorials/introductory/pyplot.html#sphx-glr-tutorials-introductory-pyplot-py
#
| notebook/Unit2-1-Matplotlib.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# language: python
# name: python3
# ---
# # IA006 - Exercícios de Fixação de Conceitos
#
# $~$
#
# ## EFC1 - 2s2019
#
# $~$
#
# ### Parte 1 - Atividades teóricas
#
# $~$
# #### **Exercício 1**
#
# $~$
#
# **Distribuição:**
#
# | X/Y | Y=0 | Y=1 | Marg. X |
# |:-------:|:----:|:-----:|:-------:|
# | X=0 | 1/6 | 3/8 | 13/24 |
# | X=1 | 1/8 | 1/3 | 11/24 |
# | Marg. Y | 7/24 | 17/24 | 1 |
#
# $~$
# $~$
#
# **a)** $P(X)$ e $P(Y)$
#
# $~$
# $~$
#
# **Resposta:**
#
# - $P(X=x) = \{\frac{13}{24}, \frac{11}{24}\}$
#
# - $P(Y=y) = \{\frac{7}{24}, \frac{17}{24}\}$
#
# $~$
# $~$
#
# **b)** $P(X=0|Y=0)$
#
# $\frac{P({X=0}, {Y=0})}{P(Y=0)} = \frac{1}{6}\times\frac{24}{7} = \frac{24}{42} = \frac{4}{7}$
#
# $\frac{P({X=1}, {Y=0})}{P(Y=0)} = \frac{1}{8}\times\frac{24}{7} = \frac{24}{56} = \frac{3}{7}$
#
# $~$
# $~$
#
# **Resposta:**
#
# - $P(X=0|Y=0) = \frac{4}{7}$
#
# $~$
# $~$
#
# **c)** $E[X]$ e $E[Y]$
#
# $E[X] = \sum_kx_kP(x_k)$
#
# $E[X] = 0 \times \frac{13}{24} + 1 \times \frac{11}{24}$
#
# $E[Y] = 0 \times \frac{7}{24} + 1 \times \frac{17}{24}$
#
# $~$
# $~$
#
# **Resposta:**
#
# - $E[X] = \frac{11}{24}$
#
# - $E[Y] = \frac{17}{24}$
#
# $~$
# $~$
#
# **d)** São independentes? Por quê?
#
# Resposta:
#
# X e Y NÃO são independentes, pois a distribuição conjunta não é igual ao produto das distribuições marginais, isto é, não satisfazem a condição de independência:
#
# $P(X,Y) = P(X)P(Y)$
#
# Verificamos:
#
# $P(X=x,Y=0) = P(X=x)P(Y=0)$
#
# Por fim temos:
#
# $P(X=0,Y=0) = P(X=0)P(Y=0) => \frac{1}{6} = \frac{13}{24}\times\frac{7}{24} => \frac{1}{6} \neq \frac{91}{576}$
#
# $P(X=1,Y=0) = P(X=1)P(Y=0) => \frac{1}{8} = \frac{11}{24}\times\frac{7}{24} => \frac{1}{8} \neq \frac{77}{576}$
#
# ---
# #### **Exercício 2**
#
# $~$
#
# **Distribuição:**
#
# | X/Y | Y=0 | Y=1 | Marg. X |
# |:-------:|:---:|:---:|:-------:|
# | X=0 | 0 | 1/4 | 1/4 |
# | X=1 | 3/8 | 3/8 | 3/4 |
# | Marg. Y | 3/8 | 5/8 | 1 |
#
# $~$
# $~$
#
# **a)** $H(X), H(Y), H(X,Y)$
#
# Sendo: $H(X) = -\sum_x p(x)log_2[p(x)]$
#
# $H(X) = H(\frac{1}{4}, \frac{3}{4})$
#
# $H(X) = -((\frac{1}{4} \times log_2[\frac{1}{4}]) + (\frac{3}{4} \times log_2[\frac{3}{4}]))$
#
# $H(X) = -((\frac{1}{4} \times -2) + (\frac{3}{4} (log_2[3]-2)))$
#
# $H(X) = -((-\frac{1}{2}) + (\frac{3}{4} (log_2[3]-2)))$
#
# $H(X) = 0.8112$
#
# $~$
#
# $H(Y) = H(\frac{3}{8}, \frac{5}{8})$
#
# $H(Y) = -((\frac{3}{8} \times log_2[\frac{3}{8}]) + (\frac{5}{8} \times log_2[\frac{5}{8}]))$
#
# $H(Y) = -((\frac{3}{8} (log_2(3)-3)) + (\frac{5}{8} (log_2(5)-3)))$
#
# $H(Y) = 0.9544$
#
# $~$
# $~$
#
# Calculando $H(X, Y)$
#
# Sendo: $H(X, Y) = -\sum_x\sum_y p(x, y) log_2[p(x, y)]$
#
# $H(X, Y) = -((\frac{1}{4} log_2(\frac{1}{4}))+(\frac{3}{8} log_2(\frac{3}{8}))+(\frac{3}{8} log_2(\frac{3}{8})))$
#
# $H(X, Y) = 1.5612$
#
# $~$
#
# **Resposta:**
#
# $H(X) = 0.8112$
#
# $H(Y) = 0.9544$
#
# $H(X, Y) = 1.5612$
#
# $~$
# $~$
#
# **b)** $H(X|Y)$ e $H(Y|X)$
#
# $H(Y|X) = -\sum_x\sum_y p(x, y) log_2[p(y|x)]$
#
# $H(Y|X) = H(X, Y) - H(X)$
#
# $H(X|Y) = H(X, Y) - H(Y)$
#
# $~$
#
# $P(Y=1 | X=0) = \frac{P(X=0, Y=1)}{P(X)} => \frac{1}{4}\times\frac{4}{1} = 1$
#
# $P(Y=0 | X=1) = \frac{P(X=1, Y=0)}{P(X)} => \frac{3}{8}\times\frac{4}{3} = \frac{1}{2}$
#
# $P(Y=1 | X=1) = \frac{P(X=1, Y=1)}{P(X)} = \frac{1}{2}$
#
# $~$
# $~$
#
# $H(Y|X) = -( (\frac{1}{4} log_2(1)) + (\frac{3}{8} log_2(\frac{1}{2})) + (\frac{3}{8} log_2(\frac{1}{2})) )$
#
# $H(Y|X) = -( (\frac{3}{8} \times -1) + (\frac{3}{8} \times -1) )$
#
# $H(Y|X) = -( (-\frac{3}{8}) + (-\frac{3}{8}) )$
#
# $H(Y|X) = 0.75$
#
# $~$
# $~$
#
# $H(X|Y) = 1.5612 - 0.9544$
#
# $H(X|Y) = 0.6068$
#
# $~$
#
# **Resposta:**
#
# $H(Y|X) = 0.75$
#
# $H(X|Y) = 0.6068$
#
#
# $~$
# $~$
#
# **c)** $I(X, Y)$
#
# Dado que,
#
# $I(X, Y) = H(X) - H(X|Y)$
#
# temos portanto,
#
# $I(X, Y) = 0.8112 - 0.6068$
#
# $I(X, Y) = 0.2044$
#
# $~$
#
# **Resposta:**
#
# $I(X, Y) = 0.2044$
#
# ---
# #### **Exercício 3**
import csv
import time
from datetime import datetime
import numpy as np
from scipy.stats import norm
import matplotlib.pyplot as plt
import pandas as pd
# **a)**
#
# $C_1 =>\mu = -1, \sigma^2 = 1$
#
# $C_2 =>\mu = 1, \sigma^2 = 1$
# +
def pdf(x, m, s):
    """Normal probability density at ``x`` for mean ``m`` and variance ``s``.

    Bug fix: the exponent was written as ``np.power(x - m, 2) / 2*s`` which,
    by operator precedence, computes ``(x-m)**2 / 2 * s`` (i.e. multiplied
    by s) instead of ``(x-m)**2 / (2*s)``. The two coincide only when
    ``s == 1`` — the case used elsewhere in this notebook — so existing
    results are unchanged, but the formula is now correct for any variance.
    """
    return (1 / (np.sqrt(2 * np.pi * s))) * np.exp(-(np.power(x - m, 2) / (2 * s)))
def intersection(m1, s1, m2, s2, p1=1, p2=1):
    # Decision boundary between two Gaussian classes with priors p1 and p2.
    # NOTE(review): this evaluates both densities at x=0 and halves the
    # log-ratio, which yields the true boundary only when the class means
    # are equidistant from 0 and the variances are equal — exactly the
    # setting used in this notebook (m1=-1, m2=1, s1=s2=1). Confirm these
    # assumptions before reusing this helper elsewhere.
    pdf1 = pdf(0, m1, s1)
    pdf2 = pdf(0, m2, s2)
    return (np.log(pdf1 / pdf2) + np.log(p1/p2)) / 2
# Class parameters: means and variances (equal priors in this first scenario)
m1, m2, s1, s2 = -1, 1, 1, 1
N = 500
space = np.linspace(-4, 5, N)
# Class-conditional densities p(x|C1) and p(x|C2) evaluated on the grid
px1 = norm(m1, np.sqrt(s1)).pdf(space)
px2 = norm(m2, np.sqrt(s2)).pdf(space)
fig = plt.figure(1, figsize=(17, 5))
plt.subplot(121)
plt.title('Gráfico da Distribuição de Probabilidades de cada Classe')
plt.xlabel('x')
plt.ylabel('y(x)')
plt.xticks(np.arange(-6, 7))
plt.yticks(np.round(np.linspace(0, 0.45, 9), 2))
plt.plot(space, px1, color='C0', markersize=3, label='$C_1 : \mu$=-1, $\sigma^2$=1')
plt.plot(space, px2, color='C1', markersize=3, label='$C_2 : \mu$=1, $\sigma^2$=1')
# Bug fix: arguments were previously passed as (m1, m2, s1, s2), but the
# signature is intersection(m1, s1, m2, s2); the mistake went unnoticed only
# because m2 == s1 == 1 here. Pass them in the correct order.
plt.axvline(x=intersection(m1, s1, m2, s2), label='', ls='--', color='g')
plt.legend()
plt.tight_layout()
plt.show()
# -
# Função de probabilidade de densidade da Distribuição Normal é dada por:
#
# $f(x|\mu,\sigma^2) = \frac{1}{\sqrt{2\pi\sigma^2}} exp(-\frac{(x - \mu)^2)}{2\sigma^2})$
#
# Dado que MLE propõe:
#
# $\theta_{MLE} = argmax_{\theta} log [p(x|\theta)]$
#
# Sendo $\theta = (\mu, \sigma^2)$, portanto a MLE pode ser calculada usando:
#
# $L(x|\mu,\sigma^2) = log [p(x|\mu,\sigma^2)]$
#
# Usando a distribuição acima e a regra do estimado de máxima verossimilhança, calcula-se:
#
# $L(x|\mu, \sigma^2) = -\frac{n}{2} log (2\pi\sigma^2) - \frac{1}{2\sigma^2}\sum_{i=1}^{n}(x - \mu)^2$
#
# Aplicando acima, sendo $n=1$:
#
# $L(x|\mu=-1,\sigma^2=1) = -\frac{1}{2} log (2\pi) - \frac{1}{2} (x + 1)^2$
#
# $L(x|\mu=-1,\sigma^2=1) = -\frac{1}{2} log (2\pi) - \frac{1}{2} (x - 1)^2$
#
# Dada as fórmulas acima podemos concluir portanto que quando $x = 0$, as equações terão valores iguais, definindo a fronteira no valor 0, sendo 0 indecisão (ambas as classes poderiam ser escolhidas).
#
# Para demonstrar, podemos definir 2 (dois) valores para x, consideremos $x=(0, 1).$
#
# Assim temos:
#
# $L(x=1|\mu=-1,\sigma^2=1) = -\frac{1}{2} log (2\pi) - \frac{1}{2} (1 + 1)^2$
#
# $L(x=1|\mu=-1,\sigma^2=1) = -0.9189 - \frac{1}{2} (2)^2$
#
# $L(x=1|\mu=-1,\sigma^2=1) = -0.9189 - 2$
#
# $L(x=1|\mu=-1,\sigma^2=1) = -2.9189$
#
# $~$
#
# $L(x=1|\mu=1,\sigma^2=1) = -\frac{1}{2} log (2\pi) - \frac{1}{2} (1 - 1)^2$
#
# $L(x=1|\mu=1,\sigma^2=1) = -0.9189 - \frac{1}{2} (0)^2$
#
# $L(x=1|\mu=1,\sigma^2=1) = -0.9189$
#
#
# $~$
#
# Definindo $x=0$:
#
# $L(x=0|\mu=1,\sigma^2=1) = -\frac{1}{2} log (2\pi) - \frac{1}{2} (0 - 1)^2$
#
# $L(x=0|\mu=1,\sigma^2=1) = -0.9189 - \frac{1}{2} (-1)^2$
#
# $L(x=0|\mu=1,\sigma^2=1) = -1.4189$
#
# $~$
#
# $L(x=0|\mu=-1,\sigma^2=1) = -\frac{1}{2} log (2\pi) - \frac{1}{2} (0 - (-1))^2$
#
# $L(x=0|\mu=-1,\sigma^2=1) = -0.9189 - \frac{1}{2} (1)^2$
#
# $L(x=0|\mu=-1,\sigma^2=1) = -1.4189$
#
# $~$
#
# **Resposta:**
#
# Dessa maneira, pode-se concluir que (conforme apresentado pelo gráfico também), amostras menores que 0 (zero) poderão ser classificadas como sendo da classe $C_1$ e valores acima de 0 (zero) como sendo da classe $C_2$, sendo 0 (zero) a fronteira onde encontraremos indecisão.
#
# $C_1 : x < 0$
#
# $C_2 : x > 0$
#
# $~$
# $~$
# **b)** $P(C_1) = 0,7, P(C_2) = 0,3$
#
# Tendo a probabilidade a priori e utilizando o MAP cuja formulação apresenta:
#
# $\theta_{MAP} = argmax_{\theta} log [p(x|\theta)] + log [p(\theta)]$
#
# Sendo $\theta = (\mu, \sigma^2)$ e para o caso da classe $C_1$:
#
# $f(x|\mu=-1, \sigma^2=1) = log [p(x|\mu=-1, \sigma^2=1)] + log [p(\mu=-1, \sigma^2=1)]$
#
# Podemos definir para $x=0$:
#
# $log [p(x=0|\mu=-1, \sigma^2=1)] = -1.4189$
#
# $p(\mu=-1, \sigma^2=1) = 0.7$
#
# Dessa maneira temos:
#
# $f(x=0|\mu=-1, \sigma^2=1) = -1.4189 + log[0.7]$
#
# $f(x=0|\mu=-1, \sigma^2=1) = -1.7755$
#
# Para a classe $C_2$ temos:
#
# $f(x=0|\mu=1, \sigma^2=1) = -1.4189 + log[0.3]$
#
# $f(x=0|\mu=1, \sigma^2=1) = -2.6228$
#
# $~$
#
# **Resposta**:
#
# Portanto no caso a amostra de valor 0 (zero) já não representa mais a região de indecisão do novo modelo dado as probabilidades.
#
# Caso as distribuições sejam uniformes com média equidistantes e variâncias iguais a média, como o exercício fornece, pode-se calcular o ponto de intersecção, indiferente da densidade de probabilidades tendo valores a posteriori usando:
#
#
# $\frac{P(C_1∣x)}{P(C_2∣x)} = \frac{f1(x)}{f2(x)}\times\frac{P(C_1)}{P(C_2)}$
#
# $\frac{P(C_1∣x)}{P(C_2∣x)} = log\frac{f1(x)}{f2(x)} + log\frac{P(C_1)}{P(C_2)}$
#
# $\frac{P(C_1∣x=-1)}{P(C_2∣x=1)} = log\frac{f1(x=-1)}{f2(x=1)} + log\frac{P(C_1)}{P(C_2)}$
#
# $\frac{P(C_1∣x=-1)}{P(C_2∣x=1)} = log\frac{0.3989}{0.3989} + log\frac{0.7}{0.3}$
#
# $\frac{P(C_1∣x=-1)}{P(C_2∣x=1)} = 0 + 0.8472$
#
# Como temos 2 classes:
#
# $\frac{P(C_1∣x=-1)}{P(C_2∣x=1)} = \frac{0.8472}{2}$
#
# $\frac{P(C_1∣x=-1)}{P(C_2∣x=1)} = 0.4236$
#
# Neste caso a fronteira de decisão será igual a $0.4236$.
#
# $~$
# $~$
#
# Portanto:
#
# $C_1 : x < 0.4236$
#
# $C_2 : x > 0.4236$
# +
# Same two Gaussian classes, now weighted by the priors P(C1)=0.7 and P(C2)=0.3
m1, m2, s1, s2, p1, p2 = -1, 1, 1, 1, .7, .3
N = 500
space = np.linspace(-4, 5, N)
# Densities scaled by the priors: proportional to the posteriors p(Ck|x)
px1 = norm(m1, np.sqrt(s1)).pdf(space) * p1
px2 = norm(m2, np.sqrt(s2)).pdf(space) * p2
fig = plt.figure(1, figsize=(17, 5))
plt.subplot(121)
plt.title('Gráfico da Distribuição de Probabilidades de cada Classe')
plt.xlabel('x')
plt.ylabel('y(x)')
plt.xticks(np.arange(-6, 7))
plt.yticks(np.round(np.linspace(0, 0.45, 9), 2))
plt.plot(space, px1, color='C0', markersize=3, label='$C_1 : \mu$=-1, $\sigma^2$=1')
plt.plot(space, px2, color='C1', markersize=3, label='$C_2 : \mu$=1, $\sigma^2$=1')
# Decision boundary shifted by the priors (~0.4236; see derivation above);
# arguments follow the signature order intersection(m1, s1, m2, s2, p1, p2)
plt.axvline(x=intersection(m1, s1, m2, s2, p1, p2), label='', ls='--', color='g')
plt.legend()
plt.tight_layout()
plt.show()
# -
# ---
# ### Parte 2 – Atividade computacional
#
# $~$
#
# Importação dos dados do *Australian Bureau of Meteorology* e sua apresentação.
#
# Os dados, são definidos como uma série temporal onde em determinada data é apresentada a temperatura. Abaixo é apresentado os primeiros 10 registros dos 3650 itens.
# +
def convert(dt, tmp):
    """Parse one CSV row (ISO date string, temperature string) into the tuple
    layout used downstream:
    (dd/mm/YYYY, temp, unix timestamp, temp, day, month, year).

    The temperature appears twice on purpose: the string view (cols 0-1) feeds
    the display dataframe, the numeric view (cols 2+) feeds the feature matrix.
    """
    parsed = datetime.strptime(dt, "%Y-%m-%d")
    temperature = float(tmp)
    return (
        parsed.strftime("%d/%m/%Y"),
        temperature,
        parsed.timestamp(),
        temperature,
        parsed.day,
        parsed.month,
        parsed.year,
    )
def import_data():
    """Read 'daily-minimum-temperatures.csv', skip the header row, and return
    the converted rows as a single (string-typed) NumPy matrix."""
    with open('daily-minimum-temperatures.csv') as fh:
        reader = csv.reader(fh, delimiter=',')
        rows = [convert(*line) for idx, line in enumerate(reader) if idx > 0]
    return np.array(rows)
def kfold(data, k=1, shuffle=True):
    """Split the rows of `data` into folds, optionally shuffling first.

    NOTE: the routine actually produces k + 1 folds (the increment below is a
    historical quirk of this notebook and is kept for compatibility with every
    caller). The last fold may be shorter when the row count does not divide
    evenly.
    """
    n_folds = k + 1
    if shuffle:
        order = np.random.permutation(data.shape[0])
        data = data[order, :]
    size = np.ceil(data.shape[0] / n_folds).astype(np.int32)
    parts = [data[idx * size:(idx + 1) * size, :] for idx in range(n_folds)]
    return np.array(parts)
class Scaler:
    """Standardise values to zero mean / unit variance (z-score)."""

    def __init__(self):
        self._mu = 0   # mean learned by fit()
        self._std = 0  # standard deviation learned by fit()

    def fit(self, x):
        """Learn mean and std from `x`."""
        self._mu = np.mean(x)
        self._std = np.std(x)

    def transform(self, x):
        """Return a standardised copy of `x`."""
        centered = np.copy(x) - self._mu
        return centered / self._std

    def inverse(self, x):
        """Undo transform(): map z-scores back to the original scale."""
        rescaled = np.copy(x) * self._std
        return rescaled + self._mu

    def __repr__(self):
        return f"{self._mu}, {self._std}"
class Normalizer:
    """Min-max scaling of values into the [0, 1] interval."""

    def __init__(self):
        self._min = 0  # minimum learned by fit()
        self._max = 0  # maximum learned by fit()

    def fit(self, x):
        """Learn min and max from `x`."""
        self._min = np.min(x)
        self._max = np.max(x)

    def transform(self, x):
        """Return a copy of `x` scaled into [0, 1]."""
        span = self._max - self._min
        return (np.copy(x) - self._min) / span

    def inverse(self, x):
        """Undo transform(): map [0, 1] values back to the original range."""
        span = self._max - self._min
        return np.copy(x) * span + self._min

    def __repr__(self):
        return f"{self._min}, {self._max}"
class LinearRegressionImpl:
    """Ridge-regularised linear regression solved in closed form via the
    normal equations.

    Parameters:
        fit_intercept: prepend a bias column of ones to the design matrix.
        reg: L2 penalty (lambda).
        weights: optional pre-computed weight vector for a predict-only model.
    """

    def __init__(self, fit_intercept=True, reg=1e-20, weights=None):
        self.w = weights
        self.fit_intercept = fit_intercept
        self._reg = reg

    def mse(self, y, yh):
        """Mean squared error of `yh` against `y`, plus the L2 penalty on the
        current weights."""
        N = y.shape[0]
        L2 = (self._reg / (2 * N)) * np.sum([np.sum(np.square(w)) for w in self.w])
        return ((1 / N) * np.sum(np.square(y - yh))) + L2

    def _correct_X(self, X):
        # Prepend the bias column when fitting an intercept.
        if self.fit_intercept:
            X = np.c_[np.ones(X.shape[0]), X]
        return X

    def train(self, X, y):
        """Fit w = (X'X + reg)^-1 X'y; return (self, training cost)."""
        X = self._correct_X(X)
        pinv = np.dot(X.T, X)
        reg = np.eye(pinv.shape[0], dtype=int) * self._reg
        # NOTE(review): this forces a ridge penalty of exactly 1 on the
        # intercept term (conventionally it would be 0, leaving the bias
        # unregularised). Kept as-is to preserve the notebook's published
        # results — confirm intent.
        reg[0][0] = 1
        nX = np.dot(np.linalg.inv(pinv + reg), X.T)
        self.w = np.dot(nX, y)
        yh = np.dot(X, self.w)
        J = self.mse(y, yh)
        return self, J

    def predict(self, X):
        """Predict targets for `X` with the stored weights."""
        X = self._correct_X(X)
        return np.dot(X, self.w)

class PolynomialRegressionImpl(LinearRegressionImpl):
    """Polynomial regression: expands each feature with its powers up to
    `degree`, then delegates the closed-form fit to LinearRegressionImpl."""

    def __init__(self, fit_intercept=True, reg=1e-20, weights=None, degree=1):
        super().__init__(fit_intercept, reg, weights)
        self.degree = degree

    def _correct_X(self, X):
        M = np.copy(X)
        # BUG FIX: the loop previously ran `range(2, self.degree)`, which
        # stops one power short — degree=2 behaved exactly like plain linear
        # regression and degree=d only reached power d-1. The upper bound is
        # now inclusive so powers 2..degree are all appended.
        for degree in range(2, self.degree + 1):
            M = np.c_[M, np.power(X, degree)]
        X = np.copy(M)
        if self.fit_intercept:
            X = np.c_[np.ones(X.shape[0]), X]
        return X
# +
# Load the CSV and keep two views of it:
#   df_data - the raw string matrix (used for labels and tables below);
#   data    - the numeric columns [timestamp, temp, day, month, year].
data = import_data()
df_data = np.copy(data)
data = data[:, 2:].astype(np.float32)
dts = pd.DataFrame(df_data)
# dts1990: rows for the first day of each month of 1990 (x-axis tick labels
# for the test-set plots further down).
dts1990 = dts[dts[0].str.contains("1990")]
dts1990 = dts1990[dts1990[0].str.contains("(01\/)(01|02|03|04|05|06|07|08|09|10|11|12)", regex=True)]
# dts: every January 1st (x-axis tick labels for full-series plots).
dts = dts[dts[0].str.contains("01/01")]
df = pd.DataFrame(df_data[:, 0:2], columns=["Data", "Temperature"]).head(10)
print(df)#.style.hide_index()
# -
# Full series: x = unix timestamp, y = temperature, ticks on each January 1st.
plt.figure(figsize=(8, 5))
timestamps, temperatures = data[:, 0], data[:, 1]
plt.plot(timestamps, temperatures)
plt.title("Todos os dados : Série Temporal")
tick_pos = dts[2].to_numpy().astype(np.float32)
tick_lbl = dts[0].to_numpy()
plt.xticks(tick_pos, tick_lbl, rotation=90)
plt.xlabel("Data")
plt.ylabel("Temperatura")
plt.tight_layout()
plt.show()
# Divisão dos dados em treinamento e teste. Conforme solicitado os dados até 1990 serão usado para treinamento e os posteriores para teste.
# +
# Everything strictly before 1990-01-01 trains the model; the rest is held out.
cutoff_1990 = datetime.timestamp(datetime.strptime("1990-01-01 00:00:00", "%Y-%m-%d %H:%M:%S"))
before_1990 = data[:, 0] < cutoff_1990
train, test = data[before_1990], data[~before_1990]
X_train, y_train = train[:, 2:], train[:, 1]
X_test, y_test = test[:, 2:], test[:, 1]
plt.figure(figsize=(12, 5))
plt.subplot(1, 2, 1)
plt.plot(train[:, 0], y_train)
plt.title("Dados até 1990 : Treinamento")
plt.xticks(dts[2].to_numpy().astype(np.float32), dts[0].to_numpy(), rotation=90)
plt.xlabel("Data")
plt.ylabel("Temperatura")
plt.subplot(1, 2, 2)
plt.plot(test[:, 0], y_test)
plt.title("Dados após 1990 : Teste")
plt.xticks(dts1990[2].to_numpy().astype(np.float32), dts1990[0].to_numpy(), rotation=90)
plt.xlabel("Data")
plt.ylabel("Temperatura")
plt.tight_layout()
plt.show()
# -
# Utilização de K-Folds para dividir os dados de treinamento em pequenas "pastas" para verificar melhor configuração de treinamento dado os dados.
#
# Conforme solicitado, os dados serão divididos em até 30 pastas, além disso, será testado a possibilidade de cada pasta conter dados randomicamente misturados de diferentes épocas para avaliar se o modelo se comporta de modo melhor ou pior em questão a temporalidade das informações.
# +
# Shuffled folds: fold 0 mixes samples from every epoch (scatter view).
folds = kfold(train, k=8, shuffle=True)
first_fold = folds[0]
plt.figure(figsize=(8, 5))
plt.title("Dados Randomizados (8 pastas : apresentando pasta 1) : Treinamento")
plt.xlabel("Timestamp")
plt.ylabel("Temperatura")
plt.plot(first_fold[:, 0], first_fold[:, 1], 'bo')
plt.tight_layout()
plt.show()
# +
# Unshuffled folds: fold 0 is a contiguous slice of the time series.
folds = kfold(train, k=8, shuffle=False)
first_fold = folds[0]
plt.figure(figsize=(8, 5))
plt.title("Dados NÃO Randomizados (8 pastas : apresentando pasta 1) : Treinamento")
plt.xlabel("Timestamp")
plt.ylabel("Temperatura")
plt.plot(first_fold[:, 0], first_fold[:, 1])
plt.tight_layout()
plt.show()
# -
# #### **Exercício 1**
#
# Calcular a melhor predição de acordo com os dados usando Quadrados Mínimos.
#
# $w = \phi^T(\phi \phi^T)^{-1}y$
#
# Usando K-Fold Cross Validation, o dataset foi dividido e executado para cada parâmetro de K. Sendo k a quantidade de atrasos.
#
# Conforme discutido em aula, os atrasos da série, aqueles cujos valores começam na posição inicial, poderiam ser preenchidos com 0 (zero). Entretanto, tentando evitar um desvio inicial muito grande, essa série atrasada inicial foi preenchida com valores de uma distribuição uniforme variando entre o valor mínimo e o máximo contidos dentro do dataset, conforme abaixo.
# +
alld = data[:, 1]
# BUG FIX: `np.float` was deprecated in NumPy 1.20 and removed in 1.24; it
# was simply an alias of the builtin `float`, which is used here instead.
nmin = np.round(float(np.min(alld)), 2)
nmax = np.round(float(np.max(alld)), 2)
print("Valores:")
print("-" * 20)
print(f"Min: {nmin}")
print(f"Max: {nmax}")
# -
# Dessa maneira foi executado um modelo de Regressão Linear nos dados, partindo de uma série de K=1 até K=31 e usando K-Fold (variando até 20 folds).
#
# O resultados obtidos são apresentados abaixo.
# +
def sparse_temporal_data(data, nmin, nmax):
    """Build lagged design rows for every delay k in [1, 31].

    For entry i at delay k the row is [x(i-k), ..., x(i-1), x(i)]; history
    missing at the start of the series is padded with uniform noise drawn
    from [nmin, nmax] (rounded to 2 decimals) instead of zeros.
    Returns a list of 31 lists of length-(k+1) arrays.
    """
    series = data[:, 1]
    n = len(series)
    delayed = []
    for k in range(1, 32):
        rows = []
        for i in range(n):
            start = max(i - k, 0)
            window = series[start:i]
            missing = k - len(window)
            if missing > 0:
                pad = np.round(np.random.uniform(nmin, nmax, size=(missing, )), 2)
                window = np.concatenate((pad, window))
            rows.append(np.concatenate((window, np.array([series[i]]))))
        delayed.append(rows)
    return delayed
# Lagged design matrices (delays k = 1..31) for the train and test splits.
train_k = sparse_temporal_data(train, nmin, nmax)
test_k = sparse_temporal_data(test, nmin, nmax)
# -
# Grid search: delay k in [1, 31] x fold count kf in [1, 19]. Every fold
# serves once as validation; the per-(k, kf) mean MSE and the index of the
# best validation fold are recorded in J_k.
J_k = []
for k in range(1, 32):
    for kf in range(1, 20):
        folds = kfold(np.array(train_k[k-1]), k=kf, shuffle=False)
        J = []
        better_J = 1e+10
        kf_val = 0
        for i in range(0, kf):
            # Validation set
            X_val, y_val = folds[i][:, :k], folds[i][:, k]
            # Training set
            # NOTE(review): only the first remaining fold (nF[0]) is used for
            # training, not the concatenation of all remaining folds —
            # confirm intent.
            nF = np.append(folds[i+1:, ], folds[:i, ], axis=0)
            X, y = nF[0][:, :k], nF[0][:, k]
            # Train
            linreg, _ = LinearRegressionImpl().train(X, y)
            # Predict
            yh = linreg.predict(X_val)
            # MSE
            e = linreg.mse(y_val, yh)
            J.append(e)
            if e < better_J:
                kf_val = i
                better_J = e
        J_k.append((k, kf, kf_val, np.mean(J), folds))
J_k = np.array(J_k)
# Pick the delay k with the lowest mean validation MSE (averaged over all
# fold counts); within that k keep the single best-scoring configuration.
best_result = None
best_J = 1e+10
for m in range(1, 32):
    c = J_k[(J_k[:, 0] == m)]
    mean = np.mean(c[:, 3])
    if best_J > mean:
        best_J = mean
        best_result = c[c[:, 3] == np.min(c[:, 3])][0]
# +
# Encontramos o melhor K e Validation Set.
# Unpack the winning configuration (ft = fold count, i = validation fold
# index), refit on its training fold, then score on the held-out test set.
k, ft, i, mse, folds = best_result
k = int(k)
print("Melhores valores")
print("-" * 20)
print("K :", k)
print("K-Fold:", ft, "/", i+1)
minor_J0 = 1e+10
best_weights = None
# Validation set
X_val, y_val = folds[i][:, :k], folds[i][:, k]
# Training set
nF = np.append(folds[i+1:, ], folds[:i, ], axis=0)
X, y = nF[0][:, :k], nF[0][:, k]
# Train
linreg, _ = LinearRegressionImpl().train(X, y)
best_weights = linreg.w
# Running again for the best result
lr = LinearRegressionImpl(weights=best_weights)
test_f = np.array(test_k[k-1])
X_test, y_test = test_f[:, :k], test_f[:, k]
yh_test = lr.predict(X_test)
mse = lr.mse(y_test, yh_test)
# Data table
df = pd.DataFrame([Jk[:4] for Jk in J_k], columns=["K", "K-Fold", "Validation Fold", "Média MSE"])
df["Validation Fold"] += 1
# Mean validation MSE per delay k, for the error curve below.
k_mse_mean = []
for m in range(1, 32):
    c = J_k[(J_k[:, 0] == m)]
    k_mse_mean.append(np.mean(c[:, 3]))
plt.figure(figsize=(10, 9))
plt.subplot(2, 1, 1)
plt.title(f"Best K: {k}, MSE: {np.round(mse, 4)}")
plt.xticks(dts1990[2].to_numpy().astype(np.float32), dts1990[0].to_numpy(), rotation=90)
plt.xlabel("Data")
plt.ylabel("Temperatura")
plt.plot(test[:, 0], y_test, label="Test")
plt.plot(test[:, 0], yh_test, 'r', label="Predicted")
plt.legend()
plt.subplot(2, 1, 2)
plt.title(f"Error : Média MSE x K")
plt.xlabel("K")
plt.ylabel("Média MSE")
plt.plot(np.arange(1, 32), k_mse_mean, "g")
plt.tight_layout()
plt.show()
# -
# Os gráficos acima, apresentam os valores após filtro pós-processamento para escolher o melhor valor de K.
#
# Abaixo, são apresentados os primeiros 10 itens da iteração total executada. O primeiro item não representa a melhor opção, pois para escolha da melhor opção foi calculada a média dos valores.
# Top-10 (K, K-Fold) configurations ordered by mean validation MSE.
df = df.sort_values(by=["Média MSE", "K"]).head(10)
# df.style.hide_index()
print(df)
# É possível também usar outra alternativa no método de K-Fold... no caso estamos embaralhando os dados antes de passar para o método e consequentemente para o modelo. Por fim, chegamos aproximadamente no mesmo resultado, entretanto tomando um caminho de certa maneira diferente... Neste sentido, podemos encontrar os melhores valores $W$ para o modelo em folds totalmente diferentes.
# Same grid search as above, but with shuffled folds so each fold mixes
# samples from different epochs.
fold_N, fold_val = 0, 0
J_k = []
for k in range(1, 32):
    for kf in range(1, 20):
        folds = kfold(np.array(train_k[k-1]), k=kf, shuffle=True)
        J = []
        better_J = 1e+10
        val_fold = 0
        for i in range(kf):
            # Validation set
            X_val, y_val = folds[i][:, :k], folds[i][:, k]
            # Training set
            # NOTE(review): as before, only nF[0] is used for training.
            nF = np.append(folds[i+1:, ], folds[:i, ], axis=0)
            X, y = nF[0][:, :k], nF[0][:, k]
            # Train
            linreg, _ = LinearRegressionImpl().train(X, y)
            # Predict
            yh = linreg.predict(X_val)
            # MSE
            e = linreg.mse(y_val, yh)
            J.append(e)
            if e < better_J:
                val_fold = i
                better_J = e
        J_k.append((k, kf, val_fold, np.mean(J), folds))
J_k = np.array(J_k)
# Select the best delay k by mean validation MSE, as before.
best_result = None
best_J = 1e+10
for m in range(1, 32):
    c = J_k[(J_k[:, 0] == m)]
    mean = np.mean(c[:, 3])
    if best_J > mean:
        best_J = mean
        best_result = c[c[:, 3] == np.min(c[:, 3])][0]
# +
# Encontramos o melhor K e Validation Set.
# Same evaluation as the unshuffled run: refit on the winning training fold
# and score on the held-out test set.
k, kf, i, mse, folds = best_result
k = int(k)
print("Melhores valores")
print("-" * 20)
print("K :", k)
print("K-Fold:", kf, "/", i+1)
minor_J0 = 1e+10
best_weights = None
# Validation set
X_val, y_val = folds[i][:, :k], folds[i][:, k]
# Training set
nF = np.append(folds[i+1:, ], folds[:i, ], axis=0)
X, y = nF[0][:, :k], nF[0][:, k]
# Train
linreg, _ = LinearRegressionImpl().train(X, y)
best_weights = linreg.w
# Running again for the best result
lr = LinearRegressionImpl(weights=best_weights)
test_f = np.array(test_k[k-1])
X_test, y_test = test_f[:, :k], test_f[:, k]
yh_test = lr.predict(X_test)
mse = lr.mse(y_test, yh_test)
# Data table
df = pd.DataFrame([Jk[:4] for Jk in J_k], columns=["K", "K-Fold", "Validation Fold", "Média MSE"])
df["Validation Fold"] += 1
# Mean validation MSE per delay k, for the error curve below.
k_mse_mean = []
for m in range(1, 32):
    c = J_k[(J_k[:, 0] == m)]
    k_mse_mean.append(np.mean(c[:, 3]))
plt.figure(figsize=(10, 9))
plt.subplot(2, 1, 1)
plt.title(f"Best K: {k}, MSE: {np.round(mse, 4)}")
plt.xticks(dts1990[2].to_numpy().astype(np.float32), dts1990[0].to_numpy(), rotation=90)
plt.xlabel("Data")
plt.ylabel("Temperatura")
plt.plot(test[:, 0], y_test, label="Test")
plt.plot(test[:, 0], yh_test, 'r', label="Predicted")
plt.legend()
plt.subplot(2, 1, 2)
plt.title(f"Error : Média MSE x K")
plt.xlabel("K")
plt.ylabel("Média MSE")
plt.plot(np.arange(1, 32), k_mse_mean, "g")
plt.tight_layout()
plt.show()
# -
# Abaixo, são apresentados os primeiros 10 itens da iteração total executada. O primeiro item não representa a melhor opção, pois para escolha da melhor opção foi calculada a média dos valores.
# Top-10 configurations of the shuffled run, ordered by mean validation MSE.
df = df.sort_values(by=["Média MSE", "K"]).head(10)
print(df)
# #### **Exercício 2**
#
# No exercício 2 usando o mesmo dataset usando anteriormente com a mesma questão de atraso, passaremos cada um dos itens por uma Rede Neural, usando como função de ativação a função hiperbólica.
#
# Para validar a quantidade de unidades ( ou neurônios ) faremos a geração dessas unidades variando de 1 até 100 com seus pesos dentro de uma distribuição uniforme variando de -1 até 1.
#
# Como valores para $\lambda$ (regularização) será utilizado o seguinte range: 1e+1 até 1e-6, dando espaçamentos de 0.1. Para visualmente ficar mais legível (devido a grande variação), os dados (os valores de regularização) são apresentados em escala logarítimica.
#
# Para a normalização dos dados, evitando a saturação da tangente hiperbólica, os dados serão normalizados entre os valores de mínimo e máximo dos dados (os quais já foram apresentados acima).
#
# Valores de K, estão dentro da faixa de 5 até 20 e o K-Fold utilizado foi de 1 até 10 folds.
# +
# Fit both scalers on the whole series (train + test) so one common scale is
# used everywhere — this keeps the tanh hidden layer away from saturation.
all_data = np.concatenate((np.copy(train[:, 1]), np.copy(test[:, 1])))
scaler = Scaler()
scaler.fit(all_data)
normalizer = Normalizer()
normalizer.fit(all_data)
# N_k: exclusive upper bound on the fold count tested below;
# N_T: exclusive upper bound on the number of hidden units.
N_k = 10
N_T = 101
# Regularisation (lambda) grid, from 1e+1 down to 1e-6.
reg = np.array([
    1e+1, 1e+0, 1e-1, 1e-2, 1e-3, 1e-4, 1e-5, 1e-6
])
N_R = len(reg)
print(f"Valores de regularização testados: {len(reg)}")
print(reg)
# +
import cupy as cp
def gpu(x, w):
    """Move `x` to the GPU, apply the tanh hidden layer (tanh(x @ w)) and
    return the activations as a host-side NumPy array."""
    device_x = cp.asarray(x)
    activations = cp.tanh(cp.dot(device_x, w))
    return cp.asnumpy(activations)
# Random-features network search: delay k in [5, 20] x fold count kf x hidden
# units T x lambda. Hidden weights are drawn once per (T, lambda) from a
# uniform(-1, 1) scaled by sqrt(1/(k+T+1)) — presumably a Xavier-style scale;
# confirm intent. The output layer is fit in closed form on the GPU features.
fold_N, fold_val = 0, 0
J_k = []
for k in range(5, 21):
    start = time.time()
    for kf in range(1, N_k):
        folds = kfold(np.array(train_k[k-1]), k=kf, shuffle=False)
        for T in range(N_T):
            best_reg = 1e-10
            val_fold = 0
            best_w = None
            J = []
            better_J = 1e+10
            for r in range(N_R):
                # Weights
                w = cp.random.uniform(-1, 1, size=(k, T + 1)) * cp.sqrt(1 / (k + T + 1))
                for i in range(kf):
                    # Regularization
                    regu = reg[r]
                    # Validation set
                    X_val, y_val = folds[i][:, :k], folds[i][:, k]
                    X_val = normalizer.transform(X_val)
                    X_val = gpu(X_val, w)
                    # Training set
                    nF = np.append(folds[i+1:, ], folds[:i, ], axis=0)
                    X, y = nF[0][:, :k], nF[0][:, k]
                    X = normalizer.transform(X)
                    X = gpu(X, w)
                    # Train
                    linreg, _ = LinearRegressionImpl(reg=regu).train(X, y)
                    # Predict
                    yh = linreg.predict(X_val)
                    # MSE
                    e = linreg.mse(y_val, yh)
                    # Hold information
                    J.append(e)
                    if e < better_J:
                        val_fold = i
                        best_w = cp.asnumpy(w)
                        best_reg = regu
                        better_J = e
            J_k.append((k, kf, val_fold, T + 1, best_reg, np.mean(J), best_w, folds))
    print(f"K: {k} <=> Time to run: {np.round(time.time() - start, 2)} secs")
J_k = np.array(J_k)
# -
best_result = None
best_T_J = 1e+10
best_w = None
# For each hidden-layer size T, average the MSE over all runs; keep the T
# with the lowest mean and, within it, the configuration whose lambda reached
# the minimum MSE.
for m in range(1, N_T):
    c = J_k[(J_k[:, 3] == m)]
    mean = np.mean(c[:, 5])
    if best_T_J > mean:
        min_J = np.min(c[:, 5])
        h = c[c[:, 5] == min_J]
        # NOTE(review): `reg` (the lambda grid array) is overwritten here with
        # a scalar lambda — the cells below rely on this shadowing.
        T, reg = m, h[:, 4][0]
        best_T_J = mean
        best_result = J_k[(J_k[:, 3] == m) & (J_k[:, 4] == reg)][0]
# +
# Encontramos o melhor K e Validation Set.
# Refit the output layer with the stored random hidden weights (best_w) on
# the winning training fold, then score on the held-out test set.
k, kf, i, T, reg, mse, best_w, folds = best_result
k = int(k)
print("Melhores resultados:")
print("-" * 20)
print("K-Fold : ", kf, "/", i+1)
print("K : ", k)
print("T : ", T)
print("lambda : ", reg)
print("MSE da validação : ", mse)
# Training set
nF = np.append(folds[i+1:, ], folds[:i, ], axis=0)
X, y = nF[0][:, :k], nF[0][:, k]
Xl = normalizer.transform(X)
# Hidden layer applied on the CPU here (best_w is already a NumPy array).
Xl = np.tanh(np.dot(Xl, best_w))
# Train
linreg, _ = LinearRegressionImpl(reg=reg).train(Xl, y)
best_weights = linreg.w
# Running again for the best result
test_f = np.array(test_k[k-1])
X_test, y_test = test_f[:, :k], test_f[:, k]
Xl_test = normalizer.transform(X_test)
Xl_test = np.tanh(np.dot(Xl_test, best_w))
lr = LinearRegressionImpl(reg=reg, weights=best_weights)
yh_test = lr.predict(Xl_test)
mse = lr.mse(y_test, yh_test)
# +
# Data table
df = pd.DataFrame([Jk[:6] for Jk in J_k], columns=["K", "K-Fold", "Validation Fold", "T", "Regularizacao", "Média MSE"])
# Mean MSE per delay k, per hidden size T, and the lambda that achieved the
# minimum MSE for each T (plotted on a log scale below).
k_mse_mean = []
for m in range(5, 21):
    c = J_k[(J_k[:, 0] == m)]
    k_mse_mean.append(np.mean(c[:, 5]))
T_mse_mean, reg_min = [], []
for m in range(1, N_T):
    c = J_k[(J_k[:, 3] == m)]
    T_mse_mean.append(np.mean(c[:, 5]))
    min_J = np.min(c[:, 5])
    h = c[c[:, 5] == min_J]
    reg_min.append(h[:, 4][0])
plt.figure(figsize=(10, 17))
plt.subplot(4, 1, 1)
plt.title(f"Best K:{k}, T:{T}, $\lambda$:{reg}, MSE: {np.round(mse, 4)}")
plt.xticks(dts1990[2].to_numpy().astype(np.float32), dts1990[0].to_numpy(), rotation=90)
plt.xlabel("Data")
plt.ylabel("Temperatura")
plt.plot(test[:, 0], y_test, label="Test")
plt.plot(test[:, 0], yh_test, 'r', label="Predicted")
plt.legend()
plt.subplot(4, 1, 2)
plt.title(f"Error : Média MSE x K")
plt.xlabel("K")
plt.ylabel("Média MSE")
plt.plot(range(5, 21), k_mse_mean, "g")
plt.subplot(4, 1, 3)
plt.title(f"Error : Média MSE x T")
plt.xlabel("T")
plt.ylabel("Média MSE")
plt.plot(range(1, N_T), T_mse_mean, "b")
plt.subplot(4, 1, 4)
plt.title(f"log($\lambda$) x T")
plt.xlabel("T")
plt.ylabel("log($\lambda$)")
plt.plot(range(1, N_T), np.log(reg_min), "r")
plt.tight_layout()
plt.show()
# -
# Abaixo, são apresentados os primeiros 10 itens da iteração total executada. O primeiro item não representa a melhor opção, pois para escolha da melhor opção foi calculada a média dos valores.
# Top-10 configurations of the neural run, ordered by mean validation MSE.
df = df.sort_values(by=["Média MSE", "T", "K"]).head(10)
print(df)
# O resultado do modelo acima ficou bem próximo do executado usando apenas a Regressão Linear simples (sem uma camada intermediária entre as entradas e o Regressor). Neste sentido, pela natureza dos dados, mesmo usando modelos mais complexos podemos acabar por chegar no mesmo resultado.
# ---
#
# <NAME> - 208911
#
# Todo o código deste relatório está disponível em: https://github.com/rdenadai/ia006c
| notebooks/EFC01/EFC01.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# language: python
# name: python3
# ---
# + [markdown] id="view-in-github" colab_type="text"
# <a href="https://colab.research.google.com/github/npgeorge/DS-Unit-1-Sprint-1-Dealing-With-Data/blob/master/Assignment_Nicholas_George_LS_DS_121_Join_and_Reshape_Data.ipynb" target="_parent"><img src="https://colab.research.google.com/assets/colab-badge.svg" alt="Open In Colab"/></a>
# + [markdown] colab_type="text" id="pmU5YUal1eTZ"
# _Lambda School Data Science_
#
# # Join and Reshape datasets
#
# Objectives
# - concatenate data with pandas
# - merge data with pandas
# - understand tidy data formatting
# - melt and pivot data with pandas
#
# Links
# - [Pandas Cheat Sheet](https://github.com/pandas-dev/pandas/blob/master/doc/cheatsheet/Pandas_Cheat_Sheet.pdf)
# - [Tidy Data](https://en.wikipedia.org/wiki/Tidy_data)
# - Combine Data Sets: Standard Joins
# - Tidy Data
# - Reshaping Data
# - Python Data Science Handbook
# - [Chapter 3.6](https://jakevdp.github.io/PythonDataScienceHandbook/03.06-concat-and-append.html), Combining Datasets: Concat and Append
# - [Chapter 3.7](https://jakevdp.github.io/PythonDataScienceHandbook/03.07-merge-and-join.html), Combining Datasets: Merge and Join
# - [Chapter 3.8](https://jakevdp.github.io/PythonDataScienceHandbook/03.08-aggregation-and-grouping.html), Aggregation and Grouping
# - [Chapter 3.9](https://jakevdp.github.io/PythonDataScienceHandbook/03.09-pivot-tables.html), Pivot Tables
#
# Reference
# - Pandas Documentation: [Reshaping and Pivot Tables](https://pandas.pydata.org/pandas-docs/stable/reshaping.html)
# - Modern Pandas, Part 5: [Tidy Data](https://tomaugspurger.github.io/modern-5-tidy.html)
# + id="5MsWLLW4Xg_i" colab_type="code" outputId="94d1e005-1fda-40d8-90d1-0db088bc0852" colab={"base_uri": "https://localhost:8080/", "height": 232}
# !wget https://s3.amazonaws.com/instacart-datasets/instacart_online_grocery_shopping_2017_05_01.tar.gz
# + id="gfr4_Ya0XkLI" colab_type="code" outputId="e822df4d-d0a7-4e71-c8ec-007abb31f204" colab={"base_uri": "https://localhost:8080/", "height": 247}
# !tar --gunzip --extract --verbose --file=instacart_online_grocery_shopping_2017_05_01.tar.gz
# + id="N4YyGPNdXrT0" colab_type="code" outputId="1c6f1bbe-1f08-4a45-e62d-19a9f14cc76b" colab={"base_uri": "https://localhost:8080/", "height": 34}
# %cd instacart_2017_05_01
# + id="b26wmLUiXtlM" colab_type="code" outputId="d5835080-6cfa-4285-aae1-9cb3aa9fa00e" colab={"base_uri": "https://localhost:8080/", "height": 123}
# !ls -lh *.csv
# + [markdown] colab_type="text" id="kAMtvSQWPUcj"
# # Assignment
#
# ## Join Data Practice
#
# These are the top 10 most frequently ordered products. How many times was each ordered?
#
# 1. Banana
# 2. Bag of Organic Bananas
# 3. Organic Strawberries
# 4. Organic Baby Spinach
# 5. Organic Hass Avocado
# 6. Organic Avocado
# 7. Large Lemon
# 8. Strawberries
# 9. Limes
# 10. Organic Whole Milk
#
# First, write down which columns you need and which dataframes have them.
#
# Next, merge these into a single dataframe.
#
# Then, use pandas functions from the previous lesson to get the counts of the top 10 most frequently ordered products.
# + id="3SpiL6HmHsu6" colab_type="code" colab={}
##### YOUR CODE HERE #####
import pandas as pd
#need the product name column and how many times someone ordered the item, for instance if they ordered 2 of one product
#columns we need
#product_name
#product_id
#order_id
#reordered
# + id="zDYLj-V7K-JM" colab_type="code" outputId="eb867e06-37f2-4089-be29-13f072db411d" colab={"base_uri": "https://localhost:8080/", "height": 647}
# Exploration of the Instacart CSVs: load each file, inspect head/shape, then
# subset the columns needed for the top-10 products question.
aisles = pd.read_csv('aisles.csv')
aisles.head(20)
#probably not useful for this assignment
# + id="LQOaLIu0MaHL" colab_type="code" outputId="8497476d-2400-4335-af7a-d5cc48940c83" colab={"base_uri": "https://localhost:8080/", "height": 34}
aisles.shape
# + id="EUzXgTUjLHXd" colab_type="code" outputId="3d4661d6-f299-42a4-c8dd-a9cb3135072d" colab={"base_uri": "https://localhost:8080/", "height": 197}
departments = pd.read_csv('departments.csv')
departments.head()
#departments.shape
#also not useful for this question
# + id="QJZOaIE_LZsA" colab_type="code" outputId="c1767988-c959-42b8-b2cd-e4433f8af6e0" colab={"base_uri": "https://localhost:8080/", "height": 197}
order_products__prior = pd.read_csv('order_products__prior.csv')
order_products__prior.head()
#ok so add the reodered column, ensure it aligns with the proper product id
# + id="wSdlA0p7LsNe" colab_type="code" outputId="59f1b2e4-7012-4d71-dfc2-bb75bacabd00" colab={"base_uri": "https://localhost:8080/", "height": 197}
# NOTE(review): this reads order_products__prior.csv but names the result
# order_products__train — the comment below suggests it is deliberate, but
# confirm it should not read order_products__train.csv instead.
order_products__train = pd.read_csv('order_products__prior.csv')
order_products__train.head()
#training data set, so going to use the "prior" for the reordered column
# + id="sJ6rD-5CHwdK" colab_type="code" outputId="26db0de0-1ad8-4227-9faa-5bb8ef1f082f" colab={"base_uri": "https://localhost:8080/", "height": 367}
orders = pd.read_csv('orders.csv')
orders.head(10)
# + id="o2HmkbL2Kw6J" colab_type="code" outputId="4092ce97-4428-4fb4-9356-727f8f91c583" colab={"base_uri": "https://localhost:8080/", "height": 647}
products = pd.read_csv('products.csv')
products.head(20)
#products data frame as the product name, not sure if this will be useful.
# + id="km_Y7BxCMr_G" colab_type="code" outputId="b132181a-fe2b-4738-f22b-0efab3941cb3" colab={"base_uri": "https://localhost:8080/", "height": 1000}
products['product_name']
# + id="L2kVLMXQPBLN" colab_type="code" outputId="1fc3cd3d-c14c-4320-945e-697a992c4b24" colab={"base_uri": "https://localhost:8080/", "height": 197}
#so, I need product_name, product_id, order_id, and whether or not they reordered
#first i'll grab the products columns I need
prod_columns = ['product_id','product_name']
# Chained assignment keeps a second alias (products_sub) to the same subset.
products = products_sub = products[prod_columns]
products.head()
# + id="yt5SOxNmQxQq" colab_type="code" outputId="1869e9b5-492c-403c-fb01-89b53f41238a" colab={"base_uri": "https://localhost:8080/", "height": 34}
products.shape
# + id="XgqCk-VFQRiF" colab_type="code" outputId="73e79f7c-32fa-4ab3-d6fa-a6b716de9647" colab={"base_uri": "https://localhost:8080/", "height": 197}
#just need to grab the order_id from the orders column
orders_column = ['order_id']
orders = orders_sub = orders[orders_column]
orders.head()
# + id="6WUotlpgQuoC" colab_type="code" outputId="9d5ee54e-c00a-4946-9d57-c37d02961744" colab={"base_uri": "https://localhost:8080/", "height": 34}
orders.shape
# + id="CfMvB-QXQ7Bu" colab_type="code" colab={}
#and then lastly need to grab whether or not they reordered
# + id="i6JrH_3_P7UF" colab_type="code" colab={"base_uri": "https://localhost:8080/", "height": 204} outputId="caebbc67-eeea-4715-e5c1-a6930d2a1bb3"
#columns = ['product_id','product_name','order_id', 'reordered']
#merged = pd.merge(______, ____________)
#merged.head()
# + [markdown] id="RsiWi4DuXPLP" colab_type="text"
# ## Reshape Data Section
#
# - Replicate the lesson code
# - Complete the code cells we skipped near the beginning of the notebook
# - Table 2 --> Tidy
# - Tidy --> Table 2
# - Load seaborn's `flights` dataset by running the cell below. Then create a pivot table showing the number of passengers by month and year. Use year for the index and month for the columns. You've done it right if you get 112 passengers for January 1949 and 432 passengers for December 1960.
# + id="fgxulJQq0uLw" colab_type="code" colab={}
# Load seaborn's flights dataset; the assignment above asks for a year x
# month pivot table of passenger counts built from this dataframe.
import seaborn as sns
flights = sns.load_dataset('flights')
# + id="1qKc88WI0up-" colab_type="code" outputId="55ca2c65-5d1d-49b6-a906-436a6ca0af59" colab={"base_uri": "https://localhost:8080/", "height": 232}
##### YOUR CODE HERE #####
# !wget https://s3.amazonaws.com/instacart-datasets/instacart_online_grocery_shopping_2017_05_01.tar.gz
# + id="-BYjKPGbWQ1T" colab_type="code" outputId="e4f91991-6370-410f-8906-85d8bc3956cc" colab={"base_uri": "https://localhost:8080/", "height": 247}
# !tar --gunzip --extract --verbose --file=instacart_online_grocery_shopping_2017_05_01.tar.gz
# + id="y_wecn9UWWCK" colab_type="code" outputId="ce347259-61e5-4257-f343-ee284199a8b0" colab={"base_uri": "https://localhost:8080/", "height": 34}
# %cd /content/
# + id="NWgW7tkBWaZ_" colab_type="code" outputId="f38c5252-4c4f-4d61-cf12-6800c0d9620a" colab={"base_uri": "https://localhost:8080/", "height": 34}
# !ls -lh *.csv
# + id="T3SDh_-jWevN" colab_type="code" outputId="2d566faa-ab17-484a-e64c-921fbce1190c" colab={"base_uri": "https://localhost:8080/", "height": 34}
# %cd instacart_2017_05_01
# + id="kPytkjIkWjnn" colab_type="code" outputId="9c996d82-2f16-4bcf-b6d3-73734e781ff9" colab={"base_uri": "https://localhost:8080/", "height": 123}
# !ls -lh *.csv
# #!ls is used to list the current files in the directory
# #rm is used to remove/delete files
#the *.csv is used to show ANY filename that ends with .csv
# + id="5MUcS0l9XReY" colab_type="code" colab={}
# !rm -rf instacart_2017_05_01/
# + id="DD5w-jHSXV5-" colab_type="code" colab={"base_uri": "https://localhost:8080/", "height": 54} outputId="a2bd672e-3987-459b-8236-dddf4d6f5ff2"
# !rm instacart_online_grocery_shopping_2017_05_01.tar.gz
# + [markdown] id="YZqCc8W6XZoc" colab_type="text"
# Download with Python
# + id="FOslrd0fXbL9" colab_type="code" outputId="43535c59-4dda-453d-ea0e-61903a845e7a" colab={"base_uri": "https://localhost:8080/", "height": 34}
# %cd /content/
# + id="YVZJINU9Xd7j" colab_type="code" outputId="2de58463-8b32-40e7-b2a0-f20c9b2ba59e" colab={"base_uri": "https://localhost:8080/", "height": 52}
# Pure-Python equivalent of the shell cells above: download the Instacart
# archive, extract it, and move into the extracted directory.
import urllib.request
url = 'https://s3.amazonaws.com/instacart-datasets/instacart_online_grocery_shopping_2017_05_01.tar.gz'
file_name = 'instacart_online_grocery_shopping_2017_05_01.tar.gz'
urllib.request.urlretrieve(url, file_name)
# + id="fwIzuUIWYFiK" colab_type="code" colab={}
# Extract the gzipped tarball into the current working directory.
import tarfile
tar = tarfile.open(file_name, 'r:gz')
tar.extractall()
tar.close()
# + id="vH80rOI_YRDe" colab_type="code" colab={}
import os
# + id="H5-rp7k_YTPT" colab_type="code" outputId="3fea2e08-0107-455c-e1a9-5b0d3182066b" colab={"base_uri": "https://localhost:8080/", "height": 34}
print(os.getcwd())
# + id="BPRuR91WYeng" colab_type="code" colab={}
# Change into the extracted dataset folder (Colab path).
os.chdir('/content/instacart_2017_05_01/')
# + id="SE0yzsPqYnoG" colab_type="code" outputId="813930ff-7463-44d1-cb8a-cdb1b5a0e3ba" colab={"base_uri": "https://localhost:8080/", "height": 34}
print(os.getcwd())
# + id="YYkhlUVvYxv6" colab_type="code" outputId="30f2e558-81a9-47fd-a3dd-1327b5d3cebd" colab={"base_uri": "https://localhost:8080/", "height": 123}
# List the extracted CSV files.
import glob
glob.glob('/content/instacart_2017_05_01/*.csv')
# + [markdown] id="fYX3g5LOY-Mh" colab_type="text"
# # **Join Datasets**
# + [markdown] id="ZKMLCjpGZH82" colab_type="text"
# ## **Goal: Reproduce this example**
# + [markdown] id="WGNawB-xZLkv" colab_type="text"
# The first two order for user id 1:
# + id="WIEyEgneY9Mn" colab_type="code" outputId="306cd4f6-8120-4d23-95d1-d0f569a6b578" colab={"base_uri": "https://localhost:8080/", "height": 311}
# Render the target example (an image of the desired merged table) inline so
# it can be re-displayed next to each dataframe below.
from IPython.display import display, Image
url = 'https://cdn-images-1.medium.com/max/1600/1*vYGFQCafJtGBBX5mbl0xyw.png'
example = Image(url=url, width=600)
display(example)
# + [markdown] id="Mxjj2XiDZwwe" colab_type="text"
# # **Load Data**
# + [markdown] id="ctyw-QmaZ0Ir" colab_type="text"
# Here's a list of all six CSV filenames
# + id="fCJ52S9OZ3Of" colab_type="code" outputId="39c19bc1-3c30-4b81-f311-591802bd1455" colab={"base_uri": "https://localhost:8080/", "height": 123}
# !ls -lh *.csv
# + [markdown] id="0oVblucHZ8QR" colab_type="text"
# For each CSV
# - Load it with pandas
# - Look at the dataframe's shape
# - Look at its head (first rows)
# - display (example)
# - Which columns does it have in common with the example we want to reproduce?
# + id="c0Hp8IVVaNxl" colab_type="code" colab={}
import pandas as pd
# + [markdown] id="GlXRivVFaQ2T" colab_type="text"
# ## **aisles**
# + id="QvZ6pDh-aT_0" colab_type="code" outputId="189745b2-0882-421b-af47-8ac3490f9d6e" colab={"base_uri": "https://localhost:8080/", "height": 197}
aisles = pd.read_csv('aisles.csv')
aisles.head()
# + id="7304fOTcapty" colab_type="code" outputId="9db6811e-4ebe-4d4e-bf8f-5319afca162d" colab={"base_uri": "https://localhost:8080/", "height": 34}
aisles.shape
# + id="b0BquWX7arVb" colab_type="code" outputId="739b8fb7-baf0-49ed-8baa-77708a35c128" colab={"base_uri": "https://localhost:8080/", "height": 311}
display(example)
# + id="DPdbam7SauKn" colab_type="code" outputId="4c169a3c-16be-4fd1-98d2-4cde82f52b5e" colab={"base_uri": "https://localhost:8080/", "height": 287}
aisles.describe()
# + id="LFMRdM2DauBl" colab_type="code" outputId="9f35c948-bd6a-4396-bb8a-f57793bb4881" colab={"base_uri": "https://localhost:8080/", "height": 167}
aisles.describe(exclude='number')
# + [markdown] id="xJFAhX1oa012" colab_type="text"
# ## **departments**
# + id="Ua7vN7tQa3mD" colab_type="code" outputId="6b508755-536b-4e34-b43b-d80670bf9a32" colab={"base_uri": "https://localhost:8080/", "height": 197}
departments = pd.read_csv('departments.csv')
departments.head()
# + id="2pY1v8wLa3wm" colab_type="code" outputId="6801ed84-6fc1-49da-cd8f-1ee3239778bc" colab={"base_uri": "https://localhost:8080/", "height": 34}
departments.shape
# + id="PN2CHhTSbBd2" colab_type="code" outputId="25bde837-8868-4e31-9e1e-88ce0d494fa5" colab={"base_uri": "https://localhost:8080/", "height": 311}
display(example)
# + [markdown] id="NKu6aCpEbEYz" colab_type="text"
# ## **order_products__prior**
# + id="8poEdBXobI6V" colab_type="code" outputId="23c969bd-8e3f-4a95-c4a2-718273cd198d" colab={"base_uri": "https://localhost:8080/", "height": 197}
order_products__prior = pd.read_csv('order_products__prior.csv')
order_products__prior.head()
# + id="P9_50YBvbXNw" colab_type="code" outputId="ea06cde8-5b2c-40b3-a595-c9b909e51091" colab={"base_uri": "https://localhost:8080/", "height": 34}
order_products__prior.shape
# + [markdown] id="prQU24b_baOT" colab_type="text"
# We need:
# - order_id
# - product_id
# - add_to_cart_order
# + [markdown] id="n38zOoGEbgDd" colab_type="text"
# ## **order_products__train**
# + id="MO9xFkcobldV" colab_type="code" outputId="70ce1e39-a46d-46be-c74a-1e655b1e8912" colab={"base_uri": "https://localhost:8080/", "height": 197}
order_products__train = pd.read_csv('order_products__train.csv')
order_products__train.head()
# + id="Vty7VxsEbxOw" colab_type="code" outputId="2efa9f9c-6531-4fb2-b13c-a3bda81f8eb7" colab={"base_uri": "https://localhost:8080/", "height": 34}
order_products__train.shape
# + [markdown] id="8zO-Ri-Cb0_m" colab_type="text"
# ## **orders**
# + id="fTw9LmxJb0Ru" colab_type="code" outputId="878021e1-f6bf-4584-d516-ba7ca93091cf" colab={"base_uri": "https://localhost:8080/", "height": 217}
orders = pd.read_csv('orders.csv')
orders.head()
# + id="almgE73hcB0V" colab_type="code" outputId="6a1cbf50-e449-46ac-98d4-c8da2acce26f" colab={"base_uri": "https://localhost:8080/", "height": 311}
display(example)
# + [markdown] id="z7YBhT4ncHD2" colab_type="text"
# We need:
# - order_id
# - user_id
# - order_number
# - order_dow
# - order_hour_of_day
# + [markdown] id="knGfsa2KcPj3" colab_type="text"
# ## **products**
# + id="ycB3IfcecV-b" colab_type="code" outputId="b6a322d1-9efe-4d02-cc24-eceed1f90760" colab={"base_uri": "https://localhost:8080/", "height": 197}
products = pd.read_csv('products.csv')
products.head()
# + id="dnywODgjcc-0" colab_type="code" outputId="6b9712fd-f841-4f50-e7f4-6bb6ee7f7f70" colab={"base_uri": "https://localhost:8080/", "height": 34}
products.shape
# + [markdown] id="ZFy105rIcgbm" colab_type="text"
# ## **Concatenate order_productsprior and order_productstrain**
#
# + id="vLzXjsfxcsqz" colab_type="code" outputId="c6b1cc71-1b81-44b2-9197-6a442072abeb" colab={"base_uri": "https://localhost:8080/", "height": 34}
order_products = pd.concat([order_products__prior, order_products__train])
order_products.shape
# + id="eFZn6KwldP2A" colab_type="code" outputId="56d24a32-835c-4d23-a968-71285e53f4f6" colab={"base_uri": "https://localhost:8080/", "height": 34}
print(order_products__prior.shape, order_products__train.shape, order_products.shape)
# + id="WGAQnxs9daiF" colab_type="code" colab={}
assert len(order_products__prior) + len(order_products__train) == len(order_products)
# + id="nbUaR90tdjmb" colab_type="code" outputId="4179ca9a-1839-4d8f-8c8c-72801b6def10" colab={"base_uri": "https://localhost:8080/", "height": 311}
display(example)
# + [markdown] id="gLCkPIPmdplV" colab_type="text"
# Short groupby example
# + id="7L9UH4oXdso2" colab_type="code" outputId="b7efe7fd-4ed0-407e-9af2-fc1bb73afbad" colab={"base_uri": "https://localhost:8080/", "height": 34}
order_products.groupby('order_id')['product_id'].count().mean()
# + id="41Mn8Agdehmf" colab_type="code" colab={}
grouped_orders = order_products.groupby('order_id')
# + id="yZTkEYRKemqU" colab_type="code" outputId="03faccc2-8cbd-4280-98a4-ff25914ee580" colab={"base_uri": "https://localhost:8080/", "height": 197}
grouped_orders.get_group(2539329)
# + id="KxdUNJ5TeptE" colab_type="code" outputId="67bdb780-01ea-4d73-8742-395067e42c22" colab={"base_uri": "https://localhost:8080/", "height": 197}
order_products[order_products['order_id'] == 2539329]
# + id="dVzycqYVes2Q" colab_type="code" outputId="cd1c33db-8ee1-4327-9b25-a84acbb442e0" colab={"base_uri": "https://localhost:8080/", "height": 1000}
grouped_orders['product_id'].count()
# + id="eO6J7A8IexkJ" colab_type="code" outputId="1485fc0e-1d84-49ef-b9e5-982f4eb1a1d4" colab={"base_uri": "https://localhost:8080/", "height": 268}
grouped_orders['product_id'].count().hist();
# + id="GBdzK8Fdez43" colab_type="code" colab={"base_uri": "https://localhost:8080/", "height": 268} outputId="455e8245-e7e6-49ea-ef88-bb380f8b71f1"
grouped_orders['product_id'].count().hist(bins=50);
# + id="vB5oviPurlnF" colab_type="code" colab={"base_uri": "https://localhost:8080/", "height": 217} outputId="e35af228-3486-45bb-9eb6-47a38f316422"
orders.head()
# + id="CUT3gA8Xrvl3" colab_type="code" colab={"base_uri": "https://localhost:8080/", "height": 34} outputId="63ce4670-2b0b-4f5c-a82d-110dd7287352"
orders.shape
# + id="FQm5CqQdrxeX" colab_type="code" colab={}
condition = (orders['user_id'] == 1) & (orders['order_number'] <= 2)
columns = ['order_id','user_id', 'order_number', 'order_dow', 'order_hour_of_day']
subset = orders[condition][columns]
# + id="BVf9GDNmr2gg" colab_type="code" colab={"base_uri": "https://localhost:8080/", "height": 107} outputId="a2884e30-ccd6-4409-adc9-a8115ccb4148"
subset.head()
# + [markdown] id="xRCQvadLr9cF" colab_type="text"
# # **Merge Dataframes**
# + id="MnXXyh23r72u" colab_type="code" colab={"base_uri": "https://localhost:8080/", "height": 217} outputId="57a5e962-8fcc-4b3d-c12c-178b06563071"
columns = ['order_id','product_id','add_to_cart_order']
merged = pd.merge(subset, order_products[columns])
merged.head()
# + id="Z8sh1YzKsYnl" colab_type="code" colab={"base_uri": "https://localhost:8080/", "height": 311} outputId="b6529daf-8a28-4eb1-f268-8bcd609a2896"
display(example)
# + id="53udVGs_sfdm" colab_type="code" colab={"base_uri": "https://localhost:8080/", "height": 301} outputId="aaedce14-e671-4a5f-df46-4f00dbcfc826"
final = pd.merge(merged, products[['product_id', 'product_name']])
final.head()
# + id="1idrXdbMsjT1" colab_type="code" colab={"base_uri": "https://localhost:8080/", "height": 614} outputId="ce54e70c-f05d-44b6-e74d-2cd0e4a9c2e0"
columns = ['user_id', 'order_id', 'order_number','order_dow','order_hour_of_day','add_to_cart_order', 'product_id','product_name']
final = final[columns]
final
# + id="r6BwxmgtsnqU" colab_type="code" colab={"base_uri": "https://localhost:8080/", "height": 614} outputId="0bb6f669-6ddf-4bf8-b23a-cbb0a3f26ee2"
final = final.sort_values(by=['order_number', 'add_to_cart_order'])
final
# + id="TZRb1UqNs0Gi" colab_type="code" colab={"base_uri": "https://localhost:8080/", "height": 158} outputId="7795926e-ea64-422c-a953-df4681b6fecc"
columns = [col.replace('_', ' ') for col in final.columns]
columns
# + id="FWZsYBX0s6-2" colab_type="code" colab={"base_uri": "https://localhost:8080/", "height": 461} outputId="96a840ff-57cf-4bbb-93ad-e5db8b1a045c"
final.columns = columns
final
# + id="69FJSrQns977" colab_type="code" colab={"base_uri": "https://localhost:8080/", "height": 311} outputId="9f40027f-f0df-47d1-bfde-89d6510f86aa"
display(example)
# + [markdown] id="mnOuqL9K0dqh" colab_type="text"
# ## Join Data Stretch Challenge
#
# The [Instacart blog post](https://tech.instacart.com/3-million-instacart-orders-open-sourced-d40d29ead6f2) has a visualization of "**Popular products** purchased earliest in the day (green) and latest in the day (red)."
#
# The post says,
#
# > "We can also see the time of day that users purchase specific products.
#
# > Healthier snacks and staples tend to be purchased earlier in the day, whereas ice cream (especially Half Baked and The Tonight Dough) are far more popular when customers are ordering in the evening.
#
# > **In fact, of the top 25 latest ordered products, the first 24 are ice cream! The last one, of course, is a frozen pizza.**"
#
# Your challenge is to reproduce the list of the top 25 latest ordered popular products.
#
# We'll define "popular products" as products with more than 2,900 orders.
#
#
# + id="B-QNMrVkYap4" colab_type="code" colab={}
##### YOUR CODE HERE #####
# + [markdown] id="Ij8S60q0YXxo" colab_type="text"
# ## Reshape Data Stretch Challenge
#
# _Try whatever sounds most interesting to you!_
#
# - Replicate more of Instacart's visualization showing "Hour of Day Ordered" vs "Percent of Orders by Product"
# - Replicate parts of the other visualization from [Instacart's blog post](https://tech.instacart.com/3-million-instacart-orders-open-sourced-d40d29ead6f2), showing "Number of Purchases" vs "Percent Reorder Purchases"
# - Get the most recent order for each user in Instacart's dataset. This is a useful baseline when [predicting a user's next order](https://www.kaggle.com/c/instacart-market-basket-analysis)
# - Replicate parts of the blog post linked at the top of this notebook: [Modern Pandas, Part 5: Tidy Data](https://tomaugspurger.github.io/modern-5-tidy.html)
# + id="_d6IA2R0YXFY" colab_type="code" colab={}
##### YOUR CODE HERE #####
| Assignment_Nicholas_George_LS_DS_121_Join_and_Reshape_Data.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# name: python3
# ---
# + id="Ws10ljqALYWy"
#using tutorial https://www.tensorflow.org/tutorials/text/text_generation#advanced_customized_training
# + id="cuoIaLhMy2VI"
import pandas as pd
import numpy as np
#more imports
from tensorflow.keras.callbacks import LambdaCallback
from tensorflow.keras.preprocessing import sequence
from tensorflow.keras.models import Sequential
from tensorflow.keras.layers import Dense, Embedding
from tensorflow.keras.layers import LSTM
from tensorflow.keras.optimizers import RMSprop
import random
import sys
import os
import time
import tensorflow as tf
import tensorflow_datasets as tfds
# + id="q_tf0I3kgLJ0"
#for saving the model
# !pip install -q pyyaml h5py
# + id="uNpVhQWDzXHc"
# Read in the data
df = pd.read_csv("ufo_update.csv")
text_ = " "
# + colab={"base_uri": "https://localhost:8080/", "height": 289} id="Je5J_-mSzYwj" outputId="dffdc089-a349-411d-e0de-7dd20eccf405"
df.head()
# + colab={"base_uri": "https://localhost:8080/", "height": 35} id="0b9TbJ5Jzbuj" outputId="95e03f72-ad86-4860-a046-df4669d0ee33"
df['text'][0]
# + colab={"base_uri": "https://localhost:8080/"} id="QJdd_5bojrR3" outputId="b1d0d2fc-7705-4226-d9f1-f067d0b63a46"
df.shape
# + id="ZdkQ-pj7cOBk"
# Work on a 1000-row subset to keep char-RNN training time manageable.
#sub = df.sample(frac=.5)
sub = df.iloc[:1000]
# + colab={"base_uri": "https://localhost:8080/", "height": 289} id="mzR5bdKNhU0T" outputId="114dc7ea-e459-4b07-eab6-192b10c41941"
sub.head()
# + colab={"base_uri": "https://localhost:8080/"} id="kfAFO-C159TY" outputId="8052e46d-c7e6-42b1-aad3-5cb7be0dc18c"
# Coerce to str so NaN rows become the literal 'nan' instead of breaking join().
sub['text'] = sub['text'].astype(str)
# + id="02GIXVWUQCV4"
# Concatenate every report into one long training string.
text = ' '.join(sub['text'].tolist())
# + id="4ble0euwOTRC"
#Clean text
import re
from bs4 import BeautifulSoup
def cleanText(txt):
    """Strip HTML and disallowed characters from *txt*.

    Keeps letters, digits and the punctuation set _.,!'/$ ; every other
    character becomes a space, and runs of whitespace collapse to a single
    space. Returns the cleaned string.
    """
    # BUG FIX: always name the parser explicitly. BeautifulSoup(txt) picks
    # whichever parser happens to be installed, which emits a warning and can
    # extract slightly different text between environments.
    txt = BeautifulSoup(txt, "html.parser").get_text()
    txt = txt.replace('\t', '')
    txt = txt.replace('\x9d', '')  # stray Windows-1252 control character
    txt = txt.replace('\xa0', '')  # non-breaking space
    #remove all non letters from text
    txt = re.sub("[^A-Za-z0-9_.,!'/$]", " ", txt)
    # split()/join collapses any whitespace run to one space and trims ends
    txt = txt.split()
    txt = " ".join(txt)
    return txt
# + id="2CsUOo6RQAYy"
text = cleanText(text)
# + id="Rk1NK8bx8q5k" colab={"base_uri": "https://localhost:8080/"} outputId="374159bb-f847-4822-ba6c-0a8e6b46208e"
# Encode the data as Chars
#get all unique chars — this sorted list is the model's vocabulary
chars = sorted(list(set(text)))
print('unique characters', format(len(chars)))
# + id="deut8LJ3MHiU"
#create a mapping from unique characters to indices (and the inverse lookup)
char_indices = {u:i for i, u in enumerate(chars)}
indices_char = np.array(chars)
text_as_int = np.array([char_indices[c] for c in text])
# + colab={"base_uri": "https://localhost:8080/"} id="6il9wQF1Nw4M" outputId="ce0dc02b-57c3-477f-f4b9-921ef02dc778"
# Preview the first 70 char -> index mappings.
print('{')
for char,_ in zip(char_indices, range(70)):
    print('  {:4s}: {:3d},'.format(repr(char), char_indices[char]))
print('  ...\n}')
# + colab={"base_uri": "https://localhost:8080/"} id="pGmV5v-JQZ_h" outputId="d823c0a7-31d4-4a2e-cc27-ccaacb602427"
# The maximum length sentence you want for a single input in characters
seq_length = 100 #try adjusting sequence length
examples_per_epoch = len(text)//(seq_length+1)
# Create training examples / targets
char_dataset = tf.data.Dataset.from_tensor_slices(text_as_int)
for i in char_dataset.take(5):
    print(indices_char[i.numpy()]) #do not alias numpy as np
# + colab={"base_uri": "https://localhost:8080/"} id="_enOcFOcUce6" outputId="17d0d874-fddf-44aa-dd12-9a9042029294"
#The batch method lets us easily convert these individual characters to sequences of the desired size.
# seq_length+1 because each window is split into (input, target) shifted by one.
sequences = char_dataset.batch(seq_length+1, drop_remainder=True)
for item in sequences.take(5):
    print(repr(''.join(indices_char[item.numpy()]))) #do not alias numpy as np
# + id="8EsQHBrsWw-z"
#For each sequence, duplicate and shift it to form the input and target text by
#using the map method to apply a simple function to each batch:
def split_input_target(chunk):
    """Split a length-(n+1) sequence into an (input, target) pair.

    The input is the sequence without its last element; the target is the
    sequence shifted left by one, so target[i] is the "next character"
    for input[i].
    """
    return chunk[:-1], chunk[1:]
dataset = sequences.map(split_input_target)
# + colab={"base_uri": "https://localhost:8080/"} id="tBe2CejSW-nA" outputId="7a354a19-0b52-4f28-e8f0-7bd3e1419010"
#Print the first example input and target values:
for input_example, target_example in dataset.take(1):
    print('Input data: ', repr(''.join(indices_char[input_example.numpy()])))
    print('Target data:', repr(''.join(indices_char[target_example.numpy()])))
# + colab={"base_uri": "https://localhost:8080/"} id="GwEK0pH0YU_S" outputId="120f11fc-df37-42f3-98e0-0e97d6c6b812"
#each index of these vectors is processed as a one time step. For the input at
#time step 0, the model receives the index for 'T' and tries to predict the
#index for i as the next character. At the next timestep, it does the same thing
#but the RNN considers the previous step context in addition to the current input
#character
for i, (input_idx, target_idx) in enumerate(zip(input_example[:5], target_example[:5])):
    print("Step {:4d}".format(i))
    print("  input: {} ({:s})".format(input_idx, repr(indices_char[input_idx])))
    print("  expected output: {} ({:s})".format(target_idx, repr(indices_char[target_idx])))
# + colab={"base_uri": "https://localhost:8080/"} id="xgA8opVIZKD1" outputId="38ce232e-eb25-49f9-c792-1d75804f09f1"
#Create training batches - you can use tf.data to split the text into manageable
#sequences. But before feeding this data into the model, you need to shuffle the
#data and pack it into batches
# Batch size
BATCH_SIZE = 64
# Buffer size to shuffle the dataset
# (TF data is designed to work with possibly infinite sequences,
# so it doesn't attempt to shuffle the entire sequence in memory. Instead,
# it maintains a buffer in which it shuffles elements).
BUFFER_SIZE = 10000
dataset = dataset.shuffle(BUFFER_SIZE).batch(BATCH_SIZE, drop_remainder=True)
dataset
# + [markdown] id="OyIAOv_Dirgy"
# Build the model
# For each character the model looks up the embedding, runs the GRU one timestep
# with the embedding as input, and applies the dense layer to generate logits
# predicting the log-likelihood of the next character
# + id="-svJLMj4Zh9h"
# Length of the vocabulary in chars
vocab_size = len(chars)
# The embedding dimension
embedding_dim = 256
# Number of RNN units
rnn_units = 1024
# + id="2yhDhLqxdEtV"
def build_model(vocab_size, embedding_dim, rnn_units, batch_size):
    """Assemble the char-level language model: Embedding -> GRU -> Dense logits.

    The GRU is stateful with a fixed batch dimension, so the same function is
    reused later with batch_size=1 for generation.
    """
    layers = [
        tf.keras.layers.Embedding(
            vocab_size, embedding_dim,
            batch_input_shape=[batch_size, None]),
        tf.keras.layers.GRU(
            rnn_units,
            return_sequences=True,
            stateful=True,
            recurrent_initializer='glorot_uniform'),
        # Raw logits over the vocabulary (no softmax; loss uses from_logits=True).
        tf.keras.layers.Dense(vocab_size),
    ]
    return tf.keras.Sequential(layers)
# + id="G0CpuU3idJMq" colab_type="code" colab={}
model = build_model(
    vocab_size=len(chars),
    embedding_dim=embedding_dim,
    rnn_units=rnn_units,
    batch_size=BATCH_SIZE)
# + colab={"base_uri": "https://localhost:8080/"} id="_CsdktNrdviW" outputId="fdd94c8d-be0f-4807-cc9c-469f5c3f4ead"
#check the shape of the output
for input_example_batch, target_example_batch in dataset.take(1):
    example_batch_predictions = model(input_example_batch)
    print(example_batch_predictions.shape, "# (batch_size, sequence_length,vocab_size)")
# + colab={"base_uri": "https://localhost:8080/"} id="PC6h-PPsd_Y6" outputId="f1bbec48-3c4a-4555-9408-56cc2125daa4"
model.summary()
# + colab={"base_uri": "https://localhost:8080/"} id="vZj_eLAueXkk" outputId="a9ca5528-1424-4c44-c6a2-0aba06569610"
#to get prediction from the model you need to sample from the output
#distribution to get actual character indices
sampled_indices = tf.random.categorical(example_batch_predictions[0], num_samples=1)
sampled_indices = tf.squeeze(sampled_indices,axis=-1).numpy()
sampled_indices
# + colab={"base_uri": "https://localhost:8080/"} id="9jLZvLQwfAqI" outputId="f06e201b-3222-4d3f-8188-20eb2663c04f"
#decode these to see the text predicted by this untrained model
print("Input: \n", repr("".join(indices_char[input_example_batch[0]])))
print()
print("Next Char Predictions: \n", repr("".join(indices_char[sampled_indices ])))
# + colab={"base_uri": "https://localhost:8080/"} id="GXHl4ugAfbRa" outputId="77eea393-1dee-4a6b-fa8e-729dcf38b847"
#train the model - at this point problem can be treated as a standard
#classification problem. Given the previous RNN state, and the input this time
#step, predict the class of the next character
# from_logits=True because the model's Dense layer outputs raw logits.
def loss(labels, logits):
    return tf.keras.losses.sparse_categorical_crossentropy(labels, logits, from_logits=True)
example_batch_loss = loss(target_example_batch, example_batch_predictions)
print("Prediction shape: ", example_batch_predictions.shape, " # (batch_size, sequence_length, vocab_size)")
print("scalar_loss:      ", example_batch_loss.numpy().mean())
# + id="u0EXdpb8f2on"
#Configure the training procedure using the tf.keras.Model.compile method.
#Use tf.keras.optimizers.Adam with default arguments and the loss function.
model.compile(optimizer='adam', loss=loss, metrics=['accuracy'])
# + id="ViJLYLS_gFU9"
#Use a tf.keras.callbacks.ModelCheckpoint to ensure that checkpoints
#are saved during training:
# Directory where the checkpoints will be saved
checkpoint_dir = './training_checkpoints'
# Name of the checkpoint files — {epoch} is filled in by the callback.
checkpoint_prefix = os.path.join(checkpoint_dir, "ckpt_{epoch}")
checkpoint_callback = tf.keras.callbacks.ModelCheckpoint(
    filepath=checkpoint_prefix,
    save_weights_only=True)
# + colab={"base_uri": "https://localhost:8080/"} id="N3cFqo3CgO-e" outputId="65a05291-7b29-4d89-b827-e819733985aa"
EPOCHS = 30
history = model.fit(dataset, epochs=EPOCHS, callbacks=[checkpoint_callback])
# + [markdown] id="bxmnObfkimYm"
# Generate text - to keep prediction step simple use a batch size of 1
# because of the way the RNN state is passed from timestep to timestep, the
# model only accepts a fixed batch size once built. To run the model with a diff
# batch size, you need to rebuild the model and restore the weights from the checkpoint.
#
# Looking at the genearated text, the model knows when to capitalize, make
# paragraphs and imitate the vocabulary. With a small # of epochs it has not learned to form coherent sentences
# + colab={"base_uri": "https://localhost:8080/", "height": 35} id="gD_RtjpdgdIt" outputId="00cf38c6-e63f-4506-b830-2631623ff04b"
tf.train.latest_checkpoint(checkpoint_dir)
# + id="5eFBGQ54g8h7"
# Rebuild with batch_size=1 for generation and restore the trained weights.
model = build_model(vocab_size, embedding_dim, rnn_units, batch_size=1)
model.load_weights(tf.train.latest_checkpoint(checkpoint_dir))
model.build(tf.TensorShape([1, None]))
# + colab={"base_uri": "https://localhost:8080/"} id="OX_mOUmWg_NT" outputId="57a8585e-c96a-499d-95a4-70b5ea4cdfe3"
model.summary()
# + id="FhPmP1N0oyB6"
#save those weights
# NOTE(review): checkpoint_dir contains no '{epoch}' placeholder, so
# .format(epoch=0) is a no-op here — the weights land at './training_checkpoints'.
model.save_weights(checkpoint_dir.format(epoch=0))
# + colab={"base_uri": "https://localhost:8080/", "height": 35} id="2TK6VB9-pRaV" outputId="79023906-36f0-41d5-c2f2-29c468a29c7a"
latest = tf.train.latest_checkpoint(checkpoint_dir)
latest
# + id="WsGIuqTepuwd"
#create and train new model instance
# NOTE(review): this instance is built with batch_size=1 but `dataset` is
# batched at 64 — confirm fit() below is intended with this mismatch.
model = build_model(vocab_size, embedding_dim, rnn_units, batch_size=1)
# + id="RytWF0d4t3yT"
model.fit(dataset, epochs=5)
# + id="gYI8VctBs4Yu"
#save entire model to HDF5 file
#the '.h5' extension indicates that the model should be saved to HDF5
# NOTE(review): filename below says '.hf', not '.h5' — likely a typo; as written
# Keras will not use the HDF5 format implied by the comment above.
model.save('my_model.hf')
# + [markdown] id="nAUjuX7qif9o"
# The prediction loop: the following code generates the text.
# begin by choosing a start string, initializing the RNN state and setting the
# number of characters to generate. Get the prediction distribution of the next
# character using the start string and the RNN state. Then use a categorical
# distribution to calculate the index of the predicted character. Use this
# predicted character as our next input to the model. The RNN state returned by
# the model is fed back into the model so that it now has more context, instead
# only one character. After predicting the next character, the modified RNN states
# are again fed back into the model, which is how it learns more context from the
# previously predicted characters
# + id="aQkwgTwJiPlB"
def generate_text(model, start_string):
    """Sample 1000 characters from the trained model, seeded with start_string.

    The model must have been built with batch_size=1; its recurrent state is
    reset once, then carried forward across the whole sampling loop.
    """
    # Number of characters to generate
    num_generate = 1000

    # Low temperature results in more predictable text.
    # Higher temperature results in more surprising text.
    # Experiment to find the best setting.
    temperature = 1.0

    # Vectorize the seed string and add a leading batch dimension (batch == 1).
    model_input = tf.expand_dims([char_indices[ch] for ch in start_string], 0)

    generated_chars = []
    model.reset_states()
    for _ in range(num_generate):
        # Drop the batch dimension from the (1, seq_len, vocab) logits.
        logits = tf.squeeze(model(model_input), 0)
        # Sample the next character id from the temperature-scaled distribution,
        # using only the prediction for the final timestep.
        next_id = tf.random.categorical(logits / temperature, num_samples=1)[-1, 0].numpy()
        # Feed the prediction back in as the next input; the RNN's hidden state
        # carries the rest of the context.
        model_input = tf.expand_dims([next_id], 0)
        generated_chars.append(indices_char[next_id])

    return (start_string + ''.join(generated_chars))
# + id="4r5WyUgMjGMc"
print(generate_text(model, start_string=u"I saw a little green man "))
# + [markdown] id="dKXXEvYSbsXg"
# SAVE the model
# + id="FmGZXJ-Oj9DU"
# checkpoint_path = "training_1/cp.ckpt"
# checkpoint_dir = os.path.dirname(checkpoint_path)
# + id="ffxBOjKAktPj"
# Create a callback that saves the model's weights
# cp_callback = tf.keras.callbacks.ModelCheckpoint(filepath=checkpoint_path,
#                                                  save_weights_only=True,
#                                                  verbose=1)
# + id="26QM3YpklhPz"
# Recompile the (batch_size=1) model before the extra training run below.
model.compile(optimizer='adam', loss=loss, metrics=['accuracy'])
# + id="aM5AF8LSlDMl"
# Train the model with the new callback
model.fit(dataset,
          epochs=EPOCHS,
          callbacks=[checkpoint_callback])  # Pass callback to training
# + id="FZdwkQJ2lrwz"
# model = build_model(
# vocab_size=len(chars),
# embedding_dim=embedding_dim,
# rnn_units=rnn_units,
# batch_size=BATCH_SIZE)
# + id="7-Y94pZYl2W1"
# optimizer = tf.keras.optimizers.Adam()
# + id="E7OSbDbsl6Ey"
# @tf.function
# def train_step(inp, target):
# with tf.GradientTape() as tape:
# predictions = model(inp)
# loss = tf.reduce_mean(
# tf.keras.losses.sparse_categorical_crossentropy(
# target, predictions, from_logits=True))
# grads = tape.gradient(loss, model.trainable_variables)
# optimizer.apply_gradients(zip(grads, model.trainable_variables))
# return loss
# + id="DLMpdM5Xl-PC"
# Training step
# EPOCHS = 10
# for epoch in range(EPOCHS):
# start = time.time()
# # resetting the hidden state at the start of every epoch
# model.reset_states()
# for (batch_n, (inp, target)) in enumerate(dataset):
# loss = train_step(inp, target)
# if batch_n % 100 == 0:
# template = 'Epoch {} Batch {} Loss {}'
# print(template.format(epoch + 1, batch_n, loss))
# # saving (checkpoint) the model every 5 epochs
# if (epoch + 1) % 5 == 0:
# model.save_weights(checkpoint_prefix.format(epoch=epoch))
# print('Epoch {} Loss {:.4f}'.format(epoch + 1, loss))
# print('Time taken for 1 epoch {} sec\n'.format(time.time() - start))
# model.save_weights(checkpoint_prefix.format(epoch=epoch))
# + id="BC4yhUp0MwXr"
# from keras.optimizers import RMSprop
# from keras.layers import Dense, Activation #move later
# model = Sequential()
# model.add(LSTM(128, input_shape=(maxlen, len(chars))))
# model.add(Dense(len(chars)))
# model.add(Activation('softmax'))
# optimizer = RMSprop(lr=0.01)
# model.compile(loss='categorical_crossentropy', optimizer=optimizer, metrics=['accuracy'])
# + id="vwDl7MlnsBaA"
# def sample(preds, temperature=1.0):
# # helper function to sample an index from a probability array
# preds = np.asarray(preds).astype('float64')
# preds = np.log(preds) / temperature
# exp_preds = np.exp(preds)
# preds = exp_preds / np.sum(exp_preds)
# probas = np.random.multinomial(1, preds, 1)
# return np.argmax(probas)
# + id="t1W3L_YCsFOM"
# def on_epoch_end(epoch, _):
# # Function invoked at end of each epoch. Prints generated text.
# print()
# print('----- Generating text after Epoch: %d' % epoch)
# start_index = random.randint(0, len(text) - maxlen - 1)
# for diversity in [0.2, 0.5, 1.0, 1.2]:
# print('----- diversity:', diversity)
# generated = ''
# sentence = text[start_index: start_index + maxlen]
# generated += sentence
# print('----- Generating with seed: "' + sentence + '"')
# sys.stdout.write(generated)
# for i in range(400):
# x_pred = np.zeros((1, maxlen, len(chars)))
# for t, char in enumerate(sentence):
# x_pred[0, t, char_indices[char]] = 1.
# preds = model.predict(x_pred, verbose=0)[0]
# next_index = sample(preds, diversity)
# next_char = indices_char[next_index]
# generated += next_char
# sentence = sentence[1:] + next_char
# sys.stdout.write(next_char)
# sys.stdout.flush()
# print()
# print_callback = LambdaCallback(on_epoch_end=on_epoch_end)
# + id="IGcVaeWzJzYf"
# history_2 = model.fit(x, y,
# batch_size=128,
# epochs=100,
# callbacks=[print_callback])
# + id="FAXL6PvPsIO-"
# history_3 = model.fit(x, y,
# batch_size=128,
# epochs=150,
# callbacks=[print_callback])
| notebooks/ufo_by_char.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: 'Python 3.6.12 64-bit (''rl'': conda)'
# metadata:
# interpreter:
# hash: accc2f43be2e1f6f04b542269f945672579f4d887acf3251fbd55070909a0f05
# name: python3
# ---
# +
import torch
import torch.nn as nn
from torch.distributions import Categorical
import gym, os
from itertools import count
import torch.optim as optim
import torch.nn.functional as F
from torch.autograd import Variable
import matplotlib.pyplot as plt
import numpy as np
from math import log2
import pdb
device = torch.device("cuda:0" if torch.cuda.is_available() else "cpu")
# -
class Model(nn.Module):
    """Actor-critic network for discrete actions, with built-in rollout memory.

    The lists below double as a replay buffer for one PPO rollout: each call
    to forward() in acting mode appends to them, and the trainer clears them
    via clearMemory() after an update.
    """

    def __init__(self, state_dim, action_dim, n_latent_var):
        super(Model, self).__init__()
        # NOTE(review): this layer is never used by forward(); it looks like
        # leftover scaffolding — confirm before removing.
        self.affine = nn.Linear(state_dim, n_latent_var)

        # actor: state -> action probabilities (softmax over action_dim)
        self.action_layer = nn.Sequential(
            nn.Linear(state_dim, n_latent_var),
            nn.Tanh(),
            nn.Linear(n_latent_var, n_latent_var),
            nn.Tanh(),
            nn.Linear(n_latent_var, n_latent_var),
            nn.Tanh(),
            nn.Linear(n_latent_var, n_latent_var),
            nn.Tanh(),
            nn.Linear(n_latent_var, action_dim),
            nn.Softmax(dim = -1)
        )

        # critic: state -> scalar state value
        self.value_layer = nn.Sequential(
            nn.Linear(state_dim, n_latent_var),
            nn.Tanh(),
            nn.Linear(n_latent_var, n_latent_var),
            nn.Tanh(),
            nn.Linear(n_latent_var, n_latent_var),
            nn.Tanh(),
            nn.Linear(n_latent_var, n_latent_var),
            nn.Tanh(),
            nn.Linear(n_latent_var, 1)
        )

        # Memory: one entry per forward() call in acting mode
        self.actions = []       # sampled action tensors
        self.states = []        # filled externally by the training loop
        self.logprobs = []      # log pi(a|s) for each stored action
        self.state_values = []  # critic output for each stored state
        self.rewards = []       # filled externally by the training loop

    def forward(self, state, action=None, evaluate=False):
        # if evaluate is True then we also need to pass an action for evaluation
        # else we return a new action from distribution
        if not evaluate:
            # acting mode receives a raw numpy state; evaluate mode receives a tensor
            state = torch.from_numpy(state).float().to(device)
        state_value = self.value_layer(state)
        action_probs = self.action_layer(state)
        action_distribution = Categorical(action_probs)
        if not evaluate:
            action = action_distribution.sample()
            self.actions.append(action)
        # store log-prob and value for the (sampled or supplied) action
        self.logprobs.append(action_distribution.log_prob(action))
        self.state_values.append(state_value)
        if evaluate:
            # evaluation returns the mean policy entropy (used as a loss bonus)
            return action_distribution.entropy().mean()
        if not evaluate:
            return action.item()

    def clearMemory(self):
        # Drop the rollout buffers in place (slice-delete keeps the list objects).
        del self.actions[:]
        del self.states[:]
        del self.logprobs[:]
        del self.state_values[:]
        del self.rewards[:]
class PPO:
    """Proximal Policy Optimization trainer.

    Keeps two copies of the actor-critic `Model`: `policy` (updated by
    gradient descent) and `policy_old` (collects rollouts and anchors the
    importance-sampling ratio). `update()` consumes the rollout stored in
    `policy_old`'s memory lists.
    """

    def __init__(self, state_dim, action_dim, n_latent_var, lr, betas, gamma, K_epochs, eps_clip):
        self.lr = lr                # Adam learning rate
        self.betas = betas          # Adam (beta1, beta2)
        self.gamma = gamma          # reward discount factor
        self.eps_clip = eps_clip    # PPO surrogate clipping parameter
        self.K_epochs = K_epochs    # optimization epochs per update
        self.policy = Model(state_dim, action_dim, n_latent_var).to(device)
        self.optimizer = torch.optim.Adam(self.policy.parameters(),
                                          lr=lr, betas=betas)
        self.policy_old = Model(state_dim, action_dim, n_latent_var).to(device)
        # BUG FIX: start the old policy in sync with the current policy.
        # Previously the two networks kept independent random initializations
        # until the first update, making the first probability ratios meaningless.
        self.policy_old.load_state_dict(self.policy.state_dict())
        self.MseLoss = nn.MSELoss()
        self.kl = 0                 # last KL(old || new) estimate, read by the training loop

    def update(self):
        """Run K_epochs of clipped-surrogate PPO steps on the stored rollout."""
        # Monte Carlo estimate of state rewards (returns), computed backwards:
        rewards = []
        discounted_reward = 0
        for reward in reversed(self.policy_old.rewards):
            discounted_reward = reward + (self.gamma * discounted_reward)
            rewards.insert(0, discounted_reward)

        # Normalizing the rewards:
        rewards = torch.tensor(rewards).to(device)
        rewards = (rewards - rewards.mean()) / (rewards.std() + 1e-5)

        # convert lists to tensors
        old_states = torch.tensor(self.policy_old.states).to(device).detach()
        old_actions = torch.tensor(self.policy_old.actions).to(device).detach()
        old_logprobs = torch.tensor(self.policy_old.logprobs).to(device).detach()

        # Optimize policy for K epochs:
        for _ in range(self.K_epochs):
            # Evaluating old actions and values (fills policy's memory lists
            # and returns the mean entropy bonus):
            dist_entropy = self.policy(old_states, old_actions, evaluate=True)

            # Finding the ratio (pi_theta / pi_theta__old):
            logprobs = self.policy.logprobs[0].to(device)
            ratios = torch.exp(logprobs - old_logprobs.detach())

            # BUG FIX: F.kl_div requires both an input and a target tensor, so
            # the previous single-argument call raised a TypeError. Use the
            # standard sample-based estimate KL(old||new) = E[logp_old - logp_new].
            self.kl = (old_logprobs.detach() - logprobs).mean().item()

            # Finding Surrogate Loss:
            state_values = self.policy.state_values[0].to(device)
            advantages = rewards - state_values.squeeze().detach()
            surr1 = ratios * advantages
            surr2 = torch.clamp(ratios, 1 - self.eps_clip, 1 + self.eps_clip) * advantages
            loss = -torch.min(surr1, surr2) + 0.5 * self.MseLoss(state_values, rewards) - 0.01 * dist_entropy

            # take gradient step
            self.optimizer.zero_grad()
            loss.mean().backward()
            self.optimizer.step()
            # Clear the current policy's memory each epoch so logprobs[0] /
            # state_values[0] refer to this epoch's fresh forward pass (and the
            # freed autograd graph is never backpropagated through twice).
            self.policy.clearMemory()

        self.policy_old.clearMemory()
        # Copy new weights into old policy:
        self.policy_old.load_state_dict(self.policy.state_dict())
# +
# Build the MilkFactory game (multi-objective grid world from the `fruit`
# RL framework) and wrap it in the generic FruitEnvironment interface.
import fruit.envs.games.milk_factory.engine as mf
from fruit.envs.juice import FruitEnvironment
game = mf.MilkFactory(render=False, speed=6000, max_frames=200, frame_skip=1, number_of_milk_robots=2, number_of_fix_robots=1, number_of_milks=2, seed=None, human_control=False, error_freq=0.01, human_control_robot=0, milk_speed=3, debug=False, action_combined_mode=False, show_status=False,number_of_exits=2)
env = FruitEnvironment(game)
print(env.get_number_of_objectives())
print(env.get_number_of_agents()) # here number of agents is the number of objectives
# +
# Training hyper-parameters.
state_dim = 300*360*3  # flattened 300x360 RGB observation
action_dim = 5
n_obj = env.get_number_of_objectives()
n_episodes = 30
max_timesteps = 500
kl_param = 0.1  # weight of the KL shaping term added to the reward below
log_interval = 10
n_latent_var = 64 # number of variables in hidden layer
lr = 0.0007
betas = (0.9, 0.999)
gamma = 0.99 # discount factor
K_epochs = 4 # update policy for K epochs
eps_clip = 0.2 # clip parameter for PPO
random_seed = None
if random_seed:
    torch.manual_seed(random_seed)
    env.seed(random_seed)
# checkpoint location (objective index is appended to the filename when saving)
filename = "PPO_MilkCollector.pth"
directory = "./preTrained/"
# +
# Multi-objective training: objective 0 is trained on the raw reward; the
# remaining objectives add a KL shaping term (kl_param * ppo.kl) that keeps
# them close to the previously learned behaviour.
ppo = PPO(state_dim, action_dim, n_latent_var, lr, betas, gamma, K_epochs, eps_clip)
running_reward = 0
rewards = np.zeros((n_episodes,n_obj))
for ep in range(1, n_episodes+1):
    # NOTE(review): each episode starts from a random vector instead of
    # env.get_state() -- presumably a placeholder; confirm intended.
    state = np.random.rand((300*360*3))
    for t in range(max_timesteps):
        # Running policy_old:
        action = ppo.policy_old(state)
        reward = env.step(action)
        state_n = env.get_state()
        done = env.is_terminal()
        # Saving state and reward:
        ppo.policy_old.states.append(state)
        ppo.policy_old.rewards.append(reward)
        state = np.array([state_n]).reshape(1,300*360*3)
        running_reward +=reward
        if done:
            # save model
            torch.save(ppo.policy.state_dict(), directory+filename+str(0))
            rewards[ep-1,0]= running_reward
            break
    running_reward = 0
    # remaining objectives: same rollout, but reward is shaped by the KL term
    for k in range(1,n_obj):
        state = np.random.rand((300*360*3))
        for t in range(max_timesteps):
            # Running policy_old:
            action = ppo.policy_old(state)
            reward = env.step(action)
            state_n = env.get_state()
            done = env.is_terminal()
            kl = ppo.kl
            new_reward = reward + (kl_param*kl)
            # Saving state and reward:
            ppo.policy_old.states.append(state)
            ppo.policy_old.rewards.append(new_reward)
            state = np.array([state_n]).reshape(1,300*360*3)
            running_reward += new_reward
            if done:
                # save model
                torch.save(ppo.policy.state_dict(), directory+filename+str(k))
                rewards[ep-1,k]= running_reward
                break
    print('Episode: {}\tReward: {}'.format(ep, rewards[ep-1,:]))
    running_reward = 0
# Plot per-objective episode rewards.
plt.plot(np.arange(len(rewards[:,0])), rewards[:,0], label = 'reward1')
plt.plot(np.arange(len(rewards[:,1])), rewards[:,1], label = 'reward2')
plt.ylabel('Total Reward')
plt.xlabel('Episode')
plt.savefig('milkfactory',bbox_inches='tight',facecolor="#FFFFFF")
plt.show()
# +
# Baseline single-objective run (no KL shaping), for comparison.
ppo = PPO(state_dim, action_dim, n_latent_var, lr, betas, gamma, K_epochs, eps_clip)
running_reward = 0
rewards = []
for ep in range(1, n_episodes+1):
    # BUGFIX: this used np.random.rand((300*325*3)), which does not match
    # state_dim = 300*360*3 expected by the network and by the reshape below.
    state = np.random.rand((300*360*3))
    for t in range(max_timesteps):
        # Running policy_old:
        action = ppo.policy_old(state)
        reward = env.step(action)
        state_n = env.get_state()
        done = env.is_terminal()
        # Saving state and reward:
        ppo.policy_old.states.append(state)
        ppo.policy_old.rewards.append(reward)
        state = np.array([state_n]).reshape(1,300*360*3)
        running_reward +=reward
        if done:
            # save model
            torch.save(ppo.policy.state_dict(), directory+filename)
            rewards.append(running_reward)
            break
    print('Episode: {}\tReward: {}'.format(ep, int(running_reward)))
    running_reward = 0
# Plot the episode reward curve.
plt.plot(np.arange(len(rewards)), rewards, label = 'reward1')
plt.ylabel('Total Reward')
plt.xlabel('Episode')
plt.savefig('foodcollector_base',bbox_inches='tight',facecolor="#FFFFFF")
plt.show()
| ppo_milkfactory.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 2
# language: python
# name: python2
# ---
# # <center> Train the LSTM network with word2vec embeddings </center>
# This notebook is very similar to the first lstm training. This time we will train the lstm network on the training and validation data and predict its performance on the test data. My solution is again based on these two guidelines.
# https://www.kaggle.com/lystdo/lstm-with-word2vec-embeddings
# https://blog.keras.io/using-pre-trained-word-embeddings-in-a-keras-model.html
#
#
# Okay now let's import all of the necessary modules:
# +
import os
import gensim
import numpy as np
import pandas as pd
from IPython.display import display
from keras.preprocessing.text import Tokenizer
from keras.preprocessing.sequence import pad_sequences
from unidecode import unidecode
from keras.callbacks import EarlyStopping, ModelCheckpoint
from keras.models import load_model
from sklearn.metrics import log_loss, accuracy_score
from capstone_project import utility
from capstone_project.models import neural_nets
# %matplotlib inline
# -
# Set important constants and load data:
MAX_SEQUENCE_LENGTH = 30 # Maximum length of input for the lstm (maximum number of tokens is 103)
EMBEDDING_DIM = 300 # Length of the used word2vec/glove implementation
USE_RAW_TEXT = True # Use raw text as input instead of tokens
USE_WORD2VEC = True # Use word2vec embedding instead of glove embedding
VALIDATION_SPLIT = 0.1 # Size of the second validation set that is used during the training of the lstm
# +
# Load the pre-tokenized splits; train and (outer) validation data are merged
# because the final model trains on both and is evaluated on the test split.
file_directory = "../output/data/"
prefix = "tokenized_"
train_data_1 = utility.load_pickle(file_directory, prefix+"train_data.pkl")
train_data_2 = utility.load_pickle(file_directory, prefix+"val_data.pkl")
test_data = utility.load_pickle(file_directory, prefix+"test_data.pkl")
joined_data = pd.concat([train_data_1, train_data_2])
labels = joined_data["is_duplicate"].values
test_labels = test_data["is_duplicate"].values
# -
# Make sure that the concatenate worked:
assert len(train_data_1) + len(train_data_2) == len(joined_data)
display(joined_data.head(1))
# Prepare the tokenized question as input for keras:
# +
# Decode again and join strings because keras tokenizer crashes when using unicode while spacy uses it
if USE_RAW_TEXT:
    q1 = joined_data["question1"].values
    q2 = joined_data["question2"].values
else:
    q1 = joined_data["q1_tokens"].apply(lambda x: unidecode(u" ".join(x))).values
    q2 = joined_data["q2_tokens"].apply(lambda x: unidecode(u" ".join(x))).values
all_questions = np.concatenate([q1, q2])
# Fit one tokenizer over both question columns so they share a vocabulary.
tokenizer = Tokenizer()
tokenizer.fit_on_texts(all_questions)
word_index = tokenizer.word_index
number_words = len(word_index)+1 # Needed for embedding layer (index 0 is reserved for padding)
print("Found {} unique tokens".format(len(word_index)))
# Convert questions to integer sequences and pad/truncate to a fixed length.
q1_sequences = tokenizer.texts_to_sequences(q1)
q2_sequences = tokenizer.texts_to_sequences(q2)
q1_data = pad_sequences(q1_sequences, maxlen=MAX_SEQUENCE_LENGTH)
q2_data = pad_sequences(q2_sequences, maxlen=MAX_SEQUENCE_LENGTH)
# -
# Split second validation and training set for validation at every epoch. The dataset size is also doubled by switching the order of the questions. This is done in order to avoid symmetry issues.
# +
# Credit: https://www.kaggle.com/lystdo/lstm-with-word2vec-embeddings
# Random split into train / per-epoch validation; each set is then doubled by
# swapping the question order so the model sees both (q1,q2) and (q2,q1) and
# does not learn an asymmetry between the two inputs.
perm = np.random.permutation(len(q1_data))
idx_train = perm[:int(len(q1_data)*(1-VALIDATION_SPLIT))]
idx_val = perm[int(len(q1_data)*(1-VALIDATION_SPLIT)):]
q12_train = np.concatenate((q1_data[idx_train], q2_data[idx_train]), axis=0)
q21_train = np.concatenate((q2_data[idx_train], q1_data[idx_train]), axis=0)
double_train_labels = np.concatenate((labels[idx_train], labels[idx_train]), axis=0)
q12_val_epochs = np.concatenate((q1_data[idx_val], q2_data[idx_val]), axis=0)
q21_val_epochs = np.concatenate((q2_data[idx_val], q1_data[idx_val]), axis=0)
double_val_epochs_labels = np.concatenate((labels[idx_val], labels[idx_val]), axis=0)
# -
# Prepare test set:
# +
#create correct embeddings for validation data
# Tokenize/pad the test questions with the tokenizer fitted on training data.
if USE_RAW_TEXT:
    q1_test = test_data["question1"].values
    q2_test = test_data["question2"].values
else:
    q1_test = test_data["q1_tokens"].apply(lambda x: unidecode(u" ".join(x))).values
    q2_test = test_data["q2_tokens"].apply(lambda x: unidecode(u" ".join(x))).values
q1_test_sequences = tokenizer.texts_to_sequences(q1_test)
q2_test_sequences = tokenizer.texts_to_sequences(q2_test)
q1_test_data = pad_sequences(q1_test_sequences, maxlen=MAX_SEQUENCE_LENGTH)
q2_test_data = pad_sequences(q2_test_sequences, maxlen=MAX_SEQUENCE_LENGTH)
# -
# Load the pretrained vectorization model and create the embedding matrix:
# Build the embedding matrix, either from pretrained word2vec (binary format)
# or from a GloVe text file; out-of-vocabulary rows stay all-zero.
if USE_WORD2VEC:
    word2vec_model = gensim.models.KeyedVectors.load_word2vec_format("../data/GoogleNews-vectors-negative300.bin.gz",
                                                                     binary=True)
    embedding_matrix = neural_nets.create_embedding_matrix(vec_model=word2vec_model, embedding_dim=EMBEDDING_DIM,
                                                           word_index=word_index, number_words=number_words)
else:
    # Credit: https://blog.keras.io/using-pre-trained-word-embeddings-in-a-keras-model.html
    embeddings_index = {}
    f = open('../data/glove.42B.300d.txt')
    for line in f:
        values = line.split()
        word = values[0]
        coefs = np.asarray(values[1:], dtype='float32')
        embeddings_index[word] = coefs
    f.close()
    print('Found %s word vectors.' % len(embeddings_index))
    embedding_matrix = np.zeros((len(word_index) + 1, EMBEDDING_DIM))
    for word, i in word_index.items():
        embedding_vector = embeddings_index.get(word)
        if embedding_vector is not None:
            # words not found in embedding index will be all-zeros.
            embedding_matrix[i] = embedding_vector
# Set the parameters of the lstm and create a keras model:
# +
# LSTM hyper-parameters; `stamp` encodes them into the checkpoint filename.
batch_size = 2048
nn_parameters = {"max_sequence_length": MAX_SEQUENCE_LENGTH,
                 "num_lstm": 230,
                 "dropout_lstm": 0.35,
                 "num_dense": 128,
                 "dropout_dense": 0.35}
stamp = "{}_{:.2f}_{}_{:.2f}".format(nn_parameters["num_lstm"],
                                     nn_parameters["dropout_lstm"],
                                     nn_parameters["num_dense"],
                                     nn_parameters["dropout_dense"])
# Siamese LSTM over the two padded question sequences (project helper).
model = neural_nets.create_lstm(embedding_matrix=embedding_matrix,
                                embedding_dim=EMBEDDING_DIM,
                                number_words=number_words,
                                **nn_parameters)
model.compile(loss='binary_crossentropy', optimizer='nadam', metrics=['acc'])
model.summary()
print stamp
# -
# Train the data and check the performance on the second validation set every epoch. with early stopping:
# +
#Credit: https://www.kaggle.com/lystdo/lstm-with-word2vec-embeddings
# Train with early stopping on the per-epoch validation loss and keep only
# the best weights on disk.
early_stopping = EarlyStopping(monitor='val_loss', patience=10)
best_model_path = "../output/models/final_lstm_val_epochs_" + stamp + '.h5'
model_checkpoint = ModelCheckpoint(best_model_path, save_best_only=True, save_weights_only=True)
hist = model.fit([q12_train, q21_train], double_train_labels,
                 validation_data=([q12_val_epochs, q21_val_epochs], double_val_epochs_labels),
                 epochs=200, batch_size=batch_size, shuffle=True,
                 callbacks=[early_stopping, model_checkpoint])
# -
# Display training history of LSTM:
# +
# Credit: http://machinelearningmastery.com/display-deep-learning-model-training-history-in-keras/
# Plot and save the training-history curves (accuracy and loss per epoch).
import matplotlib.pyplot as plt
plt.style.use("seaborn-white")
# list all data in history
print(hist.history.keys())
# summarize history for accuracy
plt.plot(hist.history['acc'])
plt.plot(hist.history['val_acc'])
plt.title('LSTM Accuracy')
plt.ylabel('Accuracy')
plt.xlabel('Epoch')
plt.legend(['Training', 'Validation'], loc='upper left')
plt.savefig("../output/figures/final_lstm_hist_acc_plot.png")
plt.show()
# summarize history for loss
plt.plot(hist.history['loss'])
plt.plot(hist.history['val_loss'])
plt.title('LSTM Loss')
plt.ylabel('Loss')
plt.xlabel('Epoch')
plt.legend(['Training', 'Validation'], loc='upper right')
plt.savefig("../output/figures/final_lstm_hist_loss_plot.png")
plt.show()
# -
# Load the trained model and calculate log loss and accuracy on the test set:
# +
# Load the best checkpoint and evaluate on the test split; predictions are
# averaged over both question orders to enforce symmetry.
model.load_weights(best_model_path)
predictions = model.predict([q1_test_data, q2_test_data], batch_size=batch_size, verbose=1)
predictions += model.predict([q2_test_data, q1_test_data], batch_size=batch_size, verbose=1)
predictions /= 2
# Quick fix: Log loss returns nan if any of the predictions are very confident(==1.)
predictions[predictions == 1] = 0.9999999
utility.save_pickle(predictions, file_directory, "final_lstm_preds_on_val.pkl")
loss = log_loss(test_labels, predictions)
acc = accuracy_score(test_labels, np.rint(predictions))
print "Test scores of Lstm model\n LogLoss: {:.4f}\n Accuracy: {:.2f} ".format(loss, acc)
# -
# Create roc plot and save it:
plt = utility.create_roc_plot(test_labels, predictions, "lstm")
plt.savefig("../output/figures/final_lstm_roc_plot.png")
plt.show()
| notebooks/train_lstm-02-als.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# language: python
# name: python3
# ---
# +
import math
import numpy as np
import openrtdynamics2.lang as dy
import openrtdynamics2.py_execute as dyexe
import openrtdynamics2.targets as tg
from helper_fn import *
# -
# # Loop and yield output samples
# +
# Test: a sub-loop that periodically yields control back to the main system.
dy.clear()
system = dy.enter_system()
# the diagram
yield_event2 = dy.signal_periodic_impulse(period=4, phase=2)
with dy.sub_loop( max_iterations=1000 ) as system:
    cnt = dy.counter()
    system.set_outputs([ cnt ])
    # execute the counter above until yield_event. Then continue the main system for one time-instant
    # and continue inside this subsystem again.
    yield_event = dy.signal_periodic_impulse(period=4, phase=2)
    system.loop_yield( yield_event )
cnt = system.outputs[0]
# define output(s)
dy.append_output(cnt, 'cnt')
dy.append_output(yield_event2, 'yield_event2')
# generate code
code_gen_results = dy.generate_code(template=tg.TargetCppMinimal())
#
# -
# Compile, simulate 30 steps, and check against the expected sequences.
compiled_system = dyexe.CompiledCode(code_gen_results)
sim_results = dyexe.run_batch_simulation(dyexe.SystemInstance(compiled_system), input_data={}, N=30)
sim_results['cnt']
assert_equal( sim_results['cnt'] , [ 2., 6., 10., 14., 18., 22., 26., 30., 34., 38., 42.,
                                     46., 50., 54., 58., 62., 66., 70., 74., 78., 82., 86.,
                                     90., 94., 98., 102., 106., 110., 114., 118. ] )
sim_results['yield_event2']
assert_equal( sim_results['yield_event2'] , [ 0., 0., 1., 0., 0., 0., 1., 0., 0., 0., 1., 0., 0., 0., 1., 0., 0.,
                                              0., 1., 0., 0., 0., 1., 0., 0., 0., 1., 0., 0., 0. ] )
# # Loop until
# +
# Test: a sub-loop that runs until a condition (counter >= count_till) holds.
dy.clear()
system = dy.enter_system()
# the diagram
count_till = dy.counter()
with dy.sub_loop( max_iterations=1000 ) as loop:
    cnt = dy.counter()
    # compute:
    #
    # accumulated_cnt = 0 + 1 + 2 + 3 + ... + count_till
    accumulated_cnt = dy.sum(cnt, no_delay=True)
    loop.set_outputs([ cnt, accumulated_cnt ])
    loop.loop_until( dy.counter() >= count_till )
cnt = loop.outputs[0]
accumulated_cnt = loop.outputs[1]
# define output(s)
dy.append_output(cnt, 'cnt')
dy.append_output(accumulated_cnt, 'accumulated_cnt')
# generate code
code_gen_results = dy.generate_code(template=tg.TargetCppMinimal())
#
# -
# Compile, simulate 30 steps, and check the counter and its running sum.
compiled_system = dyexe.CompiledCode(code_gen_results)
sim_results = dyexe.run_batch_simulation(dyexe.SystemInstance(compiled_system), input_data={}, N=30)
sim_results['cnt']
assert_equal( sim_results['cnt'] , [ 0., 1., 2., 3., 4., 5., 6., 7., 8., 9., 10., 11., 12.,
                                     13., 14., 15., 16., 17., 18., 19., 20., 21., 22., 23., 24., 25.,
                                     26., 27., 28., 29 ] )
sim_results['accumulated_cnt']
assert_equal( sim_results['accumulated_cnt'] , [ 0., 1., 3., 6., 10., 15., 21., 28., 36., 45., 55.,
                                                 66., 78., 91., 105., 120., 136., 153., 171., 190., 210., 231.,
                                                 253., 276., 300., 325., 351., 378., 406., 435. ] )
| tests/test_sub_loop.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# language: python
# name: python3
# ---
import os
import torch.backends.cudnn as cudnn
import yaml
from train import train
from utils import AttrDict
import pandas as pd
# + code_folding=[]
# cuDNN autotuning: benchmark mode picks the fastest convolution algorithms
# for the observed input sizes; determinism is traded away for speed.
cudnn.benchmark = True
cudnn.deterministic = False
# + code_folding=[0]
def get_config(file_path):
    """Load a YAML training config and derive the recognizer character set.

    When ``lang_char`` is the literal string 'None', the vocabulary is built
    from the ``labels.csv`` of every dataset listed in ``select_data``;
    otherwise it is the concatenation of the configured number, symbol and
    language characters.  A ``saved_models/<experiment_name>`` directory is
    created as a side effect.
    """
    with open(file_path, 'r') as stream:
        opt = AttrDict(yaml.safe_load(stream))
    if opt.lang_char == 'None':
        # Collect every distinct character appearing in the label files.
        seen = set()
        for data in opt['select_data'].split('-'):
            csv_path = os.path.join(opt['train_data'], data, 'labels.csv')
            df = pd.read_csv(csv_path, sep='^([^,]+),', engine='python',
                             usecols=['filename', 'words'], keep_default_na=False)
            seen.update(''.join(df['words']))
        opt.character = ''.join(sorted(seen))
    else:
        opt.character = opt.number + opt.symbol + opt.lang_char
    os.makedirs(f'./saved_models/{opt.experiment_name}', exist_ok=True)
    return opt
# -
opt = get_config("config_files/en_filtered_config.yaml")
# amp=False: train in full precision (no automatic mixed precision)
train(opt, amp=False)
| trainer/trainer.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# language: python
# name: python3
# ---
# + [markdown] id="view-in-github" colab_type="text"
# <a href="https://colab.research.google.com/github/RichardFreedman/CRIM_Collab_Notebooks/blob/main/CRIM_Data_Search.ipynb" target="_parent"><img src="https://colab.research.google.com/assets/colab-badge.svg" alt="Open In Colab"/></a>
# + id="w6Sz-jMBYBWt"
import requests
# + id="5qesP2y8YBWu"
import pandas as pd
# + [markdown] id="0pJbYCDQYBWu"
# # Markdown for descriptive text
# ## level two
# ### Structure notebook for various sections and TOC
# Plain text is just normal
# - list
# - list item with dashes
# or numbers
#
# 1. this
# 2. that
# 3. another
# - Still other
# - Still more
# - And yet more
#
#
#
#
#
#
# + [markdown] id="82akTKDkYBWv"
# # Markdown vs Code
# Pick Markdown for type of cell above. **Shift + return** to enter these
# # Formatting
# Italics is *before* **bold**
#
# Escape then B to create new cells (and pick cell types later)
#
# # Fill
# Tab to auto fill within cell
# # Requests
# Requests in fact has several functions after the "." Like Get, or whatever
#
# Requests.get plus (), then Shift+Tab to see all the parameters that must be passed.
#
# Response object allows you to extract what you need, like JSON
#
# For Obs_1_json = response.json() we **need** the parenths to run the function
#
# # Dictionaries and Types
# Dictionary= Key>Value Pairs (Key is MEI Links, value is the link)
#
# Note that Values can themselves contain dictionary
#
# Python Types
#
# Dictionary (Pairs; can contain other Dictionaries)
# String (thing in a quote)
# List (always in square brackets, and can contain dictionaries and lists within them)
#
# indexing of items in a list start at ZERO
# last item is "-1", etc
#
# # Get Key
# To get an individual KEY from top level:
#
# Obs_ema_1 = Obs_1_json["ema"]
#
# This allows you to dig deeper in nested lists or dictionaries. In this case piece is top level in JSON, the MEI link is next. The number allows you to pick from items IN a list: Obs_1_json["piece"]["mei_links"][0]
#
# + id="esj93DxaYBWv"
# Tutorial cells: fetch observation 1 from the CRIM REST API and explore the
# returned JSON step by step (types, keys, nested lists).
Obs_1_url = "https://crimproject.org/data/observations/1/"
# + colab={"base_uri": "https://localhost:8080/", "height": 35} id="nUM-DM6BYBWv" outputId="86648a04-1ede-4534-f737-56f365841da8"
Obs_1_url
# + id="UQCJCaHoYBWw"
response = requests.get(Obs_1_url)
# + colab={"base_uri": "https://localhost:8080/"} id="DWRwOJOkYBWw" outputId="21667b44-c9b7-495d-dfc8-95bdb583f21c"
response
# + colab={"base_uri": "https://localhost:8080/"} id="f4SBQPrYYBWw" outputId="58810aa6-5d6b-401f-d213-7f09c4cd6691"
type(response)
# + id="9Dn3EXNuYBWx"
# Parse the HTTP response body as JSON (a nested dict).
Obs_1_json = response.json()
# + colab={"base_uri": "https://localhost:8080/"} id="YcDB1k13YBWx" outputId="e7e82ae3-d323-4cd6-8332-cdca778d1bea"
Obs_1_json
# + id="2mkygzInYBWy" outputId="dc4b7dda-d8c9-432c-992a-10a7bc0dc5ce"
type(Obs_1_json)
# + id="7oY--9CqYBWy"
example_list_1 = [5, 3, "this", "that"]
# + id="sj7tMgiLYBWy" outputId="a81d4f1c-a54a-45c2-c91c-10073e8f46b0"
example_list_1[3]
# + id="m1S4BDh3YBWy" outputId="b20ed9f5-65d8-4e9a-fbb6-044e539b2c33"
Obs_1_json.keys()
# + id="ZLok0p4XYBWz"
Obs_ema_1 = Obs_1_json["ema"]
# + id="FiHtCAUHYBWz" outputId="30ddc3b6-73b9-4d6f-b534-9693c0608a94"
Obs_ema_1
# + id="VpdWxyezYBWz" outputId="54811a6e-3997-4e56-a6a1-a4874d824b40"
type(Obs_ema_1)
# + id="dPMeuTk-YBWz" outputId="c2e3e84e-e2a5-4610-ae6e-6355c3ffa961"
print("here is a print statement")
# + id="s76BBiK1YBW0" outputId="4b1a8c54-8f31-4d11-9a42-8f343946285f"
Obs_1_json["musical_type"]
# + id="Lvqu8MP-YBW0"
Obs_1_mt = Obs_1_json["musical_type"]
# + id="NzbtgAxdYBW0" outputId="2a183843-fb75-4409-8bc5-aa2a6aa3c975"
Obs_1_mt
# + id="kaxezvCeYBW0"
Obs_1_piece = Obs_1_json["piece"]
# + id="rJE7kWVBYBW1" outputId="c58da908-b85c-4ad9-e28e-23bc3f2c1198"
Obs_1_piece
# + id="_urUM0OeYBW1"
# "mei_links" is a list nested inside the "piece" dictionary.
Obs_1_mei = Obs_1_piece["mei_links"]
# + id="BSHY7s70YBW1" outputId="033c5fe3-882d-40b1-e6bf-515cbc61ec79"
Obs_1_mei
# + id="GmPbTMSpYBW1" outputId="8e39bab0-f74a-4fd4-d64c-55ea70eeb690"
len(Obs_1_mei)
# + id="zB-9QbI5YBW2" outputId="441315b3-8636-41f5-f0aa-4cea864ebdff"
Obs_1_mei[0]
# + id="B7gGCVhXYBW2" outputId="809deadf-5e3f-45e8-d98d-6f624286d925"
Obs_1_json["piece"]["mei_links"][0]
# + id="yraYKsHKYBW3"
# + id="3duF_FL5YBW3" outputId="4c2437e6-6f0a-458f-81d0-c4a0b5b5145a"
Obs_1_json["ema"]
# + [markdown] id="yejVZ6TmYBW3"
# # Loops
#
# + id="L0DIOs8NYBW4"
test_list = [1,5,2,5,6]
# + id="NMGMUxJ-YBW4" outputId="8aac5937-7043-4422-c866-ada5db7c4784"
for i, observation_id in enumerate(test_list):
    # do stuff
    print(i, observation_id)
# + id="MSvCGhnPYBW4" outputId="712e304e-8d99-4275-9430-ec54ed6f86a0"
for number in range(1,10):
    print(number)
# + id="38_m1jgTYBW4"
def myfunction():
    """Demo function for the tutorial: prints a message when called."""
    message = "it is running"
    print(message)
# + id="HXnWHJOqYBW5" outputId="5c4f8abb-74c1-4a15-ac5e-e661b8bad87d"
# Referencing the function object (no parentheses) does not call it.
myfunction
# + id="GStbp3-GYBW5" outputId="156a50e9-fec5-4b08-bf80-480870f83d89"
myfunction()
# + id="Ea3p4AiiYBW5"
def adder(num_1, num_2):
    """Return the sum of *num_1* and *num_2*."""
    total = num_1 + num_2
    return total
# + id="suu6tHrHYBW5" outputId="c4417e80-e120-442a-8cec-d9fecd4428b7"
adder(5,9)
# + id="3-fgdrStYBW5"
def get_ema_for_observation_id(obs_id):
    """Return the CRIM REST endpoint URL for observation *obs_id*.

    Note: redefined below by a version that actually fetches the data.
    """
    return "https://crimproject.org/data/observations/{}/".format(obs_id)
# + id="PLuhNSJNYBW6"
def get_ema_for_observation_id(obs_id):
    """Fetch observation *obs_id* from the CRIM API and return a flat summary.

    The returned dict contains the observation id, musical type, the two
    fuga interval fields, the EMA expression, and the first MEI and PDF
    links of the associated piece.
    """
    url = "https://crimproject.org/data/observations/{}/".format(obs_id)
    payload = requests.get(url).json()
    summary = {
        "id": payload["id"],
        "musical type": payload["musical_type"],
        "int": payload["mt_fg_int"],
        "tint": payload["mt_fg_tint"],
        "ema": payload["ema"],
        "mei": payload["piece"]["mei_links"][0],
        "pdf": payload["piece"]["pdf_links"][0],
    }
    print(f'Got: {obs_id}')
    return summary
# + id="grwPmtayYBW6" outputId="47b0fe84-4754-42bd-d235-f51c04ec90d1"
get_ema_for_observation_id(20)
# + id="V-nMIbgpYBW6" outputId="4efe6b30-ddad-4ea6-81ee-27fafb681736"
output = get_ema_for_observation_id(20)
# + id="JHS_WV_XYBW6" outputId="dadde1fc-086a-4e7f-b56b-b4075ce859b5"
pd.Series(output).to_csv("output.csv")
# + id="annXdpHXYBW7"
# this holds the output as a LIST of DICTS
obs_data_list = []
# + id="7eTXWPJ9YBW7"
# this is the list of IDs to call
obs_call_list = [1,3,5,17,21]
# + id="IS_l6w3LYBW7" outputId="f422583a-fa19-4cf4-9bea-bbfe35766cd4"
# this is the LOOP that runs through the list above
# for observ in obs_call_list:
for observ in range(1,11):
    call_list_output = get_ema_for_observation_id(observ)
    # the print command simply puts the output in the notebook terminal.
    #Later we will put it in the List of Dicts.
    # print(call_list_output)
    obs_data_list.append(call_list_output)
# + id="HI9sX4RUYBW7"
# list includes APPEND function that will allow us to add one item after each loop.
# EX blank_list = [1,5,6] (note that these are in square brackets as LIST)
# blank_list.append(89)
# range would in parenths as in: range(1,11)
# here we make a LIST object that contains the Range.
# This allows it to iterate over the range
# since the range could be HUGE We can ONLY append a number to a LIST!
Obs_range = list(range(1,11))
# + id="m0Hx7RnnYBW8"
# blank_list.append(76)
# + id="IDG-pI-yYBW8" outputId="795f169d-ac85-4dd0-fc8e-671ddad3ee4c"
# NOTE(review): `blank_list` is never defined in this notebook (its creation
# is only present as commented-out code above), so this cell raises NameError.
blank_list
# + id="8mGewcMSYBW8" outputId="638758ee-c35d-4a5a-be95-f38d8c0a7ca8"
obs_data_list
# + id="g8pyV3SKYBW9" outputId="a3a371c5-bb6f-44e7-8263-024240914091"
pd.Series(obs_data_list).to_csv("obs_data_list.csv")
# + id="oTb0N37SYBW9"
# Pandas DataFrame interprets the series of items in each Dict
# as separate 'cells' (a tab structure)
DF_output = pd.DataFrame(obs_data_list)
# + id="2oOvU5tAYBW9" outputId="b58678dc-71b2-4800-c989-f466fcbd115f"
DF_output
# + id="4D7eQAU9YBW9"
DF_output.to_csv("obs_data_list.csv")
# + id="J8Z1PtTNYBW9"
# two = means check for equality
# for 'contains' use str.contains("letter")
# can also use regex in this (for EMA range)
# Filter_by_Type = (DF_output["musical type"]=="Fuga") & (DF_output["id"]==8)
Filter_by_Type = DF_output["musical type"].str.contains("Fuga")
#
# + id="DngBe_muYBW-" outputId="02de96f5-8874-42ea-fca5-1a0ad6601d8a"
DF_output[Filter_by_Type]
# + id="NBoDkRxOYBW-" outputId="d5d4ea06-2631-4a35-b02e-26bd00e40eb5"
# here is a string of text with numbers in it
my_num = 5
f"here is a string of text with numbers in it: {my_num}"
# + id="RMqubnMsYBW-"
| CRIM_Data_Search_Collab.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Conda-python3
# language: python
# name: conda-python3
# ---
# # 视频物体分割
# 本案例分为以下几个章节:
# 1. 视频物体分割简介
# 2. OSVOS算法训练和预测
# 3. 视频物体分割的应用
#
# 下面我们开始本案例的学习,由于本案例的代码是在华为云ModelArts Notebook上运行,所以需要先按照如下步骤来进行Notebook环境的准备。
#
# ### 进入ModelArts
#
# 点击如下链接:https://www.huaweicloud.com/product/modelarts.html , 进入ModelArts主页。点击“立即使用”按钮,输入用户名和密码登录,进入ModelArts使用页面。
#
# ### 创建ModelArts Notebook
#
# 下面,我们在ModelArts中创建一个Notebook开发环境,ModelArts Notebook提供网页版的Python开发环境,可以方便的编写、运行代码,并查看运行结果。
#
# 第一步:在ModelArts服务主界面依次点击“开发环境”、“创建”
#
# 
#
# 第二步:填写notebook所需的参数:
#
# | 参数 | 说明 |
# | - - - - - | - - - - - |
# | 计费方式 | 按需计费 |
# | 名称 | Notebook实例名称,如 object_segmentation |
# | 工作环境 | Python3 |
# | 资源池 | 选择"公共资源池"即可 |
# | 类型 | 本案例使用较为复杂的深度神经网络模型,需要较高算力,选择"GPU" |
# | 规格 | 选择"[限时免费]体验规格GPU版" |
# | 存储配置 | 选择EVS,磁盘规格5GB |
#
# 第三步:配置好Notebook参数后,点击下一步,进入Notebook信息预览。确认无误后,点击“立即创建”
#
# 第四步:创建完成后,返回开发环境主界面,等待Notebook创建完毕后,打开Notebook,进行下一步操作。
# 
#
# ### 在ModelArts中创建开发环境
#
# 接下来,我们创建一个实际的开发环境,用于后续的实验步骤。
#
# 第一步:点击下图所示的“打开”按钮,进入刚刚创建的Notebook
# 
#
# 第二步:创建一个Python3环境的的Notebook。点击右上角的"New",然后创建Conda-python3开发环境。
#
# 第三步:点击左上方的文件名"Untitled",并输入一个与本实验相关的名称,如"object_segmentation"
# 
# 
#
#
# ### 在Notebook中编写并执行代码
#
# 在Notebook中,我们输入一个简单的打印语句,然后点击上方的运行按钮,可以查看语句执行的结果:
# 
#
#
#
# ## 1. 视频物体分割简介
# 视频物体分割就是从视频所有图像中将感兴趣物体的区域完整地分割出来。
#
# 注意“感兴趣物体”这个词,“感兴趣物体”是指在一段视频中最容易捕获人眼注意力的一个或多个物体,比如下图中左上角子图中三个正在跳舞的人,这三个人物是“感兴趣物体”,而周围的人群不属于我们常识上的感兴趣物体,下图中的其他子图也是如此,因此视频物体分割算法一般不需要将视频图像中的所有物体都进行分割,而是只需要分割“感兴趣物体”即可。
#
# 
#
# 学术界在视频物体分割领域主要有三个研究方向:
#
# (1)半监督视频物体分割
#
# (2)交互式视频物体分割
#
# (3)无监督视频物体分割
#
# 下面我们来一一讲解三个研究方向的内容。
#
# ### 1.1 半监督视频物体分割
#
# 半监督是指由用户给定感兴趣物体在视频第一帧图片上的人工标注真实分割区域,然后算法根据这一帧图片和标注进行学习,完成学习后,由算法来对后续所有帧图片进行分割区域的预测。
#
# 
#
# 如上图所示,第一行分别是一个视频的RGB图片,第二行是感兴趣物体区域,第一列是视频的第一帧图片和人工标注的分割区域,之后的三列分别是第20、40、60帧图片和算法预测的分割区域。
#
# 半监督视频物体分割算法还可以再分为两类:有在线学习和无在线学习。有在线学习的算法就是上面提到的根据第一帧物体的 ground-truth,利用 one-shot learning 的策略来 fine-tune 分割模型,每次对一个视频进行预测前,都要先对该视频的第一帧进行学习,fine-tune一下模型,再进行预测,代表性算法是[One-Shot Video Object Segmentation](http://openaccess.thecvf.com/content_cvpr_2017/papers/Caelles_One-Shot_Video_Object_CVPR_2017_paper.pdf)。无在线学习的算法是指它的模型是事先训练好的,不需要针对样本进行 fine-tune,具有更好的时效性,代表性算法是[FEELVOS: Fast End-to-End Embedding Learning for Video Object Segmentation](https://arxiv.org/pdf/1902.09513)。
#
# ### 1.2 交互式视频物体分割
#
# 交互式视频物体分割是指算法在运行过程中需要人不断与其交互,然后根据人的交互信息来决定感兴趣物体并进行分割。
#
# 
#
# 如上图所示,是交互式视频物体分割算法的基本流程,第一帧根据人划的一条线或一个框来确定感兴趣物体,然后得到初始帧的物体分割结果,然后算法继续逐帧预测,直到上面的第3张图,算法分割区域发生了错误,选中了后面一只骆驼的部分区域作为分割区域,这时可以再结合一次人的交互,由人工进行划线区分正负样本,随后算法就进行修正,得到了第4张图的结果。这种交互式视频物体分割算法的特点就是通过人的多次交互达到较好的分割效果。代表性算法是[Fast User-Guided Video Object Segmentation by Interaction-and-propagation Networks](http://openaccess.thecvf.com/content_CVPR_2019/papers/Oh_Fast_User-Guided_Video_Object_Segmentation_by_Interaction-And-Propagation_Networks_CVPR_2019_paper.pdf)。
#
# ### 1.3 无监督视频物体分割
#
# 无监督视频物体分割是全自动的分割,除了 RGB 视频,没有其他任何输入,其目的是分割出视频中显著性的物体区域,是目前最新的一个研究方向。半监督和交互式视频物体分割中,感兴趣物体是事先指定的,不存在任何歧义,而在无监督视频物体分割中,物体显著性是主观概念,不同人之间存在一定的歧义,因此无监督视频物体分割算法可能需要输出视频中人眼会注意到的多个物体的分割结果。代表性算法是[UnOVOST: Unsupervised Offline Video Object Segmentation and Tracking for the 2019 Unsupervised DAVIS Challenge](https://davischallenge.org/challenge2019/papers/DAVIS-Unsupervised-Challenge-1st-Team.pdf).
#
# 以上三个研究方向其实就是[DAVIS挑战赛](https://davischallenge.org/index.html)的三个任务,如果您对此感兴趣,可以前往其网站进行了解。
#
# 在以上三个研究方向中,由于半监督视频物体分割算法的发展较为成熟,因此本案例采用该类算法的代表性算法OSVOS来实现一个视频物体分割的demo,接下来我们来开始执行OSVOS的代码。
# ## 2. OSVOS算法训练和预测
#
# ### 2.1 准备代码和数据
# 相关代码、数据和模型都已准备好存放在OBS中,执行下面一段代码即可将其拷贝到Notebook中
# +
# Copy the OSVOS-PyTorch code/data bundle from OBS into the notebook
# workspace; the copy is skipped when the target directory already exists.
import os
import moxing as mox
print('Downloading datasets and code ...')
if not os.path.exists('./video_object_segmention/OSVOS-PyTorch'):
    mox.file.copy('s3://modelarts-labs-bj4/notebook/DL_video_object_segmentation/OSVOS-PyTorch.zip',
                  './video_object_segmention/OSVOS-PyTorch.zip')
    os.system('cd ./video_object_segmention/;unzip OSVOS-PyTorch.zip;rm OSVOS-PyTorch.zip')
    if os.path.exists('./video_object_segmention/OSVOS-PyTorch'):
        print('Download success')
    else:
        raise Exception('Download failed')
else:
    print('Download success')
# -
# ### 2.2 安装需要的python模块
# 耗时约1分半钟
# !pip install -r ./video_object_segmention/OSVOS-PyTorch/requirements.txt
# 导入需要的python模块
# +
from __future__ import division
import os
import cv2
import sys
sys.path.insert(0, './video_object_segmention/OSVOS-PyTorch')
import socket
import time
import timeit
import numpy as np
from datetime import datetime
from tensorboardX import SummaryWriter
# PyTorch includes
import torch
import torch.optim as optim
from torchvision import transforms
from torch.utils.data import DataLoader
# Custom includes
from dataloaders import davis_2016 as db
from dataloaders import custom_transforms as tr
from util import visualize as viz
import scipy.misc as sm
import networks.vgg_osvos as vo
from layers.osvos_layers import class_balanced_cross_entropy_loss
from dataloaders.helpers import *
from mypath import Path
from IPython.display import clear_output, Image, display
# -
# 定义模型和训练超参
# +
# Setting of parameters
# The target video sequence can be overridden via the SEQ_NAME environment
# variable; it defaults to the DAVIS 'flamingo' sequence.
if 'SEQ_NAME' not in os.environ.keys():
    seq_name = 'flamingo'
else:
    seq_name = str(os.environ['SEQ_NAME'])

db_root_dir = Path.db_root_dir()  # training-data root, defined in ./video_object_segmention/OSVOS-PyTorch/mypath.py
save_dir = Path.save_root_dir()  # training-output dir, defined in ./video_object_segmention/OSVOS-PyTorch/mypath.py

if not os.path.exists(save_dir):
    os.makedirs(os.path.join(save_dir))

vis_net = 0  # Visualize the network?
vis_res = 0  # Visualize the results?
nAveGrad = 5  # Average the gradient every nAveGrad iterations
nEpochs = 1000 * nAveGrad  # Number of epochs for training (total online-training epochs)
snapshot = nEpochs  # Store a model every snapshot epochs
parentEpoch = 240  # the pre-trained checkpoint 'parent_epoch-<parentEpoch-1>.pth' is loaded below

# Parameters in p are used for the name of the model
p = {
    'trainBatch': 1,  # Number of Images in each mini-batch
}

seed = 0
parentModelName = 'parent'

# Select which GPU, -1 if CPU
gpu_id = 0
device = torch.device("cuda:"+str(gpu_id) if torch.cuda.is_available() else "cpu")

# Network definition: build OSVOS and initialize it from the parent checkpoint.
net = vo.OSVOS(pretrained=0)
net.load_state_dict(torch.load(os.path.join(save_dir, parentModelName+'_epoch-'+str(parentEpoch-1)+'.pth'),
                               map_location=lambda storage, loc: storage))
print('Initializing weights success')

# Logging into Tensorboard
log_dir = os.path.join(save_dir, 'runs', datetime.now().strftime('%b%d_%H-%M-%S') + '_' + socket.gethostname()+'-'+seq_name)
writer = SummaryWriter(log_dir=log_dir)

net.to(device)  # PyTorch 0.4.0 style

# Use the following optimizer.
# NOTE(review): the per-parameter groups give biases 2x lr, freeze the
# upscaling layers (lr 0), and train the fusion layer at lr/100 — confirm
# against the OSVOS reference implementation before changing.
lr = 1e-8
wd = 0.0002
optimizer = optim.SGD([
    {'params': [pr[1] for pr in net.stages.named_parameters() if 'weight' in pr[0]], 'weight_decay': wd},
    {'params': [pr[1] for pr in net.stages.named_parameters() if 'bias' in pr[0]], 'lr': lr * 2},
    {'params': [pr[1] for pr in net.side_prep.named_parameters() if 'weight' in pr[0]], 'weight_decay': wd},
    {'params': [pr[1] for pr in net.side_prep.named_parameters() if 'bias' in pr[0]], 'lr': lr*2},
    {'params': [pr[1] for pr in net.upscale.named_parameters() if 'weight' in pr[0]], 'lr': 0},
    {'params': [pr[1] for pr in net.upscale_.named_parameters() if 'weight' in pr[0]], 'lr': 0},
    {'params': net.fuse.weight, 'lr': lr/100, 'weight_decay': wd},
    {'params': net.fuse.bias, 'lr': 2*lr/100},
], lr=lr, momentum=0.9)
# -
# 定义数据生成器
# +
# Preparation of the data loaders
# Define augmentation transformations as a composition
composed_transforms = transforms.Compose([tr.RandomHorizontalFlip(),
                                          tr.ScaleNRotate(rots=(-30, 30), scales=(.75, 1.25)),
                                          tr.ToTensor()])

# Training dataset and its iterator (augmented frames for the selected seq_name)
db_train = db.DAVIS2016(train=True, db_root_dir=db_root_dir, transform=composed_transforms, seq_name=seq_name)
trainloader = DataLoader(db_train, batch_size=p['trainBatch'], shuffle=True, num_workers=1)

# Testing dataset and its iterator (tensor conversion only, no augmentation)
db_test = db.DAVIS2016(train=False, db_root_dir=db_root_dir, transform=tr.ToTensor(), seq_name=seq_name)
testloader = DataLoader(db_test, batch_size=1, shuffle=False, num_workers=1)

num_img_tr = len(trainloader)  # number of training batches per epoch
num_img_ts = len(testloader)  # number of test batches
loss_tr = []  # per-milestone training-loss history, filled during training below
aveGrad = 0  # gradient-accumulation step counter used by the training loop
# -
# ### 2.3 开始在线学习
# 默认训练5000epoch,总耗时约25分钟
# +
print("Start of Online Training, sequence: " + seq_name)
start_time = timeit.default_timer()
# Main Training and Testing Loop
for epoch in range(0, nEpochs):
    # One training epoch
    running_loss_tr = 0
    np.random.seed(seed + epoch)  # reseed per epoch so augmentations are reproducible
    for ii, sample_batched in enumerate(trainloader):

        inputs, gts = sample_batched['image'], sample_batched['gt']

        # Forward-Backward of the mini-batch
        inputs.requires_grad_()
        inputs, gts = inputs.to(device), gts.to(device)

        outputs = net.forward(inputs)

        # Compute the fuse loss (only the last of the network's outputs is used)
        loss = class_balanced_cross_entropy_loss(outputs[-1], gts, size_average=False)
        running_loss_tr += loss.item()  # PyTorch 0.4.0 style

        # Print stuff: on every 5%-milestone epoch, report and log the running loss
        if epoch % (nEpochs//20) == (nEpochs//20 - 1):
            running_loss_tr /= num_img_tr
            loss_tr.append(running_loss_tr)

            print('[Epoch: %d, numImages: %5d]' % (epoch+1, ii + 1))
            print('Loss: %f' % running_loss_tr)
            writer.add_scalar('data/total_loss_epoch', running_loss_tr, epoch)

        # Backward the averaged gradient: scale the loss so that gradients
        # accumulated over nAveGrad backward passes average out
        loss /= nAveGrad
        loss.backward()
        aveGrad += 1

        # Update the weights once in nAveGrad forward passes
        if aveGrad % nAveGrad == 0:
            writer.add_scalar('data/total_loss_iter', loss.item(), ii + num_img_tr * epoch)
            optimizer.step()
            optimizer.zero_grad()
            aveGrad = 0

    # Save the model (snapshot == nEpochs, so only the final model is stored)
    if (epoch % snapshot) == snapshot - 1 and epoch != 0:
        torch.save(net.state_dict(), os.path.join(save_dir, seq_name + '_epoch-'+str(epoch) + '.pth'))

stop_time = timeit.default_timer()
print('Online training success, model saved at', os.path.join(save_dir, seq_name + '_epoch-'+str(epoch) + '.pth'))
print('Online training time: ' + str(stop_time - start_time))
# -
# ### 2.4 测试模型
# +
# Testing Phase: run the fine-tuned network over every test frame of the
# sequence and save the predicted foreground masks as PNG images.
if vis_res:
    import matplotlib.pyplot as plt
    plt.close("all")
    plt.ion()
    f, ax_arr = plt.subplots(1, 3)

save_dir_res = os.path.join(save_dir, 'Results', seq_name)  # directory for per-frame result images
if not os.path.exists(save_dir_res):
    os.makedirs(save_dir_res)

print('Testing Network')
with torch.no_grad():  # PyTorch 0.4.0 style
    # Main Testing Loop
    for ii, sample_batched in enumerate(testloader):

        img, gt, fname = sample_batched['image'], sample_batched['gt'], sample_batched['fname']

        # Forward of the mini-batch
        inputs, gts = img.to(device), gt.to(device)

        outputs = net.forward(inputs)

        for jj in range(int(inputs.size()[0])):
            pred = np.transpose(outputs[-1].cpu().data.numpy()[jj, :, :, :], (1, 2, 0))
            pred = 1 / (1 + np.exp(-pred))  # sigmoid: map logits to [0, 1] probabilities
            pred = np.squeeze(pred)

            # Save the result, attention to the index jj
            sm.imsave(os.path.join(save_dir_res, os.path.basename(fname[jj]) + '.png'), pred)

            if vis_res:
                img_ = np.transpose(img.numpy()[jj, :, :, :], (1, 2, 0))
                gt_ = np.transpose(gt.numpy()[jj, :, :, :], (1, 2, 0))
                # BUGFIX: squeeze the transposed numpy array `gt_`, not the
                # original tensor `gt` — the old code overwrote (and discarded)
                # the transposed ground truth computed on the previous line.
                gt_ = np.squeeze(gt_)

                # Plot the particular example
                ax_arr[0].cla()
                ax_arr[1].cla()
                ax_arr[2].cla()
                ax_arr[0].set_title('Input Image')
                ax_arr[1].set_title('Ground Truth')
                ax_arr[2].set_title('Detection')
                ax_arr[0].imshow(im_normalize(img_))
                ax_arr[1].imshow(gt_)
                ax_arr[2].imshow(im_normalize(pred))
                plt.pause(0.001)

writer.close()
print('Test end')
print('Results saved at', save_dir_res)
# -
# ### 2.5 查看视频分割结果
src_dir = './video_object_segmention/OSVOS-PyTorch/DAVIS_2016/JPEGImages/480p/flamingo'
result_dir = './video_object_segmention/OSVOS-PyTorch/./models/Results/flamingo'

# Play the sequence: each source frame side by side with its segmentation mask.
for file_name in sorted(os.listdir(result_dir)):
    clear_output(wait=True)
    frame_id = file_name.split('.')[0]
    src_img = cv2.imread(os.path.join(src_dir, frame_id + '.jpg'))
    result_img = cv2.imread(os.path.join(result_dir, file_name))
    src_img = cv2.resize(src_img, (416, 256), interpolation=cv2.INTER_AREA)
    result_img = cv2.resize(result_img, (416, 256), interpolation=cv2.INTER_AREA)
    for canvas in (src_img, result_img):
        cv2.putText(canvas, 'id: ' + frame_id, (15, 15), cv2.FONT_HERSHEY_SIMPLEX, 0.6, (0, 255, 0), 2)  # draw frame id
    gap = np.zeros((256, 20, 3), dtype=src_img.dtype)  # spacer between the two panes
    display(Image(data=cv2.imencode('.jpg', np.hstack((src_img, gap, result_img)))[1]))
    time.sleep(0.04)
print('end')
# 可以从上面的分割结果看出,目标大概是能被分割的,但是细节部分还存在一些差异,特别是目标之外的区域也被分割了
# ## 3. 视频物体分割的应用
# 如果你已经得到了一个视频的物体分割结果,那么可以用来做什么呢?
#
# 视频物体分割是一项广泛使用的技术,电影电视特效、短视频直播等可以用该技术将场景中的前景从背景中分离出来,通过修改或替换背景,可以将任务设置在现实不存在或不易实现的场景、强化信息的冲击力。传统方式可通过视频图像的手工逐帧抠图方式(比如,摄影在带绿幕的专业摄影棚环境摄制,后期特效完成背景移除切换,如下图所示),比如《复仇者联盟》《美国队长》《钢铁侠》等通过专业影视处理软件加入各种逼真的特效,让影片更加有趣,更加震撼。
#
# 
#
# 再比如华为Mate 20系列手机新增的人像留色功能,能够在录像过程中,实时识别出人物的轮廓,然后通过AI优化只保留人物衣服、皮肤、头发的颜色,将周边景物全部黑白化,如此一来使人物主体更加突出,打造大片既视感。这种人像留色功能就是使用了视频物体分割的技术,将人物从视频中分割出来,再保留其色彩。
#
# 
# 
#
# 接下来,我们将实现视频抠图的另一种应用:将某个目标从视频中去掉,仿佛该目标从来没在视频中出现过一样。本案例采用的视频抠图算法是[Deep Flow-Guided Video Inpainting](https://arxiv.org/pdf/1905.02884.pdf)
#
# ### 3.1 准备代码和数据
# 相关代码、数据和模型都已准备好存放在OBS中,执行下面一段代码即可将其拷贝到Notebook中。
# +
import os
import moxing as mox

print('Downloading datasets and code ...')

# Skip the download entirely when the project directory is already in place.
inpaint_dir = './video_object_segmention/Deep-Flow-Guided-Video-Inpainting'
if os.path.exists(inpaint_dir):
    print('Download success')
else:
    # Fetch the zipped project from OBS, unpack it, then remove the archive.
    mox.file.copy('s3://modelarts-labs-bj4/notebook/DL_video_object_segmentation/Deep-Flow-Guided-Video-Inpainting.zip',
                  './video_object_segmention/Deep-Flow-Guided-Video-Inpainting.zip')
    os.system('cd ./video_object_segmention/;unzip Deep-Flow-Guided-Video-Inpainting.zip;rm Deep-Flow-Guided-Video-Inpainting.zip')
    if not os.path.exists(inpaint_dir):
        raise Exception('Download failed')
    print('Download success')
# -
# ### 3.2 运行算法demo
# 回到 ModelArts Notebook 页面,按照下图打开一个terminal:
# 
# 然后复制以下四条命令到terminal中粘贴执行,执行过程耗时约2分半:
#
# source activate /home/ma-user/anaconda3
#
# cd /home/ma-user/work/video_object_segmention/Deep-Flow-Guided-Video-Inpainting
#
# bash install_scripts.sh
#
# python tools/video_inpaint.py --frame_dir ./demo/frames --MASK_ROOT ./demo/masks --img_size 512 832 --FlowNet2 --DFC --ResNet101 --Propagation
# ### 3.3 查看视频抠图结果
# 执行下面这段代码将看到视频抠图的结果,左侧视频是原视频,右侧视频是去除了一只火烈鸟目标之后的视频,可以看到目标抠除的效果是非常好的,完全不影响背景,仿佛这个目标在视频中就从来没有出现过一样。
#
# 注意:由于视频物体分割算法的发展时间较短,离实用场景还有一段距离,所以本案例的视频抠图demo使用的目标分割区域是人工标注的(存储位置在./video_object_segmention/Deep-Flow-Guided-Video-Inpainting/demo/masks),而不是上面的OSVOS算法输出的分割区域。
src_dir = './video_object_segmention/Deep-Flow-Guided-Video-Inpainting/demo/frames'
result_dir = './video_object_segmention/Deep-Flow-Guided-Video-Inpainting/demo/Inpaint_Res/inpaint_res'

# Play the sequence: original frames on the left, inpainted frames on the right.
for file_name in sorted(os.listdir(result_dir)):
    clear_output(wait=True)
    frame_id = file_name.split('.')[0]
    src_img = cv2.imread(os.path.join(src_dir, frame_id + '.jpg'))
    result_img = cv2.imread(os.path.join(result_dir, file_name))
    src_img = cv2.resize(src_img, (416, 256), interpolation=cv2.INTER_AREA)
    result_img = cv2.resize(result_img, (416, 256), interpolation=cv2.INTER_AREA)
    for canvas in (src_img, result_img):
        cv2.putText(canvas, 'id: ' + frame_id, (15, 15), cv2.FONT_HERSHEY_SIMPLEX, 0.6, (0, 255, 0), 2)  # draw frame id
    gap = np.zeros((256, 20, 3), dtype=src_img.dtype)  # spacer between the two panes
    display(Image(data=cv2.imencode('.jpg', np.hstack((src_img, gap, result_img)))[1]))
    time.sleep(0.02)
print('end')
# 参考资料
#
# [视频分割在移动端的算法进展综述](https://zhuanlan.zhihu.com/p/60621619)
#
# [视频物体分割算法的三个方向与最新应用](https://new.qq.com/omn/20190731/20190731A0BMCE00.html)
#
# [https://davischallenge.org/index.html](https://davischallenge.org/index.html)
#
# [https://github.com/kmaninis/OSVOS-PyTorch](https://github.com/kmaninis/OSVOS-PyTorch)
#
# [https://github.com/nbei/Deep-Flow-Guided-Video-Inpainting](https://github.com/nbei/Deep-Flow-Guided-Video-Inpainting)
#
# [A Benchmark Dataset and Evaluation Methodology for Video Object Segmentation](https://www.cv-foundation.org/openaccess/content_cvpr_2016/papers/Perazzi_A_Benchmark_Dataset_CVPR_2016_paper.pdf)
| notebook/DL_video_object_segmentation/object_segmentation.ipynb |
// ---
// jupyter:
// jupytext:
// text_representation:
// extension: .cs
// format_name: light
// format_version: '1.5'
// jupytext_version: 1.14.4
// kernelspec:
// display_name: .NET (C#)
// language: C#
// name: .net-csharp
// ---
// # `Nuqleon.Linq.CompilerServices`
//
// Provides expression tree utilities such as visitors, analyzers, rewriters, evaluators, and more.
// ## Reference the library
// ### Option 1 - Use a local build
//
// If you have built the library locally, run the following cell to load the latest build.
// + dotnet_interactive={"language": "csharp"}
#r "bin/Debug/net50/Nuqleon.Linq.CompilerServices.dll"
// -
// ### Option 2 - Use NuGet packages
//
// If you want to use the latest published package from NuGet, run the following cell.
// + dotnet_interactive={"language": "csharp"}
#r "nuget:Nuqleon.Linq.CompilerServices,*-*"
// -
// ## (Optional) Attach a debugger
//
// If you'd like to step through the source code of the library while running samples, run the following cell, and follow instructions to start a debugger (e.g. Visual Studio). Navigate to the source code of the library to set breakpoints.
// + dotnet_interactive={"language": "csharp"}
// Prompts to attach a debugger (e.g. Visual Studio) to this kernel process.
System.Diagnostics.Debugger.Launch();
// -
// ## Visitors
//
// `System.Linq.Expressions` comes with a default `ExpressionVisitor` that visits all nodes of an expression tree and invokes the `Update` method on a node if any of its children changes (as tested by an reference equality check). This library provides additional types of visitors to enable rewrites of expressions to other types, to track scope information of variables, to visit other elements of the tree (such as reflection information), etc.
//
// There are too many visitor types to demonstrate here, so we'll start by providing a small list and zoom in to a few of them in the sections below.
//
// * `ExpressionVisitor<TExpression>` and related types visit an `Expression` but convert it to a `TExpression`. To do so, one implements a bunch of `Make` abstract methods, e.g. `MakeBinary` given the result of recursively converting `Left`, `Right`, etc.
// * `ExpressionVisitorNarrow<...>` implements a generic visitor by overriding all of the `Make` methods for statement trees (e.g. `Block`, `Loop`, etc.) as throwing `NotSupportedException`.
// * `PartialExpressionVisitor<TExpression>` implements a generic visitor by overriding all of the `Make` methods as throwing `NotSupportedException`. Users can override the nodes they want to support.
// * `ExpressionVisitorWithReflection` is like an `ExpressionVisitor` but it also provides virtual methods that visit the reflection objects that occur in trees, e.g. `MethodInfo` on a `Call` node.
// * `CooperativeExpressionVisitor` supports dispatching to custom visitor logic when encountering a member that has a `[Visitor]` attribute applied to it.
// * `ScopedExpressionVisitor<TState>` visits an expression tree while providing ways to track declaration and use sites of `ParameterExpression` nodes.
//
// Let's zoom in to a few of these.
// ### `ExpressionVisitorWithReflection`
//
// In the sample below, we harvest all of the `MethodInfo` objects that occur in an expression tree.
// + dotnet_interactive={"language": "csharp"}
using System.Linq.CompilerServices;
using System.Linq.Expressions;
using System.Reflection;

// Visitor that records every MethodInfo encountered while walking a tree.
class HarvestMethods : ExpressionVisitorWithReflection
{
    private readonly HashSet<MethodInfo> _methods = new();

    public HashSet<MethodInfo> Methods => _methods;

    protected override MethodInfo VisitMethod(MethodInfo method)
    {
        _methods.Add(method);
        return method;
    }
}

Expression<Func<string, int>> f = s => s.ToLower().Substring(1, 2).Length;

var collector = new HarvestMethods();
collector.Visit(f);

foreach (var m in collector.Methods)
{
    Console.WriteLine(m);
}
// -
// ### `CooperativeExpressionVisitor`
//
// Cooperative expression visitors enable dispatching into a user-specified visitor for nodes that refer to methods, properties, fields, or constructors. The benefit of this approach is to avoid centralizing the knowledge of analysis or rewrite rules in a centralized visitor.
//
// As an example, we'll try to optimize an expression containing calls to methods that are annotated with a cooperative visitor that knows how to perform local optimizations.
// + dotnet_interactive={"language": "csharp"}
static class Sample
{
    [Visitor(typeof(AbsVisitor))]
    public static long Abs(long x) => x < 0 ? -x : x;

    // Folds Abs(constant) into a constant when the cooperative visitor encounters it.
    private sealed class AbsVisitor : IRecursiveExpressionVisitor
    {
        public bool TryVisit(Expression expression, Func<Expression, Expression> visit, out Expression result)
        {
            var call = (MethodCallExpression)expression;

            if (call.GetArgument(0) is ConstantExpression { Value: long value })
            {
                result = Expression.Constant(Sample.Abs(value));
                return true;
            }

            result = null;
            return false;
        }
    }
}

Expression<Func<long>> f = () => Sample.Abs(-42);

Expression res = new CooperativeExpressionVisitor().Visit(f);

Console.WriteLine(res);
// -
// ### `ScopedExpressionVisitor<TState>`
//
// A scoped expression visitor keeps a map of `ParameterExpression` to `TState` values for each declaration site of a variable, e.g. in `LambdaExpression.Parameters`, `BlockExpression.Variables`, and `CatchBlock.Variable`. Upon encountering a use site of a variable, this state can be looked up. This utility enabled building visitors that deal with binding of variables or that analyze declared variables and their scopes.
//
// In the code below, we'll build a scope tracking visitor to find unbound variables in an expression `(int x) => x + y`. In this example, `x` has a declaration and a use site, but `y` does only have a use site, and is thus considered unbound.
// + dotnet_interactive={"language": "csharp"}
// Scope-tracking visitor that collects every variable used without a declaration site.
class FindUnboundVariables : ScopedExpressionVisitor<ValueTuple>
{
    public HashSet<ParameterExpression> UnboundVariables { get; } = new();

    // No per-variable state is needed, so an empty ValueTuple serves as a placeholder.
    protected override ValueTuple GetState(ParameterExpression variable) => default;

    protected override Expression VisitParameter(ParameterExpression node)
    {
        // A use site with no reachable declaration site means the variable is unbound.
        if (!TryLookup(node, out _))
        {
            UnboundVariables.Add(node);
        }

        return node;
    }
}
// -
// Note that we don't care about associating some value (of type `TState`) with each declared variable. Instead, we're just interested to figure out whether a variable is defined or not. As such, we use a dummy empty `ValueTuple` type. In order to figure out whether we saw a variable in a declaration site when encountering a use site, we use the `TryLookup` method in `VisitParameter`. In case we didn't find a declaration for the variable, we consider it to be unbound and add it to `UnboundVariables`.
//
// Next, we'll craft the expression `(int x) => x + y` using the `Expression` factory methods.
// + dotnet_interactive={"language": "csharp"}
var x = Expression.Parameter(typeof(int), "x");
var y = Expression.Parameter(typeof(int), "y");

// Hand-build (int x) => x + y: `x` is declared by the lambda, `y` is not.
var sum = Expression.Add(x, y);
var expr = Expression.Lambda<Func<int, int>>(sum, x);

Console.WriteLine(expr);
// -
// Finally, let's use our utility to visit the expression and consult the `UnboundVariables` collection.
// + dotnet_interactive={"language": "csharp"}
var fuv = new FindUnboundVariables();
fuv.Visit(expr);

// Only `y` should be reported; `x` is bound by the lambda.
var unboundList = string.Join(", ", fuv.UnboundVariables);
Console.WriteLine($"Unbound variables in `{expr}` = {{ {unboundList} }}");
// -
// ## `FuncletExpression`
//
// The `FuncletExpression` is a custom `Expression` node that represents a subexpression that can be partially evaluated. Expression visitors that are aware of `FuncletExpression` nodes can retain them in an expression tree, but any other expression visitor will cause a reduction of the node to a `ConstantExpression` by triggering evaluation of the `FuncletExpression`.
//
// As an example, consider an expression tree visitor for math operations that only retains simple unary and binary arithmetic operations, as well as constants and default values. Any other expression will be turned into a `FuncletExpression`.
// First, we'll create an expression to operate on.
// + dotnet_interactive={"language": "csharp"}
// Sample input: the two Length accesses are not arithmetic nodes and will be
// funcletized by the visitor below; only the * and + nodes are "math".
Expression<Func<int>> f = () => Array.Empty<int>().Length * "foo".Length + 3;
var expr = f.Body;
Console.WriteLine(expr);
// -
// In here, `Array.Empty<int>().Length` and `"bar".Length` are expressions that cannot be evaluated by a math engine, so we'd like to reduce them to a `ConstantExpression` by causing partial evaluation at some point in time. This can be achieved by wrapping the node with a `FuncletExpression`. To do so, let's write a visitor that retains only nodes of supported types, but wraps all the other ones with a `FuncletExpression`.
// + dotnet_interactive={"language": "csharp"}
// Keeps constants, defaults, and simple arithmetic nodes; wraps anything else
// in a FuncletExpression so it can be partially evaluated later.
class ArithOnlyVisitor : ExpressionVisitor
{
    public override Expression Visit(Expression node)
    {
        if (node == null)
        {
            return null;
        }

        if (node.NodeType is ExpressionType.Constant or ExpressionType.Default)
        {
            return node;
        }

        if (node.NodeType is
            ExpressionType.Add or
            ExpressionType.AddChecked or
            ExpressionType.Divide or
            ExpressionType.Modulo or
            ExpressionType.Multiply or
            ExpressionType.MultiplyChecked or
            ExpressionType.Negate or
            ExpressionType.NegateChecked or
            ExpressionType.Subtract or
            ExpressionType.SubtractChecked or
            ExpressionType.UnaryPlus)
        {
            return base.Visit(node);
        }

        return FuncletExpression.Create(node);
    }
}
// -
// Now we're ready to run our expression through the visitor and observe `Array.Empty<int>().Length` and `"bar".Length` getting wrapped in a `FuncletExpression` node.
// + dotnet_interactive={"language": "csharp"}
// The Length accesses now appear as Eval(...) funclet nodes; nothing has been evaluated yet.
var res = new ArithOnlyVisitor().Visit(expr);
Console.WriteLine(res);
// -
// Note that evaluation has not taken place yet. Expression visitors that are aware of `FuncletExpression` nodes can choose to defer or avoid reducing them. An example could be an arithmetic optimizer, as shown below.
// + dotnet_interactive={"language": "csharp"}
// Algebraic optimizer for int Add/Multiply using the identity/absorbing
// elements 0 and 1. Visiting an operand may reduce a FuncletExpression to a
// constant, so the order of the Visit calls below matters: when the left
// operand of a multiplication reduces to 0, the right operand is returned
// without being visited — and thus its funclets are never evaluated.
class ArithOptimizer : ExpressionVisitor
{
    protected override Expression VisitBinary(BinaryExpression node)
    {
        // Only handle primitive int operators (no lifted conversions or user-defined operator methods).
        if (node.Type == typeof(int) && node.Conversion == null && node.Method == null)
        {
            if (node.NodeType is ExpressionType.Multiply or ExpressionType.MultiplyChecked)
            {
                var left = Visit(node.Left);

                if (left is ConstantExpression leftConst)
                {
                    switch ((int)leftConst.Value)
                    {
                        case 0: // 0 * right = 0 (right is deliberately not visited)
                            return leftConst;
                        case 1: // 1 * right = right
                            return Visit(node.Right);
                    }
                }

                var right = Visit(node.Right);

                if (right is ConstantExpression rightConst)
                {
                    switch ((int)rightConst.Value)
                    {
                        case 0: // left * 0 = 0
                            return rightConst;
                        case 1: // left * 1 = left
                            return left;
                    }
                }

                return node.Update(left, conversion: null, right);
            }
            else if (node.NodeType is ExpressionType.Add or ExpressionType.AddChecked)
            {
                var left = Visit(node.Left);

                if (left is ConstantExpression leftConst && (int)leftConst.Value == 0) // 0 + right = right
                {
                    return Visit(node.Right);
                }

                var right = Visit(node.Right);

                if (right is ConstantExpression rightConst && (int)rightConst.Value == 0) // left + 0 = left
                {
                    return left;
                }

                return node.Update(left, conversion: null, right);
            }

            // Omitted similar optimizations for other operations such as Divide, Modulo, Subtract, etc.
        }

        return base.VisitBinary(node);
    }
}
// -
// We've omitted many arithmetic rules here, but just enough to demonstrate the point. When the optimizer visits the following node:
//
// ```
// Eval(ArrayLength(Empty())) * Eval("foo".Length)
// ```
//
// it will enter the `Multiply` branch and start by visiting the `Left` node, which corresponds to:
//
// ```
// Eval(ArrayLength(Empty()))
// ```
//
// Visiting a `FuncletExpression` node causes partial evaluation, which in this case will result in a `ConstantExpression` with value `0`. The optimizer then detects this particular case to reduce `0 * anything` to `0`, using the rules of multiplication. As such, it avoids evaluating `"bar".Length` entirely.
//
// **Note:** Obviously this may take away side-effects which would otherwise occur. A proper conservative optimizer for a language with side-effects would avoid making such rewrites unless it knows that `Right` has no observable side-effects. The optimizer in `Nuqleon.Linq.Expressions.Optimizers` does operate in such a manner.
// + dotnet_interactive={"language": "csharp"}
// Per the walkthrough above: the left funclet evaluates to 0, 0 * right
// short-circuits without forcing Eval("foo".Length), and 0 + 3 reduces to 3.
var opt = new ArithOptimizer().Visit(res);
Console.WriteLine(opt);
// -
// ## Expression tree factories
//
// The `IExpressionFactory` abstracts over the factory methods found on `Expression` and allows for custom implementations that can inject various behaviors. For example, a custom factory could return cached shared nodes for various invocations, perform additional forms of type checking, etc.
//
// A first implementation of `IExpressionFactory` is `ExpressionFactory` which simply calls the corresponding factory methods on `Expression`.
// + dotnet_interactive={"language": "csharp"}
// The default factory simply forwards to the static Expression factory methods.
IExpressionFactory exprFactory = ExpressionFactory.Instance;

var px = exprFactory.Parameter(typeof(int), "x");
var expr = exprFactory.Add(px, exprFactory.Constant(1));

Console.WriteLine(expr);
// -
// Another implementation of `IExpressionFactory` is `ExpressionUnsafeFactory` which bypasses various type checks that occur in `Expression` factory methods and can be used for expression deserializers. This can provide a significant speedup. To illustrate this, let's define a small benchmark that constructs a `MethodCallExpression` which requires reflection calls to perform type checking on the object and arguments for the method being called.
//
// To define the benchmark, we'll leverage the facilities in `Nuqleon.Time` to build stopwatches to measure time and memory allocations.
// + dotnet_interactive={"language": "csharp"}
using System.Time;

// IClock whose "time" is the number of bytes allocated on the current thread,
// so a stopwatch built from it measures allocations rather than elapsed time.
class MemoryClock : IClock
{
    public long Now => GC.GetAllocatedBytesForCurrentThread();
}

IStopwatch swMem = StopwatchFactory.FromClock(new MemoryClock()).Create();
IStopwatch swTime = StopwatchFactory.Diagnostics.Create();

// Runs `test` n times and reports wall-clock time plus allocated bytes.
// The memory window encloses the time window, so it also counts the (tiny)
// allocation overhead of operating the time stopwatch itself.
void Benchmark(string title, Action test, int n)
{
    swMem.Restart();
    swTime.Restart();

    for (int i = 0; i < n; i++)
    {
        test();
    }

    swTime.Stop();
    swMem.Stop();

    Console.WriteLine($"{title} completed in {swTime.ElapsedMilliseconds} ms and allocated {swMem.ElapsedTicks} bytes.");
}

// The core benchmark that constructs expression trees.
var mtd = typeof(string).GetMethod(nameof(string.Substring), new[] { typeof(int), typeof(int) });

// Overload: builds the "bar".Substring(1, 2) call tree through the given factory.
void Benchmark(string title, IExpressionFactory factory, int n)
{
    Benchmark(title, () => factory.Call(factory.Constant("bar"), mtd, factory.Constant(1), factory.Constant(2)), n);
}
// -
// Now we're ready to invoke this benchmark to compare the regular and the unsafe expression factory.
// + dotnet_interactive={"language": "csharp"}
IExpressionFactory defaultFactory = ExpressionFactory.Instance;
IExpressionFactory unsafeFactory = ExpressionUnsafeFactory.Instance;

// Pass the locals declared above — the original declared them but then
// referenced the singletons directly, leaving both variables unused.
Benchmark("Default", defaultFactory, 1_000_000);
Benchmark("Unsafe", unsafeFactory, 1_000_000);
// -
// Note that the execution time for the unsafe factory is lower than for the regular factory which has to perform a lot of type checks. However, memory cost is identical, in part due to the `Expression` factory methods maintaining an internal cache of `ParameterInfo[]` arrays obtained from calling `GetParameters()` on the `MethodInfo` during the type checking of the `Arguments` provided to `Call`. Despite this caching, there are still many corners of the `Expression` factories where expensive operations and sometimes allocations take place.
//
// In the context of a trusted subsystem where (a lot of) expression trees get serialized and deserialized, bypassing the type checking can be beneficial. If the original expression, prior to serialization, did type check correctly, and we can guarantee that any types and members referenced in these expressions did not change, then the expression should still type check at the point of deserialization. This is one of the optimizations that have been used in high-density Reaqtor deployments where we recover millions of expression trees containing hundreds of nodes each. The savings achieved can add up to significantly reduce recovery times, during which event processing is stalled.
// Because the expression factories implement all factory methods as `virtual`, we can also derive from the factories to add additional optimizations, e.g. caching of `Constant` or `Default` nodes to reduce the number of allocations. We illustrate this below using the `Nuqleon.Memory` faciltities for function memoization.
// + dotnet_interactive={"language": "csharp"}
using System.Memory;

// Expression factory that memoizes Constant and Default nodes so repeated
// requests for the same (value, type) pair return one shared node instance.
class ExpressionFactoryWithCaching : ExpressionFactory, IClearable
{
    private readonly IMemoizedDelegate<Func<object, Type, ConstantExpression>> _constantCache;
    private readonly IMemoizedDelegate<Func<Type, DefaultExpression>> _defaultCache;

    public ExpressionFactoryWithCaching(IMemoizer memoizer)
    {
        // Cache misses fall through to the base (uncached) factory methods.
        _constantCache = memoizer.Memoize<object, Type, ConstantExpression>((value, type) => base.Constant(value, type));
        _defaultCache = memoizer.Memoize<Type, DefaultExpression>(type => base.Default(type));
    }

    public override ConstantExpression Constant(object value) => Constant(value, value?.GetType() ?? typeof(object));

    public override ConstantExpression Constant(object value, Type type) => _constantCache.Delegate(value, type);

    public override DefaultExpression Default(Type type) => _defaultCache.Delegate(type);

    // Drops all cached nodes.
    public void Clear()
    {
        _constantCache.Cache.Clear();
        _defaultCache.Cache.Clear();
    }
}
// -
// Now we can create an instance of `ExpressionFactoryWithCaching` and observe the effects of caching.
// + dotnet_interactive={"language": "csharp"}
var memoizer = Memoizer.Create(MemoizationCacheFactory.Unbounded);
var factory = new ExpressionFactoryWithCaching(memoizer);

// Both requests hit the same cache entry for (42, typeof(int)), so the node is shared.
var first = factory.Constant(42);
var second = factory.Constant(42);

Console.WriteLine(ReferenceEquals(first, second));
// -
// ## Expression tree analysis
//
// Various utilities are provided in this library to make the task of analyzing expression trees easier.
// ### Equality comparers
//
// `ExpressionEqualityComparer` implements `IEqualityComparer<Expression>` to compare two `Expression` instances for equality, taking binding of variables and labels (used in `Goto` and `Label` expressions) into account. For example, given trees `x => x` and `y => y`, they will compare equal even though `x` and `y` are not reference equal. That is, as long as the binding of variables between use sites and definition sites is equivalent in both trees being compared, the equality requirement is met.
//
// An example of using comparers is shown below.
// + dotnet_interactive={"language": "csharp"}
var eq = new ExpressionEqualityComparer();

Expression<Func<int, int, int>> e1 = (x, y) => x * y;
Expression<Func<int, int, int>> e2 = (a, b) => a * b;

// BUGFIX: compare the two alpha-equivalent lambdas (e1 vs e2) as the prose
// describes; the original compared e1 with itself, which is trivially true
// and does not demonstrate binding-aware equality at all.
Console.WriteLine(eq.Equals(e1, e2));
// -
// Unbound parameters do not compare equal by default.
// + dotnet_interactive={"language": "csharp"}
// Two distinct unbound parameters do not compare equal by default,
// even when their name and type match.
var p1 = Expression.Parameter(typeof(int), "x");
var p2 = Expression.Parameter(typeof(int), "x");

Console.WriteLine(eq.Equals(p1, p2));
// -
// If one wants to treat unbound parameters different, a custom `ExpressionEqualityComparator` can be built that overrides `EqualsGlobalParameter`, as shown below.
// + dotnet_interactive={"language": "csharp"}
// Comparator that treats unbound (global) parameters as equal when both
// their static type and their name match.
class ExpressionEqualityComparatorWithGlobalVariableEqualityByName : ExpressionEqualityComparator
{
    protected override bool EqualsGlobalParameter(ParameterExpression left, ParameterExpression right)
    {
        return left.Type == right.Type && left.Name == right.Name;
    }
}
// -
// `ExpressionEqualityComparer` provides an equality compararer by wrapping a factory for `ExpressionEqualityComparator` instances. The latter are stateful because they have to keep track of environments, i.e. the variables and labels that are declarated within the tree, in order to perform binding. To use our custom `ExpressionEqualityComparatorWithGlobalVariableEqualityByName`, we can wrap it as shown below.
// + dotnet_interactive={"language": "csharp"}
// Wrap the custom comparator in a comparer; the factory delegate creates a
// fresh (stateful) comparator for each comparison.
var eq = new ExpressionEqualityComparer(() => new ExpressionEqualityComparatorWithGlobalVariableEqualityByName());

var p1 = Expression.Parameter(typeof(int), "x");
var p2 = Expression.Parameter(typeof(int), "x");

Console.WriteLine(eq.Equals(p1, p2));
// -
// This time around, the global parameters are considered to be equal. We use this technique in Reaqtor to compare expressions for equality prior to performing binding operations.
// ### Free variable scanner
//
// We've already built a free variable scanner manually earlier in this notebook, but the library comes with such a facility built-in. As an example, consider an expression of the form `x => x + y` where `y` is unbound.
// + dotnet_interactive={"language": "csharp"}
// Build (int x) => x + y: `x` is bound by the lambda's parameter list, `y` remains free.
var x = Expression.Parameter(typeof(int), "x");
var y = Expression.Parameter(typeof(int), "y");

var expr = Expression.Lambda<Func<int, int>>(Expression.Add(x, y), x);

Console.WriteLine(expr);
// -
// We can now use `FreeVariableScanner` to determine whether the expression contains any free variables, or to get a list of such variables. Note that the former operation is more efficient than getting all the unbound variables and performing an `Any()` or `Count() > 0` check, because we don't have to allocate a collection.
// + dotnet_interactive={"language": "csharp"}
Console.WriteLine($"Has free variables = {FreeVariableScanner.HasFreeVariables(expr)}");
Console.WriteLine($"Free variables = {{ {string.Join(", ", FreeVariableScanner.Scan(expr))} }}");
// -
// ### Allow list scanner
//
// When processing expressions, it's often useful to scan them for undesirable operations, such as calls to methods that are unsafe to the hosting environment (when evaluating an expression) or constructs that cannot be translated to some target language (e.g. when writing a query provider). This library provides allow list scanners that check expressions against a given list of allowed items. Two different types are provided:
//
// * `ExpressionTypeAllowListScanner` which checks the `Type` for every `Expression` node in a given expression tree, e.g. to make sure a tree evaluates within a certain "type domain";
// * `ExpressionMemberAllowListScanner` which checks occurrences of any `MemberInfo` on any `Expression` node in a given expression tree, e.g. to check for methods, properties, fields, and constructors used.
//
// Let's first have a look at these utilities.
// First, we'll explore a trivial `ExpressionTypeAllowListScanner` that only allows expressions that use `int`-based operations.
// + dotnet_interactive={"language": "csharp"}
var typeScanner = new ExpressionTypeAllowListScanner
{
Types = { typeof(int) }
};
var validExpr = Expression.Add(Expression.Constant(1), Expression.Constant(2));
var res = typeScanner.Visit(validExpr);
Console.WriteLine(res);
// -
// In case the tree contains operations involving other types, it gets rejected.
// + dotnet_interactive={"language": "csharp"}
var invalidExpr = Expression.Add(Expression.Constant(1), Expression.Property(Expression.Constant("foo"), nameof(string.Length)));
var res = typeScanner.Visit(invalidExpr);
// -
// In case it's desirable to handle expressions that don't pass the allowlist rules, one can override `ResolveExpression` as shown below.
// + dotnet_interactive={"language": "csharp"}
class MyExpressionTypeAllowListScanner : ExpressionTypeAllowListScanner
{
    // Rather than rejecting a node whose type is not on the allow list, wrap it in a funclet
    // so it gets partially evaluated later on.
    protected override Expression ResolveExpression<T>(T expression, Type type, Func<T, Expression> visit)
    {
        return FuncletExpression.Create(expression);
    }
}
var myTypeScanner = new MyExpressionTypeAllowListScanner
{
Types = { typeof(int) }
};
var res = myTypeScanner.Visit(invalidExpr);
Console.WriteLine(res);
// -
// In this example we use the sledgehammer of wrapping the rejected expression in a funclet, which will cause partial evaluation down the line. For example, prior to submitting an expression to a remote service, partial evaluation of any unsupported constructs can take place locally. A better example may be using a member-based allow list scanner, as shown below.
// + dotnet_interactive={"language": "csharp"}
var memberScanner = new ExpressionMemberAllowListScanner
{
Members =
{
typeof(Math).GetMethod(nameof(Math.Abs), new[] { typeof(int) })
}
};
Expression<Func<int, int>> f = x => Math.Abs(x + 1);
var res = memberScanner.Visit(f);
Console.WriteLine(res);
// -
// Here, we allow uses of `Math.Abs(int)`, but nothing else. Alternatively, we could have added `typeof(Math)` to the `DeclaringTypes` collection on the scanner, to allow all members on `Math`.
// Now, let's consider a more complex expression that refers to a variable in the outer scope. This causes the creation of a closure, which will manifest itself in the expression tree as a field lookup using a `MemberExpression`.
// + dotnet_interactive={"language": "csharp"}
int y = 1;
Expression<Func<int, int>> fWithClosure = x => Math.Abs(x + y);
Console.WriteLine(fWithClosure);
// -
// This field lookup on the compiler-generated closure object is not present in the allow list, so the scanner will reject it.
// + dotnet_interactive={"language": "csharp"}
var res = memberScanner.Visit(fWithClosure);
// -
// We can now build a custom scanner type that uses a funclet to trigger partial evaluation of unsupported nodes. In this case, it will cause reduction of the closure access to a `ConstantExpression`.
// + dotnet_interactive={"language": "csharp"}
class MyExpressionMemberAllowListScanner : ExpressionMemberAllowListScanner
{
    // Rather than rejecting a node that uses a member not on the allow list, wrap it in a
    // funclet so it gets partially evaluated later on (e.g. closure field accesses).
    protected override Expression ResolveExpression<T>(T expression, MemberInfo member, Func<T, Expression> visit)
    {
        return FuncletExpression.Create(expression);
    }
}
var myMemberScanner = new MyExpressionMemberAllowListScanner
{
Members =
{
typeof(Math).GetMethod(nameof(Math.Abs), new[] { typeof(int) })
}
};
var res = myMemberScanner.Visit(fWithClosure);
Console.WriteLine(res);
// -
// To illustrate the partial evaluation carried out by the `FuncletExpression`, we can simply run an `ExpressionVisitor` over the expression, which will trigger calls to `Reduce`.
// + dotnet_interactive={"language": "csharp"}
// Visitor with no overrides; merely visiting a tree triggers calls to Reduce on reducible
// (extension) nodes, which causes FuncletExpression nodes to be partially evaluated.
class ReduceExpressionVisitor : ExpressionVisitor
{
}
var reduced = new ReduceExpressionVisitor().Visit(res);
Console.WriteLine(reduced);
// -
// More advanced facilities of type-based and member-based allow list scanners can be constructed by deriving from the `ExpressionTypeAllowListScannerBase` and `ExpressionMemberAllowListScannerBase` classes instead. These provide `Check` methods that are used to check whether a type or member is allowed to be used. These predicates can be arbitrary rather than type-based. For example, one could allow any type or member in a particular assembly.
// ## Expression tree rewriters
//
// This library also provides a whole plethora of expression tree rewriting utilities.
// ### Alpha renaming
//
// Lambda calculus has the notion of alpha renaming whereby the names of variables get changed in a way that doesn't inadvertently change the bindings of variables. For example, consider an expression `x => y => x + y`. In here, we can change the name of `x` to `a` but not to `y` because it'd cause the binding to change to the `y` declared on the inner lambda expression.
//
// This library provides an implementation of alpha renaming using the `AlphaRenamer.EliminateNameConflicts` static method, as shown below.
// + dotnet_interactive={"language": "csharp"}
var x0 = Expression.Parameter(typeof(int), "x");
var x1 = Expression.Parameter(typeof(int), "x");
var ambiguousNames1 = Expression.Lambda(Expression.Lambda(x0, x0), x0);
var ambiguousNames2 = Expression.Lambda(Expression.Lambda(x0, x1), x1);
Console.WriteLine(ambiguousNames1);
Console.WriteLine(ambiguousNames2);
// -
// Both functions have different meanings. The first one is equivalent to `_ => x => x`, because the use site of `x` in the inner lambda binds to the declaration site of `x` as a parameter on the inner lambda. The second one is equivalent to `x => _ => x`.
//
// Alpha renaming gets rid of the syntactic ambiguity by creating new `ParameterExpression` nodes with unique names.
// + dotnet_interactive={"language": "csharp"}
var uniqueNames1 = AlphaRenamer.EliminateNameConflicts(ambiguousNames1);
var uniqueNames2 = AlphaRenamer.EliminateNameConflicts(ambiguousNames2);
Console.WriteLine(uniqueNames1);
Console.WriteLine(uniqueNames2);
// -
// ### Beta reduction
//
// Computation in lambda calculus is driven by beta reduction. Given an invocation of a lambda expression, a reduction can be made by substituting the arguments used in the invocation for the parameters of the lambda expression. For example:
//
// ```csharp
// (x => x + 1)(2)
// ```
//
// can be reduced to `2 + 1` by substituting `2` for `x` in the body of the lambda expression. That is, arguments get inlined in the lambda body.
//
// Let's first translate this example to expression trees, as follows:
// + dotnet_interactive={"language": "csharp"}
Expression<Func<int, int>> f = x => x + 1;
var expr = Expression.Invoke(f, Expression.Constant(2));
Console.WriteLine(expr);
// -
// We can now use the `BetaReducer` in this library to reduce the expression to `2 + 1`, as shown below.
// + dotnet_interactive={"language": "csharp"}
var reduced = BetaReducer.Reduce(expr);
Console.WriteLine(reduced);
// -
// The call to `Reduce` finds any `Invoke(Lambda(...), ...)` structure in the given expression tree and attempts to reduce it. Given that beta reduction is a very essential technique employed by Reaqtor to bind reactive query expressions, we'll build a slightly bigger sample below that illustrates binding and reduction.
// First, let's build a little catalog of functions for a simple math engine. We'll start by defining some operations such as `Abs` and `Pow` and put them in a dictionary.
// + dotnet_interactive={"language": "csharp"}
var functions = new Dictionary<string, Expression>
{
{ "abs", (Expression<Func<double, double>>)(x => Math.Abs(x)) },
{ "pow", (Expression<Func<double, double, double>>)((x, y) => Math.Pow(x, y)) },
};
// -
// The goal is for users to be able to submit expressions that refer to the functions by name using unbound `ParameterExpression` nodes. For example:
// + dotnet_interactive={"language": "csharp"}
var expr = Expression.Invoke(Expression.Parameter(typeof(Func<double, double>), "abs"), Expression.Constant(-2.0));
Console.WriteLine(expr);
// -
// Reaqtor uses this technique to normalize expressions submitted by client libraries. Rather than doing so for math operations, it does this for reactive query operators such as `Where`, `Select`, `Window`, etc. The normalization step takes a friendly user expression such as `xs.Where(x => x > 0)` to an expression that is no longer bound to a concrete client-side `MethodInfo` for `Where`. In our simplified math example shown here, one can imagine a client library that allows the user to write `calculator.Abs(-2.0)`, which gets normalized to `Invoke(Abs, -2.0)` where `Abs` is an unbound parameter expression. The calculator engine then binds the `Abs` function to a concrete implementation, e.g. using `Math.Abs`.
// To build a binder, we'll leverage the `FreeVariableScanner` to find all unbound parameters in the expression submitted by the user, and then build a binding expression around it. Let's do these things one step at a time and start by scanning the free variables.
// + dotnet_interactive={"language": "csharp"}
var freeVars = FreeVariableScanner.Scan(expr);
Console.WriteLine(string.Join(", ", freeVars));
// -
// Next, we can perform a technique called *lambda lifting* to wrap the user expression in a lambda that has all of the unbound variables as parameters. This is shown below.
// + dotnet_interactive={"language": "csharp"}
var lambda = Expression.Lambda(expr, freeVars);
Console.WriteLine(lambda);
// -
// To perform binding, we can now look up all of the unbound parameters in our `functions` registry and construct an `InvocationExpression` around the lambda that was built above.
// + dotnet_interactive={"language": "csharp"}
var bound = Expression.Invoke(lambda, freeVars.Select(freeVar => functions[freeVar.Name]));
Console.WriteLine(bound);
// -
// Note that expression factories perform rigorous type checking, so if the user's expression is trying to invoke `abs` using `int` operands, the binding step above would fail. Similarly, binding would fail if we don't find a particular variable in the `functions` registry.
//
// At this point, we could already evaluate the user's expression. For this, we'll make use of the `Evaluate` extension method provided by this library (discussed further on in this notebook).
// + dotnet_interactive={"language": "csharp"}
Console.WriteLine(bound.Evaluate<double>());
// -
// However, this can be rather inefficient because the compiled expression tree can be simplified by applying beta reduction steps first. This can also help with debugging, size of expressions stored, etc. So, let's give beta reduction a try.
// + dotnet_interactive={"language": "csharp"}
var reduced = BetaReducer.Reduce(bound);
Console.WriteLine(reduced);
// -
// This didn't work because the `BetaReducer` is very conservative when it comes to inlining non-trivial expressions which may cause reordering of side-effects. To override this behavior, we can use an overload of `Reduce`, as shown below.
// + dotnet_interactive={"language": "csharp"}
var reduced = BetaReducer.Reduce(bound, BetaReductionNodeTypes.Unrestricted, BetaReductionRestrictions.None);
Console.WriteLine(reduced);
// -
// A detailed description of these parameters is outside the scope of this notebook, but a little brainteaser is shown below to point out the potential evils of performing beta reduction.
//
// ```csharp
// (s => Console.ReadLine() + s + s)(Console.ReadLine())
// ```
//
// When evaluating this code, the user will get two prompts to input a string. Say the user enters `bar` and `foo`, then the result will be `foobarbar`. If "naive" unrestricted beta reduction is carried out, we can end up with the following:
//
// ```csharp
// Console.ReadLine() + Console.ReadLine() + Console.ReadLine()
// ```
//
// which prompts the user for three inputs. If the user enters `bar`, `foo`, and `qux`, the result will be `barfooqux`.
// Continuing with our running example, note that the resulting expression is not fully reduced yet. In particular, we still have an `InvocationExpression` node left in the tree. To cause further reduction, we can use the `ReduceEager` method which carries out a *fixed point* loop around the core beta reduction logic. That is, it keeps applying beta reduction until the tree no longer changes. An example is shown below.
// + dotnet_interactive={"language": "csharp"}
var reduced = BetaReducer.ReduceEager(bound, BetaReductionNodeTypes.Unrestricted, BetaReductionRestrictions.None, throwOnCycle: true);
Console.WriteLine(reduced);
// -
// In many cases, binding and beta reduction need to be carried out in a *fixed point* loop algorithm, because definitions may refer to other definitions. As an example, consider adding a `GetRadius` function that's built on top of `Pow` and `Sqrt`. Let's add a few definitions to our `functions` registry.
// + dotnet_interactive={"language": "csharp"}
functions.Add("sqrt", (Expression<Func<double, double>>)(x => Math.Sqrt(x)));
var x = Expression.Parameter(typeof(double), "x");
var y = Expression.Parameter(typeof(double), "y");
var sqrt = Expression.Parameter(typeof(Func<double, double>), "sqrt");
var pow = Expression.Parameter(typeof(Func<double, double, double>), "pow");
var two = Expression.Constant(2.0);
var getRadius = Expression.Lambda(Expression.Invoke(sqrt, Expression.Add(Expression.Invoke(pow, x, two), Expression.Invoke(pow, y, two))), x, y);
functions.Add("getRadius", getRadius);
// -
// Let's now create a reusable `Bind` function, encapsulating the logic we used before, but adding a `bool` return type to indicate whether any binding took place.
// + dotnet_interactive={"language": "csharp"}
// Attempts to bind the free variables in the given expression against the `functions`
// registry. Returns false (with `bound` set to the original expression) when the expression
// has no free variables; otherwise, lambda-lifts the expression over its free variables,
// applies the lambda to the corresponding registry definitions, and returns true.
//
// NOTE(review): a variable missing from the registry throws KeyNotFoundException, and a type
// mismatch makes the Invoke factory throw; both failure modes are intentional here.
bool Bind(Expression expr, out Expression bound)
{
    // Materialize once; the original re-enumerated the scan result for Count(), Lambda, and Select.
    var freeVars = FreeVariableScanner.Scan(expr).ToList();
    if (freeVars.Count == 0)
    {
        bound = expr;
        return false;
    }
    var lambda = Expression.Lambda(expr, freeVars);
    bound = Expression.Invoke(lambda, freeVars.Select(freeVar => functions[freeVar.Name]));
    return true;
}
// -
// Let's also encapsulate our beta reduction logic in a `Reduce` method.
// + dotnet_interactive={"language": "csharp"}
// Performs eager, unrestricted beta reduction, repeating until a fixed point is reached.
static Expression Reduce(Expression expr) =>
    BetaReducer.ReduceEager(expr, BetaReductionNodeTypes.Unrestricted, BetaReductionRestrictions.None, throwOnCycle: true);
// -
// To show the effects of a single pass of binding and reduction on a user expression that refers to `GetRadius`, we'll go ahead and build such an expression.
// + dotnet_interactive={"language": "csharp"}
var expr = Expression.Invoke(Expression.Parameter(typeof(Func<double, double, double>), "getRadius"), Expression.Constant(3.0), Expression.Constant(4.0));
// -
// A single turn of the binding and reduction crank yields the following result.
// + dotnet_interactive={"language": "csharp"}
if (Bind(expr, out var bound))
{
var res = Reduce(bound);
Console.WriteLine(res);
}
// -
// This expression cannot be evaluated yet because it now refers to `Sqrt` and `Pow`. To get to the point of evaluating the expression, we need to continue binding until no more binding steps are necessary. This can be achieved using a simple loop.
// + dotnet_interactive={"language": "csharp"}
// Repeatedly binds and reduces the given expression until no free variables remain.
Expression BindFully(Expression expr)
{
    var current = expr;
    while (Bind(current, out current))
    {
        current = Reduce(current);
    }
    return current;
}
var res = BindFully(expr);
Console.WriteLine(res);
// -
// Now the expression is fully bound to `Math.*` methods, and we can go ahead and evaluate it.
// + dotnet_interactive={"language": "csharp"}
Console.WriteLine(res.Evaluate<double>());
// -
// ### Eta converter
//
// A final lambda calculus construct is eta conversion, which simplifies an expression of the form `(x => f(x))` to `f`, by taking away the redundant lambda "abstraction" whose body contains an invocation "application". The result is a simpler, more compact, expression. This library supports this form of simplifying expressions through `EtaConverter.Convert`.
// + dotnet_interactive={"language": "csharp"}
var x = Expression.Parameter(typeof(int), "x");
var f = Expression.Parameter(typeof(Func<int, int>), "f");
var expr = Expression.Lambda(Expression.Invoke(f, x), x);
Console.WriteLine(expr);
var simpler = EtaConverter.Convert(expr);
Console.WriteLine(simpler);
// -
// ### Compiler-generated name elimination
//
// The C# compiler emits compiler-generated names for certain lambda expression parameters, e.g. in the context of `let` clauses that introduce so-called "transparent identifiers". An example is shown below using `IQueryable<T>`.
// + dotnet_interactive={"language": "csharp"}
var res = from x in new[] { 1, 2, 3 }.AsQueryable()
let y = x + 1
let z = y * 2
select x + y - z;
Console.WriteLine(res.Expression);
// -
// The resulting expression tree shows quite verbose compiler-generated identifiers which can hamper debuggability. Using the `CompilerGeneratedNameEliminator.Prettify` utility we can rewrite the expression using simpler identifiers.
// + dotnet_interactive={"language": "csharp"}
var pretty = CompilerGeneratedNameEliminator.Prettify(res.Expression);
Console.WriteLine(pretty);
// -
// Note that mileage is limited because the rewriter cannot change type names; it can only change the names of `ParameterExpression` nodes.
// ### Constant hoisting
//
// In services like Reaqtor that host and evaluate millions of expressions, it's often useful to detect expressions that are identical modulo constants. These constants may get introduced due to partial evaluation at the client side, e.g. due to closures. An example is shown below:
//
// ```csharp
// int a = 41;
//
// IQueryable<int> query = from x in xs where x > a select x + 1;
//
// foreach (var x in query)
// {
// // use results
// }
// ```
//
// This query will effectively get translated to the following if closures are being eliminated (e.g. using the `FuncletExpression` approach described earlier):
//
// ```csharp
// IQueryable<int> query = from x in xs where x > 41 select x + 1;
// ```
//
// Other invocations of the same code may use a different value for `a` and thus generate other queries that are identical modulo this single constant:
//
// ```csharp
// IQueryable<int> query1 = from x in xs where x > 41 select x + 1;
// IQueryable<int> query2 = from x in xs where x > 42 select x + 1;
// IQueryable<int> query3 = from x in xs where x > 43 select x + 1;
// ```
//
// If a high-density service ends up compiling all of these query expressions in order to evaluate them, we can end up with a lot of JITted code on the heap, and pay the price for emitting the code in the first place (e.g. using `System.Reflection.Emit` under the hood). By using the technique of constant hoisting, we can lift constants out of these queries, like this:
//
// ```csharp
// Func<int, int, IQueryable<int>> query = (c1, c2) => from x in xs where x > c1 select x + c2;
//
// IQueryable<int> query1 = query(41, 1);
// IQueryable<int> query2 = query(42, 1);
// IQueryable<int> query3 = query(43, 1);
// ```
//
// The `ConstantHoister` in this library provides support to do this, which is shown below. First, let's write a few user queries that are identical modulo constants.
// + dotnet_interactive={"language": "csharp"}
Expression<Func<IEnumerable<int>>> query1 = () => Enumerable.Range(1, 10).Where(x => x % 2 == 0).Select(x => x + 9);
Expression<Func<IEnumerable<int>>> query2 = () => Enumerable.Range(2, 20).Where(y => y % 3 == 1).Select(y => y + 8);
Expression<Func<IEnumerable<int>>> query3 = () => Enumerable.Range(3, 30).Where(z => z % 4 == 2).Select(z => z + 7);
Expression expr1 = query1.Body;
Expression expr2 = query2.Body;
Expression expr3 = query3.Body;
Console.WriteLine(expr1);
Console.WriteLine(expr2);
Console.WriteLine(expr3);
// -
// To make our setup a little more challenging, note that we've also used different variable names in the three expressions. Modulo the constants, all the queries have the same semantics.
// Next, let's explore the `ConstantHoister` facility to hoist out constants for these three query expressions.
// + dotnet_interactive={"language": "csharp"}
var hoister = new ConstantHoister();
ExpressionWithEnvironment hoisted1 = hoister.Hoist(expr1);
// -
// The call to `Hoist` returns an object that contains an expression and an environment. Let's first look at the `Expression` property.
// + dotnet_interactive={"language": "csharp"}
Console.WriteLine(hoisted1.Expression);
// -
// Note that all of the `ConstantExpression` nodes have been substituted by `ParameterExpression` nodes. In other words, we end up with a parameterized query. Now, let's look at the `Environment`.
// + dotnet_interactive={"language": "csharp"}
var sb = new StringBuilder();
foreach (var (key, value) in hoisted1.Environment)
{
sb.AppendLine($" {key} -> {value}");
}
Console.WriteLine(sb);
// -
// We can now combine the environment and the hoisted expression to create a parameterized query.
// + dotnet_interactive={"language": "csharp"}
var constantHoistedExpr = Expression.Lambda(hoisted1.Expression, hoisted1.Environment.Select(kv => kv.Key));
Console.WriteLine(constantHoistedExpr);
// -
// The original user expression is now equivalent to invoking this query with the values in the environment, like this:
// + dotnet_interactive={"language": "csharp"}
var reconstructedQueryExpr = Expression.Invoke(constantHoistedExpr, hoisted1.Environment.Select(kv => Expression.Constant(kv.Value, kv.Key.Type)));
Console.WriteLine(reconstructedQueryExpr);
// -
// However, we don't just want to use the `ConstantHoister` to hoist out constants just to put them back using lambda lifting, invocation, and beta reduction. Instead, we want to look up a compiled query expression from the lambda lifted query expression. To do so, let's start off by defining a compiled delegate cache as a dictionary that maps from `LambdaExpression` to `Delegate`.
// + dotnet_interactive={"language": "csharp"}
var compiledQueries = new Dictionary<LambdaExpression, Delegate>(new ExpressionEqualityComparer());
// -
// By using the `ExpressionEqualityComparer`, expressions get compared using value equality semantics rather than reference equality. Note that this will also take care of query expressions being different modulo parameter names in e.g. query clauses.
// Next, let's encapsulate our constant hoisting experiments in a single `Evaluate` function that will perform constant hoisting and lambda lifting prior to looking up whether the query has been received before through our `compiledQueries` cache.
// + dotnet_interactive={"language": "csharp"}
// Evaluates the given query expression, caching compiled delegates keyed by the
// constant-hoisted (parameterized) query template so that queries which are identical
// modulo constants are compiled only once.
void Evaluate(Expression expression)
{
    var hoisted = new ConstantHoister().Hoist(expression);
    var parameters = hoisted.Environment.Select(kv => kv.Key);
    var template = Expression.Lambda(hoisted.Expression, parameters);
    if (!compiledQueries.TryGetValue(template, out var compiled))
    {
        Console.WriteLine($"Compiling {template}");
        compiled = template.Compile();
        compiledQueries.Add(template, compiled);
    }
    var args = hoisted.Environment.Select(kv => Expression.Constant(kv.Value, kv.Key.Type)).ToArray();
    var invocation = Expression.Invoke(Expression.Constant(compiled, template.Type), args);
    object result = invocation.Evaluate();
    Console.WriteLine($"Evaluating {invocation} = {result}");
}
// -
// Now we can apply `Evaluate` to our query expressions which only differ in the constants used.
// + dotnet_interactive={"language": "csharp"}
Evaluate(expr1);
Evaluate(expr2);
Evaluate(expr3);
// -
// Note we only compiled the query expression once and then went ahead to evaluate the compiled expression three times, with different constants.
// More advanced facilities in the `ConstantHoister` enable excluding certain constants. For example, strings passed to the format parameter of `String.Format(string, object[])` or the regular expression parameter of `Regex.Match(string, string)` tend to not vary; in fact, these are strings that encode a small embedded programming language. To exclude such constants, exclusion patterns can be added to the `ConstantHoister` constructor.
// + dotnet_interactive={"language": "csharp"}
using System.Text.RegularExpressions;
var betterHoister = new ConstantHoister(useDefaultForNull: true, exclusions: new LambdaExpression[]
{
(Expression<Func<string, string>>)(s => string.Format(s, default(object))),
(Expression<Func<string, string>>)(s => string.Format(s, default(object), default(object))),
(Expression<Func<string, string>>)(s => string.Format(s, default(object), default(object), default(object))),
(Expression<Func<string, string>>)(s => string.Format(s, default(object[]))),
(Expression<Func<string, Match>>)(pattern => Regex.Match("", pattern)),
});
// -
// The patterns in the exclusion list are lambda expressions where the first parameter indicates the position where a constant node should be ignored for hoisting. For example, if the user wrote `Regex.Match("foobar", "[a-z]*")`, then the first argument `"foobar"` will be hoisted as a constant, but the second argument `"[a-z]*"` won't be hoisted. An example is shown below:
// + dotnet_interactive={"language": "csharp"}
Expression<Func<Match>> f = () => Regex.Match(string.Format("{0}{1}", "foo", "bar"), "[a-z]*");
ExpressionWithEnvironment res = betterHoister.Hoist(f);
Console.WriteLine(res.Expression);
// -
// Note that only `"foo"` and `"bar"` are getting hoisted while the format string and regular expression constants are kept in the expression tree.
// ### Type substitution
//
// The `TypeSubstitutionExpressionVisitor` can be used to retype an expression tree from one set of types to another set of types. This involves specifying rules to rebind members involving the types being substituted. In the context of Reaqtor, type substitution is often used to move a tree from one *type space* to another, e.g. from one set of reactive interfaces to another.
//
// A trivial example using `DateTime` and `DateTimeOffset` is shown below:
// + dotnet_interactive={"language": "csharp"}
var subst = new TypeSubstitutionExpressionVisitor(new Dictionary<Type, Type>
{
{ typeof(DateTime), typeof(DateTimeOffset) }
});
Expression<Func<DateTime>> tomorrow = () => DateTime.Now.AddDays(1);
Expression rewritten = subst.Apply(tomorrow);
Console.WriteLine(rewritten);
// -
// More advanced use cases can be supported by deriving from `TypeSubstitutionExpressionVisitor` to override various `Resolve` methods that are used to resolve reflection objects as any of the types changes. In the sample above, the substitution of `DateTime` to `DateTimeOffset` triggers lookup for `Now` and `AddDays` on `DateTimeOffset`, which can be found successfully. In some cases, more complex mappings are required. An example of a failing rewrite is shown below.
// + dotnet_interactive={"language": "csharp"}
Expression<Func<DateTime>> birthday = () => new DateTime(1983, 2, 11);
subst.Apply(birthday);
// -
// This fails because `DateTimeOffset` does not define a constructor that's isomorphic to the constructor on `DateTime`. To support this type of rewrite, we can override `VisitNew` to handle the case manually. Because `TypeSubstitutionExpressionVisitor` is just a specialized visitor, we can override any of the `Visit` methods if we have to. In this case, that's our only hope because we can't use `ResolveConstructor` to find a `ConstructorInfo` on `DateTimeOffset` that takes three `int` parameters.
// + dotnet_interactive={"language": "csharp"}
// Type substitutor that maps DateTime to DateTimeOffset, with special handling for the
// DateTime(int, int, int) constructor, which has no direct DateTimeOffset counterpart.
class DateTimeSubstitutor : TypeSubstitutionExpressionVisitor
{
    // Substitution map handed to the base visitor.
    private static readonly Dictionary<Type, Type> s_map = new Dictionary<Type, Type>
    {
        { typeof(DateTime), typeof(DateTimeOffset) }
    };
    // new DateTime(year, month, day) — the constructor we intercept in VisitNew.
    private static readonly ConstructorInfo s_ctorDateTime_IntIntInt = typeof(DateTime).GetConstructor(new[] { typeof(int), typeof(int), typeof(int) });
    // new DateTimeOffset(DateTime) — used to wrap the original construction.
    private static readonly ConstructorInfo s_ctorDateTimeOffset_DateTime = typeof(DateTimeOffset).GetConstructor(new[] { typeof(DateTime) });
    public DateTimeSubstitutor()
        : base(s_map)
    {
    }
    protected override Expression VisitNew(NewExpression node)
    {
        if (node.Constructor?.DeclaringType == typeof(DateTime))
        {
            if (node.Constructor == s_ctorDateTime_IntIntInt)
            {
                // Keep the DateTime(int, int, int) construction (with visited arguments)
                // and wrap it in new DateTimeOffset(DateTime) instead of retyping it.
                var args = Visit(node.Arguments);
                return Expression.New(s_ctorDateTimeOffset_DateTime, node.Update(args));
            }
        }
        // All other constructors go through the base class's type-substitution resolution.
        return base.VisitNew(node);
    }
}
// -
// Now, we can perform the rewrite.
// + dotnet_interactive={"language": "csharp"}
var betterSubst = new DateTimeSubstitutor();
Console.WriteLine(betterSubst.Apply(birthday));
// -
// Furthermore, if an expression tree contains a `ConstantExpression` of a type that has to be changed, a call to a `ConvertConstant` virtual method is made, which enables for the insertion of custom conversion logic.
// + dotnet_interactive={"language": "csharp"}
var now = Expression.Constant(DateTime.Now);
Console.WriteLine(betterSubst.Apply(now));
// -
// To support this case, let's derive from our previous attempt (to reduce code in this notebook), and override `ConvertConstant` as well.
// + dotnet_interactive={"language": "csharp"}
class EvenBetterDateTimeSubstitutor : DateTimeSubstitutor
{
    // Converts DateTime constant values to DateTimeOffset by building a Convert expression
    // over the original constant and evaluating it; all other constants defer to the base.
    protected override object ConvertConstant(object originalValue, Type newType)
    {
        if (originalValue?.GetType() == typeof(DateTime) && newType == typeof(DateTimeOffset))
        {
            var constant = Expression.Constant(originalValue, typeof(DateTime));
            return Expression.Convert(constant, newType).Evaluate();
        }
        return base.ConvertConstant(originalValue, newType);
    }
}
// -
// Finally, we should be able to also convert a `ConstantExpression` from `DateTime` to `DateTimeOffset`.
// + dotnet_interactive={"language": "csharp"}
var evenBetterSubst = new EvenBetterDateTimeSubstitutor();
Console.WriteLine(evenBetterSubst.Apply(now));
// -
// ### Tupletization
//
// One of the drawbacks of delegate types in .NET is the lack of support for variadic generics, causing us to end up with a whole ladder of types to support functions with different numbers of parameters (cf. `Action<...>` and `Func<...>` families).
//
// While expression trees support the creation of `LambdaExpression` nodes with any number of parameters, it relies on runtime code generation (using `System.Reflection.Emit`) to manufacture delegate types of arities that exceed the available `Func<>` and `Action<>` types. This results in types that are tricky to carry along across machine boundaries (though Bonsai supports ways to represent function types as first-class citizens).
//
// > **Note:** One can argue that functions with more than 16 parameters should be avoided, but keep in mind that expressions are often machine generated. For example, when performing constant hoisting, the number of parameters generated is linear in the number of constants that occurred in an expression.
//
// Let's first have a look at the runtime generation of delegate types by the `Lambda` factory.
// + dotnet_interactive={"language": "csharp"}
for (int i = 0; i < 20; i++)
{
Console.WriteLine(Expression.Lambda(Expression.Empty(), Enumerable.Range(1, i).Select(j => Expression.Parameter(typeof(int)))).Type);
}
for (int i = 0; i < 20; i++)
{
Console.WriteLine(Expression.Lambda(Expression.Constant(42), Enumerable.Range(1, i).Select(j => Expression.Parameter(typeof(int)))).Type);
}
// -
// This library provides a mechanism to normalize lambda expressions to unary functions using a technique called tupletization. Let's first construct a couple of sample expression trees below:
// + dotnet_interactive={"language": "csharp"}
var xs = Enumerable.Range(1, 20).Select(i => Expression.Parameter(typeof(int), "x" + i)).ToArray();
var sum = xs.Cast<Expression>().Aggregate((l, r) => Expression.Add(l, r));
Expression<Func<int>> f1 = () => 42;
Expression<Func<int, int>> f2 = x => x + 1;
Expression<Func<int, int, int>> f3 = (x, y) => x * y;
LambdaExpression f20 = Expression.Lambda(sum, xs);
Expression<Action> a1 = () => Console.WriteLine(42);
Expression<Action<int>> a2 = x => Console.WriteLine(x + 1);
Expression<Action<int, int>> a3 = (x, y) => Console.WriteLine(x * y);
LambdaExpression a20 = Expression.Lambda(Expression.Call(typeof(Console).GetMethod(nameof(Console.WriteLine), new[] { typeof(int) }), sum), xs);
var exprs = new LambdaExpression[]
{
f1,
f2,
f3,
f20,
a1,
a2,
a3,
a20
};
// -
// Using the `ExpressionTupletizer` we can convert back and forth between the N-ary function form and a so-called tupletized form, using `Pack` and `Unpack` methods. We'll illustrate this roundtripping in the next cell.
// + dotnet_interactive={"language": "csharp"}
foreach (var expr in exprs)
{
var sb = new StringBuilder();
sb.AppendLine(expr.ToString());
LambdaExpression packed = ExpressionTupletizer.Pack(expr, voidType: typeof(ValueTuple));
sb.AppendLine(" Pack: " + packed.ToString());
LambdaExpression unpacked = ExpressionTupletizer.Unpack(packed, voidType: typeof(ValueTuple));
sb.AppendLine(" Unpack: " + unpacked.ToString());
Console.WriteLine(sb);
}
// -
// In here, the use of `voidType` is necessary to specify how a lambda expression without any parameters is turned into a unary lambda expression with one parameter. To do so, the introduced dummy parameter needs to have a type, which is represented using `voidType`. The use of `ValueTuple` is well-suited for this purpose.
//
// Reaqtor uses the tupletized form in quite a few places to standardize on unary functions at the base layers of the platform. Any function arity is supported at layers higher up, which are merely syntactic sugar to construct expressions in tuple form.
// ### `AnonymousTypeTupletizer`
//
// Another rewriting utility that relies on tuples is `AnonymousTypeTupletizer`. This expression rewriter is a type substitution visitor that replaces anonymous types (e.g. produced by `let` clauses in query expressions, due to the use of transparent identifiers) by tuple types. This can be useful to shake off compiler-generated types prior to carrying out further rewrite steps (or prior to serialization of the expression for shipment to another machine). For example:
// + dotnet_interactive={"language": "csharp"}
var query = from x in new[] { 1, 2, 3 }.AsQueryable()
let y = x + 1
select x + y;
var queryExpr = CompilerGeneratedNameEliminator.Prettify(query.Expression);
Console.WriteLine(queryExpr);
// -
// Rather than allocating anonymous types for the transparent identifiers, we can replace these by tuple types instead. This is achieved through `AnonymousTypeTupletizer.Tupletize`, which takes up to three parameters. Besides the expression to rewrite, the `unitValue` specifies the expression to use when replacing `new {}` anonymous types (due to the lack of a non-generic `Tuple` type in the BCL), and `excludeVisibleTypes` prevents the tuple rewriting from changing the type of the top-level expression. We'll get to this detail in a bit, but let's first apply the rewrite to our query expression.
// + dotnet_interactive={"language": "csharp"}
var tupletizedQueryExpr = AnonymousTypeTupletizer.Tupletize(queryExpr, unitValue: Expression.Constant(new object()), excludeVisibleTypes: true);
Console.WriteLine(tupletizedQueryExpr);
// -
// In the resulting expression, we have no anonymous types left. This simplifies the expression by reducing the number of types it relies on. This in turn makes tasks like expression serialization easier.
// ### CPS transformations
//
// This library provides very rudimentary support for continuation passing style (CPS) transforms of code which can be used in the context of binding query execution plans to asynchronous infrastructure.
//
// **Note:** The CPS transform support in this library only works for expressions because it originates from a LINQ provider toolkit we wrote in the .NET 3.5 days, prior to support for statement nodes. A full-blown CPS transformation framework existed in the context of Volta's tier splitting work, but it was written in CCI (the Common Compiler Infrastructure) and never got ported to .NET 4.0's expressions. While Nuqleon doesn't use this implementation of CPS transform directly (anymore), we're keeping it around for other uses (e.g. execution plans in some internal stores). A more complete CPS transformation framework could be built, especially in conjunction with the work on modernized expression trees with support for async lambdas and `await` expressions.
//
// CPS transforms involve rewriting method invocations such as `Add(a, b)` to `Add(a, b, ret => ...)` where the result is provided through a callback rather than a returned result. This then enables the use of asynchronous functions.
//
// > **Note:** This functionality has been used for execution plans in stores that have asynchronous callback-based `Get`, `Read`, `Enumerate`, etc. operations, but the user's code uses synchronous operations instead. By enabling rewrites to synchronous stub methods (e.g. `T Get<T>(string key)`) and annotating these stubs with a `UseAsyncMethod` attribute, the CPS transformation framework performs a set of tree rewrites that replace the stub method calls with their async counterparts (e.g. `void Get<T>(string key, Action<T>)`).
// To illustrate this facility, let's first build the most trivial CPS transformation demo with a static `Add` method.
// + dotnet_interactive={"language": "csharp"}
/// <summary>
/// Minimal demo of the CPS rewrite pattern: a synchronous stub method annotated with
/// [UseAsyncMethod], paired with a callback-based overload that holds the real implementation.
/// </summary>
static class Calculator
{
    // Synchronous stub — never executed; the CPS rewriter redirects calls to the
    // callback-based overload below.
    [UseAsyncMethod]
    public static int Add(int x, int y) => throw new NotImplementedException();
    // Callback-based counterpart: delivers the result through the 'ret' continuation.
    public static void Add(int x, int y, Action<int> ret) => ret(x + y);
}
// -
// Given this definition, a user can write an expression of the form `Add(1, Add(2, 3))`, but note that the method does not have an implementation. Instead, we want to translate the user's code to a corresponding asynchronous implementation, which is indicated by the `[UseAsyncMethod]` annotation.
// + dotnet_interactive={"language": "csharp"}
Expression<Func<int>> calcExpr = () => Calculator.Add(1, Calculator.Add(2, 3));
Console.WriteLine(calcExpr);
// -
// If the underlying evaluation infrastructure happens to be asynchronous, we can use the CPS rewriter to retarget the call to `int Add(int, int)` to `void Add(int, int, Action<int>)` which is the callback-based equivalent operation. In the code below, we perform this rewrite and specify the continuation we want to invoke upon obtaining the result of evaluating the expression asynchronously.
// + dotnet_interactive={"language": "csharp"}
var rewriter = new ClassicCpsRewriter();
Expression<Action<int>> continuation = ret => Console.WriteLine(ret);
Expression asyncExpr = rewriter.Rewrite(calcExpr, continuation);
Console.WriteLine(asyncExpr);
// -
// We can also evaluate the resulting expression.
// + dotnet_interactive={"language": "csharp"}
Expression.Lambda<Action>(asyncExpr).Compile()();
// -
// A more realistic example of using the CPS transformation is to map an execution plan onto asynchronous implementations of primitive operations, e.g. in the context of a database product. Note that the CPS utilities in this library predate the introduction of `async` and `await`, but it is possible to combine these facilities with `Task<T>` based APIs. An example is shown below.
// + dotnet_interactive={"language": "csharp"}
/// <summary>
/// Variant of Calculator whose callback-based overload is genuinely asynchronous,
/// combining the pre-async/await CPS pattern with Task-based delays.
/// </summary>
static class AsyncCalculator
{
    // Synchronous stub; calls are rewritten to the callback-based overload below.
    [UseAsyncMethod]
    public static int Add(int x, int y) => throw new NotImplementedException();
    // NOTE: 'async void' is deliberate here — completion is signaled by invoking the
    // 'ret' continuation, not by returning a Task, to fit the CPS rewriter's shape.
    public static async void Add(int x, int y, Action<int> ret)
    {
        // Simulate asynchronous work before handing the result to the continuation.
        await Task.Delay(1000);
        ret(x + y);
    }
}
// -
// Once more, we create a sample expression.
// + dotnet_interactive={"language": "csharp"}
Expression<Func<int>> calcExpr = () => AsyncCalculator.Add(1, AsyncCalculator.Add(2, 3));
Console.WriteLine(calcExpr);
// -
// In order to make the entire CPS-transformed expression return a `Task<int>`, we need to make the final continuation use a `TaskCompletionSource<int>`. To do so, we can build an expression tree that's very similar to the machinery of `async` methods in C# 5.0 and beyond. First, we'll craft some variables for `TaskCompletionSource<int>` and an expression for the `Action<int>` continuation.
// + dotnet_interactive={"language": "csharp"}
var tcs = Expression.Parameter(typeof(TaskCompletionSource<int>), "tcs");
var result = Expression.Parameter(typeof(int), "res");
var setResult = Expression.Call(tcs, typeof(TaskCompletionSource<int>).GetMethod(nameof(TaskCompletionSource<int>.SetResult), new[] { typeof(int) }), result);
var continuation = Expression.Lambda<Action<int>>(setResult, result);
Console.WriteLine(continuation);
// -
// With a continuation expression available, we can now perform the CPS transformation on `calcExpr`.
// + dotnet_interactive={"language": "csharp"}
var rewriter = new ClassicCpsRewriter();
Expression asyncExpr = rewriter.Rewrite(calcExpr, continuation);
Console.WriteLine(asyncExpr);
// -
// Finally, we wrap the whole fanfare into a async ramp function that instantiates the `TaskCompletionSource<int>`, stores it in a local, kicks off the async operation, and finally returns the `Task`.
// + dotnet_interactive={"language": "csharp"}
var task = Expression.Property(tcs, typeof(TaskCompletionSource<int>).GetProperty(nameof(TaskCompletionSource<int>.Task)));
var newTcs = Expression.New(typeof(TaskCompletionSource<int>).GetConstructor(Type.EmptyTypes));
var calcAsync =
Expression.Lambda<Func<Task<int>>>(
Expression.Block(
new[] { tcs },
Expression.Assign(tcs, newTcs),
asyncExpr,
task
)
);
Console.WriteLine(calcAsync);
// -
// At long last, we can evaluate the expression and wait for the task's completion (here using `.Result` for simplicity in the notebook).
// + dotnet_interactive={"language": "csharp"}
Task<int> t = calcAsync.Evaluate<Task<int>>();
Console.WriteLine(t.Result); // This should take 2 seconds to complete due to the Task.Delay(1000) inside Add, which gets invoked twice.
// -
// To support propagation of errors, the `ClassicCpsRewriterWithErrorPropagation` rewriter can be used instead. Rather than rewriting synchronous methods to methods taking in a single callback of type `Action<T>`, an additional callback of type `Action<Exception>` is passed for the error continuation. We can illustrate this using a `Div` function that can throw `DivideByZeroException`.
// + dotnet_interactive={"language": "csharp"}
/// <summary>
/// Demo operation with error propagation: the callback-based overload takes both a
/// success continuation (Action&lt;int&gt;) and an error continuation (Action&lt;Exception&gt;),
/// matching the shape expected by ClassicCpsRewriterWithErrorPropagation.
/// </summary>
static class AsyncFancyCalculator
{
    // Synchronous stub; rewritten to the two-continuation overload below.
    [UseAsyncMethod]
    public static int Div(int x, int y) => throw new NotImplementedException();
    public static async void Div(int x, int y, Action<int> ret, Action<Exception> error)
    {
        await Task.Delay(1000);
        if (y == 0)
        {
            // Failures are routed through the error continuation rather than thrown,
            // so the rewritten caller can surface them (e.g. via TaskCompletionSource).
            error(new DivideByZeroException());
        }
        else
        {
            ret(x / y);
        }
    }
}
// -
// We'll repeat all of the steps carried out earlier, but this time using a `ClassicCpsRewriterWithErrorPropagation` instead.
// + dotnet_interactive={"language": "csharp"}
/// <summary>
/// Wraps a synchronous-looking expression into an async ramp: performs a CPS rewrite
/// using ClassicCpsRewriterWithErrorPropagation, funneling the success and error
/// continuations into a TaskCompletionSource&lt;int&gt;, and returns a lambda that
/// kicks off the operation and yields the resulting Task&lt;int&gt;.
/// </summary>
static Expression<Func<Task<int>>> MakeAsync(Expression<Func<int>> expr)
{
    // TaskCompletionSource<int> tcs
    var tcs = Expression.Parameter(typeof(TaskCompletionSource<int>), "tcs");
    // int res
    var result = Expression.Parameter(typeof(int), "res");
    // Exception ex
    var exception = Expression.Parameter(typeof(Exception), "ex");
    // tcs.SetResult(res)
    var setResult = Expression.Call(tcs, typeof(TaskCompletionSource<int>).GetMethod(nameof(TaskCompletionSource<int>.SetResult), new[] { typeof(int) }), result);
    // tcs.SetException(ex)
    var setException = Expression.Call(tcs, typeof(TaskCompletionSource<int>).GetMethod(nameof(TaskCompletionSource<int>.SetException), new[] { typeof(Exception) }), exception);
    // (int res) => tcs.SetResult(res)
    var onSuccess = Expression.Lambda<Action<int>>(setResult, result);
    // (Exception ex) => tcs.SetException(ex)
    var onError = Expression.Lambda<Action<Exception>>(setException, exception);
    // Op(arg_1, ..., arg_n, (int res) => tcs.SetResult(res), (Exception ex) => tcs.SetException(ex))
    var asyncExpr = new ClassicCpsRewriterWithErrorPropagation().Rewrite(expr, onSuccess, onError);
    // new TaskCompletionSource<int>()
    var newTcs = Expression.New(typeof(TaskCompletionSource<int>).GetConstructor(Type.EmptyTypes));
    // tcs.Task
    var task = Expression.Property(tcs, typeof(TaskCompletionSource<int>).GetProperty(nameof(TaskCompletionSource<int>.Task)));
    // new Func<Task<int>>(() =>
    // {
    //     var tcs = new TaskCompletionSource<int>();
    //     Op(arg_1, ..., arg_n, (int res) => tcs.SetResult(res), (Exception ex) => tcs.SetException(ex))
    //     return tcs.Task;
    // })
    var calcAsync =
        Expression.Lambda<Func<Task<int>>>(
            Expression.Block(
                new[] { tcs },
                Expression.Assign(tcs, newTcs),
                asyncExpr,
                task
            )
        );
    return calcAsync;
}
// -
// Let's now run this for two different expressions and observe the result or error getting propagated through the `Task<int>` object.
// + dotnet_interactive={"language": "csharp"}
Task<int> resSuccess = MakeAsync(() => AsyncFancyCalculator.Div(3, 2)).Compile()();
Task<int> resError = MakeAsync(() => AsyncFancyCalculator.Div(1, 0)).Compile()();
Console.WriteLine(await resSuccess);
Console.WriteLine(await resError); // Will throw!
// -
// ## Expression tree evaluation
//
// Evaluation of expression trees can be done using `Compile` methods on `LambdaExpression` and `Expression<TDelegate>` as shown below:
// + dotnet_interactive={"language": "csharp"}
Expression<Func<int>> f = () => 42;
Func<int> d = f.Compile();
int x = d();
Console.WriteLine(x);
// -
// By default, `Compile` will use `System.Reflection.Emit` to generate IL code at runtime. There's also support for using an interpreter using a Boolean `preferInterpretation` flag, which can be faster if the expression is simple. Furthermore, it can reduce costs (due to IL code generation and JIT compilation) if the expression is only to be evaluated once. If the expression is evaluated often or is rather complex, compiled expressions tend to perform better.
// + dotnet_interactive={"language": "csharp"}
Func<int> d = f.Compile(preferInterpretation: true);
int x = d();
Console.WriteLine(x);
// -
// All of the above are part of the `System.Linq.Expressions` library in the BCL. This library adds some functionality on top of this.
// ### The `Evaluate` and `Funcletize` extension methods
//
// First of all, this library provides a number of methods that make it easier to perform evaluation of any `Expression` without having to manually construct a `LambdaExpression` or `Expression<TDelegate>` around it. This wrapping step is simplified using `Funcletize`, as shown below:
// + dotnet_interactive={"language": "csharp"}
Expression expr = Expression.Constant(42);
Expression<Func<int>> eval = expr.Funcletize<int>();
Console.WriteLine(eval);
// -
// This can then be used to call `Compile()` in order to perform evaluation. However, if the end goal is to just do that, we can directly call `Evaluate` instead, as shown below:
// + dotnet_interactive={"language": "csharp"}
int answer = expr.Evaluate<int>();
Console.WriteLine(answer);
// -
// Where things become more interesting is in the additional `ICompiledDelegateCache` that can be passed to `Evaluate` overloads.
// ### Using compiled delegate caches with `ICompiledDelegateCache`
//
// Compiled delegate caches use expression tree equality comparison to avoid recompiling expression trees that have been compiled previously. We demonstrated this technique earlier in this notebook by building a `Dictionary<LambdaExpression, Delegate>`, when discussing constant hoisting. The built-in support for `ICompiledDelegateCache` makes this task easier, while also providing for caching policies. To illustrate the effect of compiled delegate caching, we'll first define a little benchmark utility.
// + dotnet_interactive={"language": "csharp"}
using System.Time;
/// <summary>
/// IClock implementation that reports managed allocation bytes on the current thread,
/// letting StopwatchFactory build a "stopwatch" that measures memory rather than time.
/// NOTE: only managed allocations are counted; native-side allocations are invisible.
/// </summary>
class MemoryClock : IClock
{
    public long Now => GC.GetAllocatedBytesForCurrentThread();
}
IStopwatch swMem = StopwatchFactory.FromClock(new MemoryClock()).Create();
IStopwatch swTime = StopwatchFactory.Diagnostics.Create();
// Runs 'test' n times and prints elapsed wall-clock time plus managed bytes allocated.
// Uses the notebook-level swMem/swTime stopwatches, so it is not reentrant or thread-safe.
void Benchmark(string title, Action test, int n)
{
    swMem.Restart();
    swTime.Restart();
    for (int i = 0; i < n; i++)
    {
        test();
    }
    swTime.Stop();
    swMem.Stop();
    // For the memory-backed stopwatch, "ticks" are the clock's units, i.e. allocated bytes.
    Console.WriteLine($"{title} completed in {swTime.ElapsedMilliseconds} ms and allocated {swMem.ElapsedTicks} bytes.");
}
// -
// Next, we'll craft a little expression tree to test compilation with. Note we won't evaluate this expression; instead, we'll use `ICompiledDelegateCache` as a parameter to a `Compile` extension method defined in this library. Our goal is to compare the cost of always calling `Compile()` versus calling `Compile(ICompiledDelegateCache)`.
// + dotnet_interactive={"language": "csharp"}
Expression<Func<string, int>> expr = s => s.ToUpper().Substring(1, 2).ToLower().Length - 1;
// -
// To test compiled delegate caches, we'll start off with a `SimpleCompiledDelegateCache` which has an unbounded cache size. Alternatively we could use the `LeastRecentlyUsedCompiledDelegateCache` which has an LRU cache eviction policy.
// + dotnet_interactive={"language": "csharp"}
var cache = new SimpleCompiledDelegateCache();
Benchmark("Without caching", () => expr.Compile(), 10_000);
Benchmark("With caching", () => expr.Compile(cache), 10_000);
// -
// The time taken to compile an expression that has been compiled before is significantly shorter because we're avoiding going into `System.Reflection.Emit` and (down the line) triggering JIT compilation. In the code below, we'll also invoke the compiled delegate.
// + dotnet_interactive={"language": "csharp"}
var cache = new SimpleCompiledDelegateCache();
Benchmark("Without caching", () => expr.Compile()("foobar"), 10_000);
Benchmark("With caching", () => expr.Compile(cache)("foobar"), 10_000);
// -
// The difference is even more pronounced when we try to compile expressions concurrently, which mimics exactly what happens during recovery of query evaluators in Reaqtor. Due to locks inside the CLR (around the code generation area), we end up with a slow down due to lock contention. It should also be noted that the total memory cost reported here is far lower than the real cost involved due to much of the memory allocations happening on the native side of the CLR. Our `MemoryClock` only accounts for managed memory allocations.
// + dotnet_interactive={"language": "csharp"}
using System.Threading;
// Concurrent variant of Benchmark: spins up one thread per logical processor, each
// running 'compileAndEval' n times against its own time/memory stopwatches, then sums
// the per-thread totals (Interlocked.Add keeps the accumulation race-free).
void BenchmarkConcurrent(string title, Action compileAndEval, int n)
{
    long totalMs = 0L;
    long totalBytes = 0L;
    var threads =
        Enumerable.Range(0, Environment.ProcessorCount).Select(_ =>
            new Thread(() =>
            {
                // Per-thread stopwatches: MemoryClock reads the current thread's
                // allocation counter, so each thread must own its own instance.
                IStopwatch swMem = StopwatchFactory.FromClock(new MemoryClock()).StartNew();
                IStopwatch swTime = StopwatchFactory.Diagnostics.StartNew();
                for (int i = 0; i < n; i++)
                {
                    compileAndEval();
                }
                Interlocked.Add(ref totalMs, swTime.ElapsedMilliseconds);
                Interlocked.Add(ref totalBytes, swMem.ElapsedTicks);
            })
        ).ToArray();
    foreach (var thread in threads)
    {
        thread.Start();
    }
    foreach (var thread in threads)
    {
        thread.Join();
    }
    Console.WriteLine($"{title} completed in {totalMs} ms and allocated {totalBytes} bytes (using {Environment.ProcessorCount} threads).");
}
var cache = new SimpleCompiledDelegateCache();
BenchmarkConcurrent("Without caching", () => expr.Compile()("foobar"), 10_000);
BenchmarkConcurrent("With caching", () => expr.Compile(cache)("foobar"), 10_000);
// -
// Note that `SimpleCompiledDelegateCache` is thread-safe, so it's okay to have concurrent compilations all trying to access the cache.
// Compilation using compiled delegate caching has a few more knobs that can be turned using overloads of `Compile`. The most flexible overload has the following signature:
//
// ```csharp
// public static T Compile<T>(this Expression<T> expression, ICompiledDelegateCache cache, bool outliningEnabled, IConstantHoister hoister);
// ```
//
// Outlining (as the opposite of inlining) can be used when expressions have nested lambdas. For example, `xs => xs.Where(x => x > 0).Select(x => x * 2)` has inner lambdas `x => x > 0` and `x => x * 2`. By using outlining, we'll compile this inner lambda using caching as well, allowing for reuse if an identical lambda occurs elsewhere. For example, if another expression has `ys => ys.Select(y => y + 1).Where(y => y > 4)`, we can reuse a cached compiled lambda of the form `t => t > c` where `c` is a hoisted constant. Both `x => x > 0` and `y => y > 4` match this pattern, for different constants.
//
// A custom hoister can also be specified, for example to add exclusion rules (e.g. for the format string in `string.Format(string, object[])`) as described earlier in this notebook.
//
// We'll finish our discussion of compiled delegate caching by illustrating the use of outlining (which is enabled by default when using simpler overloads of `Compile`).
// + dotnet_interactive={"language": "csharp"}
Expression<Func<IEnumerable<int>, IEnumerable<int>>> queryExpr1 = xs => xs.Where(x => x > 0).Take(1);
Expression<Func<IEnumerable<int>, IEnumerable<int>>> queryExpr2 = ys => ys.Take(2).Where(y => y > 3);
// -
// Both expressions have a nested lambda of the form `t => t > c` for some constant value `c`. When outlining is enabled, we'll end up hoisting these inner lambdas out of the original expression, and will compile (and cache) them separately. We can visualize this by making use of events declared on `LeastRecentlyUsedCompiledDelegateCache` which can be used to inspect additions, hits, and removals of entries in the cache.
// + dotnet_interactive={"language": "csharp"}
var cache = new LeastRecentlyUsedCompiledDelegateCache(capacity: 16);
cache.Added += (object o, CacheEventArgs args) =>
{
Console.WriteLine($"Added {args.Lambda} to cache. Delegate = {args.Delegate.GetHashCode()}");
};
cache.Hit += (object o, CacheEventArgs args) =>
{
Console.WriteLine($"Retrieved {args.Lambda} from cache. Delegate = {args.Delegate.GetHashCode()}");
};
// -
// Now we can try to compile the first query expression using the cache, with outlining enabled (which is the default behavior for the simpler `Compile` overload used below).
// + dotnet_interactive={"language": "csharp"}
queryExpr1.Compile(cache);
// -
// Note that two delegates were added to the cache. One for the inner lambda and one for the outer lambda. If we try to compile the same query expression again, we should see two hits.
// + dotnet_interactive={"language": "csharp"}
queryExpr1.Compile(cache);
// -
// Let's finally try to compile the second expression, where the inner lambda is identical modulo parameter names and constant values. We should see a hit for this lambda, but not for the outer lambda which has a different structure than the first expression.
// + dotnet_interactive={"language": "csharp"}
queryExpr2.Compile(cache);
// -
// ## Expression tree optimization
//
// This library provides a few expression tree optimizations. More optimizations are available in `Nuqleon.Linq.Expressions.Optimizers`.
// ### Delegate invocation inlining
//
// The `DelegateInvocationInliner` provides a very narrow optimization that looks for `InvocationExpression` nodes whose target is a `ConstantExpression` containing a delegate-typed expression. If the delegate has an invocation list with a single invocation target (i.e. it's not a multicast delegate with multiple invocation targets attached to it), the tree is rewritten to a `MethodCallExpression` for the method targeted by the delegate.
//
// This optimization can be useful after carrying out binding steps where the binding targets for functions are `ConstantExpression`s containing delegates pointing at a function's implementation.
// As an example, consider an unbound expression `abs(1)`, as crafted below.
// + dotnet_interactive={"language": "csharp"}
var abs = Expression.Parameter(typeof(Func<int, int>), "abs");
var expr = Expression.Invoke(abs, Expression.Constant(1));
Console.WriteLine(expr.ToCSharpString());
// -
// Note we're using `ToCSharpString` which prints the expression tree in a C#-like syntax. We'll discuss this later in the notebook.
//
// Given this unbound expression, we can perform binding steps as we've illustrated before in the context of beta reduction. First, we'll find all of the unbound variables using the `FreeVariableScanner`.
// + dotnet_interactive={"language": "csharp"}
var variables = FreeVariableScanner.Scan(expr).ToArray();
foreach (var variable in variables)
{
Console.WriteLine($"{variable.Name} : {variable.Type.ToCSharpString()}");
}
// -
// Note we're using `ToCSharpString` on an instance of type `Type` to print the type using C#-like syntax. We'll discuss this later in the notebook.
//
// Just like we've done before while discussing beta reduction, we'll go ahead and perform lambda lifting to declare all of the unbound variables in a surrounding lambda expression.
// + dotnet_interactive={"language": "csharp"}
var lifted = Expression.Lambda(expr, variables);
Console.WriteLine(lifted.ToCSharpString());
// -
// As the next step to achieve expression binding, we'll introduce a registry of expressions to bind. This is similar to what we've done for beta reduction, but we'll make a slight change this time around.
// + dotnet_interactive={"language": "csharp"}
var registry = new Dictionary<string, Expression>
{
{ "abs", Expression.Constant(new Func<int, int>(Math.Abs)) }
};
// -
// The difference lies in the expression used to bind `abs`. Rather than using an `Expression<Func<int, int>>` of the form `x => Math.Abs(x)`, we're using a `ConstantExpression` that holds a delegate to `Math.Abs`.
//
// Given this registry, we can bind the functions and construct an `InvocationExpression` around the `LambdaExpression` we built earlier.
// + dotnet_interactive={"language": "csharp"}
var bindings = variables.Select(variable => registry[variable.Name]).ToArray();
var bound = Expression.Invoke(lifted, bindings);
Console.WriteLine(bound.ToCSharpString());
// -
// Note the presence of `__c0` in the textual output, which represents an opaque `ConstantExpression` which could not be printed in C# syntax. This is our delegate to `Math.Abs`. Using `ToCSharp` rather than `ToCSharpString`, we can inspect this as well.
// + dotnet_interactive={"language": "csharp"}
CSharpExpression cs = bound.ToCSharp();
Console.WriteLine($"Global variables: {{ {string.Join(", ", cs.GlobalVariables)} }}");
Console.WriteLine($"Code: {cs.Code}");
Console.WriteLine($"Constants: {string.Join(", ", cs.Constants.Select(kv => $"{kv.Key} : {kv.Value.Value.ToString()}"))}");
// -
// To simplify the bound expression, we can apply beta reduction.
// + dotnet_interactive={"language": "csharp"}
var simplified = BetaReducer.Reduce(bound);
Console.WriteLine(simplified.ToCSharpString());
// -
// The result is an `InvocationExpression` that will invoke the delegate stored in the `ConstantExpression`. This does have some overhead (due to extracting the constant and invoking the delegate) which can be reduced by using `DelegateInvocationInliner` to unpack the delegate and call the underlying method directly using a `MethodCallExpression`.
// + dotnet_interactive={"language": "csharp"}
var result = DelegateInvocationInliner.Apply(simplified, inlineNonPublicMethods: false);
Console.WriteLine(result.ToCSharpString());
// -
// ### Expression tree interning
//
// Because expression trees are immutable, they can be shared safely. For example, if two expressions have a common subexpression, the same object can be used to represent that common subexpression:
//
// ```csharp
// f() + g() + h()
// ```
//
// and
//
// ```csharp
// f() + g() + i()
// ```
//
// can reuse the common subexpression `f() + g()`. However, it's not always possible to figure out that two or more expressions have commonalities that present opportunities for sharing. For example, when expression visitors are used to manipulate expression trees, commonalities may not occur until rewrites have completed.
//
// As an example, let's build a few simple expressions that have opportunities for sharing:
//
// + dotnet_interactive={"language": "csharp"}
var expr1 = Expression.Add(Expression.Multiply(Expression.Constant(1), Expression.Constant(2)), Expression.Constant(3));
var expr2 = Expression.Add(Expression.Multiply(Expression.Constant(1), Expression.Constant(2)), Expression.Constant(3));
var expr3 = Expression.Add(Expression.Multiply(Expression.Constant(1), Expression.Constant(2)), Expression.Constant(4));
var expr4 = Expression.Subtract(Expression.Multiply(Expression.Constant(1), Expression.Constant(2)), Expression.Constant(3));
Console.WriteLine(expr1);
Console.WriteLine(expr2);
Console.WriteLine(expr3);
Console.WriteLine(expr4);
// -
// Expression tree interning allows for the detection of common subexpressions across a "forest" of trees, and rewrite expressions to allow for reuse of common subexpressions, thus reducing the memory utilization. This is very similar to `string.Intern`, although the latter only operates on complete strings, while expression tree interning can consider every subexpression.
//
// > **Note:** Interning is quite expensive due to the computations involved to figure out commonalities between expressions. It's only recommended to use interning if expressions are long-lived. In the context of Nuqleon, this is most useful when expression trees are kept alive in registries or on quotations that are needed to support higher-order query operators (e.g. in `xs.SelectMany(x => f(x))`, the expression `x => f(x)` is kept - even after compilation to a delegate - because inner subscriptions need to be able to construct an expression, which will get checkpointed, that represents the observable `f(x)` for a given value of `x`).
// Let's try applying interning to the expressions shown above.
// + dotnet_interactive={"language": "csharp"}
var cache = new ExpressionInterningCache();
var expr1_i = expr1.Intern(cache);
var expr2_i = expr2.Intern(cache);
var expr3_i = expr3.Intern(cache);
var expr4_i = expr4.Intern(cache);
Console.WriteLine(expr1_i);
Console.WriteLine(expr2_i);
Console.WriteLine(expr3_i);
Console.WriteLine(expr4_i);
// -
// We can start by spot-checking a few equalities to check whether interning had an effect. For example, `expr1_i` and `expr2_i` should be identical, even though `expr1` and `expr2` were not.
// + dotnet_interactive={"language": "csharp"}
Console.WriteLine($"(expr1 == expr2 ) = {expr1 == expr2 }");
Console.WriteLine($"(expr1_i == expr2_i) = {expr1_i == expr2_i}");
// -
// In order to investigate interior nodes for reference equality, we can build a simple visitor that will print all nodes and their hash code.
// + dotnet_interactive={"language": "csharp"}
/// <summary>
/// Debug visitor that prints every non-null node alongside its hash code, making
/// reference-level sharing of subexpressions (e.g. after interning) easy to spot:
/// equal hash codes across trees indicate the same node instance being reused.
/// </summary>
class PrintHashCodeVisitor : ExpressionVisitor
{
    public override Expression Visit(Expression node)
    {
        if (node != null)
        {
            Console.WriteLine($"{node} [{node.GetHashCode()}]");
        }
        // Continue the default traversal into child nodes.
        return base.Visit(node);
    }
}
// -
// We can first run this visitor on the original expressions to see that all nodes are unique.
// + dotnet_interactive={"language": "csharp"}
// Dump every node (with hash code) of the original, non-interned expressions;
// each node here is a distinct object.
var printer = new PrintHashCodeVisitor();
printer.Visit(expr1);
printer.Visit(expr2);
printer.Visit(expr3);
printer.Visit(expr4);
// -
// When we run the visitor over the interned nodes, we can see the effect of deduplicating nodes that are equivalent.
// + dotnet_interactive={"language": "csharp"}
// Same dump over the interned trees: equivalent nodes now share hash codes.
printer.Visit(expr1_i);
printer.Visit(expr2_i);
printer.Visit(expr3_i);
printer.Visit(expr4_i);
// -
// Expression tree interning only considers semantic equivalence of expressions. For example, interning `x => x + 1` and `y => y + 1` can result in variable renaming because both lambda expressions are equivalent.
//
// > **Note**: If this is not desirable, one can specify a custom `ExpressionEqualityComparator` that overrides the behavior for equality of `ParameterExpression` nodes to include the `Name` property.
// + dotnet_interactive={"language": "csharp"}
// Interning treats alpha-equivalent lambdas as equal, so `x => x + 1` and
// `y => y + 1` collapse to one instance (the final line prints True).
Expression<Func<int, int>> f1 = x => x + 1;
Expression<Func<int, int>> f2 = y => y + 1;
var f1_i = f1.Intern();
var f2_i = f2.Intern();
Console.WriteLine(f1_i);
Console.WriteLine(f2_i);
Console.WriteLine(f1_i == f2_i);
// -
// ## Diagnostics
//
// To assist with logging or debugging of expression trees, this library provides a `ToCSharpString` extension method for `Expression` which produces C#-like syntax to represent the tree. Not all constructs in expression trees can be (correctly) represented in C# (e.g. `LoopExpression` having a result), so the resulting syntax just looks and feels like C#.
//
// > **Note:** Better expression tree printing support is available in https://github.com/bartdesmet/ExpressionFutures/tree/master/CSharpExpressions.
// + dotnet_interactive={"language": "csharp"}
// Pretty-print `1 + x` using the C#-like rendering.
var expr = Expression.Add(Expression.Constant(1), Expression.Parameter(typeof(int), "x"));
Console.WriteLine(expr.ToCSharpString());
// -
// An additional Boolean parameter called `allowCompilerGeneratedNames` can be passed to `ToCSharpString` to control whether the resulting string can contain compiler-generated names (otherwise, an exception will be thrown when encountering such a name). To illustrate this, let's have the C# compiler craft a closure type (also known as a display class).
// + dotnet_interactive={"language": "csharp"}
// Helper whose property forces the C# compiler to emit a closure (display
// class) capturing the local `x`; the returned body is a MemberExpression
// over a compiler-generated type.
static class C
{
    public static Expression Expression
    {
        get
        {
            int x = 42;
            Expression<Func<int>> f = () => x;
            return f.Body;
        }
    }
}
// -
// This expression will be of the form `Member(Constant(closure), FieldInfo("x"))` in pseudo-code. We can now construct an expression tree whose C# syntax requires to reveal the type, e.g. a `default(T)` expression.
// + dotnet_interactive={"language": "csharp"}
// `default(<compiler-generated closure type>)` can only be rendered when
// compiler-generated names are explicitly allowed.
var closureType = ((MemberExpression)C.Expression).Expression.Type;
var expr = Expression.Default(closureType);
Console.WriteLine(expr.ToCSharpString(allowCompilerGeneratedNames: true));
// -
// Clearly this is not valid C# syntax. In fact, the whole purpose of the `<>__` naming prefix is to ensure that compiler-generated type names cannot conflict with identifiers introduced by user code, and to prevent user code from directly accessing these types. If we disable `allowCompilerGeneratedNames`, an exception is thrown instead.
// + dotnet_interactive={"language": "csharp"}
Console.WriteLine(expr.ToCSharpString(allowCompilerGeneratedNames: false));
// -
// Alternatively, the `ToCSharp` method can be used which returns a `CSharpExpression`, providing additional information about the tree, alongside a string representation. This additional information includes a table of `Constants` as well as a list of `GlobalParameters`. An example is shown below, where we have an opaque constant that doesn't have a C# representation, and an unbound variable.
// + dotnet_interactive={"language": "csharp"}
// `ToCSharp` returns structured output: the code string plus the opaque
// constants and unbound (global) parameters it could not render inline.
var expr = Expression.Add(Expression.Constant(DateTime.Now), Expression.Parameter(typeof(TimeSpan), "t"));
CSharpExpression csharpExpr = expr.ToCSharp();
Console.WriteLine(csharpExpr.Code);
Console.WriteLine("Globals:");
foreach (var global in csharpExpr.GlobalVariables)
{
    Console.WriteLine(" " + global);
}
Console.WriteLine("Constants:");
foreach (var constant in csharpExpr.Constants)
{
    Console.WriteLine(" " + constant);
}
// -
// ## BURS
//
// BURS stands for Bottom-Up Rewrite System and is based on the https://www.researchgate.net/publication/220752446_Simple_and_Efficient_BURS_Table_Generation paper. It provides for a table-driven weight-based approach to generate code. The tables contain rules which consist of patterns to recognize, a cost, and a rewrite rule.
//
// This library generalizes the BURS mechanism to conversions between a generalized notion of trees, represented as `ITree<T>`. In this context, BURS is a table-driven way to build a compiler that translates between an `ITree<T>` to an `ITree<R>`. The special case of rewriting from an `ITree<T>` to an `ITree<T>` is a rule-driven optimizer.
// ### Building a rewriter
//
// As an example, consider a first `ITree<T>` where `T` represents node information for a tree of some arithmetic language providing unary operators `+` and `-`, as well as binary operators `+`, `-`, `*`, and `%`. Building the whole `ITree<T>` implementation in the notebook gets a bit unwieldy, so we'll just import a `SampleTrees` assembly that can be found in this repo, and is also used for testing.
// + dotnet_interactive={"language": "csharp"}
#r "..\SampleTrees\bin\Debug\net5.0\SampleTrees.dll"
// -
// We can now explore these tree types and try to craft an expression in this arithmetic expression language.
// + dotnet_interactive={"language": "csharp"}
// Build (2 * 3 + 1) * (4 * 5) + 6 in the Arithmetic sample language.
using SampleTrees.Arithmetic;
var arithExpr = new Add(
    new Mul(
        new Add(
            new Mul(
                new Const(2),
                new Const(3)
            ),
            new Const(1)
        ),
        new Mul(
            new Const(4),
            new Const(5)
        )
    ),
    new Const(6)
);
Console.WriteLine(arithExpr);
// -
// Next, let's have a look at a second expression language, which we call numerical (for lack of a better name). It's similar to the arithmetic language, but has some different node types.
// + dotnet_interactive={"language": "csharp"}
// The same computation in the Numerical language, using the fused
// Inc (x + 1) and TimesPlus (a * b + c) operators.
using SampleTrees.Numerical;
var numExpr = new TimesPlus(
    new Inc(
        new Times(
            new Val(2),
            new Val(3)
        )
    ),
    new Times(
        new Val(4),
        new Val(5)
    ),
    new Val(6)
);
Console.WriteLine(numExpr);
// -
// Note how this second language has nodes like `Inc` and `TimesPlus` which are unary and ternary specialized operators. While this sample is quite academic in nature, it has resemblances to instructions in specialized hardware or highly optimized libraries for BLAS operations. Also note that the two expressions we built in the `Arithmetic` and `Numerical` languages are equivalent. Both compute the same result.
//
// With BURS, we can write a rule-driven translator that takes an expression from the `Arithmetic` language and translates it to the `Numerical` language. Each rule has weights associated with it, and the rewriter tries to find a rewrite with the lowest weight. For example, we'd like `Add(Mul(2, 3), 1)` to use the specialized `Inc` rather than `Plus` with an operand of value `1`. Similarly, we'd like an `Add(Mul(a, b), c)` where `c` is different from `1` to leverage the specialized `TimesPlus` operator.
//
// In the cell below, we'll go ahead and construct a BURS rewriter.
// + dotnet_interactive={"language": "csharp"}
using System.IO;
using System.Linq.CompilerServices;
// Capture the rewriter's trace so the labeling/selection steps can be shown.
var logger = new StringWriter();
// Rule table mapping Arithmetic patterns to Numerical productions; the number
// on each entry is its cost, and the rewriter picks the cheapest total cover.
var burw = new BottomUpRewriter<ArithExpr, ArithNodeType, NumExpr, ArithWildcardFactory>
{
    // Leaf nodes
    Leaves =
    {
        { (Const c) => new Val(c.Value), 1 },
    },
    // Tree patterns
    Rules =
    {
        { (l, r) => new Add(l, r), (l, r) => new Plus(l, r), 2 },
        { (l, r) => new Mul(l, r), (l, r) => new Times(l, r), 3 },
        { (a, b, c) => new Add(new Mul(a, b), c), (a, b, c) => new TimesPlus(a, b, c), 4 },
        { x => new Add(x, new Const(1)), x => new Inc(x), 1 },
    },
    Log = logger
};
// -
// The most interesting bit is the rules table which maps patterns on the left in the source language to productions on the right in the target language, with an associated weight. The leaf nodes collection tells the rewriter how to take nullary leaf nodes from the source to the target language. We also hooked up a logger to see the rewriter in action.
//
// First, we can go ahead and inspect the rewriter's internal tables using a `DebugView` property.
// + dotnet_interactive={"language": "csharp"}
// Dump the rewriter's internal bottom-up rule tables.
Console.WriteLine(burw.DebugView);
// -
// This may look quite intimidating but it encodes the different rules in a bottom-up fashion. As a source tree is given to the rewriter, it starts by matching patterns in the rule tables starting from the leaf nodes. As it works its way up the tree, it tries to match bigger patterns and calculates the total weight of all candidate matches. When the tree is fully covered, it tries to minimize the cost by selecting the match with the lowest weight and then applies the rewrite rules. A concrete example will make this clear, by running our `arithExpr` through the rewriter.
// + dotnet_interactive={"language": "csharp"}
// Translate the Arithmetic tree to the Numerical language.
var numExpr = burw.Rewrite(arithExpr);
Console.WriteLine(numExpr);
// -
// Note we did get a tree that uses `TimesPlus` and `Inc` rather than less optimal patterns. Remember that we hooked up a logger to the rewriter, which we can now use to see the steps taken by the rewriter.
// + dotnet_interactive={"language": "csharp"}
Console.WriteLine(logger.ToString());
// -
// Describing the full inner workings of BURS is outside the scope of this notebook, but suffice it to say that the labeled tree shown above shows the different rules that match each of the nodes in the tree, with an associated cost (denoted by the `$` sign representing the total cost of the rewrite). For example, the top-level node has annotations:
//
// ```
// 0*, 1! (17$), 3! (16$)
// ```
//
// The `*` refers to a wildcard, meaning this node could be matched in a bigger parent tree. The `1!` and `3!` are final productions that correspond to using `Plus` or `TimesPlus`, as can be seen from the `DebugView` we dumped earlier. The associated costs are `17$` and `16$`, which means that the second `TimesPlus` option is cheaper, and thus the rewriter followed that rule to rewrite the expression.
// ### Building an optimizer
//
// BURS can also be used to write rule-driven optimizers, which are merely rewriters where the input and output languages are the same. Unlike rewriters between different languages, the rule table of an optimizer does not have to cover all possible input patterns. Instead, an optimizer can simply express some patterns it knows to optimize. To illustrate this, we'll use yet another language, called `Logic` for Boolean-valued expressions.
// + dotnet_interactive={"language": "csharp"}
using SampleTrees.Logic;
// !(!true & !false) — reducible to a constant, and a good De Morgan example.
var logicExpr = !(!BoolConst.True & !BoolConst.False);
Console.WriteLine(logicExpr);
// -
// Obviously this can be reduced to a single constant `true` or `false` value, but it illustrates a number of possible rewrite rules. For example, `!true` can become `false`, but a more complex pattern of the form `!(!a & !b)` can also be turned into `a | b` using De Morgan's law. Once more, we can construct a table-driven rewriter that captures various rewrite rules.
// + dotnet_interactive={"language": "csharp"}
var logger = new StringWriter();
// Optimizer over a single language: patterns and productions are both LogicExpr.
var burw = new BottomUpOptimizer<LogicExpr, LogicNodeType, LogicWildcardFactory>
{
    // Leaf nodes
    Leaves =
    {
        { (BoolConst b) => b, 1 },
    },
    Rules =
    {
        // Tree patterns
        { () => !BoolConst.True, () => BoolConst.False, 1 },
        { () => !BoolConst.False, () => BoolConst.True, 1 },
        { p => !!p, p => p, 2 },
        { p => p & BoolConst.True, p => p, 2 },
        { p => p & BoolConst.False, p => BoolConst.False, 2 },
        { p => BoolConst.True & p, p => p, 2 },
        { p => BoolConst.False & p, p => BoolConst.False, 2 },
        { p => p | BoolConst.True, p => BoolConst.True, 2 },
        { p => p | BoolConst.False, p => p, 2 },
        { p => BoolConst.True | p, p => BoolConst.True, 2 },
        { p => BoolConst.False | p, p => p, 2 },
        { (p, q) => !(!p & !q), (p, q) => p | q, 1 },
        { (p, q) => !(!p | !q), (p, q) => p & q, 1 },
    },
    Log = logger
};
// -
// Note we have rules for negation of constants, double negation, use of `&` and `|` where one operand is a constant, but also the De Morgan rules at the bottom. Let's feed the expression from above to the rewriter and see what happens.
// + dotnet_interactive={"language": "csharp"}
// Optimize runs Rewrite repeatedly until a fixed point is reached.
var optimizedLogicExpr = burw.Optimize(logicExpr);
Console.WriteLine(optimizedLogicExpr);
// -
// This time, we called `Optimize` rather than `Rewrite` which keeps performing rewrites until the tree no longer changes. If we were to just call `Rewrite` (left as an exercise), we may end up with a tree that's not yet fully optimized.
//
// Let's now have a look at the rewrites that took place, by inspecting the `logger`.
// + dotnet_interactive={"language": "csharp"}
Console.WriteLine(logger.ToString());
// -
// We can see that the rewriter was called more than once. First, our tree got rewritten using rules for Boolean negation at the leaves (`!true` became `false`, and vice versa), and De Morgan's law was applied at the top, turning the `Not(And(...))` into an `Or`. The second optimization pass was then faced with an `Or` with a `true` operand, which got ultimately reduced to `true`.
// ### Applying BURS to LINQ expression trees
//
// In order to use BURS with .NET expression trees, one has to first convert expression trees to the `ITree<T>` interface type that BURS operates on. The library comes with a `ToExpressionTree` extension method that can be used to achieve this.
// + dotnet_interactive={"language": "csharp"}
// Convert a LINQ expression tree into the generic ITree<T> form used by BURS.
var expr = Expression.And(Expression.Constant(true), Expression.Constant(false));
var exprTree = expr.ToExpressionTree();
Console.WriteLine(exprTree);
// -
// Note that the `ToString` representation of an `ExpressionTree` (which is an `ITree<ExpressionTreeNode>`) is a little funny looking because it rigorously prints the node's kind followed by the children in between `(` and `)`. Leaf nodes like constants have no children and end up with a `()` at the end.
// Once we have an `ExpressionTree` which implements `ITree<T>` we can start to use BURS to perform rewrites, e.g. to translate an expression tree to some target language. BURS has been used to build table-driven query providers (e.g. going from expression trees to some `ITree<SqlNode>` for translation to SQL) and optimizers. Mileage varies depending on the complexity of the rule matching involved. Further extensions of BURS have been written in spin-off projects, modeling type system checks (e.g. how does a rule involving a method invocation `object.Equals(object)` relate to rules that involve an override of this virtual method on a more derived type?), supporting additional predicates to drive the rule selection process, and dynamic computation of weights.
// ## Miscellaneous utilities
//
// In this final section of the notebook, we'll look at some remaining utilities that are provided by the library.
// ### `ReflectionHelpers`
//
// This type provides a set of `InfoOf` methods that obtain a `MemberInfo` from an expression tree. This is a mechanism akin to `typeof` for types but targeting methods, properties, fields, and constructors instead (much like a hypothetical C# `infoof` operator could do). For example:
// + dotnet_interactive={"language": "csharp"}
// `InfoOf` extracts the MemberInfo referenced by an expression — here the
// method, property, and constructor cases (a hypothetical C# `infoof`).
Console.WriteLine((MethodInfo)ReflectionHelpers.InfoOf((string s) => s.Substring(0, 1)));
Console.WriteLine((PropertyInfo)ReflectionHelpers.InfoOf(() => DateTime.Now));
Console.WriteLine((ConstructorInfo)ReflectionHelpers.InfoOf(() => new TimeSpan(0, 0, 0)));
// -
// ### `RuntimeCompiler`
//
// The runtime compiler uses `System.Reflection.Emit` to build anonymous types, closure types, and so-called record types. The resulting types are often used in expression trees. For example, one could erase nominal types (such as a `Person` type) for structurally identical types (e.g. a record containing a `string Name` and `int Age`), which can then be used to serialize types by their shape rather than by their name.
// #### Anonymous types
//
// Runtime-generated anonymous types are analogous to C# 3.0 and VB 9.0 anonymous types. Both flavors can be built using different overloads of `CreateAnonymousType`:
//
// ```csharp
// public static Type CreateAnonymousType(IEnumerable<KeyValuePair<string, Type>> properties);
// public static Type CreateAnonymousType(IEnumerable<StructuralFieldDeclaration> properties);
//
// public static Type CreateAnonymousType(IEnumerable<KeyValuePair<string, Type>> properties, params string[] keys);
// public static Type CreateAnonymousType(IEnumerable<StructuralFieldDeclaration> properties, params string[] keys);
// ```
//
// Overloads with `StructuralFieldDeclaration` support adding custom attributes to the generated properties. The difference between overloads that lack a `keys` parameter versus the ones that have one has to do with the properties that participate in the implementation for `Equals` and `GetHashCode`. Being able to specify particular properties as "keys" matches the design of anonymous types in Visual Basic.
//
// An example is shown below:
// + dotnet_interactive={"language": "csharp"}
// Build an anonymous type with Name/Age properties; with no keys specified,
// all properties participate in Equals/GetHashCode, so the two structurally
// identical instances below compare equal.
var anon = RuntimeCompiler.CreateAnonymousType(new StructuralFieldDeclaration[]
{
    new("Name", typeof(string)),
    new("Age", typeof(int))
});
Console.WriteLine(anon);
var person1 = Activator.CreateInstance(anon, new object[] { "Bart", 21 });
Console.WriteLine(person1);
var person2 = Activator.CreateInstance(anon, new object[] { "Bart", 21 });
Console.WriteLine(person2);
Console.WriteLine($"Are equal? {person1.Equals(person2)}");
// -
// #### Closure types
//
// Closure types are simply classes that declare a bunch of fields. To create a closure type at runtime, use the `CreateClosureType` method:
//
// ```csharp
// public static Type CreateClosureType(IEnumerable<KeyValuePair<string, Type>> fields);
// ```
//
// An example is shown below:
// + dotnet_interactive={"language": "csharp"}
// A closure type is just a bag of public fields; `dynamic` gives easy access.
var closure = RuntimeCompiler.CreateClosureType(new KeyValuePair<string, Type>[]
{
    new("x", typeof(int)),
    new("b", typeof(bool))
});
Console.WriteLine(closure);
dynamic d = Activator.CreateInstance(closure);
d.x = 42;
d.b = true;
// -
// #### Record types
//
// The notion of record types in this library predates C# 9.0's record types by almost a decade, so their characteristics are quite different. Record types in this library are classes that are similar to anonymous types but provide control over the implementation of equality (value versus reference equality). Furthermore, they support assignment to properties. Use the `CreateRecordType` method to create them:
//
// ```csharp
// public static Type CreateRecordType(IEnumerable<KeyValuePair<string, Type>> properties, bool valueEquality);
// public static Type CreateRecordType(IEnumerable<StructuralFieldDeclaration> properties, bool valueEquality);
// ```
//
// An example is shown below:
// + dotnet_interactive={"language": "csharp"}
// Record types have settable properties; with valueEquality: true the two
// structurally identical instances below compare equal.
var record = RuntimeCompiler.CreateRecordType(new StructuralFieldDeclaration[]
{
    new("Name", typeof(string)),
    new("Age", typeof(int))
}, valueEquality: true);
Console.WriteLine(record);
dynamic person1 = Activator.CreateInstance(record);
person1.Name = "Bart";
person1.Age = 21;
Console.WriteLine(person1);
dynamic person2 = Activator.CreateInstance(record);
person2.Name = "Bart";
person2.Age = 21;
Console.WriteLine(person2);
Console.WriteLine($"Are equal? {person1.Equals(person2)}");
// -
// #### `Define*` method variants
//
// In addition to the `Create*` methods illustrated above, variants with the `Define*` prefix are provided as well. Rather than returning a `Type`, these accept a `TypeBuilder` to define the type on. These variants are useful when trying to build recursive types (i.e. there's a cycle between declarations and uses of types), because one can use `TypeBuilder` instances for the types of the properties on the anonymous or record type being constructed. Furthermore, one could extend these types with custom members (e.g. a closure type could have instance members associated with it that operate on the state captured in fields). Once all types have been defined, the user can then call `CreateType` on the `TypeBuilder` instances. Examples of types with cycles are:
//
// ```csharp
// // A -> A
// class A
// {
// public A Next { get; set; }
// }
//
// // B -> C
// class B
// {
// public C C { get; set; }
// }
//
// // C -> B
// class C
// {
// public B B { get; set; }
// }
// ```
//
// As an example, a structurally equivalent pair of record types for `B` and `C` could be built as follows.
// + dotnet_interactive={"language": "csharp"}
using System.Reflection.Emit;
// Define two mutually recursive record types (B -> C, C -> B) by handing out
// TypeBuilders first and materializing both only after each is defined.
var compiler = new RuntimeCompiler();
TypeBuilder tb = compiler.GetNewRecordTypeBuilder();
TypeBuilder tc = compiler.GetNewRecordTypeBuilder();
compiler.DefineRecordType(tb, new KeyValuePair<string, Type>[] { new("C", tc) }, valueEquality: false);
compiler.DefineRecordType(tc, new KeyValuePair<string, Type>[] { new("B", tb) }, valueEquality: false);
Type b = tb.CreateType();
Type c = tc.CreateType();
dynamic objB = Activator.CreateInstance(b);
dynamic objC = Activator.CreateInstance(c);
// Tie the knot: objB.C.B is objB again.
objB.C = objC;
objC.B = objB;
Console.WriteLine(objB.GetHashCode());
Console.WriteLine(objC.GetHashCode());
Console.WriteLine(objB.C.GetHashCode());
Console.WriteLine(objB.C.B.GetHashCode());
// -
// Note we use `valueEquality` set to `false` in the example above. This avoids implementations like `Equals` and `GetHashCode` from going in a cycle. Using methods like `ToString` on cyclic types like this one is fraught with danger. If you want to exit the notebook with some fireworks, run the following cell to see why (**at your own risk**).
// + dotnet_interactive={"language": "csharp"}
Console.WriteLine(objB.ToString());
| Nuqleon/Core/LINQ/Nuqleon.Linq.CompilerServices/GettingStarted.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# language: python
# name: python3
# ---
# # Setup
import requests
import pandas as pd
from bs4 import BeautifulSoup
import datetime
from datetime import datetime, timedelta, date
# # Part 1 : Main Data Analysis from GW2 official API for ROI
# #### Récupération du CSV contenant tous les id/name des objets de GW2.
# Load the id -> name catalogue of all GW2 items (exported beforehand).
offapi = pd.read_csv('items.csv')
# print(offapi.to_string())
# print(offapi)
# Force the name column to string dtype so joins/formatting behave uniformly.
offapi = offapi.astype({
    'name': 'str',
})
offapi.head(5)
# #### Création d'une boucle parcourant tous les id des objets dans l'API officiel de GW2.
# +
# Fetch current buy/sell listings for every known item id from the official
# GW2 commerce API and accumulate them into a single DataFrame.
#
# Rows are collected in a plain list and the frame is built once at the end:
# DataFrame.append was O(n^2) and has been removed in pandas >= 2.0. The
# original also shadowed the builtins `dict` and `id`, and re-ran astype on
# the whole frame every iteration.
rows = []
cpt = 0  # progress counter
for idx in offapi['id']:
    r = requests.get("https://api.guildwars2.com/v2/commerce/prices/" + str(idx))
    j = r.json()
    now = datetime.now()
    # Non-tradable items come back as an error payload with a "text" key.
    if "text" in j:
        continue
    bought_unit_price = j['buys']['unit_price']
    sold_unit_price = j['sells']['unit_price']
    # The trading post takes a 15% cut of the sale price.
    profit = sold_unit_price * 0.85 - bought_unit_price
    rows.append({
        'id': j['id'],
        'bought_quantity': j['buys']['quantity'],
        'bought_unit_price': bought_unit_price,
        'sold_unit_price': sold_unit_price,
        'sold_quantity': j['sells']['quantity'],
        'profit': profit,
        'date': now,
    })
    cpt += 1
    print(cpt)
df = pd.DataFrame(rows)
df = df.astype({
    'id': 'int64',
    'bought_quantity': 'int64',
    'bought_unit_price': 'int64',
    'sold_unit_price': 'int64',
    'sold_quantity': 'int64',
    'date': 'str',
})
display(df)
# -
# #### Restructuration du dataframe et innerjoin sur l'id avec le df csv pour récupérer le nom de l'objet.
# +
# Attach the human-readable item name (inner join on id) and put the columns
# in a fixed presentation order.
df = (
    df.merge(offapi[['id', 'name']], on='id')
      .astype({'name': 'str'})
      .reindex(columns=[
          'id', 'name',
          'bought_quantity', 'bought_unit_price',
          'sold_quantity', 'sold_unit_price',
          'profit', 'date',
      ])
)
display(df)
# -
# #### Sauvegarde du dataframe dans un CSV (au cas où)
# Checkpoint the scraped data to disk and reload it. Write and read the SAME
# file: the original wrote to an absolute, machine-specific Windows path but
# read back a *relative* 'items2.csv', so the reloaded frame could silently
# be a stale copy from a different location.
snapshot_path = 'items2.csv'
df.to_csv(snapshot_path, index=False)
df = pd.read_csv(snapshot_path)
# #### Push du dataframe sur postgreSQL pour visualisation sur Tableau
from datetime import datetime
from sqlalchemy import Column, Date, Integer, String, TIMESTAMP
from sqlalchemy.orm import sessionmaker
import psycopg2
import sqlalchemy
from sqlalchemy.ext.declarative import declarative_base
# +
# gw2 here = database name, part1 = table name
# NOTE(review): credentials are hard-coded in the URL; consider an env var.
engine = sqlalchemy.create_engine('postgresql+psycopg2://postgres:azerty09@localhost:5432/gw2')
Base = declarative_base()
# create a configured "Session" class
Session = sessionmaker(bind=engine)
# create a Session
# NOTE(review): `session` and `Base` are never used below — to_sql only
# needs the engine.
session = Session()
# -
# Write data into the table in PostgreSQL database
df.to_sql('part1',engine, if_exists='replace')
# # Part 2 : Trend Analysis
# #### Using non offical GW2 API from gw2spidy.com to get prices and quantities history
# +
from time import time, sleep

# Poll gw2spidy once a minute for the full buy/sell listing history of item
# 19976 and mirror it into the `part2` PostgreSQL table.
#
# Rows are gathered in a list and the frame is built in one shot each cycle:
# DataFrame.append was removed in pandas >= 2.0 (and was quadratic). The
# original also created an unused `combined_results` dict and shadowed the
# builtin `dict`.
while True:
    rows = []
    # First request only serves to discover how many pages of listings exist.
    # NOTE(review): the page count comes from the "sell" side but is reused
    # for "buy" — confirm both sides have the same number of pages.
    r = requests.get("http://www.gw2spidy.com/api/v0.9/json/listings/19976/sell/")
    j = r.json()
    number_of_pages = j['last_page']
    for link_name in ('sell', 'buy'):
        for page in range(1, number_of_pages + 1):
            r = requests.get("http://www.gw2spidy.com/api/v0.9/json/listings/19976/" + link_name + "/" + str(page))
            j = r.json()
            sell_or_buy = j['sell-or-buy']
            # j['results'] is a list of listing records.
            for element in j['results']:
                rows.append({
                    'sell-or-buy': sell_or_buy,
                    'listing_datetime': datetime.strptime(element['listing_datetime'], '%Y-%m-%d %H:%M:%S %Z'),
                    'unit_price': element['unit_price'],
                    'quantity': element['quantity'],
                })
    df2 = pd.DataFrame(rows)
    print(df2)
    engine = sqlalchemy.create_engine('postgresql+psycopg2://postgres:azerty09@localhost:5432/gw2')
    Base = declarative_base()
    # create a configured "Session" class
    Session = sessionmaker(bind=engine)
    # create a Session
    session = Session()
    # Write data into the table in PostgreSQL database
    df2.to_sql('part2', engine, if_exists='replace')
    # Sleep until the top of the next minute.
    sleep(60 - time() % 60)
# -
# # Part 3 : Model Price Prediction
# Importing modules
import numpy as np
import pandas as pd
import matplotlib.pyplot as plt
import datetime
df2.head(5)
# Keep only valid sell listings with a known price.
gw2model = df2[df2['listing_datetime'].notnull()
               & (df2['sell-or-buy'] == "sell")
               & df2['unit_price'].notnull()]
print(gw2model)
print(gw2model.columns)
# +
gw2model = gw2model[['listing_datetime', 'unit_price']]
print(gw2model.to_string())
print(gw2model.columns)
# -
# Plot the raw price series against time.
plot_gw2 = gw2model.set_index('listing_datetime')
plot_gw2['unit_price'].plot(figsize=(16,6))
# Resample to the daily mean price.
gw2model = gw2model.groupby(pd.Grouper(key="listing_datetime", freq="D")).mean().reset_index()
print(gw2model.to_string())
# BUG FIX: after reset_index() the frame has a plain RangeIndex, so
# `gw2model.loc["2016-06-01":]` raised a TypeError (label slicing by a date
# string requires a DatetimeIndex). Slice on the datetime index instead and
# restore the original column layout afterwards.
new_gw2 = gw2model.set_index('listing_datetime').loc["2016-06-01":].reset_index()
print(new_gw2)
| PFF - GW2 data analyses.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# language: python
# name: python3
# ---
# # Problem 1
# ## Exercise 1 - Introduction - Say "Hello, World!" With Python
# Exercise 1: print a fixed greeting.
if __name__ == '__main__':
    my_string = "Hello, World!"
    print(my_string)
# ## Exercise 2 - Introduction - Python If-Else
# +
# #!/bin/python3
import math
import os
import random
import re
import sys
# "Weird" for odd n or even n in [6, 20]; "Not Weird" for even n in [2, 5] or n > 20.
if __name__ == '__main__':
    a = "Weird"
    b = "Not Weird"
    n = int(input().strip())
    if (n%2 != 0):
        print (a)
    if (2<=n<=5) and (n%2 == 0):
        print(b)
    if (6<=n<=20)and (n%2 == 0):
        print(a)
    if (n>20)and (n%2 == 0):
        print(b)
# -
# ## Exercise 3 - Introduction - Arithmetic Operators
# Print the sum, difference and product of two integers read from stdin.
if __name__ == '__main__':
    a = int(input())
    b = int(input())
    s = a + b
    d = a - b
    m = a*b
    print(s)
    print(d)
    print(m)
# ## Exercise 4 - Introduction - Python: Division
# Print integer (floor) division then true division.
if __name__ == '__main__':
    a = int(input())
    b = int(input())
    d = a//b
    f = a/b
    print(d)
    print(f)
# ## Exercise 5 - Introduction - Loops
# Print the squares 0**2 .. (n-1)**2, one per line.
if __name__ == '__main__':
    n = int(input())
    i = 0
    while i < n:
        print(i**2)
        i = i+1
# ## Exercise 6 - Introduction - Write a function
def is_leap(year):
    """Return True if *year* is a leap year in the Gregorian calendar.

    A year is a leap year when it is divisible by 4, except century years,
    which must additionally be divisible by 400.
    """
    return year % 4 == 0 and (year % 100 != 0 or year % 400 == 0)
# ## Exercise 7 - Introduction - Print Function
# Exercise 7: print the numbers 1..n concatenated (e.g. n=3 -> "123").
if __name__ == '__main__':
    n = int(input())
    string = ""
    for i in range(1,n+1):
        string += str(i)
    print(string)
# ## Exercise 8 - Basic data types - List Comprehensions
# All grid coordinates [i, j, k] with 0<=i<=x, 0<=j<=y, 0<=k<=z whose sum != n.
if __name__ == '__main__':
    x = int(input())
    y = int(input())
    z = int(input())
    n = int(input())
    final_lst = []
    for i in range(x+1):
        for j in range(y+1):
            for k in range(z+1):
                lst=[i,j,k]
                if (i+j+k) != n :
                    final_lst.append(lst)
    print(final_lst)
# ## Exercise 9 - Basic data types - Find the Runner-Up Score!
# Print the second-highest distinct score.
if __name__ == '__main__':
    n = int(input())
    arr = map(int, input().split())
    lst = list(arr)
    # dict.fromkeys drops duplicates while preserving order (Python 3.7+).
    lst_no_duplicates = list(dict.fromkeys(lst))
    sort_lst = sorted(lst_no_duplicates)
    print(sort_lst[-2])
# ## Exercise 10 - Basic data types - Nested Lists
# +
# Exercise 10: print (alphabetically) the students with the second-lowest grade.
marksheet = []
scorelist = []
if __name__ == '__main__':
    for i in range(int(input())):
        name = input()
        score = float(input())
        marksheet += [[name,score]]
        scorelist += [score]
    # b is the second-lowest distinct score.
    b = sorted(list(set(scorelist)))[1]
    for a,c in sorted(marksheet):
        if c==b:
            print(a)
# -
# ## Exercise 11 - Basic data types - Finding the percentage
# Average of the queried student's marks, printed with two decimals.
if __name__ == '__main__':
    n = int(input())
    student_marks = {}
    for _ in range(n):
        name, *line = input().split()
        scores = list(map(float, line))
        # NOTE(review): assumes exactly 3 marks per student (per the problem
        # statement); len(scores) would generalize.
        average = sum(scores) / 3
        student_marks[name] = average
    query_name = input()
    print('%.2f' % student_marks[query_name])
# ## Exercise 12 - Basic data types - Lists
# Apply a sequence of list commands read from stdin.
if __name__ == '__main__':
    n = int(input())
    result = []
    for line in range(n):
        instruction = input().split(" ")
        command = instruction[0]
        if command == 'append':
            result.append(int(instruction[1]))
        if command == 'print':
            print(result)
        if command == 'insert':
            result.insert(int(instruction[1]), int(instruction[2]))
        if command == 'reverse':
            result = result[::-1]
        if command == 'pop':
            result.pop()
        if command == 'sort':
            result = sorted(result)
        if command == 'remove':
            result.remove(int(instruction[1]))
# ## Exercise 13 - Basic data types - Tuples
# Print the hash of the tuple of input integers.
if __name__ == '__main__':
    n = int(input())
    integer_list = map(int,input().split())
    t = tuple(integer_list)
    print(hash(t))
# ## Exercise 14 - Strings - sWAP cASE
def swap_case(s):
    """Return *s* with the case of every cased character inverted."""
    return s.swapcase()
# ## Exercise 15 - Strings - String Split and Join
def split_and_join(line):
    """Return *line* with every single space replaced by a hyphen.

    Equivalent to splitting on " " and joining with "-": consecutive spaces
    each become their own hyphen.
    """
    return line.replace(" ", "-")
# ## Exercise 16 - Strings - What's Your Name?
def print_full_name(a,b):
print("Hello "+ a + " " + b + "! You just delved into python.")
# ## Exercise 17 - Strings - Mutations
def mutate_string(string, position, character):
    """Return a copy of *string* with *character* spliced in at index *position*."""
    prefix, suffix = string[:position], string[position + 1:]
    return prefix + character + suffix
# ## Exercise 18 - Strings - Find a string
def count_substring(string, sub_string):
    """Count occurrences of *sub_string* in *string*, counting overlaps."""
    width = len(sub_string)
    return sum(
        1
        for start in range(len(string))
        if string[start:start + width] == sub_string
    )
# ## Exercise 19 - Strings - String Validators
# Exercise 19: one True/False per line for each character-class test.
if __name__ == '__main__':
    s = input()
    print(any(char.isalnum() for char in s))
    print(any(char.isalpha() for char in s))
    print(any(char.isdigit() for char in s))
    print(any(char.islower() for char in s))
    print(any(char.isupper() for char in s))
# ## Exercise 20 - Strings - Text Alignment
# +
#Replace all ______ with rjust, ljust or center.
# Draws the HackerRank "H" logo out of character c, scaled by thickness.
thickness = int(input()) #This must be an odd number
c = 'H'
#Top Cone
for i in range(thickness):
    print((c*i).rjust(thickness-1)+c+(c*i).ljust(thickness-1))
#Top Pillars
for i in range(thickness+1):
    print((c*thickness).center(thickness*2)+(c*thickness).center(thickness*6))
#Middle Belt
for i in range((thickness+1)//2):
    print((c*thickness*5).center(thickness*6))
#Bottom Pillars
for i in range(thickness+1):
    print((c*thickness).center(thickness*2)+(c*thickness).center(thickness*6))
#Bottom Cone
for i in range(thickness):
    print(((c*(thickness-i-1)).rjust(thickness)+c+(c*(thickness-i-1)).ljust(thickness)).rjust(thickness*6))
# -
# ## Exercise 21 - Strings - Text Wrap
def wrap(string, max_width):
    """Wrap *string* into lines of at most *max_width* characters.

    Returns the wrapped text as a single newline-joined string.
    """
    # Local import: this notebook cell never imported textwrap at the top,
    # so the original raised NameError when run stand-alone.
    import textwrap
    return textwrap.fill(string, max_width)
# ## Exercise 22 - Strings - Designer Door Mat
# +
# Enter your code here. Read input from STDIN. Print output to STDOUT
height, length = map(int, input().split())
for i in range(0, height // 2):
s = '.|.' * (i * 2 + 1)
print(s.center(length,'-'))
print('WELCOME'.center(length, '-'))
for i in range(height // 2 - 1, -1, -1):
s = '.|.' * (i * 2 + 1)
print(s.center(length,'-'))
# -
# ## Exercise 23 - Strings - String Formatting
def print_formatted(number):
    """Print 1..number in decimal, octal, hex (upper) and binary columns.

    Every column is right-justified to the width of the binary form of
    *number*, which is the widest representation.
    """
    width = len(bin(number)) - 2  # strip the '0b' prefix
    for value in range(1, number + 1):
        print(f"{value:{width}d} {value:{width}o} {value:{width}X} {value:{width}b}")
# ## Exercise 24 - Strings - Alphabet Rangoli
def print_rangoli(size):
    """Print an alphabet rangoli of the given *size*.

    Each row is a palindrome of letters joined by '-', centred with '--'
    padding so every row has the same width.
    """
    alphabet = 'abcdefghijklmnopqrstuvwxyz'[:size]
    for offset in range(size - 1, -size, -1):
        x = abs(offset)
        segment = alphabet[x:size]
        # Mirror the segment (dropping the repeated centre letter).
        row = segment[:0:-1] + segment
        print("--" * x + '-'.join(row) + "--" * x)
# ## Exercise 25 - Strings - Capitalize!
def solve(s):
    """Capitalize the first letter of every space-separated word in *s*.

    Splitting on a literal ' ' preserves runs of multiple spaces.
    """
    return ' '.join(map(str.capitalize, s.split(' ')))
# ## Exercise 26 - Strings - The Minion Game
def minion_game(string):
    """Score the Minion Game and print the winner and their score.

    Every substring starting at index i contributes (len - i) points:
    to Kevin when string[i] is a vowel, to Stuart otherwise.
    """
    vowels = 'AEIOU'
    length = len(string)
    kevin = sum(length - i for i in range(length) if string[i] in vowels)
    stuart = sum(length - i for i in range(length) if string[i] not in vowels)
    if stuart > kevin:
        print('Stuart', stuart)
    elif kevin > stuart:
        print('Kevin', kevin)
    else:
        print('Draw')
# ## Exercise 27 - Strings - Merge the Tools!
# +
from collections import OrderedDict
def merge_the_tools(string, k):
    """Split *string* into len/k chunks of size k and print each chunk
    with duplicate characters removed (first occurrence kept)."""
    # zip over k copies of one iterator yields consecutive size-k chunks.
    for chunk in zip(*[iter(string)] * k):
        print(''.join(OrderedDict.fromkeys(chunk)))
# -
# ## Exercise 28 - Sets - Introduction to Sets
def average(array):
    """Return the mean of the distinct values in *array*."""
    distinct = set(array)
    return sum(distinct) / len(distinct)
# ## Exercise 29 - Sets - No Idea!
# +
n,m = input().split()
arr = input().split()
A = set(input().split())
B = set(input().split())
happiness = 0
for i in arr:
if i in A:
happiness += 1
if i in B:
happiness -= 1
print(happiness)
# -
# ## Exercise 30 - Sets - Symmetric Difference
# +
i, m = input(), set(map(int, input().split()))
i, n = input(), set(map(int, input().split()))
print(*sorted(m.symmetric_difference(n)), sep='\n')
# -
# ## Exercise 31 - Sets - Set .add()
# +
n = int(input())
s = set()
for i in range(n):
string = input()
s.add(string)
print(len(s))
# -
# ## Exercise 32 - Sets - Set .discard(), .remove() & .pop()
# +
n = int(input())
s = set(map(int, input().split()))
for i in range(int(input())):
string = input().split()
if string[0] == 'remove':
s.remove(int(string[1]))
elif string[0] == 'discard':
s.discard(int(string[1]))
elif string[0] == 'pop':
s.pop()
print(sum(s))
# -
# ## Exercise 33 - Sets - Set .union() Operation
# +
n = input()
set_n = set(map(int, input().split()))
b = input()
set_b = set(map(int, input().split()))
print(len(set_n.union(set_b)))
# -
# ## Exercise 34 - Sets - Set .intersection() Operation
# +
n = input()
set_n = set(map(int, input().split()))
b = input()
set_b = set(map(int, input().split()))
print(len(set_n.intersection(set_b)))
# -
# ## Exercise 35 - Sets - Set .difference() Operation
# +
n = input()
set_n = set(map(int, input().split()))
b = input()
set_b = set(map(int, input().split()))
print(len(set_n.difference(set_b)))
# -
# ## Exercise 36 - Sets - Set .symmetric_difference() Operation
# +
n = input()
set_n = set(map(int, input().split()))
b = input()
set_b = set(map(int, input().split()))
print(len(set_n.symmetric_difference(set_b)))
# -
# ## Exercise 37 - Sets - Set Mutations
# +
a = int(input())
set1 = set(map(int, input().split()))
N = int(input())
for _ in range(N):
cmd = input().split()[0]
set2 = set(map(int, input().split()))
if(cmd == "intersection_update"):
set1.intersection_update(set2)
elif(cmd == "update"):
set1.update(set2)
elif(cmd == "symmetric_difference_update"):
set1.symmetric_difference_update(set2)
elif(cmd == "difference_update"):
set1.difference_update(set2)
print(sum(set1))
# -
# ## Exercise 38 - Sets - The Captain's Room
# +
k = int(input())
numbers = list(map(int, input().split()))
numbers_set = set(numbers)
print(((sum(numbers_set)*k) - (sum(numbers))) // (k-1))
# -
# ## Exercise 39 - Sets - Check Subset
for i in range(int(input())):
_, A = input(), set(map(int, input().split()))
_, B = input(), set(map(int, input().split()))
print(A.issubset(B))
# ## Exercise 40 - Sets - Check Strict Superset
# +
A = set(input().split())
for _ in range(int(input())):
if not A.issuperset(set(input().split())):
print(False)
break
else:
print(True)
# -
# ## Exercise 41 - Collections - collections.Counter()
# +
import collections
numShoes = int(input())
shoes = collections.Counter(map(int,input().split()))
numCust = int(input())
income = 0
for i in range(numCust):
size, price = map(int,input().split())
if shoes[size]:
income += price
shoes[size] -= 1
print(income)
# -
# ## Exercise 42 - Collections - DefaultDict Tutorial
# +
from collections import defaultdict
n, m = map(int, input().split())
d = defaultdict(list)
for i in range(1, n + 1):
d[input()].append(str(i))
for i in range(m):
print(' '.join(d[input()]) or -1)
# -
# ## Exercise 43 - Collections - Collections.namedtuple()
# +
from collections import namedtuple
n = int(input())
a = input()
total = 0
Student = namedtuple('Student', a)
for _ in range(n):
student = Student(*input().split())
total += int(student.MARKS)
print('{:.2f}'.format(total/n))
# -
# ## Exercise 44 - Collections - Collections.OrderedDict()
# +
from collections import OrderedDict
n = int(input())
ordered_dictionary = OrderedDict()
for i in range(n):
litem = input().split(' ')
price = int(litem[-1])
item_name = " ".join(litem[:-1])
if ordered_dictionary.get(item_name):
ordered_dictionary[item_name] += price
else:
ordered_dictionary[item_name] = price
for i,v in ordered_dictionary.items():
print(i,v)
# -
# ## Exercise 45 - Collections - Word Order
# +
from collections import OrderedDict
ordered_dictionary = OrderedDict()
for _ in range(int(input())):
word = input()
ordered_dictionary[word] = ordered_dictionary.get(word, 0) + 1
print(len(ordered_dictionary))
print(*ordered_dictionary.values())
# -
# ## Exercise 46 - Collections - Collections.deque()
# +
from collections import deque
d = deque()
for _ in range(int(input())):
line = input().split()
if line[0] == 'append':
d.append(line[1])
elif line[0] == 'pop':
d.pop()
elif line[0] == 'popleft':
d.popleft()
elif line[0] == 'appendleft':
d.appendleft(line[1])
print(*d)
# -
# ## Exercise 47 - Collections - Company Logo
# ## Exercise 48 - Collections - Piling Up!
# ## Exercise 49 - Date time - Calendar Module
# +
import calendar
# Read a date as "MM DD YYYY" and print its weekday name in upper case.
month, day, year = map(int, input().split())
# calendar.weekday(): Monday == 0 ... Sunday == 6; calendar.day_name is
# indexed the same way, so the 7-branch if/elif chain is unnecessary.
print(calendar.day_name[calendar.weekday(year, month, day)].upper())
# -
# ## Exercise 50 - Date time - Time Delta
# +
# #!/bin/python3
import math
import os
import random
import re
import sys
from datetime import datetime
# Complete the time_delta function below.
def time_delta(t1, t2):
    """Return the absolute difference in seconds between two timestamps.

    Inputs look like 'Sun 10 May 2015 13:54:36 -0700' (day, date, month,
    year, time, UTC offset); the result is returned as a string.
    """
    fmt = '%a %d %b %Y %H:%M:%S %z'
    first = int(datetime.strptime(t1, fmt).timestamp())
    second = int(datetime.strptime(t2, fmt).timestamp())
    return str(abs(first - second))
if __name__ == '__main__':
fptr = open(os.environ['OUTPUT_PATH'], 'w')
t = int(input())
for t_itr in range(t):
t1 = input()
t2 = input()
delta = time_delta(t1, t2)
fptr.write(delta + '\n')
fptr.close()
# -
# ## Exercise 51 - Exceptions -
# For each test case read two integers and print their integer quotient,
# printing the error instead of crashing on bad input / division by zero.
for i in range(int(input())):
    try:
        a,b = map(int,input().split())
        print(a // b)
    except Exception as e:  # broad on purpose: the exercise prints any error code
        print("Error Code:",e)
# ## Exercise 52 - Built-ins - Zipped!
# +
n, x = map(int, input().split())
sheet = []
for _ in range(x):
sheet.append( map(float, input().split()) )
for i in zip(*sheet):
print( sum(i)/len(i))
# -
# ## Exercise 53 - Built-ins - Athlete Sort
# +
# #!/bin/python3
import math
import os
import random
import re
import sys
if __name__ == '__main__':
n, m = map(int, input().split())
numbers = [list(map(int, input().split())) for i in range(n)]
k = int(input())
numbers.sort(key=lambda x: x[k])
for line in numbers:
print(*line, sep=' ')
# -
# ## Exercise 54 - Built-ins - ginortS
# +
import string
order = string.ascii_letters + '1357902468'
ordered_string = sorted(input(), key=order.index)
print(*ordered_string,sep='')
# -
# ## Exercise 55 - Map and lambda function
# +
# Cube of x (HackerRank supplies the surrounding map/print scaffolding).
cube = lambda x: x**3
def fibonacci(n):
    """Return the first *n* Fibonacci numbers as a list (n may be 0).

    The original built the list via a throwaway list comprehension used
    purely for its side effects; a plain loop states the intent.
    """
    result = [0, 1]
    for i in range(2, n):
        result.append(result[i - 2] + result[i - 1])
    # The slice handles n < 2, where the seed list is longer than requested.
    return result[:n]
# -
# ## Exercise 56 - Regex - Detect Floating Point Number
# +
import re
n = int(input())
for i in range(n):
print(bool(re.search(r"^[+-]?[0-9]*\.[0-9]+$",input())))
# -
# ## Exercise 57 - Regex - Re.split()
regex_pattern = r"[,.]"
# ## Exercise 58 - Regex - Group(), Groups() & Groupdict()
# +
import re
m = re.search(r"([a-z0-9])\1+", input())
print(m.group(1) if m else -1)
# -
# ## Exercise 59 - Regex - Re.findall() & Re.finditer()
# +
import re
vow = 'aeiou'
cons = 'bcdfghjklmnpqrstvwxyz'
regex = '(?<=[' + cons + '])([' + vow + ']{2,})[' + cons + ']'
match = re.findall(regex, input(), re.IGNORECASE)
if match:
print(*match, sep='\n')
else:
print('-1')
# -
# ## Exercise 60 - Regex - Re.start() & Re.end()
# ## Exercise 61 - Regex - Regex Substitution
# ## Exercise 62 - Regex - Validating Roman Numerals
regex_pattern = r"^M{0,3}(CM|CD|D?C{0,3})(XC|XL|L?X{0,3})(IX|IV|V?I{0,3})$"
# ## Exercise 63 - Regex - Validating phone numbers
# +
import re
n = int(input())
for _ in range(n):
if re.match(r'[789]\d{9}$',input()):
print('YES')
else:
print('NO')
# -
# ## Exercise 64 - Regex - Validating and Parsing Email Addresses
# +
import re
regex = r"^<[a-zA-Z][\w\.-]*@[a-zA-Z]+\.[a-zA-Z]{1,3}>$"
for _ in range(int(input())):
name, email = input().split()
matches = re.match(regex, email)
if matches:
print(name, email)
# -
# ## Exercise 65 - Regex - Hex Color Code
# +
import re
regex = r'[\s:](#[a-f\d]{3,6})'
for _ in range(int(input())):
line = input()
matches = re.findall(regex, line, re.IGNORECASE)
if matches:
print("\n".join(matches))
# -
# ## Exercise 66 - Regex - HTML Parser - Part 1
# ## Exercise 67 - Regex - HTML Parser - Part 2
# ## Exercise 68 - Regex - Detect HTML Tags, Attributes and Attribute Values
# ## Exercise 69 - Regex - Validating UID
# ## Exercise 70 - Regex - Validating Credit Card Numbers
# ## Exercise 71 - Regex - Validating Postal Codes
# ## Exercise 72 - Regex - Matrix Script
# ## Exercise 73 - Xml - XML 1 - Find the Score
def get_attr_number(node):
    """Recursively count the XML attributes on *node* and all descendants."""
    return len(node.attrib) + sum(get_attr_number(child) for child in node)
# ## Exercise 74 - Xml - XML 2 - Find the Maximum Depth
# Module-level accumulator for the deepest nesting level seen so far.
maxdepth = 0
def depth(elem, level):
    """Recursively walk *elem*'s children, updating the global ``maxdepth``.

    HackerRank calls this with level = -1 for the root, so the root
    counts as depth 0. Only the global is updated; nothing is returned.
    """
    global maxdepth
    maxdepth = level + 1 if (level+1)>maxdepth else maxdepth  # record new maximum
    for child in list(elem):
        depth(child,level+1)
# ## Exercise 75 - Closures and decorators - Standardize Mobile Number Using Decorators
def wrapper(f):
    """Decorator: pass *f* the numbers reformatted as '+91 xxxxx xxxxx'.

    Only the last 10 digits of each entry are kept, so inputs with or
    without a country prefix normalise to the same form.
    """
    def fun(l):
        f(f'+91 {number[-10:-5]} {number[-5:]}' for number in l)
    return fun
# ## Exercise 76 - Closures and decorators - Decorators 2 - Name Directory
def person_lister(f):
    """Decorator: apply *f* to each person record, ordered by age.

    A record is a sequence whose third field is the age as a string;
    sorted() is stable, so equal ages keep their input order.
    """
    def inner(people):
        ordered = sorted(people, key=lambda person: int(person[2]))
        return map(f, ordered)
    return inner
# ## Exercise 77 - Numpy - Arrays
def arrays(arr):
    """Convert *arr* to a float NumPy array with the element order reversed."""
    return numpy.array(arr, float)[::-1]
# ## Exercise 78 - Numpy - Shape and Reshape
# +
import numpy as np
arr=np.array(list(map(int,input().split())))
arr.shape=(3,3)
print(arr)
# -
# ## Exercise 79 - Numpy - Transpose and Flatten
# +
import numpy as np
n, m = map(int, input().split())
arr = np.array([input().strip().split() for _ in range(n)], int)
print(arr.transpose())
print(arr.flatten())
# -
# ## Exercise 80 - Numpy - Concatenate
# +
import numpy as np
n, m, p = map(int,input().split())
arr1 = np.array([input().split() for _ in range(n)],int)
arr2 = np.array([input().split() for _ in range(m)],int)
print(np.concatenate((arr1, arr2), axis = 0))
# -
# ## Exercise 81 - Numpy - Zeros and Ones
# +
import numpy as np
# Read the target shape, then print an all-zeros and an all-ones integer array.
d = tuple(map(int, input().split()))
# NB: the np.int alias was removed in NumPy 1.24 -- use the builtin int.
arr1 = np.zeros(d, dtype=int)
print(arr1)
arr2 = np.ones(d, dtype=int)
print(arr2)
# -
# ## Exercise 82 - Numpy - Eye and Identity
# +
import numpy as np
n,m = map(int, input().split(' '))
np.set_printoptions(sign=' ')
print(np.eye(n,m))
# -
# ## Exercise 83 - Numpy - Array Mathematics
# +
import numpy as np
n,m = map(int,input().split())
arr1 = np.array([input().split() for i in range(n)], int)
arr2 = np.array([input().split() for i in range(n)], int)
print(arr1+arr2)
print(arr1-arr2)
print(arr1*arr2)
print(arr1//arr2)
print(arr1%arr2)
print(arr1**arr2)
# -
# ## Exercise 84 - Numpy - Floor, Ceil and Rint
# +
import numpy as np
np.set_printoptions(sign=' ')
arr = np.array(input().split(),float)
print(np.floor(arr))
print(np.ceil(arr))
print(np.rint(arr))
# -
# ## Exercise 85 - Numpy - Sum and Prod
# +
import numpy as np
n,m = map(int,input().split())
arr = np.array([input().split() for i in range(n)], int)
print(np.prod((np.sum(arr,axis=0))))
# -
# ## Exercise 86 - Numpy - Min and Max
# +
import numpy as np
n, m = map(int, input().split())
arr = np.array([input().split() for i in range(n)], int)
print(np.max(np.min(arr, axis=1)))
# -
# ## Exercise 87 - Numpy - Mean, Var, and Std
# +
import numpy as np
n,m = map(int, input().split())
arr = np.array([input().split() for i in range(n)],int)
np.set_printoptions(sign=' ')
print(np.mean(arr, axis = 1))
print(np.var(arr, axis = 0))
print(round(np.std(arr, axis = None),12))
# -
# ## Exercise 88 - Numpy - Dot and Cross
# +
import numpy as np
n = int(input())
arr1=np.array([list(map(int,input().split())) for i in range(n)])
arr2=np.array([list(map(int,input().split())) for i in range(n)])
print(np.dot(arr1,arr2))
# -
# ## Exercise 89 - Numpy - Inner and Outer
# +
import numpy as np
arr1 = np.array(input().split(), int)
arr2 = np.array(input().split(), int)
print(np.inner(arr1,arr2))
print(np.outer(arr1,arr2))
# -
# ## Exercise 90 - Numpy - Polynomials
# +
import numpy as np
coef = list(map(float,input().split()))
x = float(input())
arr = np.array(coef)
print(np.polyval(arr,x))
# -
# ## Exercise 91 - Numpy - Linear Algebra
# +
import numpy as np
n = int(input())
arr =np.array([input().split() for i in range(n)],float)
print(round(np.linalg.det(arr),2))
# -
# # Problem 2
# ## Exercise 92 - Challenges - Birthday Cake Candles
# +
# #!/bin/python3
import math
import os
import random
import re
import sys
# Complete the birthdayCakeCandles function below.
def birthdayCakeCandles(ar):
    """Return how many candles share the maximum height in *ar*."""
    tallest = max(ar)
    return sum(1 for height in ar if height == tallest)
if __name__ == '__main__':
fptr = open(os.environ['OUTPUT_PATH'], 'w')
ar_count = int(input())
ar = list(map(int, input().rstrip().split()))
result = birthdayCakeCandles(ar)
fptr.write(str(result) + '\n')
fptr.close()
# -
# ## Exercise 93 - Challenges - Kangaroo
# +
# #!/bin/python3
import math
import os
import random
import re
import sys
# Complete the kangaroo function below.
# Solving this problem is like solving the equation x1+n*v1 = x2+n*v2 with n being the number of jumps
# Moreover x2 > x1, so if v2>=v1 then 1 will never catch up 2
def kangaroo(x1, v1, x2, v2):
    """Return 'YES' if the trailing kangaroo ever lands on the leading one.

    Kangaroo 2 starts ahead (x2 > x1), so kangaroo 1 can only catch up
    when it jumps farther (v1 > v2) and the gap divides evenly into the
    per-jump closing distance.
    """
    if v1 <= v2:
        # Guard also avoids a ZeroDivisionError when v1 == v2.
        return 'NO'
    return 'YES' if (x2 - x1) % (v2 - v1) == 0 else 'NO'
if __name__ == '__main__':
fptr = open(os.environ['OUTPUT_PATH'], 'w')
x1V1X2V2 = input().split()
x1 = int(x1V1X2V2[0])
v1 = int(x1V1X2V2[1])
x2 = int(x1V1X2V2[2])
v2 = int(x1V1X2V2[3])
result = kangaroo(x1, v1, x2, v2)
fptr.write(result + '\n')
fptr.close()
# -
# ## Exercise 94 - Challenges - Viral Advertising
# +
# #!/bin/python3
import math
import os
import random
import re
import sys
# Complete the viralAdvertising function below.
def viralAdvertising(n):
    """Return cumulative likes after *n* days of the viral-advertising model.

    Day 1: 5 people are reached and floor(5/2) = 2 like the ad. Each
    following day, every liker shares with 3 friends and half of those
    (floored) like it.
    """
    day_likes = 2
    cumulative = 2
    # range(n - 1) is empty when n == 1, matching the day-1 base case.
    for _ in range(n - 1):
        day_likes = day_likes * 3 // 2
        cumulative += day_likes
    return cumulative
if __name__ == '__main__':
fptr = open(os.environ['OUTPUT_PATH'], 'w')
n = int(input())
result = viralAdvertising(n)
fptr.write(str(result) + '\n')
fptr.close()
# -
# ## Exercise 95 - Challenges - Recursive Digit Sum
# ## Exercise 96 - Challenges - Insertion Sort - Part 1
# ## Exercise 97 - Challenges - Insertion Sort - Part 2
| HW1.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# language: python
# name: python3
# ---
# +
# Copyright 2020 Google LLC
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# https://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# -
# <a target="_blank" href="https://colab.research.google.com/github/GoogleCloudPlatform/keras-idiomatic-programmer/blob/master/books/deep-learning-by-design/Deep%20Learning%20by%20Design%20-%20Workshop%20-%20Chapter%203%20-%201.ipynb">
# <img src="https://www.tensorflow.org/images/colab_logo_32px.png" />Run in Google Colab</a>
# # Deep Learning by Design - Code Labs
#
# ## Lab Exercise #3 - Get Familiar with Data Preprocessing
#
# ## Prerequisites:
#
# 1. Familiar with Python
# 2. Completed Chapter 3: Training Foundation
#
# ## Objectives:
#
# 1. Reading and Resizing Images
# 2. Assembling images into datasets
# 3. Setting the datatype
# 4. Normalizing/Standardizing images
# ## Setup:
#
# Install the additional relevant packages to get started with Keras/OpenCV, and then import them.
# +
# Install OpenCV computer vision package
# !pip install -U opencv-python
# Import OpenCV
import cv2
# We will also be using the numpy package in this code lab
import numpy as np
print(cv2.__version__)
# -
# ## Reading/Resizing Images
#
# Let's read in an image and then resize it for input vector (for CNN) as 128x128.
#
#
# You fill in the blanks (replace the ??), make sure it passes the Python interpreter, and then verify it's correctness with the summary output.
#
# You will need to:
#
# 1. Set the parameter for reading an image in as color (RGB)
# +
from urllib.request import urlopen
# Let's read in the image as a color image
url = "https://github.com/GoogleCloudPlatform/keras-idiomatic-programmer/blob/master/books/deep-learning-by-design/apple.jpg"
request = urlopen(url)
img_array = np.asarray(bytearray(request.read()), dtype=np.uint8)
# HINT: the parameter value for a color image
image = cv2.imdecode(img_array, cv2.IMREAD_??)
# Let's verify we read it in correctly. We should see (584, 612, 3)
print(image.shape)
# -
# ### Resize it to 128 x 128
#
# Okay, we see that the image is 584 (height) x 612 (width). Hum, that's not square. We could simply resize it to 128 x 128. But if we do that, the image will be skewed. Why? Because the original height and width are not the same, and if we resize them as-is to the same length, we will distort the aspect ratio,
#
# So, let's refit the image into a square frame and then resize it.
#
# You will need to:
#
# 1. Set the padding for the top and bottom.
# +
# Let's calculate the difference between width and height -- this should output 28
pad = (612 - 584)
print("pad size", pad)
# Split the padding evenly between the top and bottom
# HINT: even means half.
top = pad // ??
bottom = pad // ??
left = 0
right = 0
# Let's now make a copy of the image with the padded border.
# cv2.BORDER_CONSTANT means use a constant value for the padded border.
# [0, 0, 0] is the constant value (all black pixels)
color = [0, 0, 0]
image = cv2.copyMakeBorder(image, top, bottom, left, right, cv2.BORDER_CONSTANT,
value=color)
# This should output (612, 612, 3)
print("padded image", image.shape)
# +
# Let's resize the image now to 128 x 128
# HINT: The tuple parameter is height x width
image = cv2.resize(image, (128, 128))
# This should output (128, 128, 3)
print("resized image", image.shape)
# -
# ## Assembling into a dataset.
#
# Let's read in a group of images, resize them to the same size and assemble them into a dataset (i.e., a single numpy multidimensional array).
#
# You will need to:
#
# 1. Specify the numpy method to convert a list to a numpy array.
# +
# Let's build a dataset of four images. We will start by using a list to append each image
# as it is read in.
images = []
for _ in range(4):
# Let's pretend we are reading in different images and resizing them,
# but instead we will just reuse our image from above.
images.append(image)
# convert the list of images to a numpy multidimensional array
# HINT: use the method that converts list to numpy array
images = np.??(images)
# This should output (4, 128, 128, 3), where the 4 indicates this is a batch of 4 images.
print("dataset", images.shape)
# -
# ## Setting the Datatype
#
# Next, we will set the data type of the pixel data to a single precision floating point value. That's a FLOAT32, which means 32 bits (which is 4 bytes).
#
# You will need to:
#
# 1. Specify the data type for a 32-bit float.
# +
# Set the datatype to single precision float (FLOAT32)
# HINT: It is lowercased.
images = images.astype(np.??)
# This should output: 4
print("bytes per pixel", images.itemsize)
# -
# ## Normalizing/Standardizing the Pixel Data
#
# Finally, we will standardize the pixel data:
#
# 1. Calculate the mean and standard deviation using numpy methods
# 2. Subtract the mean from images and then divide by the standard deviation
#
# You will need to:
#
# 1. Subtract the standard deviation
# +
# Calculate the mean value across all the pixels
mean = np.mean(images)
# Calculate the corresponding standard deviation
std = np.std(images)
# Subtract the mean and divide by the standard deviation
# HINT: you calculate the standard deviation above.
images = (images - mean) / ??
# Let's print the before and after values:
# You should see: 3.1789145e-07 -7.0159636e-08
print(mean, np.mean(images))
# -
# ## End of Lab Exercise
| books/deep-learning-by-design/Deep Learning by Design - Workshop - Chapter 3 - 1.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# language: python
# name: python3
# ---
import arviz as az
data = az.load_arviz_data("centered_eight")
az.backends.output_file("three_column_plot.html")
az.rcParams
# +
# {'column', 'square', 'default', 'row', 'square_trimmed'} or regex {'\\d*row', '\\d*column'}
# -
az.rcParams["plot.bokeh.layout.order"] = "3column"
# +
# {'stretch_width', 'scale_width', 'stretch_both', 'stretch_height', 'scale_height', 'fixed', 'scale_both'}
# -
az.rcParams["plot.bokeh.layout.sizing_mode"] = "scale_width"
az.rcParams["plot.max_subplots"] = 100
az.rcParams["plot.backend"] = "bokeh"
axes = az.plot_pair(data, show=False, divergences=True, backend_kwargs={"width" : 300, "height" : 300})
az.backends.show_layout(axes, show=True)
| Arviz_examples/arviz_bokeh_customization.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 2
# language: python
# name: python2
# ---
import numpy as np
import matplotlib.pyplot as plt
DHESN_PCA = np.genfromtxt("DHESN_RESULTS/DHESN_data_VARIOUS_DHESN_WITH_PCA_2__2018-03-21.csv", delimiter=',', skip_header=1)
print(DHESN_PCA)
import pandas as pd
data_pca = pd.read_csv("DHESN_RESULTS/DHESN_data_VARIOUS_DHESN_WITH_PCA_2__2018-03-21.csv", delimiter=',')
data_pca.sort_values(by=[data_pca.columns[-2]])
data_vae = pd.read_csv("DHESN_RESULTS/DHESN_data_VARIOUS_DHESN_WITH_VAE_GRID_SEARCH_3__2018-03-20.csv", delimiter=',')
data_vae_no_zeros = data_vae.drop(data_vae[data_vae[data_vae.columns[0]] == 0].index)
len(data_vae_no_zeros)
data_vae_no_zeros.sort_values(by=[data_vae_no_zeros.columns[-2]])
# %load_ext autoreload
from ESN.ESN import DHESN, ESN
from Helper.utils import nrmse
from MackeyGlass.HenonGenerator import runHenon
from MackeyGlass.MackeyGlassGenerator import run
data = np.array([run(15100, init_x=1.0, init_x_tau=0.8)]).reshape(-1, 1)
print(data)
print(np.shape(data))
MEAN_OF_DATA = np.mean(data)
split = 14100
X_train = np.array(data[:split-1])
y_train = np.array(data[1:split])
X_valid = np.array(data[split-1:-1])
y_valid = np.array(data[split:])
print(np.shape(X_train))
print(np.shape(y_train))
plt.plot(range(len(X_valid[:500])), X_valid[:500])
plt.show()
# data_pca.sort_values(by=[data_pca.columns[-2]])
data_vae_no_zeros.sort_values(by=[data_vae_no_zeros.columns[-2]])
import time
# +
runs = 10
_errs = []
_times = []
for _ in range(runs):
n=8
start_time = time.time()
# dhesn = DHESN(1, 1, num_reservoirs = n,
# reservoir_sizes=np.linspace(300, 300, n, endpoint=True).astype(int),
# echo_params=[0.2618, 0.6311, 0.2868, 0.6311, 0.2868, 0.6311, 0.2868, 0.6311],
# regulariser=1e-6,
# init_echo_timesteps=100,
# dims_reduce=np.linspace(30, 30, n-1, endpoint=True).astype(int).tolist(),
# encoder_type='VAE', train_epochs=4)
# dhesn.initialize_input_weights(
# scales=[0.7726, 0.4788, 0.6535, 0.4788, 0.6535, 0.4788, 0.6535, 0.4788], strategies='uniform')
# dhesn.initialize_reservoir_weights(
# spectral_scales=[0.8896, 0.8948, 0.3782, 0.8948, 0.3782, 0.8948, 0.3782, 0.8948],
# strategies=['uniform']*n,
# sparsity=0.1)
dhesn = DHESN(1, 1, num_reservoirs = n,
reservoir_sizes=np.linspace(100, 500, n, endpoint=True).astype(int),
echo_params=np.linspace(0.5, 0.1, endpoint=True),
regulariser=1e-6,
init_echo_timesteps=100,
dims_reduce=np.linspace(100, 10, n-1, endpoint=True).astype(int).tolist(),
encoder_type='VAE', train_epochs=4)
dhesn.initialize_input_weights(
scales=np.linspace(0.5, 1.0, endpoint=True), strategies='uniform')
dhesn.initialize_reservoir_weights(
spectral_scales=np.linspace(0.9, 0.3, endpoint=True),
strategies=['uniform']*n,
sparsity=1.0)
# start_time = time.time()
# dhesn = DHESN(1, 1, num_reservoirs = n,
# reservoir_sizes=np.linspace(200, 400, n, endpoint=True).astype(int),
# echo_params=np.linspace(0.5, 0.1, n, endpoint=True),
# regulariser=1e-6,
# init_echo_timesteps=100,
# dims_reduce=np.linspace(30, 80, n-1, endpoint=True).astype(int).tolist(),
# encoder_type='PCA')
# dhesn.initialize_input_weights(scales=np.linspace(0.5, 0.5, n, endpoint=True).tolist(), strategies='uniform')
# dhesn.initialize_reservoir_weights(spectral_scales=np.linspace(0.4, 1.2, n, endpoint=True).tolist(),
# strategies=['uniform']*n,
# sparsity=0.1)
# start_time = time.time()
# dhesn = ESN(1, 1,
# reservoir_size=1000,
# echo_param=0.2,
# regulariser=1e-5,
# init_echo_timesteps=100
# )
# dhesn.initialize_input_weights(scale=0.5, strategy='uniform')
# dhesn.initialize_reservoir_weights(spectral_scale=1.5,
# strategy='uniform',
# sparsity=0.1)
# dhesn = DHESN(1, 1, num_reservoirs = n,
# reservoir_sizes=np.linspace(300, 300, n, endpoint=True).astype(int),
# echo_params=np.linspace(0.5, 0.1, n, endpoint=True),
# regulariser=1e-6,
# init_echo_timesteps=100,
# dims_reduce=np.linspace(60, 60, n-1, endpoint=True).astype(int).tolist(),
# encoder_type='PCA')
# dhesn.initialize_input_weights(scales=np.linspace(0.5, 0.5, n, endpoint=True).tolist(), strategies='uniform')
# dhesn.initialize_reservoir_weights(spectral_scales=np.linspace(1.2, 0.4, n, endpoint=True).tolist(),
# strategies=['uniform']*n,
# sparsity=0.1)
dhesn.train(X_train, y_train)
# generate
outs = []
u_n = X_valid[0]
print(u_n)
for _ in range(len(data[split:])):
u_n = dhesn.forward(u_n)
outs.append(u_n)
outs = np.array(outs).squeeze()
y_vals = y_valid.squeeze()
err = nrmse(y_vals, outs, MEAN_OF_DATA)
_errs.append(err)
print("NRMSE: {}".format(err))
total_time = time.time() - start_time
_times.append(total_time)
print("TIME: {}".format(total_time))
# -
print(np.mean(_times))
print(np.max(_times), np.min(_times))
print(np.mean(_errs))
import matplotlib.pyplot as plt
import pickle as pkl
import seaborn as sns

# Persist the per-run errors and timings so the distribution plots below can
# be regenerated without re-running the (slow) experiments.  Use a context
# manager so the file handle is flushed and closed deterministically
# (the original `pkl.dump(..., open(..., "wb"))` leaked the handle).
with open("RUNS_200_FOR_DHESN_VAE_1.pkl", "wb") as f_out:
    pkl.dump((_errs, _times), f_out)

# Global styling for all figures that follow.
sns.set_style("whitegrid")
sns.set_context('notebook', font_scale=1.5)
sns.despine()
# +
# Compare the NRMSE distributions of two saved experiment runs side by side.
_data = []
_title = [r'$\alpha = 0.1$', r'$\alpha = 1.0$']
with open("RUNS_200_FOR_DHESN_PCA.pkl", "rb") as f_in:
    _data1, _times1 = pkl.load(f_in)
with open("RUNS_200_FOR_DHESN_PCA_5.pkl", "rb") as f_in:
    _data2, _times2 = pkl.load(f_in)
_data.append(_data1)
_data.append(_data2)
f, ax = plt.subplots(1, 2, sharey=True, figsize=(12, 4))
# Shared y-axis limits for both panels (also used to place the mean label).
# BUG FIX: `ymax` was previously undefined in this cell (its assignment was
# commented out), so it only ran if a stale `ymax` lingered in the notebook
# session.  Define the limits explicitly, matching the former set_ylim((0, 32)).
ymin, ymax = 0, 32
for i, d in enumerate(_data):
    hist, bins = np.histogram(d, bins=35)
    centres = (bins[1:] + bins[:-1]) / 2.  # bin mid-points for the bar plot
    width = (bins[1] - bins[0])
    m = np.mean(d)
    print(m)
    ax[i].bar(centres, hist, width=width)
    ax[i].plot([m] * 2, [ymin, ymax], linestyle='--', color='red', label='mean')
    ax[i].text(
        m + 0.02, ymax - 3, '%.3f' % m, color='red', fontsize=14
    )
    ax[i].set_xlim((0.0, 1.0))
    ax[i].set_ylim((ymin, ymax))
    ax[i].set_xlabel('NRMSE', fontsize=14)
    ax[i].set_title(_title[i], fontsize=30)
plt.legend()
plt.show()
f.savefig("DHESN_PCA_distribution.pdf")
# +
# Histogram of the NRMSE values collected in `_errs`, with the run mean
# drawn as a dashed vertical line and annotated beside it.
# _data, _times = pkl.load(open("RUNS_200_FOR_DHESN_PCA_2.pkl", "rb"))
f, ax = plt.subplots(figsize=(6, 4))
counts, edges = np.histogram(_errs, bins=35)
mids = (edges[:-1] + edges[1:]) / 2.
bar_w = edges[1] - edges[0]
err_mean = np.mean(_errs)
print(err_mean)
ax.bar(mids, counts, width=bar_w)
ymin, ymax = ax.get_ylim()
ax.plot([err_mean, err_mean], [ymin, ymax], linestyle='--', color='red', label='mean')
ax.text(
    err_mean + 0.02, ymax - 3, '%.3f' % err_mean, color='red', fontsize=14
)
ax.set_xlim((0.0, 1.0))
ax.set_ylim((ymin, ymax))
ax.set_xlabel('NRMSE', fontsize=14)
plt.legend()
plt.show()
f.savefig("DHESN_PCA_distribution.pdf")
# -
| old/oldNotebooks/LEVI_PLOTS_Henon_Map.ipynb |
# -*- coding: utf-8 -*-
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .jl
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Julia 1.1.0
# language: julia
# name: julia-1.1
# ---
#
# <a id='version-control'></a>
# <div id="qe-notebook-header" style="text-align:right;">
# <a href="https://quantecon.org/" title="quantecon.org">
# <img style="width:250px;display:inline;" src="https://assets.quantecon.org/img/qe-menubar-logo.svg" alt="QuantEcon">
# </a>
# </div>
# # Git, GitHub, and Version Control
# ## Contents
#
# - [Git, GitHub, and Version Control](#Git,-GitHub,-and-Version-Control)
# - [Setup](#Setup)
# - [Basic Objects](#Basic-Objects)
# - [Individual Workflow](#Individual-Workflow)
# - [Collaborative Work](#Collaborative-Work)
# - [Collaboration via Pull Request](#Collaboration-via-Pull-Request)
# - [Additional Resources and Troubleshooting](#Additional-Resources-and-Troubleshooting)
# - [Exercises](#Exercises)
# Co-authored with <NAME>
#
# An essential part of modern software engineering is using version control
#
# We use version control because
#
# - Not all iterations on a file are perfect, and you may want to revert changes
# - We want to be able to see who has changed what and how
# - We want a uniform version scheme to do this between people and machines
# - Concurrent editing on code is necessary for collaboration
# - Version control is an essential part of creating reproducible research
#
#
# In this lecture, we’ll discuss how to use Git and GitHub
# ## Setup
#
# 1. Make sure you create an account on [GitHub.com](http://github.com/)
#
# - If you are a student, be sure to use the GitHub [Student Developer Pack](https://education.github.com/pack/)
# - Otherwise, see if you qualify for a free [Non-Profit/Academic Plan](https://help.github.com/articles/about-github-education-for-educators-and-researchers/)
# - These come with things like unlimited private repositories, testing support, etc.
#
# 1. Install `git` and the GitHub Desktop application
#
# 1. Install [git](https://git-scm.com/book/en/v2/Getting-Started-Installing-Git/)
# 1. Install the [GitHub Desktop](https://desktop.github.com/) application
#
# 1. Optionally (but strongly recommended): On Windows, change the default line-ending by:
#
# 1. Opening a Windows/Powershell console, or the “Git Bash” installed in the previous step
# 1. Running the following
# + [markdown] hide-output=false
# ```text
# git config --global core.eol lf
# git config --global core.autocrlf false
# ```
#
# -
#
#
#
#
# ### Git vs. GitHub vs. GitHub Desktop
#
# To understand the relationship
#
# - Git is an infrastructure for versioning and merging files (it is not specific to GitHub and does not even require an online server)
# - GitHub provides an online service to coordinate working with Git repositories, and adds some additional features for managing projects
# - GitHub Desktop is just one of many GUI-based clients to make Git and GitHub easier to use
#
#
# Later, you may find yourself using alternatives
#
# - GitHub is the market leader for open source projects and Julia, but there are other options, e.g. [GitLab](https://about.gitlab.com/) and [Bitbucket](https://bitbucket.org)
# - Instead of the GitHub Desktop, you may directly use the Git command line, [GitKraken](https://www.gitkraken.com/), or use the Git functionality built into editors such as [Atom](https://atom.io/) or [VS Code](https://code.visualstudio.com/)
#
#
# Since these lecture notes are intended to provide a minimal path to using the technologies, here we will conflate the workflow of these distinct products
# ## Basic Objects
# ### Repositories
#
# The fundamental object in GitHub is a *repository* (or “repo”) – this is the master directory for a project
#
# One example of a repo is the QuantEcon [Expectations.jl](https://github.com/quantecon/expectations.jl/) package
#
# On the machine, a repo is a normal directory, along with a subdirectory called `.git` which contains the history of changes
# ### Commits
#
# GitHub stores history as a sequence of changes to text, called *commits*
#
# [Here](https://github.com/QuantEcon/lecture-source-jl/commit/ba59c3ea9a0dec10def3f4f3928af5e2827f3b92) is an example of a commit, which revises the style guide in a QuantEcon repo
#
# In particular, commits have the following features
#
# - An ID (formally, an “SHA-1 hash”)
# - Content (i.e., a before and after state)
# - Metadata (author, timestamp, commit message, etc.)
#
#
# **Note:** It’s crucial to remember that what’s stored in a commit is only the actual changes you make to text
#
# This is a key reason why git can store long and complicated histories without consuming massive amounts of memory
# ### Common Files
#
# In addition, each GitHub repository typically comes with a few standard text files
#
# - A `.gitignore` file, which lists files/extensions/directories that GitHub shouldn’t try to track (e.g., LaTeX compilation byproducts)
# - A `README.md` file, which is a Markdown file which GitHub puts on the repository website
# - A `LICENSE.txt` file, which describes the terms under which the repository’s contents are made available
#
#
# For an example of all three, see the [Expectations.jl](https://github.com/quantecon/expectations.jl/) repo
#
# Of these, the `README.md` is the most important, as GitHub will display it as [Markdown](https://guides.github.com/features/mastering-markdown/) when accessing the repository online
#
#
# <a id='new-repo-workflow'></a>
# ## Individual Workflow
#
# In this section, we’ll describe how to use GitHub to version your own projects
#
# Much of this will carry over to the collaborative section
# ### Creating a Repository
#
# In general, we will always want to create repos for new projects using the following dropdown
#
# <img src="https://s3-ap-southeast-2.amazonaws.com/lectures.quantecon.org/jl/_static/figures/git-makerepo.png" style="width:100%;height:100%">
#
#
# We can then configure repository options as such
#
# <img src="https://s3-ap-southeast-2.amazonaws.com/lectures.quantecon.org/jl/_static/figures/git-makerepo-full.png" style="width:100%;height:100%">
#
#
# In this case, we’re making a public repo `github.com/quantecon_user/example_repository`, which will come with a `README.md`, is licensed under the MIT License, and will ignore Julia compilation byproducts
#
# **Note** This workflow is for creating projects *de novo*; the process for turning existing directories into git repos is a bit more complicated
#
# In particular, in that case we recommend that you create a new repo via this method, then copy in and commit your files (see below), and then delete the old directory
# ### Cloning a Repository
#
# The next step is to get this to our local machine
#
# <img src="https://s3-ap-southeast-2.amazonaws.com/lectures.quantecon.org/jl/_static/figures/git-clone.png" style="width:100%;height:100%">
#
#
# This dropdown gives us a few options
#
# - “Open in Desktop” will call to the GitHub Desktop application that we’ve installed
# - “Download Zip” will download the directory *without the .git* subdirectory (avoid this option)
# - The copy/paste button next to the link lets us use the command line, i.e. `git clone https://github.com/quanteconuser/example_repository.git`
# ### Making and Managing Changes
#
# Now that we have the repository, we can start working with it
#
# For example, let’s say that we’ve amended the `README.md` (using our editor of choice), and also added a new file `economics.jl` which we’re still working on
#
# Returning to GitHub Desktop, we should see something like
#
# <img src="https://s3-ap-southeast-2.amazonaws.com/lectures.quantecon.org/jl/_static/figures/git-desktop-commit.png" style="width:100%;height:100%">
#
#
# To select individual files for commit, we can use the check boxes to the left of each file
#
# Let’s say you select only the README to commit. Going to the history tab should show you our change
#
# <img src="https://s3-ap-southeast-2.amazonaws.com/lectures.quantecon.org/jl/_static/figures/git-desktop-commit2.png" style="width:100%;height:100%">
#
#
# The Julia file is unchanged
# ### Pushing to the Server
#
# As of now, this commit lives only on our local machine
#
# To upload it to the server, you can simply click the “Push Origin” button at the top the screen
#
# The small “1^” to the right of the text indicates we have one commit to upload
# ### Reading and Reverting History
#
# As mentioned, one of the key features of GitHub is the ability to scan through history
#
# By clicking the “commits” tab on the repo front page,
# we see [this page](https://github.com/quanteconuser/example_repository/commits/master)
# (as an example)
#
# Clicking an individual commit gives us the difference view, (e.g., [example commit](https://github.com/quanteconuser/example_repository/commit/d0b17f5ce0f8742e88da9b604bfed418d6a16884/))
#
# Sometimes, however, we want to not only inspect what happened before, but reverse the commit
#
# - If you haven’t made the commit yet, just right-click the file in the “changes” tab and hit “discard changes” to reset the file to the last known commit
# - If you have made the commit but haven’t pushed to the server yet, go to the “history” tab as above, right click the commit and click “revert this commit.” This will create the inverse commit, shown below
#
#
# <img src="https://s3-ap-southeast-2.amazonaws.com/lectures.quantecon.org/jl/_static/figures/git-revert-commit.png" style="width:100%;height:100%">
# ### Working across Machines
#
# Generally, you want to work on the same project but across multiple machines (e.g., a home laptop and a lab workstation)
#
# The key is to push changes from one machine, and then to pull changes from the other machine
#
# Pushing can be done as above
#
# To pull, simply click pull under the “repository” dropdown at the top of the screen
#
# <img src="https://s3-ap-southeast-2.amazonaws.com/lectures.quantecon.org/jl/_static/figures/git-pull.png" style="width:100%;height:100%">
# ## Collaborative Work
# ### Adding Collaborators
#
# First, let’s add a collaborator to the `quanteconuser/example_repository` lecture we created earlier
#
# We can do this by clicking “settings => collaborators,” as follows
#
# <img src="https://s3-ap-southeast-2.amazonaws.com/lectures.quantecon.org/jl/_static/figures/git-collab.png" style="width:100%;height:100%">
# ### Project Management
#
# GitHub’s website also comes with project management tools to coordinate work between people
#
# The main one is an *issue*, which we can create from the issues tab
#
# You should see something like this
#
# <img src="https://s3-ap-southeast-2.amazonaws.com/lectures.quantecon.org/jl/_static/figures/git-issue.png" style="width:100%;height:100%">
#
#
# Let’s unpack the different components
#
# - The *assignees* dropdown lets you select people tasked to work on the issue
# - The *labels* dropdown lets you tag the issue with labels visible from the issues page, such as “high priority” or “feature request”
# - It’s possible to tag other issues and collaborators (including in different repos) by linking to them in the comments – this is part of what’s called *GitHub-Flavored Markdown*
#
#
# For an example of an issue, see [here](https://github.com/quanteconuser/example_repository/issues/1)
#
# You can see open issues at a glance from the general issues tab
#
# <img src="https://s3-ap-southeast-2.amazonaws.com/lectures.quantecon.org/jl/_static/figures/git-issue-tab.png" style="width:100%;height:100%">
#
#
# The checkboxes are common in GitHub to manage project tasks
# ### Reviewing Code
#
# There are a few different ways to review people’s code in GitHub
#
# - Whenever people push to a project you’re working on, you’ll receive an email notification
# - You can also review individual line items or commits by opening commits in the difference view as [above](https://github.com/quanteconuser/example_repository/commit/d0b17f5ce0f8742e88da9b604bfed418d6a16884/)
#
#
# <img src="https://s3-ap-southeast-2.amazonaws.com/lectures.quantecon.org/jl/_static/figures/git-review.png" style="width:100%;height:100%">
#
#
#
# <a id='merge-conflict'></a>
# ### Merge Conflicts
#
# Any project management tool needs to figure out how to reconcile conflicting changes between people
#
# In GitHub, this event is called a “merge conflict,” and occurs whenever people make conflicting changes to the same *line* of code
#
# Note that this means that two people touching the same file is OK, so long as the differences are compatible
#
# A common use case is when we try to push changes to the server, but someone else has pushed conflicting changes
#
# GitHub will give us the following window
#
# <img src="https://s3-ap-southeast-2.amazonaws.com/lectures.quantecon.org/jl/_static/figures/git-merge-conflict.png" style="width:100%;height:100%">
#
#
# - The warning symbol next to the file indicates the existence of a merge conflict
# - The viewer tries to show us the discrepancy (I changed the word repository to repo, but someone else tried to change it to “repo” with quotes)
#
#
# To fix the conflict, we can go into a text editor (such as Atom)
#
# <img src="https://s3-ap-southeast-2.amazonaws.com/lectures.quantecon.org/jl/_static/figures/atom-merge-conflict.png" style="width:100%;height:100%">
#
#
# Let’s say we click the first “use me” (to indicate that my changes should win out), and then save the file
#
# Returning to GitHub Desktop gives us a pre-formed commit to accept
#
# <img src="https://s3-ap-southeast-2.amazonaws.com/lectures.quantecon.org/jl/_static/figures/git-merge-commit.png" style="width:100%;height:100%">
#
#
# Clicking “commit to master” will let us push and pull from the server as normal
# ## Collaboration via Pull Request
#
# One of the defining features of GitHub is that it is the dominant platform for *open source* code, which anyone can access and use
#
# However, while anyone can make a copy of the source code, not everyone has access to modify the particular version stored on GitHub
#
# A maintainer (i.e. someone with “write” access to directly modify a repository)
# might consider different contributions and “merge” the changes into the main
# repository if the changes meet their criteria
#
# A *pull request* (“PR”) allows **any** outsiders to suggest changes to open source repositories
#
# A PR requests the project maintainer to merge (“pull”) changes you’ve worked on into their repository
#
# There are a few different workflows for creating and handling PRs, which we’ll walk through below
#
# **Note:** If the changes are for a Julia Package, you will need to follow a different workflow – described in the [testing lecture](testing.html)
#
#
# <a id='web-interface'></a>
# ### Quick Fixes
#
# GitHub’s website provides an online editor for quick and dirty changes, such as fixing typos in documentation
#
# To use it, open a file in GitHub and click the small pencil to the upper right
#
# <img src="https://s3-ap-southeast-2.amazonaws.com/lectures.quantecon.org/jl/_static/figures/git-quick-pr.png" style="width:100%;height:100%">
#
#
# Here, we’re trying to add the QuantEcon link to the Julia project’s `README` file
#
# After making our changes, we can then describe and propose them for review by maintainers
#
# But what if we want to make more in-depth changes?
#
#
# <a id='fork-workflow'></a>
# ### No-Access Case
#
# A common problem is when we don’t have write access (i.e. we can’t directly modify) the repo in question
#
# In that case, click the “Fork” button that lives in the top-right of every repo’s main page
#
# <img src="https://s3-ap-southeast-2.amazonaws.com/lectures.quantecon.org/jl/_static/figures/git-fork-button.png" style="width:100%;height:100%">
#
#
# This will copy the repo into your own GitHub account
#
# For example, [this repo](https://github.com/ubcecon/example_repository) is a fork of our original [git setup](https://github.com/quanteconuser/example_repository/)
#
# Clone this fork to our desktop and work with it in exactly the same way as we would a repo we own
# (as the fork is in your account, you now have write access)
#
# That is, click the “clone” button on our fork
#
# <img src="https://s3-ap-southeast-2.amazonaws.com/lectures.quantecon.org/jl/_static/figures/git-clone-fork.png" style="width:100%;height:100%">
#
#
# You’ll see a new repo with the same name but different URL in your GitHub Desktop repo list, along with a special icon to indicate that it’s a fork
#
# <img src="https://s3-ap-southeast-2.amazonaws.com/lectures.quantecon.org/jl/_static/figures/git-repo-list.png" style="width:100%;height:100%">
#
#
# Commit some changes by selecting the files and writing a commit message
#
# <img src="https://s3-ap-southeast-2.amazonaws.com/lectures.quantecon.org/jl/_static/figures/git-fork-changes.png" style="width:100%;height:100%">
#
#
# And push by using the dropdown
#
# <img src="https://s3-ap-southeast-2.amazonaws.com/lectures.quantecon.org/jl/_static/figures/git-dropdown.png" style="width:100%;height:100%">
#
#
# Below, for example, we’ve committed and pushed some changes to the fork that we want to upstream into the main repo
#
# <img src="https://s3-ap-southeast-2.amazonaws.com/lectures.quantecon.org/jl/_static/figures/git-edit-fork.png" style="width:100%;height:100%">
#
#
# We should confirm that these changes are on the server (which we can get to by going to the [fork](https://github.com/ubcecon/example_repository) and clicking “commits”)
#
# <img src="https://s3-ap-southeast-2.amazonaws.com/lectures.quantecon.org/jl/_static/figures/git-fork-history.png" style="width:100%;height:100%">
#
#
# Next, go to the pull requests menu and click “New Pull Request”
#
# You’ll see something like this
#
# <img src="https://s3-ap-southeast-2.amazonaws.com/lectures.quantecon.org/jl/_static/figures/git-create-pr.png" style="width:100%;height:100%">
#
#
# This gives us a quick overview of the commits we want to merge in, as well as the overall differences
#
# Hit create and then click through the following form
#
# This opens a page like this on the main repo
#
# <img src="https://s3-ap-southeast-2.amazonaws.com/lectures.quantecon.org/jl/_static/figures/git-create-pr-2.png" style="width:100%;height:100%">
#
#
# The key pieces are
#
# - A list of the commits we’re proposing
# - A list of reviewers, who can approve or modify our changes
# - Labels, Markdown space, assignees, and the ability to tag other git issues and PRs, just as with issues
#
#
# Here’s an [example pull request](https://github.com/quanteconuser/example_repository/pull/3)
#
# To edit a PR, simply push changes to the fork you cloned to your desktop
#
# For example, let’s say we commit a new change to the README *after* we create the PR
#
# <img src="https://s3-ap-southeast-2.amazonaws.com/lectures.quantecon.org/jl/_static/figures/git-pr-modification.png" style="width:100%;height:100%">
#
#
# After pushing to the server, the change is reflected on the PR [page](https://github.com/quanteconuser/example_repository/pull/3)
#
# <img src="https://s3-ap-southeast-2.amazonaws.com/lectures.quantecon.org/jl/_static/figures/git-pr-expost.png" style="width:100%;height:100%">
#
#
# That is, creating a pull request is not like bundling up your changes and delivering them, but rather like opening an *ongoing connection* between two repositories, that is only severed when the PR is closed or merged
# ### Write Access Case
#
# As you become more familiar with GitHub, and work on larger projects, you will find yourself making PRs even when it isn’t strictly required
#
# If you are a maintainer of the repo (e.g. you created it or are a collaborator) then you don’t need to create a fork, but will rather work with a *git branch*
#
# Branches in git represent parallel development streams (i.e., sequences of commits) that the PR is trying to merge
#
# First, load the repo in GitHub Desktop and use the branch dropdown
#
# <img src="https://s3-ap-southeast-2.amazonaws.com/lectures.quantecon.org/jl/_static/figures/git-pr-branch.png" style="width:100%;height:100%">
#
#
# Click “New Branch” and choose an instructive name (make sure there are no spaces or special characters)
#
# This will “check out” a new branch with the same history as the old one (but new commits will be added only to this branch)
#
# We can see the active branch in the top dropdown
#
# <img src="https://s3-ap-southeast-2.amazonaws.com/lectures.quantecon.org/jl/_static/figures/git-branch.png" style="width:100%;height:100%">
#
#
# For example, let’s say we add some stuff to the Julia code file and commit it
#
# <img src="https://s3-ap-southeast-2.amazonaws.com/lectures.quantecon.org/jl/_static/figures/git-pr-edits.png" style="width:100%;height:100%">
#
#
# To put this branch (with changes) on the server, we simply need to click “Publish Branch”
#
# Navigating to the [repo page](https://github.com/quanteconuser/example_repository), we will see a suggestion about a new branch
#
# <img src="https://s3-ap-southeast-2.amazonaws.com/lectures.quantecon.org/jl/_static/figures/git-new-branch.png" style="width:100%;height:100%">
#
#
# At which point the process of creating a PR is identical to the previous case
# ### Julia Package Case
#
# One special case is when the repo in question is actually a Julia project or package
#
# We cover that (along with package workflow in general) in the [testing lecture](testing.html)
# ## Additional Resources and Troubleshooting
#
# You may want to go beyond the scope of this tutorial when working with GitHub
#
# For example, perhaps you run into a bug, or you’re working with a setup that doesn’t have GitHub Desktop installed
#
# Here are some resources to help
#
# - <NAME>’s excellent [git flight rules](https://github.com/k88hudson/git-flight-rules/), which is a near-exhaustive list of situations you could encounter, and command-line fixes
# - The GitHub [Learning Lab](https://lab.github.com/), an interactive sandbox environment for git
# - The docs for forking on [GitHub Desktop](https://help.github.com/desktop/guides/contributing-to-projects/cloning-a-repository-from-github-to-github-desktop/) and [the GitHub Website](https://guides.github.com/activities/forking/)
#
#
#
# <a id='version-control-commandline'></a>
# ### Command-Line Basics
#
# Git also comes with a set of command-line tools
#
# They’re optional, but many people like using them
#
# Furthermore, in some environments (e.g. JupyterHub installations) you may only have access to the command line
#
# - On Windows, downloading `git` will have installed a program called `git bash`, which installs these tools along with a general Linux-style shell
# - On Linux/MacOS, these tools are integrated into your usual terminal
#
#
# To open the terminal in a directory, either right click and hit “open git bash” (in Windows), or use Linux commands like `cd` and `ls` to navigate
#
# See [here](https://www.git-tower.com/learn/git/ebook/en/command-line/appendix/command-line-101) for a short introduction to the command line
#
# As above, you can clone by grabbing the repo URL (say, GitHub’s [site-policy repo](https://github.com/github/site-policy/)) and running `git clone https://github.com/github/site-policy.git`
#
# This won’t be connected to your GitHub Desktop, so you’d need to use it manually (`File => Add Local Repository`) or drag-and-drop from the file explorer onto the GitHub Desktop
#
# <img src="https://s3-ap-southeast-2.amazonaws.com/lectures.quantecon.org/jl/_static/figures/git-add-local.png" style="width:100%;height:100%">
#
#
# From here, you can get the latest files on the server by `cd`-ing into the directory and running `git pull`
#
# When you `pull` from the server, it will never overwrite your modified files, so it is impossible to lose local changes
#
# Instead, to do a hard reset of all files and overwrite any of your local changes, you can run `git reset --hard origin/master`
# ## Exercises
# ### Exercise 1a
#
# Follow the instructions to create a [new repository](#new-repo-workflow) for one of your GitHub accounts
# In this repository
#
# - Take the code from one of your previous assignments, such as [Newton’s method](getting_started_julia/julia_by_example.html#jbe-ex8a) in [Introductory Examples](getting_started_julia/julia_by_example.html) (either as a `.jl` file or a Jupyter notebook)
# - Put in a `README.md` with some text
# - Put in a `.gitignore` file, ignoring the Jupyter files `.ipynb_checkpoints` and the project files, `.projects`
# ### Exercise 1b
#
# Pair-up with another student who has done Exercise 1a and find out their GitHub ID, and each do the following
#
# - Add the GitHub ID as a collaborator on your repository
# - Clone the repositories to your local desktop
# - Assign each other an issue
# - Submit a commit from GitHub Desktop which references the issue by number
# - Comment on the commits
# - Ensure you can run their code without any modifications
# ### Exercise 1c
#
# Working in the same pairs as in Exercise 1b, examine a merge conflict by editing the `README.md` file of the repository that you have both set up as collaborators
#
# Start by ensuring there are multiple lines in the file so that some changes may have conflicts, and some may not
#
# - Clone the repository to your local desktops
# - Modify **different** lines of code in the file and both commit and push to the server (prior to pulling from each other)–and see how it merges things “automatically”
# - Modify **the same** line of code in the file, and deal with the [merge conflict](#merge-conflict)
# ### Exercise 2a
#
# Just using GitHub’s [web interface](#web-interface), submit a Pull Request for a simple change of documentation to a public repository
#
# The easiest may be to submit a PR for a typo in the source repository for these notes, i.e. `https://github.com/QuantEcon/lecture-source-jl`
#
# Note: The source for that repository is in `.rst` files, but you should be able to find spelling mistakes/etc. without much effort
# ### Exercise 2b
#
# Following the [instructions](#fork-workflow) for forking and cloning a public repository to your local desktop, submit a Pull Request to a public repository
#
# Again, you could submit it for a typo in the source repository for these notes, i.e. `https://github.com/QuantEcon/lecture-source-jl`, but you are also encouraged to instead look for a small change that could help the documentation in another repository.
#
# If you are ambitious, then go to the Exercise Solutions for one of the Exercises in these lecture notes and submit a PR for your own modified version (if you think it is an improvement!)
| more_julia/version_control.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: 'Python 3.6.13 64-bit (''venv'': conda)'
# name: python3
# ---
import os
os.chdir('..')
import h5py
import numpy as np
import cartopy.crs as ccrs
from notebooks import config
import numpy as np
from utils.imgShow import imgShow
import matplotlib.pyplot as plt
from utils.geotif_io import readTiff
from utils.transform_xy import coor2coor
from utils.mad_std import mad_std
from scipy.optimize import curve_fit
path_img = config.root + '/data/rs_img/pine_island_S3A_20180207T115228_20180207T115528.tif'
path_atl06_spot1_A = config.root + '/data/icesat2/land_ice_antarctic/preprocessed/pineisland_atl06_spot1_A.h5'
path_atl06_spot1_D = config.root + '/data/icesat2/land_ice_antarctic/preprocessed/pineisland_atl06_spot1_D.h5'
path_atl06_A = config.root + '/data/icesat2/land_ice_antarctic/preprocessed/pineisland_atl06_A.h5'
path_atl06_D = config.root + '/data/icesat2/land_ice_antarctic/preprocessed/pineisland_atl06_D.h5'
# Load the Sentinel-3 background image and the preprocessed ICESat-2 ATL06
# ascending/descending ground tracks over Pine Island Glacier.
s3_img, s3_img_info = readTiff(path_img)
print(s3_img_info)
# +
# with h5py.File(path_atl06_spot1_A,'r') as f_a:
with h5py.File(path_atl06_A,'r') as f_a:
    print(f_a.keys())
    # Ascending-track variables: geodetic position, surface elevation,
    # decimal-year time stamp, and laser-spot id.
    lat_A = f_a['lat'][:]
    lon_A = f_a['lon'][:]
    h_elv_A = f_a['h_elv'][:]
    t_yrs_A = f_a['t_year'][:]
    spot_A = f_a['spot'][:]
# with h5py.File(path_atl06_spot1_D,'r') as f_d:
with h5py.File(path_atl06_D,'r') as f_d:
    print(f_d.keys())
    # Descending-track variables, same layout as above.
    lat_D = f_d['lat'][:]
    lon_D = f_d['lon'][:]
    h_elv_D = f_d['h_elv'][:]
    t_yrs_D = f_d['t_year'][:]
    spot_D = f_d['spot'][:]
## filtering the invalid points.
## -- remove outlier values: keep elevations in a plausible [-500, 9000] m range
## ascending
idx_valid = np.where( (h_elv_A >= -500) & (h_elv_A <= 9000) )
lat_A, lon_A, h_elv_A = lat_A[idx_valid], lon_A[idx_valid], h_elv_A[idx_valid],
t_yrs_A, spot_A = t_yrs_A[idx_valid], spot_A[idx_valid]
## descending
idx_valid = np.where( (h_elv_D >= -500) & (h_elv_D <= 9000) )
lat_D, lon_D, h_elv_D = lat_D[idx_valid], lon_D[idx_valid], h_elv_D[idx_valid]
t_yrs_D, spot_D = t_yrs_D[idx_valid], spot_D[idx_valid]
# Project lon/lat (EPSG:4326) into the image's map projection for plotting.
x_A, y_A = coor2coor(srs_from=4326, srs_to=s3_img_info['geosrs'], x=lon_A, y=lat_A)
x_D, y_D = coor2coor(srs_from=4326, srs_to=s3_img_info['geosrs'], x=lon_D, y=lat_D)
# +
# Draw the ascending (red) and descending (green) tracks on top of the
# Sentinel-3 image; only every 100th point is plotted to keep the figure light.
fig = plt.figure(figsize=(9,10))
ax = plt.axes(projection=ccrs.SouthPolarStereo())
imgShow(s3_img, extent=s3_img_info['geoextent'], \
        color_bands=(0, 1, 2), clip_percent=5)
# plt.scatter(lon_A_proj[::100], lat_A_proj[::100], s=3, c=h_elv_A[::100], alpha=.7, cmap='terrain')
# plt.scatter(lon_D_proj[::100], lat_D_proj[::100], s=3, c=h_elv_D[::100], alpha=.7, cmap='terrain')
plt.scatter(x_A[::100], y_A[::100], s=3, alpha=.7, c='red')
plt.scatter(x_D[::100], y_D[::100], s=3, alpha=.7, c='green')
plt.clim([100,1400])
plt.colorbar(fraction=0.0320, pad=0.02, label='Elevation (m)')
ax.coastlines('50m')
# # !python utils_main/xover_.py -h
# +
# # !python utils_main/xover_.py data/icesat2/land_ice_antarctic/preprocessed/pineisland_atl06_A.h5 data/icesat2/land_ice_antarctic/preprocessed/pineisland_atl06_D.h5 -o data/icesat2/land_ice_antarctic/preprocessed/pineisland_xovers.h5 -p 3031 -d 10 -k 20 -v spot lon lat t_year h_elv
# +
# Load the crossover points produced by utils_main/xover_.py: elevation and
# time differences where ascending and descending tracks intersect.
with h5py.File('data/icesat2/land_ice_antarctic/preprocessed/pineisland_xovers.h5','r') as f_xo:
    lon_xovers = f_xo['lon'][:]
    lat_xovers = f_xo['lat'][:]
    t_xovers = f_xo['t_year_as'][:]    # acquisition time of the ascending pass
    dh_xovers = f_xo['h_elv_dif'][:]   # elevation difference at the crossover
    dt_xovers = f_xo['t_year_dif'][:]  # time difference (years) at the crossover
    print(f_xo.keys())
## filtering invalid points, np.abs(dh) < 5
idx_valid = np.where( (np.abs(dh_xovers) <= 5) )
lon_xovers, lat_xovers = lon_xovers[idx_valid], lat_xovers[idx_valid]
t_xovers, dt_xovers, dh_xovers = t_xovers[idx_valid], dt_xovers[idx_valid], dh_xovers[idx_valid]
# Per-crossover elevation-change rate (m/yr).
dhdt_xovers = dh_xovers/dt_xovers
# Project to Antarctic Polar Stereographic (EPSG:3031) for mapping.
lon_xovers_proj, lat_xovers_proj = coor2coor('4326', '3031', lon_xovers, lat_xovers)
# +
### define fitting function
def func_exp(x, a, b, c):
return a * np.exp(-b * x) + c
def func_linear(x, a, b):
    """Straight-line model: a * x + b."""
    return b + x * a
# Fit dh vs dt at the crossovers; a linear model is used (exponential
# alternative kept commented out for comparison).
# popt, pcov = curve_fit(func_exp, xdata=dt_xovers, ydata=dh_xovers)
popt, pcov = curve_fit(func_linear, xdata=dt_xovers, ydata=dh_xovers)
### visualization: scatter of crossover differences with the fitted line
fig, ax = plt.subplots(figsize=(12,4))
ax.plot(dt_xovers, dh_xovers, '.', markersize=2)
ax.plot(dt_xovers, func_linear(dt_xovers, *popt), 'r-',
        label='linear fit: a=%3.3f, b=%3.3f' % tuple(popt))
# ax.plot(dt_xovers, func_exp(dt_xovers, *popt), 'r-',
#         label='linear fit: a=%3.3f, b=%3.3f, c=%3.3f' % tuple(popt))
ax.axhline(y=0, color='g', label='y=0')
ax.legend()
plt.ylim(-3, 3)
plt.ylabel('dh_xovers')
plt.xlabel('dt_xovers')
# -
# ## Distribution of the crossover points:
# 1) all the crossover points;
# 2) that time interval larger 3 months.
# +
# Side-by-side maps of the crossover elevation-change rates:
# left = all crossovers, right = only those with |dt| > 3 months.
fig = plt.figure(figsize=(15,6))
ax = plt.axes(projection=ccrs.SouthPolarStereo())
### all crossover points
plt.subplot(1,2,1)
imgShow(s3_img, extent=s3_img_info['geoextent'], color_bands=(0, 1, 2), clip_percent=5)
plt.scatter(lon_xovers_proj, lat_xovers_proj, s=10, c=dh_xovers/dt_xovers, \
            alpha=.7, cmap='coolwarm_r')
plt.clim([-1.,1.])
plt.colorbar(fraction=0.0320, pad=0.02, label='Elevation Change (m/yr)')
plt.title('all crossover points')
### filtered crossover points: dt > 3 month
plt.subplot(1,2,2)
# Short time baselines make dh/dt noisy; keep pairs at least 3 months apart.
ids = np.abs(dt_xovers)> 3./12
imgShow(s3_img, extent=s3_img_info['geoextent'], color_bands=(0, 1, 2), clip_percent=5)
plt.scatter(lon_xovers_proj[ids], lat_xovers_proj[ids], s=10, c=dh_xovers[ids]/dt_xovers[ids], \
            alpha=.7, cmap='coolwarm_r')
plt.clim([-1.,1.])
plt.yticks([])
plt.colorbar(fraction=0.0320, pad=0.02, label='Elevation Change (m/yr)')
plt.title('filtered xover points by dt')
| notebooks/xover_analysis.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# language: python
# name: python3
# ---
# <center> <h1> PNB - Contemporary Problems </h1></center>
# <center><H2> Open Science </H2></center>
# <h1></h1>
#
# <center><img src ="img/mcm-sci-pnb_left-col_eps.png"></center>
# <center><img src ="img/open_science_venn.jpg" width=60%></center>
# Dr. <NAME>
# + [markdown] slideshow={"slide_type": "slide"}
# # Compliance with Tri-Council open access policy
#
#
# + [markdown] slideshow={"slide_type": "slide"}
# <center><img src="img/tri-agency_venn_diagram.gif"></center>
#
#
# <div class="alert alert-success">
#
# "As publicly funded organizations, the Agencies have a fundamental interest in promoting the availability of findings that result from the research they fund, including research publications and data, to the widest possible audience, and at the earliest possible opportunity."
#
# </div>
#
#
#
# + [markdown] slideshow={"slide_type": "slide"}
# # Policy Statement
# ## Peer-reviewed Journal Publications
#
# Grant recipients are required to ensure that any peer-reviewed journal publications arising from Agency-supported research are freely accessible within 12 months of publication. Recipients can do this through one of the following routes:
#
# + [markdown] slideshow={"slide_type": "subslide"}
# ### Online Repositories
# - Grant recipients can deposit their final, peer-reviewed manuscript into an institutional or disciplinary repository that will make the manuscript freely accessible within 12 months of publication. It is the responsibility of the grant recipient to determine which publishers allow authors to retain copyright and/or allow authors to archive journal publications in accordance with funding agency policies.
# + [markdown] slideshow={"slide_type": "subslide"}
# ### Journals
#
# - Grant recipients can publish in a journal that offers immediate open access or that offers open access on its website within 12 months. Some journals require authors to pay article processing charges (APCs) to make manuscripts freely available upon publication. The cost of publishing in open access journals is an eligible expense under the Use of Grant Funds.
# + [markdown] slideshow={"slide_type": "subslide"}
# These routes to open access are not mutually exclusive. Researchers are strongly encouraged to deposit a copy of the final, peer-reviewed manuscript into an accessible online repository immediately upon publication, even if the article is freely available on the journal’s website.
#
# Grant recipients must acknowledge Agency contributions in all peer-reviewed publications, quoting the funding reference number (e.g. FRN, Application ID).
# + [markdown] slideshow={"slide_type": "slide"}
# # Publication-related Research Data
# ## CIHR only
# ### Recipients of CIHR funding are required to adhere to the following responsibilities:
# - Deposit bioinformatics, atomic, and molecular coordinate data into the appropriate public database (e.g. gene sequences deposited in GenBank) immediately upon publication of research results. Please refer to the Annex for examples of research outputs and the corresponding publicly accessible repository or database.
#
# - Retain original data sets for a minimum of five years after the end of the grant (or longer if other policies apply). This applies to all data, whether published or not. The grant recipient's institution and research ethics board may have additional policies and practices regarding the preservation, retention, and protection of research data that must be respected.
# + [markdown] slideshow={"slide_type": "slide"}
# # How does this policy apply to graduate students and post-doctoral fellows?
#
# Individuals in receipt of graduate scholarships and fellowships are not required to adhere to the Tri-Agency Open Access Policy on Publications, although the Agencies encourage open access to all research publications.
# + [markdown] slideshow={"slide_type": "slide"}
# # Implementation Date
# ## CIHR
#
# - For research funded in whole or in part by CIHR, this policy applies to all grants awarded January 1, 2008 and onward. While not required, researchers holding grants that were awarded prior to January 1, 2008 are encouraged to adhere to the requirements of this policy.
# + [markdown] slideshow={"slide_type": "fragment"}
# ## NSERC and SSHRC
# - For research funded in whole or in part by NSERC or SSHRC, this policy applies to all grants awarded May 1, 2015 and onward. While not required, researchers holding grants that were awarded prior to May 1, 2015 are encouraged to adhere to the requirements of this policy.
# + [markdown] slideshow={"slide_type": "subslide"}
# # Grace periods
# - If your grant was awarded before 2008 (CIHR only) or 2015 (NSERC and SSHRC), you do not have to comply
# - It is unlikely you will be in this situation going forward.
# + [markdown] slideshow={"slide_type": "slide"}
# # Does Tri-Council Really Care?
#
#
# ## If you don't comply with this policy, here is what could happen:
#
#
# - They write you a letter
# - They ask you to fix it
# - They will stop accepting future applications
# - Terminating remaining installments of grant or award
# - Seeking a refund within a defined time frame of all or part of the funds already paid;
# - Advising the researcher that the Agency will not consider him/her to serve on agency committees (e.g. peer review, advisory boards)
# - Such other recourse available by law.
#
# It would be terrible if you lost your grant or worse because you didn't post a paper online.
# + [markdown] slideshow={"slide_type": "slide"}
# # Open access costs a lot of money
#
# <div>
# <h2>Publishers' Open Access article processing charges in USD</h2>
# </br>
# <h3>A few examples dear to home...</h3>
# <li> Journal of Neuroscience \$3,085 USD </li>
# <li> Cell \$5000 USD </li>
#
# <table border="1" cellpadding="1" cellspacing="1" style="width: 500px;"><thead><tr><th scope="col">Publisher</th>
# <th scope="col">APC from</th>
# <th scope="col">APC to</th>
# <th scope="col">Fully OA or Hybrid?</th>
# </tr></thead><tbody><tr><td><a href="http://pubs.acs.org/page/4authors/authorchoice/index.html">American Chemical Society</a></td>
# <td>\$5,000</td>
# <td>\$5,000</td>
# <td>Hybrid</td>
# </tr><tr><td><a href="https://www.biomedcentral.com/about/publication-costs-and-funding">BioMed Central</a></td>
# <td>\$1,290</td>
# <td>\$2,580</td>
# <td>Open Access</td>
# </tr><tr><td><a href="https://www.cambridge.org/core/services/open-access-policies/open-access-journals/gold-open-access-journals">Cambridge University Press</a></td>
# <td>\$600</td>
# <td>\$4,500</td>
# <td>Hybrid</td>
# </tr><tr><td><a href="https://www.elsevier.com/about/company-information/policies/pricing">Elsevier</a></td>
# <td>\$100</td>
# <td>\$5,000</td>
# <td>Hybrid</td>
# </tr><tr><td><a href="https://www.hindawi.com/apc/">Hindawi</a></td>
# <td>\$550</td>
# <td>\$2,250</td>
# <td>Open Access</td>
# </tr><tr><td><a href="http://www.mdpi.com/about/apc#amount-apc">MDPI</a></td>
# <td>CHF 300</td>
# <td>CHF 2,000</td>
# <td>Open Access</td>
# </tr><tr><td><a href="http://www.nature.com/openresearch/publishing-with-npg/nature-journals/">Nature Publishing Group</a></td>
# <td>\$1,100</td>
# <td>\$5,200</td>
# <td>Hybrid</td>
# </tr><tr><td><a href="https://www.plos.org/publication-fees">PLOS</a></td>
# <td>\$1,600</td>
# <td>\$3,000</td>
# <td>Open Access</td>
# </tr><tr><td><a href="https://uk.sagepub.com/en-gb/eur/sage-choice-journal-and-pricing-exceptions">SAGE</a></td>
# <td>\$400</td>
# <td>\$3,000</td>
# <td>Hybrid</td>
# </tr><tr><td><a href="http://www.springer.com/gp/open-access/springer-open-choice">Springer</a></td>
# <td>\$3,000</td>
# <td>\$3,000</td>
# <td>Hybrid</td>
# </tr><tr><td><a href="http://authorservices.taylorandfrancis.com/journal-list/">Taylor & Francis</a></td>
# <td>\$500</td>
# <td>\$2,950</td>
# <td>Open Access</td>
# </tr><tr><td><a href="https://authorservices.wiley.com/author-resources/Journal-Authors/open-access/article-publication-charges.html">Wiley</a></td>
# <td>\$1,300</td>
# <td>\$5,200</td>
# <td>Hybrid</td>
# </tr></tbody></table>
# </div>
#
# - From: https://www.openaccess.cam.ac.uk/paying-open-access/how-much-do-publishers-charge-open-access
#
# - Last updated: October 2018
# + [markdown] slideshow={"slide_type": "slide"}
# # This is crazy! How am I supposed to afford this?
#
# - Win the lottery
# - Get a really big grant
# - Get your fees waived from a journal
# - Use Free Online Repositories
#
# <img src = "img/more_money.jpeg" width=25%>
# + [markdown] slideshow={"slide_type": "subslide"}
# # Fee waivers
# - Do not rely on fee waivers
# - If you cannot afford to buy your journal article, contact the journal before submitting
# - Many journals have fee waiving policies
# - Do your homework
# - Having said that, I've been successful at getting fees waived several times after submitting the paper
# - Here is how in 3 easy steps:
# 1) Do not have a grant
# 2) Send proof that you do not have grant funding to the journal
# 3) Cross fingers/send good vibes/pray
# - Be prepared to send article to a new journal that does not cost money
#
# + [markdown] slideshow={"slide_type": "slide"}
# # Have a grant but still don't want to pay any money?
#
# ## Say no more fam. I got you!
#
#
# ## Online repositories are free and open access servers for pre and post prints
#
# <img src = "img/braveheart.jpg" width = 50%>
#
# + [markdown] slideshow={"slide_type": "slide"}
# # "Somebody" may have told you that posting your paper online violates copyright law
#
# ### Times have changed
#
# - Most journals have a 12 month embargo period
# - Go look up your journal
# - Most journals exclude preprints
# - Most journals allow publication on private websites (ie those with passwords to log in)
#
# + [markdown] slideshow={"slide_type": "subslide"}
# # Here is Elsevier's policy
#
#
# ## Author Rights for Scholarly Purposes
# I understand that I retain or am hereby granted (without the need to obtain further permission) the Author Rights (see description below), and that no rights in patents, trademarks or other intellectual property rights are transferred to Elsevier Ltd.
# The Author Rights include the right to use the Preprint, Accepted Manuscript and the Published Journal Article for Personal Use and Internal Institutional Use. They also include the right to use these different versions of the Article for Scholarly Sharing purposes, which include sharing:
#
# - the Preprint on any website or repository at any time;
# - the Accepted Manuscript on certain websites and usually after an embargo period;
# - the Published Journal Article only privately on certain websites, unless otherwise agreed by Elsevier Ltd.
# + [markdown] slideshow={"slide_type": "slide"}
# # Preprints vs Postprints
#
# ## Preprints
# <div class="alert alert-danger">
# Read about your journals' policies before posting and/or alerting a journal that you have posted a preprint. You will find that some journals still do not publish articles that have been preprinted. Other journals will not accept citations from preprints.
# </div>
#
#
# - Preprints are posted online before final acceptance from a journal
# - There is no requirement that a preprint has been peer reviewed
# - Often preprints are submitted at the same time that an author submits it to a journal.
# - We will have a session on pre-prints this afternoon
#
#
# ## Postprints
# Posting your paper online after it is accepted means it's a postprint.
# + [markdown] slideshow={"slide_type": "subslide"}
# ## How to decide between preprint and postprint?
# - Is your paper accepted in final form already?
# - You need a postprint
# - Is your paper unpublished?
# - You could do either
# + [markdown] slideshow={"slide_type": "slide"}
# # Online Repositories
#
# ## Here is the list of websites that can help you find repositories where you could postprint your papers (from science.gc.ca)
#
#
# <p><b>  Repositories</b></p>
# <ul>
# <li><a href="http://www.carl-abrc.ca/advancing-research/institutional-repositories/">Canadian Institutional Repositories</a> – Domestic information on publicly available archives or repositories</li>
# <li><a href="http://v2.sherpa.ac.uk/opendoar/">Directory of Open Access Repositories</a> – International database of repositories</li>
# <li><a href="https://depot.erudit.org/?locale=en">Erudit</a> – French Language repository</li>
# </ul>
# + [markdown] slideshow={"slide_type": "subslide"}
# <p><b>  Open Access Resources and Directories</b></p>
#
# <ul>
# <li><a href="http://scienceetbiencommun.org/?q=node/22">Association science et bien commun</a> (available in French only) – Resources on how to promote open access to research results.</li>
# <li><a href="http://www.base-search.net/">Bielefeld Academic Search Engine (BASE)</a> – A search engine for academic open access web resources.</li>
# <li><a href="http://www.carl-abrc.ca/advancing-research/scholarly-communication/open-access/" title="This link opens in a new window">Canadian Association of Research Libraries</a> (CARL) – Resources on how to promote open access to research.</li>
# <li>CARL/ SPARC <a href="http://www.carl-abrc.ca/how-to-assess-a-journal/">Brochure</a> and <a href="http://www.carl-abrc.ca/doc/EngPubAgree.pdf">Canadian Author Addendum</a></li>
# <li><a href="http://www.doaj.org/">Directory of Open Access Journals</a> (DOAJ) - A comprehensive list of free, full text, quality controlled scientific and scholarly journals.</li>
# <li><a href="http://www.opendoar.org/">Directory of Open Access Repositories (OpenDOAR)</a> - Directory of academic open access repositories</li>
# <li><a href="http://www.sherpa.ac.uk/juliet/">SHERPA/JULIET</a> - Summaries of research funder open access policies.</li>
# <li><a href="http://www.sherpa.ac.uk/romeo.php">SHERPA/RoMEO</a> - Provides a searchable database of publisher policies on copyright and archiving</li>
# </ul>
#
# + [markdown] slideshow={"slide_type": "subslide"}
# ### McMaster provides MacSphere for postprints and all other open access article types
# More on MacSphere next session
# + [markdown] slideshow={"slide_type": "slide"}
# # Here is a real list of places you can post postprints
#
# You would need to submit the ugly word doc final accepted version, not the pretty proofed/journal formatted version
#
# - Your own website
# - Your co-author's institutional repository
# - <a href = "https://macsphere.mcmaster.ca/">Mac Sphere</a>
# - The info session on Mac Sphere is up next.
# - Sites like <a href = "academia.edu">academia.edu</a>
#
#
#
# ### The real truth is that you could post your postprint anywhere it is allowed and publicly searchable.
# You could even make youtube videos of you holding up a word doc of your paper and read it aloud.
# + [markdown] slideshow={"slide_type": "slide"}
# # Online Repositories
# ## Here is the list of repositories where you could post your preprints of your papers :
#
# - OSF Preprints
# - Selected Open Science Framework Partner Repositories:
# - PsyArxiv
# - bioRxiv
# - MediArXiv
# - MetaArXiv
# - PeerJ
# - Preprints.org
# - PsyArXiv
# - SocArXiv
# - <a href = "https://macsphere.mcmaster.ca/">Mac Sphere</a>
# - The info session on Mac Sphere is up next.
# + [markdown] slideshow={"slide_type": "slide"}
# # Where can I post my data for CIHR?
#
# - <a href="http://oad.simmons.edu/oadwiki/Data_repositories">Simmons list of open access Data Repositories</a>
# - <a href="https://data.mendeley.com/">Mendeley – owned by Elsevier</a>
# - <a href="http://www.science.gc.ca/eic/site/063.nsf/eng/h_94D49094.html">Check the Annex</a>
# + [markdown] slideshow={"slide_type": "slide"}
# # Questions and discussion
| 01-Tri-Council-Open-Access.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# language: python
# name: python3
# ---
# + id="I95S5GeUMsc-"
# # the imports in this cell are required when running on local device
# import os, sys
# sys.path.append(os.path.join('..', '..'))
# from utils.applyML_util import train_classification, eval_classification
# from utils.featureSelection_util import (pearson_correlation_fs,
# seleckKBest_fs, selectSequential_fs)
# + id="oxumaP1_Oxv4"
# the imports in this cell are required when running from Cloud (Colab/Kaggle)
# before running on cloud you nee to upload the .py files
# from 'Notebooks/utils' directory
from applyML_util import train_classification, eval_classification, showEvalutationGraph_classification
from featureSelection_util import (pearson_correlation_fs,
seleckKBest_fs, selectSequential_fs)
# + [markdown] id="UJ4-Bu_iQt6r"
# **MLP (aka Artificial Neural Networks) Documentation link:** https://scikit-learn.org/stable/modules/generated/sklearn.neural_network.MLPClassifier.html
# + id="1tVvVKjkQsmG"
import pandas as pd
import numpy as np
import matplotlib.pyplot as plt
from sklearn.neural_network import MLPClassifier
# ignore warnings
import warnings
warnings.filterwarnings('ignore')
# + id="K0b41I0RRkkE"
# global random seed for reproducibility
RAND_SEED = 42

# initial model with only the random seed and no tuned hyper-parameters
initial_model = MLPClassifier(random_state=RAND_SEED)

# hyper-parameter search space
# (100,) is an explicit single-hidden-layer tuple; the original (100) was
# just the int 100 — sklearn coerces both to one layer of 100 units, but
# the tuple states the intent unambiguously.
hidden_layer_sizes = [(32, 16), (100,)]
activation = ['relu']
solver = ['adam']
alpha = [0.0001, 0.001, 0.01]
max_iter = [x*5 for x in range(1, 41)]  # 5, 10, ..., 200

# dictionary of all hyper-parameters for the grid search
param_grid = {
    'hidden_layer_sizes': hidden_layer_sizes,
    'activation': activation,
    'solver': solver,
    'alpha': alpha,
    'max_iter': max_iter
}

# variables needed for the showEvalutationGraph_classification() function
MODEL_CLASS = MLPClassifier
class_label = 'Rainfall'
x_axis_param_name = 'max_iter'
x_axis_param_vals = max_iter
# + [markdown] id="sMvRYb6AQLj9"
# ## 1. Experimentation on the Weather Daily Dataset
# + id="bNmGnNV1QPE8"
# Load the train dataset (fetched over the network from GitHub raw URLs)
weather_daily_train_df = pd.read_csv('https://raw.githubusercontent.com/ferdouszislam/Weather-WaterLevel-Prediction-ML/main/Datasets/brri-datasets/final-dataset/train/brri-weather_train_classification.csv')
# Load the test set
weather_daily_test_df = pd.read_csv('https://raw.githubusercontent.com/ferdouszislam/Weather-WaterLevel-Prediction-ML/main/Datasets/brri-datasets/final-dataset/test/brri-weather_test_classification.csv')
# + [markdown] id="z0BMd6EBK6PQ"
# ### 1.0 No technique
# + colab={"base_uri": "https://localhost:8080/"} id="N8lrGQEjMmhO" outputId="33c11ba0-13d9-4d42-aeb8-5e5e2b2b5380"
# Section 1.0 (baseline): grid-search-train on the raw daily dataset,
# plot train performance vs max_iter, then evaluate on the test set.
# train model
model, selected_hyperparams, train_accuracy, train_f1 = train_classification(initial_model, param_grid, weather_daily_train_df, cls=class_label)
print(f'Selected hyperparameters: {selected_hyperparams}')
# performance on the train set
print(f'Train set performance: accuracy={train_accuracy}, macro-f1={train_f1}')
# + colab={"base_uri": "https://localhost:8080/", "height": 403} id="_D5Pz_IM2dUj" outputId="ad0d4d20-b601-47c0-9b52-4195356433e1"
# graph on train set performance
# hyper-parameters selected by GridSearchCV
# NOTE: this aliases the dict — adding 'random_state' below also mutates
# selected_hyperparams.
selected_model_params = selected_hyperparams
selected_model_params['random_state'] = RAND_SEED
showEvalutationGraph_classification(MODEL_CLASS, weather_daily_train_df, cls=class_label,
                                    x_axis_param_name=x_axis_param_name, x_axis_param_vals=x_axis_param_vals,
                                    selected_model_params=selected_model_params)
# + colab={"base_uri": "https://localhost:8080/"} id="fj1I73i2WBYF" outputId="17d313fa-cf48-438c-dfc1-381a3efc09e3"
# test model
test_accuracy, test_f1, test_auc = eval_classification(model, weather_daily_test_df, cls=class_label)
# performance on the test set
print(f'Test set performance: accuracy={test_accuracy}, macro-f1={test_f1}, auc={test_auc}')
# + [markdown] id="VcGsKgTkDS60"
# ### 1.1 Apply Pearson Feature Selection to Daily Weather Dataset
# + colab={"base_uri": "https://localhost:8080/"} id="xUGFfg6FDSB3" outputId="cc24bef8-6bea-44bb-c6dc-5672db7445e8"
# Section 1.1: drop features that fail the Pearson-correlation filter,
# then repeat the train/graph/test cycle on the reduced feature set.
# select features from the train dataset
weather_daily_fs1_train_df, cols_to_drop = pearson_correlation_fs(weather_daily_train_df, class_label)
# keep only selected features on the test dataset
weather_daily_fs1_test_df = weather_daily_test_df.drop(columns=cols_to_drop)
# + colab={"base_uri": "https://localhost:8080/"} id="Z4Aj4bDCEBFE" outputId="f3fc703a-2532-494e-8719-9cbebcc503a9"
# train model
model, selected_hyperparams, train_accuracy, train_f1 = train_classification(initial_model, param_grid, weather_daily_fs1_train_df, cls=class_label)
print(f'Selected hyperparameters: {selected_hyperparams}')
# performance on the train set
print(f'Train set performance: accuracy={train_accuracy}, macro-f1={train_f1}')
# + colab={"base_uri": "https://localhost:8080/", "height": 408} id="Wnlx9lRX6cOT" outputId="5b58f001-817b-40e1-9577-45a5459ee7df"
# graph on train set performance
# hyper-parameters selected by GridSearchCV
selected_model_params = selected_hyperparams
selected_model_params['random_state'] = RAND_SEED
showEvalutationGraph_classification(MODEL_CLASS, weather_daily_fs1_train_df, cls=class_label,
                                    x_axis_param_name=x_axis_param_name, x_axis_param_vals=x_axis_param_vals,
                                    selected_model_params=selected_model_params)
# + colab={"base_uri": "https://localhost:8080/"} id="Q56t8VALEOLV" outputId="611313dc-9ad5-4876-ee0b-2da1b22d594a"
# test model
test_accuracy, test_f1, test_auc = eval_classification(model, weather_daily_fs1_test_df, cls=class_label)
# performance on the test set
print(f'Test set performance: accuracy={test_accuracy}, macro-f1={test_f1}, auc={test_auc}')
# + [markdown] id="r0f0shnaTaEd"
# ### 1.2 Apply SelectKBest Feature Selection to Daily Weather Dataset
# + colab={"base_uri": "https://localhost:8080/"} id="aP0zT8cDTaEe" outputId="bec36b8a-7206-4a25-da4f-d7abb2779907"
# Section 1.2: SelectKBest feature selection, then train/graph/test.
# select features from the train dataset
weather_daily_fs2_train_df, cols_to_drop = seleckKBest_fs(weather_daily_train_df, class_label, is_regression=False)
print('features dropped:', cols_to_drop)
# keep only selected features on the test dataset
weather_daily_fs2_test_df = weather_daily_test_df.drop(columns=cols_to_drop)
# + colab={"base_uri": "https://localhost:8080/"} id="obBlfL4DTaEg" outputId="ed41bea9-bb8b-4cd9-fdcc-aab794775571"
# train model
model, selected_hyperparams, train_accuracy, train_f1 = train_classification(initial_model, param_grid, weather_daily_fs2_train_df, cls=class_label)
print(f'Selected hyperparameters: {selected_hyperparams}')
# performance on the train set
print(f'Train set performance: accuracy={train_accuracy}, macro-f1={train_f1}')
# + colab={"base_uri": "https://localhost:8080/", "height": 408} id="FmIkmj59TaEh" outputId="2760d0f0-452d-41cf-a255-f767009e045f"
# accuracy/f1 graph on the train set (comment previously said "r2-scores",
# a leftover from the regression notebook)
# hyper-parameters selected by GridSearchCV
selected_model_params = selected_hyperparams
selected_model_params['random_state'] = RAND_SEED
showEvalutationGraph_classification(MODEL_CLASS, weather_daily_fs2_train_df, cls=class_label,
                                    x_axis_param_name=x_axis_param_name, x_axis_param_vals=x_axis_param_vals,
                                    selected_model_params=selected_model_params)
# + colab={"base_uri": "https://localhost:8080/"} id="DFeHpfLrTaEh" outputId="5ff6a496-0c21-4184-bac0-1a11b418111e"
# test model
test_accuracy, test_f1, test_auc = eval_classification(model, weather_daily_fs2_test_df, cls=class_label)
# performance on the test set
print(f'Test set performance: accuracy={test_accuracy}, macro-f1={test_f1}, auc={test_auc}')
# + [markdown] id="uPWtfgzLLYJI"
# ### 1.3 SMOTE on Daily Dataset
# + colab={"base_uri": "https://localhost:8080/"} id="9Q9UhID9LYJI" outputId="7fcc4b4a-f6b1-4526-a313-084641d197fd"
# Section 1.3: same pipeline with SMOTE oversampling of the minority class.
# train model
model, selected_hyperparams, train_accuracy, train_f1 = train_classification(initial_model, param_grid, weather_daily_train_df,
                                                                             cls=class_label, sampling_technique='smote')
print(f'Selected hyperparameters: {selected_hyperparams}')
# performance on the train set
print(f'Train set performance: accuracy={train_accuracy}, macro-f1={train_f1}')
# + id="sdm9243wLYJJ"
# accuracy/f1 graph on the train set (comment previously said "r2-scores",
# a leftover from the regression notebook)
# hyper-parameters selected by GridSearchCV
selected_model_params = selected_hyperparams
selected_model_params['random_state'] = RAND_SEED
showEvalutationGraph_classification(MODEL_CLASS, weather_daily_train_df, cls=class_label,
                                    x_axis_param_name=x_axis_param_name, x_axis_param_vals=x_axis_param_vals,
                                    selected_model_params=selected_model_params)
# + colab={"base_uri": "https://localhost:8080/"} id="y8RoOykDLYJK" outputId="3da6a6ea-b892-43da-a90e-a8ec25988d6f"
# test model
test_accuracy, test_f1, test_auc = eval_classification(model, weather_daily_test_df, cls=class_label)
# performance on the test set
print(f'Test set performance: accuracy={test_accuracy}, macro-f1={test_f1}, auc={test_auc}')
# + [markdown] id="tCwFqnH6Lq17"
# ### 1.4 Random Undersampling + SMOTE on Daily Dataset
# + colab={"background_save": true, "base_uri": "https://localhost:8080/"} id="lTdi5bbLLq17" outputId="6d4b9316-8685-42db-f982-bc1acd52f5bf"
# Section 1.4: hybrid sampling (random undersampling + SMOTE).
# train model
model, selected_hyperparams, train_accuracy, train_f1 = train_classification(initial_model, param_grid, weather_daily_train_df,
                                                                             cls=class_label, sampling_technique='hybrid')
print(f'Selected hyperparameters: {selected_hyperparams}')
# performance on the train set
print(f'Train set performance: accuracy={train_accuracy}, macro-f1={train_f1}')
# + id="76m-rcuvLq18"
# graph on train set performance
# hyper-parameters selected by GridSearchCV
selected_model_params = selected_hyperparams
selected_model_params['random_state'] = RAND_SEED
showEvalutationGraph_classification(MODEL_CLASS, weather_daily_train_df, cls=class_label,
                                    x_axis_param_name=x_axis_param_name, x_axis_param_vals=x_axis_param_vals,
                                    selected_model_params=selected_model_params)
# + colab={"background_save": true} id="Z5o9BQOHLq18" outputId="9cccb139-ebfd-4b12-c267-5ae375b6c108"
# test model
test_accuracy, test_f1, test_auc = eval_classification(model, weather_daily_test_df, cls=class_label)
# performance on the test set
print(f'Test set performance: accuracy={test_accuracy}, macro-f1={test_f1}, auc={test_auc}')
# + [markdown] id="XVMCfgbKahtE"
# ### 1.5 Pearson Feature Selection + Hybrid Sampling to Daily Weather Dataset
# + colab={"base_uri": "https://localhost:8080/"} id="ck48Q9VQahtN" outputId="ebac768e-4c02-4e66-e9dd-8005f7aed3f7"
# Section 1.5: Pearson feature selection combined with hybrid sampling
# (reuses weather_daily_fs1_* built in section 1.1).
# train model
model, selected_hyperparams, train_accuracy, train_f1 = train_classification(initial_model, param_grid, weather_daily_fs1_train_df,
                                                                             cls=class_label, sampling_technique='hybrid')
print(f'Selected hyperparameters: {selected_hyperparams}')
# performance on the train set
print(f'Train set performance: accuracy={train_accuracy}, macro-f1={train_f1}')
# + id="ioTeYyewahtN"
# graph on train set performance
# hyper-parameters selected by GridSearchCV
selected_model_params = selected_hyperparams
selected_model_params['random_state'] = RAND_SEED
showEvalutationGraph_classification(MODEL_CLASS, weather_daily_fs1_train_df, cls=class_label,
                                    x_axis_param_name=x_axis_param_name, x_axis_param_vals=x_axis_param_vals,
                                    selected_model_params=selected_model_params)
# + id="yKfAZyE_ahtN" colab={"base_uri": "https://localhost:8080/"} outputId="e6734c12-5939-49cc-b2fe-a6460c9c4969"
# test model
test_accuracy, test_f1, test_auc = eval_classification(model, weather_daily_fs1_test_df, cls=class_label)
# performance on the test set
print(f'Test set performance: accuracy={test_accuracy}, macro-f1={test_f1}, auc={test_auc}')
# + [markdown] id="FSPsOKznhZlT"
# ### 1.6 SelecKBest Feature Selection + Hybrid Sampling to Daily Weather Dataset
# + id="i-ZIHmixhZlV" colab={"base_uri": "https://localhost:8080/"} outputId="cf1611eb-d1a1-4d14-ff52-dff70dbd8c70"
# Section 1.6: SelectKBest feature selection combined with hybrid sampling
# (reuses weather_daily_fs2_* built in section 1.2).
# train model
model, selected_hyperparams, train_accuracy, train_f1 = train_classification(initial_model, param_grid, weather_daily_fs2_train_df,
                                                                             cls=class_label, sampling_technique='hybrid')
print(f'Selected hyperparameters: {selected_hyperparams}')
# performance on the train set
print(f'Train set performance: accuracy={train_accuracy}, macro-f1={train_f1}')
# + id="d0CpcJB_hZlV"
# graph on train set performance
# hyper-parameters selected by GridSearchCV
selected_model_params = selected_hyperparams
selected_model_params['random_state'] = RAND_SEED
showEvalutationGraph_classification(MODEL_CLASS, weather_daily_fs2_train_df, cls=class_label,
                                    x_axis_param_name=x_axis_param_name, x_axis_param_vals=x_axis_param_vals,
                                    selected_model_params=selected_model_params)
# + id="sBlMerD3hZlW" colab={"base_uri": "https://localhost:8080/"} outputId="31c27ba2-90ed-4c2c-edb8-211f8af2115b"
# test model
test_accuracy, test_f1, test_auc = eval_classification(model, weather_daily_fs2_test_df, cls=class_label)
# performance on the test set
print(f'Test set performance: accuracy={test_accuracy}, macro-f1={test_f1}, auc={test_auc}')
| Notebooks/brri-dataset/experimentations/classification/selected_algorithms/mlp_classification.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python (spark)
# language: python
# name: spark
# ---
# # Cilia Data Analysis
# This file will try to shed light on cilia data
# %matplotlib inline
import cv2
import os
import sys
import numpy as np
import glob
import matplotlib.pyplot as plt
# Hash naming the sample directory under ../data/train/data to analyze
# (alternate sample kept in the trailing comment for quick switching).
samplehash = '10a278dc5ebd2b93e1572a136578f9dbe84d10157cc6cca178c339d9ca762c52' #'7fafc640d446cab1872e4376b5c2649f8c67e658b3fc89d2bced3b47c929e608'#
# Sort so frames are loaded in temporal order; flag 0 -> grayscale.
files = sorted(glob.glob( "../data/train/data/" + samplehash + "/frame*.png" ))
images = [ cv2.imread(x,0) for x in files ]
len(images)
# Ground-truth mask for the sample (grayscale; values appear to be small
# class labels, hence the *127 rescale below for display — confirm classes).
mask = cv2.imread( "../data/train/data/" + samplehash + "/mask.png" ,0 )
# Show mask and first frame side by side.
plt.figure()
plt.subplot(1,2,1)
plt.imshow( mask*127, cmap='gray' )
plt.subplot(1,2,2)
plt.imshow(images[0], cmap='gray')
# Per-pixel temporal variance across frames: moving cilia should show
# mid-range variance, static background low variance.
images = np.array(images)
variances = np.var( images , axis=0)
plt.figure()
plt.imshow( variances , cmap='hot' , interpolation='nearest')
plt.plot( plt.hist(variances )[1] )
# Heuristic mask: pixels with variance in (10, 100) predicted as cilia.
predicted_mask = np.logical_and( variances<100 , variances>10)
plt.figure()
plt.imshow( predicted_mask.astype(int)*255 )
# 9x9 box-blur of the variance map to smooth out pixel noise.
kernel = np.ones((9,9),np.float32)/81
dst = cv2.filter2D(variances,-1,kernel)
plt.figure()
plt.imshow( dst , interpolation='nearest')
| analysis/cilia_datacheck.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# name: python3
# ---
# + [markdown] id="view-in-github" colab_type="text"
# <a href="https://colab.research.google.com/github/ampehta/color-bert/blob/main/GCP.ipynb" target="_parent"><img src="https://colab.research.google.com/assets/colab-badge.svg" alt="Open In Colab"/></a>
# + [markdown] id="qRMvFREhh10q"
# ##GCP 연결된 계정 인증
# + id="3BwNgZhLhtlR"
from google.colab import auth
auth.authenticate_user()
# + [markdown] id="338nWuHth6Cp"
# ## 모델 GCP에 저장하기
# + id="PitethGqiFqH"
PROJECT = "muhwagua" #@param {type:"string"}
BUCKET = "gs://colorbert_models" #@param {type:"string", default:"jddj"}
MODEL_NAME = "ColorBert_Tuned" #@param {type:"string"}
MODEL_VERSION = "LR.ColorRatio.Epoch" #@param {type:"string"}
# + id="oG_HVauPh5Rc"
import os

# The model to be saved (MODEL must be defined in an earlier cell).
serving_model = MODEL
# Build the GCS destination path: bucket / model name / model version.
# Fixed typo: MDOEL_VERSION -> MODEL_VERSION (the original raised NameError).
export_path = os.path.join(BUCKET, MODEL_NAME, MODEL_VERSION)
# NOTE(review): `tf` is only imported in a later cell of this notebook;
# run that cell first (or add `import tensorflow as tf` here) before
# executing this one.
tf.saved_model.save(serving_model, export_path)
# + [markdown] id="o0DQvBJkkEcJ"
# ## GCP에서 모델 불러오기
# + id="buZQTZHDkDh9"
import tensorflow as tf
export_dir ='gs://colorbert_models/keras_export/1616858989.903349' #요것만 바꿔주면 됨
tf.saved_model.load(
export_dir, tags=None, options=None
)
| GCP.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# language: python
# name: python3
# ---
# +
import numpy as np
import pandas as pd
import six
import time
import os
from tqdm import tqdm
import pandas as pd
import bs4
import random
import string
# -
# # Load raw scraped dataset
scraped_data = pd.read_pickle('data/scraped/scrape.pickle.gz', compression='gzip')
print(scraped_data.shape)
df = scraped_data.reset_index()
df[df['url'].str.contains('Trapiche-Falling-Star-Ch')]
scraped_data.head()
# # Load scraped descriptions
# +
# Pipe-separated, headerless export of (url, raw HTML description) pairs.
# NOTE(review): latin1 encoding presumably matches the scraper's output — confirm.
descriptions = pd.read_csv(
    "data/scraped/descriptions.csv.gz",
    sep='|',
    header=None,
    names=['URL_name', 'desc_raw'],
    encoding='latin1',
    compression='gzip')
# Strip HTML markup from the raw description column; BeautifulSoup with
# the lxml parser returns only the visible text.
descriptions['description'] = descriptions['desc_raw'].apply(
    lambda x: bs4.BeautifulSoup(str(x), 'lxml').get_text()
)
# The raw HTML column is no longer needed once the clean text exists.
descriptions.drop('desc_raw', axis=1, inplace=True)
print(descriptions.shape)
# -
descriptions.head(3)
# # Clean up descriptions
descriptions['description'] = descriptions['description'].apply(
lambda x: x.lstrip().rstrip()
)
descriptions.iloc[1,:]
# # Merge datasets
# +
# De-duplicate both sides before merging so the left join cannot fan out.
scraped_data.drop_duplicates(inplace=True)
descriptions.drop_duplicates(inplace=True)
# Left join: keep every scraped row, attach its description where the
# scrape URL matches the description's URL_name key.
combined = pd.merge(
    left=scraped_data,
    right=descriptions,
    how='left',
    left_on='url',
    right_on='URL_name'
)
print(combined.shape)
# Keep a single row per product name (first occurrence wins, pandas default).
combined.drop_duplicates(subset='name',inplace=True)
combined.reset_index(drop=True, inplace=True)
print(combined.shape)
# -
# # Save
combined.to_pickle('data/scraped/scraped_with_decs.pickle.gzip', compression='gzip')
| notebooks/2a_clean_dataset.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python [conda env:python3]
# language: python
# name: conda-env-python3-py
# ---
# # Preparation
# %run "../Functions/1. Game sessions.ipynb"
import unidecode
# # Tests
# # Tinkering
# +
accented_string = "Enormément"
# accented_string is of type 'unicode'
unaccented_string = unidecode.unidecode(accented_string)
unaccented_string
# unaccented_string contains 'Enormement' and is of type 'str'
# -
# #### getUserSessions tinkering
_rmDF = rmdf1522
userId = '8829514a-cb9f-47fb-aaeb-3167776f1062'
#userId = getRandomRedMetricsGUID(_rmDF)
#def getUserSessions( _rmDF, userId):
result = _rmDF.loc[:,['userId','sessionId']][_rmDF['userId']==userId]['sessionId'].drop_duplicates().dropna(how='any')
result
_sessionIndex = randint(0,len(result)-1)
_guid = result.iloc[_sessionIndex]
_guid
userId
# #### getTranslatedForm tinkering - from 0.4 GF correct answers
# questionsAnswersTranslationsFR.T
# questionsAnswersTranslationsFR.loc["Are you interested in video games?"]
# questionsAnswersTranslationsFR.loc["Do you play video games?"]
# localizedFormFR = gformFR
#
# # returns an English-indexed, English-localized answer dataframe
# # from a French-indexed, French-localized dataframe
# #def getTranslatedForm( localizedFormFR ):
# result = localizedFormFR.copy()
#
# # translate answers
# for question in result.columns:
# for index in result.index:
# answer = result.loc[index, question]
# if (0 != len(questionsAnswersTranslationsFR.loc[question])):
# if (answer in questionsAnswersTranslationsFR.loc[question]):
# result.loc[index, question] =\
# questionsAnswersTranslationsFR.loc[question][answer]
# else:
# print(question)
# #print(index)
# print(answer)
# print(questionsAnswersTranslationsFR.loc[question])
# print()
# print()
# print()
#
# # translate questions
# result = result.rename(columns=dict(zip(localizedFormFR.columns,gformEN.columns)))
#
# result.T
# len(questionsAnswersTranslationsFR.loc[QTimestamp])
# getTranslatedForm( gformFR, questionsAnswersTranslationsFR ).iloc[1]
# ### getRandomRedMetricsGUID tinkering
from random import randint
uniqueUsers = rmdf1522['userId'].dropna().unique()
userCount = len(uniqueUsers)
testlocalplayerguid = '0'
while (not isGUIDFormat(testlocalplayerguid)):
userIndex = randint(0,userCount-1)
testlocalplayerguid = uniqueUsers[userIndex]
testlocalplayerguid
sessionscount = rmdf1522["sessionId"].nunique()
sessionscount
platforms = rmdf1522["customData.platform"].unique()
platforms
# print("part100="+str(part100.head(1)))
# print("part131="+str(part131.head(1)))
# print("part132="+str(part132.head(1)))
# print("part133="+str(part133.head(1)))
# print("part140="+str(part140.head(1)))
# print("part150="+str(part150.head(1)))
# print("part151="+str(part151.head(1)))
# print("part152="+str(part152.head(1)))
# print("df="+str(df.head(1)))
testGUID = '"4dbc2f43-421c-4e23-85d4-f17723ff8c66"'
# includewithoutusers=True will count sessions that do not have any userId attached
getSessionsCount( rmdf1522, testGUID)
# print("part100="+str(part100.columns))
# print("part131="+str(part131.columns))
# print("part132="+str(part132.columns))
# print("part133="+str(part133.columns))
# print("part140="+str(part140.columns))
# print("part150="+str(part150.columns))
# print("part151="+str(part151.columns))
# print("part152="+str(part152.columns))
#
# print("dfconcat="+str(dfconcat.columns))
#
# print("df="+str(df.columns))
# df.columns
sessionsList = getUserSessions(rmdf1522, testGUID)
sessionsList
sessionsList = rmdf1522[rmdf1522['type']=='start']
sessionsList = sessionsList.drop('type', 1)
sessionsList = sessionsList.dropna(how='any')
userSessionsList = sessionsList[sessionsList['userId']==testGUID]
userSessionsList
#print(testGUID)
sessionsList = getUserSessions(rmdf1522, testGUID)
#sessionsList = getAllSessions(rmdf1522, testGUID.replace('"',''))
#print(type(sessionsList))
sessionsList.shape[0]
allSessions = rmdf1522.loc[:,['userId', 'sessionId']].drop_duplicates()
allSessions.head()
allSessions.groupby('userId').size().reset_index(name='counts').sort_values(by='counts', ascending=False).head(10)
# +
#getUserSessionsCounts(getNormalizedRedMetricsCSV(part152)).head(10)
# -
allSessions.groupby('userId').agg(['count']).head() #.sort_values(by='sessionId', ascending=False).head(10)
# +
#df2 = pd.concat([df151, rmdf1522])
#df2.head(2)
#print(df2.columns)
#df2columns = df2.columns.values
#type(df2columns)
#df2columns
#newColumns = np.concatenate((minimalInitializationColumns, df2columns))
#newColumns
#df2 = getNormalizedRedMetricsCSV(df)
# -
# #### getRandomSessionGUID tinkering
getRandomSessionGUID()
_userId = '"e8fed737-7c65-49c8-bf84-f8ae71c094f8"'
type(rmdf1522['userId'].dropna().unique()), type(getUserSessions( rmdf1522, _userId ))
_userId = 'e8fed737-7c65-49c8-bf84-f8ae71c094f8'
_uniqueSessions = getUserSessions( rmdf1522, _userId )
len(_uniqueSessions)
_uniqueSessions
# +
#_userId = ''
_userId = '"e8fed737-7c65-49c8-bf84-f8ae71c094f8"'
#def getRandomSessionGUID( _userId = '' ):
rmId = _userId
if( not(isGUIDFormat(_userId))):
rmId = getRandomRedMetricsGUID()
_uniqueSessions = getUserSessions( rmdf1522, rmId )
_sessionsCount = len(_uniqueSessions)
_guid = ""
_sessionIndex = randint(0,_sessionsCount-1)
_guid = _uniqueSessions.iloc[_sessionIndex]
_guid
# -
rmId
_sessionIndex
_sessionsCount
randint(0,0)
_uniqueSessions
getRandomSessionGUID()
# #### getFirstEventDate tinkering
# +
userId = testGUID
userId = getRandomRedMetricsGUID()
#print('----------------------uid='+str(uid)+'----------------------')
sessions = getUserSessions(rmdf1522, userId)
firstGameTime = pd.to_datetime('2050-12-31T12:59:59.000Z', utc=True)
for session in sessions:
#print('-----------------------------------------session='+str(session))
timedEvents = rmdf1522[rmdf1522['sessionId']==session]
timedEvents = timedEvents.dropna(subset=['section'])
if(len(timedEvents) > 0):
timedEvents['userTime'] = timedEvents['userTime'].map(lambda t: pd.to_datetime(t, utc=True))
earliest = timedEvents['userTime'].min()
if(earliest < firstGameTime):
firstGameTime = earliest
#else:
#print('no event with section')
#print('-----------------------------------------')
print("firstGameTime=" + str(firstGameTime))
# -
rmdf1522[rmdf1522['userId']==userId]
sessions = getUserSessions(rmdf1522, userId)
sessions
| v1.52.2/Tests/1.1 Game sessions tests.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# language: python
# name: python3
# ---
# Loading and displaying an image
from PIL import Image
img = Image.open('/home/dvveera/db/orl/s1/1.pgm')
print(img.format, img.size, img.mode)
display(img)
# +
# Creating training, validation and test datasets
import os
import shutil
def copy_data(dir_s, fname):
    """Split one subject folder of the ORL face dataset into train/valid/test.

    Copies images ``1.pgm`` .. ``N.pgm`` from ``dir_s/fname`` into
    ``dir_s/train/fname`` (images 1-6), ``dir_s/valid/fname`` (7-8) and
    ``dir_s/test/fname`` (9 and above) — the 6/2/2 split for the 10
    images per ORL subject.

    Parameters
    ----------
    dir_s : str
        Root directory of the dataset.
    fname : str
        Name of the subject sub-directory (e.g. ``'s1'``).
    """
    src_dir = dir_s + '/' + fname
    # Kept from the original code: the working-directory change is an
    # observable side effect callers may rely on.
    os.chdir(src_dir)
    dir_train = dir_s + '/train/' + fname
    dir_valid = dir_s + '/valid/' + fname
    dir_test = dir_s + '/test/' + fname
    # exist_ok avoids the check-then-create race of the original
    # `if not exists: makedirs` pattern and makes the call idempotent.
    os.makedirs(dir_train, exist_ok=True)
    os.makedirs(dir_valid, exist_ok=True)
    os.makedirs(dir_test, exist_ok=True)
    # The original iterated os.listdir() purely as a counter while
    # synthesising the file names from an index; count explicitly so the
    # intent is visible and the loop variable is actually used.
    n_files = len(os.listdir(src_dir))
    for i in range(1, n_files + 1):
        if i <= 6:
            dest = dir_train
        elif i <= 8:
            dest = dir_valid
        else:
            dest = dir_test
        shutil.copy(src_dir + '/' + str(i) + '.pgm', dest)
dir_src = '/home/dvveera/db/orl'
os.chdir(dir_src)
for filename in os.listdir(dir_src):
if filename.startswith('s'):
copy_data(dir_src, filename)
print('Separated Training, Validation and Test Data')
# +
# Assign Labels
def assign_addr_label(path):
    """Return (file_path, subject_label) pairs for every file under `path`.

    Each immediate sub-directory of `path` is treated as one class; the
    directory name is the label attached to every file it contains.
    """
    pairs = []
    for entry in os.listdir(path):
        subdir = path + '/' + entry
        # Working-directory change preserved from the original version.
        os.chdir(subdir)
        matches = glob.glob(subdir + '/' + '*')
        pairs.extend((m, entry) for m in matches)
    return pairs
import glob
train_path = '/home/dvveera/db/orl/train'
train_data = assign_addr_label(train_path)
train_addrs, train_labels = zip(*train_data)
valid_path = '/home/dvveera/db/orl/valid'
valid_data = assign_addr_label(valid_path)
valid_addrs, valid_labels = zip(*valid_data)
test_path = '/home/dvveera/db/orl/test'
test_data = assign_addr_label(test_path)
test_addrs, test_labels = zip(*test_data)
print('Assigned Labels to Training, Validation and Test Data')
# -
# Load images and save them
import numpy as np
train_storage = []
valid_storage = []
test_storage = []
mean_storage = []
# Training Images
for i in range(len(train_addrs)):
if i % 20 == 0 and i > 1:
print('Train data: {}/{}'.format(i, len(train_addrs)))
addr = train_addrs[i]
img = Image.open(addr)
img = np.array(img)
train_storage.append(img)
# print(np.array(img))
mean_storage.append(np.sum(img) / float(len(train_labels)))
# print(train_storage)
# Validation Images
for i in range(len(valid_addrs)):
if i % 20 == 0 and i > 1:
print('Valid data: {}/{}'.format(i, len(valid_addrs)))
addr = valid_addrs[i]
img = Image.open(addr)
img = np.array(img)
valid_storage.append(img)
# print(np.array(img))
# print(valid_storage)
# Test Images
for i in range(len(test_addrs)):
if i % 20 == 0 and i > 1:
print('Test data: {}/{}'.format(i, len(test_addrs)))
addr = test_addrs[i]
img = Image.open(addr)
img = np.array(img)
test_storage.append(img)
# print(np.array(img))
# print(test_storage)
# Combine data and labels
train_set = list(zip(train_storage, train_labels))
valid_set = list(zip(valid_storage, valid_labels))
test_set = list(zip(test_storage, test_labels))
# Linear Discriminant Analysis
from collections import Counter
# Converting images to a one-dimensional vector
train_1d = [t.flatten() for t in train_storage]
# Calculate the mean of images in each class
label_dict = Counter(train_labels)
mean = {}
for idx, t in enumerate(train_1d):
    lab = train_labels[idx]
    # Accumulate t / N_class so the final entry is the true class mean.
    # Fixed: the original computed (mean[lab] + t) / N on every later
    # sample, re-dividing the already-normalised running value — the
    # result was not the mean of the class images.
    if lab not in mean:
        mean[lab] = t / label_dict[lab]
    else:
        mean[lab] = mean[lab] + t / label_dict[lab]
print('Calculated the means of the images in each class')
# print(mean)
# Calculate in class scatter matrix
S = {}
for idx, t in enumerate(train_1d):
    lab = train_labels[idx]
    # NOTE(review): t and mean[lab] are 1-D vectors here, so np.transpose
    # is a no-op and np.dot returns a *scalar* (squared distance), not the
    # within-class scatter matrix the comment above promises.  An outer
    # product (np.outer) would give the D x D matrix, at considerable
    # memory cost for flattened images — confirm intended behaviour.
    if lab not in S:
        S[lab] = np.dot(np.transpose(t - mean[lab]), (t - mean[lab]))
    else:
        S[lab] += np.dot(np.transpose(t - mean[lab]), (t - mean[lab]))
print('Calculated the in class scatter matrix for each class')
print(S)
# Scratch cell probing the scatter computation for a single class.
S = {}
P = {}
lab = 's29'
S[lab] = np.matmul((train_1d[0] - mean[lab]), np.transpose(train_1d[0] - mean[lab]))
# NOTE(review): np.transpose() requires an array argument — the two
# zero-argument calls below raise TypeError, so this cell cannot run to
# completion as written.
tr = np.transpose()
print(np.transpose(), (train_1d[0] - mean[lab]), S)
P[lab] = np.dot((train_1d[0] - mean[lab]), (train_1d[0] - mean[lab]))
print(train_1d[0], mean[lab], P)
# # Create data matrix
# data_matrix = np.transpose(train_center)
# # print(np.transpose(train_center))
# # Create covariance matrix
# covariance = np.dot(data_matrix, np.transpose(data_matrix))
# # Check for symmetricity
# if(covariance.all() == np.transpose(covariance).all()):
# print('Symmetric')
| Kaustubh/lda.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python3 (data science)
# language: python
# name: ds
# ---
# # Homework: scikit-image
# ## Counting objects
#
# In class, we saw how to count the number of objects in a microscopy image. Here, we will repeat that exercise, but make use of some of the algorithms in `scikit-image` to segment the image, and then to determine properties of the resulting objects.
#
# As input, use the image `skimage.data.coins`. Segment the image, and then calculate the area and eccentricity of each coin.
#
# There are various ways to do this. One would be:
#
# 1. Equalize the input image (see `skimage.exposure`)
# 2. Threshold the image (`skimage.filters.threshold_otsu`)
# 3. Remove objects touching the boundary (`skimage.segmentation.clear_border`)
# 4. Apply morphological closing (`skimage.morphology.closing`)
# 5. Remove small objects (`skimage.measure.regionprops`).
# Visualize the results if you want with `skimage.color.label2rgb`.
# 7. Calculate the area and eccentricity of each coin, and display the
# original image with this information on it (`matplotlib.pyplot.text` or `matplotlib.pyplot.annotate`)
# ## Panorama stitching
#
# One of the scikit-image tutorials shows [how to do panorama stitching](https://github.com/scikit-image/skimage-tutorials/blob/master/lectures/solutions/adv3_panorama-stitching-solution.ipynb).
#
# Take 3 or 4 overlapping photos of your own, and use the procedure described to stitch your own panorama.
# ## Extra: Image Stacking
#
# Reprocess one of the datasets from http://www.rawastrodata.com/. See http://www.rawastrodata.com/pages/typesofimages.html for a description of the different kind of images.
# ## Counting objects
# +
import numpy as np
import matplotlib.pyplot as plt
from skimage import data
# %matplotlib inline
from skimage.filters import sobel
from scipy import ndimage as ndi
from skimage.measure import regionprops
from skimage.color import label2rgb
from skimage.morphology import watershed
# -
# I follow the scikit-image tutorial on segmentation:
#
# http://scikit-image.org/docs/stable/user_guide/tutorial_segmentation.html
# +
# import coin image
coins = data.coins()
# use amplitude of gradient to construct an elevation map
elevation_map = sobel(coins)
# choose markers from extreme parts of histogram of grey value
markers = np.zeros_like(coins)
markers[coins < 30] = 1
markers[coins > 160] = 2
# use watershed to obtain segmentation
segmentation = watershed(elevation_map, markers)
# fill holes in segments
segmentation = ndi.binary_fill_holes(segmentation - 1)
# label coins
labeled_coins, _ = ndi.label(segmentation)
# +
# overlay coins with color labels
coin_label_overlay = label2rgb(labeled_coins-1, image=coins)
fig = plt.figure(figsize=(10, 6))
ax = fig.add_subplot(111)
ax.imshow(coin_label_overlay)
ax.axis('off');
for region in regionprops(labeled_coins):
# skip small areas
if region.area > 100:
minr, minc, maxr, maxc = region.bbox
annot = "Area={0}\n Ecc={1:.2g}".format(region.area, region.eccentricity)
ax.text(minc-5, minr+5, annot, color="white")
# -
# ## Panorama stitching
# See `panoroma-stitching.ipynb` for detail.
#
# <img src='images/pano-output.png' >
| hw_8/hw_8.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# name: python3
# ---
# + [markdown] id="view-in-github" colab_type="text"
# <a href="https://colab.research.google.com/github/koiralakp5/STEM_KRISHNA/blob/main/Untitled0.ipynb" target="_parent"><img src="https://colab.research.google.com/assets/colab-badge.svg" alt="Open In Colab"/></a>
# + colab={"base_uri": "https://localhost:8080/"} id="eAhBey3dlYih" outputId="bd01f707-c387-4a50-e462-628313eee657"
# We need to import the numpy package
import numpy as np
# We define the two vectors
a_vector = [1,1,2]
b_vector = [0,3,1]
# calculate the lengths of the vectors
a_length = np.linalg.norm(a_vector)
b_length = np.linalg.norm(b_vector)
#now the angle between vectors a and b according to above formula
theta = np.arccos(np.dot(a_vector, b_vector) / (a_length * b_length) )
print(f'The angle between vectors a and b is {np.degrees(theta):.2f}°' )
# + id="-8yRE2-Rl5Ni"
| Untitled0.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# language: python
# name: python3
# ---
# +
# Read a range [n, m] and an operation selector, then print the result.
m=int(input('请输入一个整数m: '))
n=int(input('请输入一个小于m的整数n: '))
x=int(input('1为求和,2为求积,3为求余数,4为整除: '))
if x==1:
    # Sum n + (n+1) + ... + m.
    y=0
    while n<=m:
        y=y+n  # fixed: the original read an undefined name `total` here (NameError)
        n=n+1
elif x==2:
    # Product n * (n+1) * ... * m.
    y=1
    while n<=m:
        y*=n
        n+=1
elif x==3:
    # Remainder of m divided by n.
    y=m%n
elif x==4:
    # Integer (floor) division of m by n.
    y=m//n
else:
    # Fail with a clear message instead of the NameError the original
    # raised at print(y) for an unrecognised selector.
    raise ValueError('x must be 1, 2, 3 or 4')
print(y)
# +
# Map an air-quality index (AQI) reading to a health advisory message.
AQI=int(input('空气质量指数为: '))
# Fixed: the original used six independent `if` tests with overlapping
# bounds (0<=AQI<=50 then 50<=AQI<=100, ...), so the shared boundary
# values 50/100/150/200/250 each printed two messages.  The chain below
# keeps the same bands but makes them mutually exclusive; values outside
# 0..300 still print nothing, as before.
if 0 <= AQI <= 50:
    print('空气质量为优,进行各项户外活动均佳。')
elif 50 < AQI <= 100:
    print('空气质量为良,建议极少数异常敏感人群应减少户外活动。')
elif 100 < AQI <= 150:
    print('空气质量轻度污染,建议儿童、老年人及心脏病、呼吸系统疾病患者应减少长时间、高强度的户外锻炼。')
elif 150 < AQI <= 200:
    print('空气质量中度污染,敏感人群避免长时间、高强度的户外锻练,一般人群适量减少户外运动。')
elif 200 < AQI <= 250:
    print('空气质量重度污染,敏感人群应停留在室内,停止户外运动,一般人群减少户外运动,外出请佩戴口罩。')
elif 250 < AQI <= 300:
    print('空气质量严重污染,建议儿童、老年人和病人应当留在室内,避免体力消耗,减少户外活动,室内打开空气净化机,外出请佩戴口罩。')
# -
i=0
print('......')
while i<10:
print(' ')
i+=1
print('是这样的空行吗?')
# +
# Read n integers and print the second-largest one.
n=int(input('请输入你想输入的整数个数'))
max1=int(input('请输入一个整数:'))
max2=int(input('请输入一个整数:'))  # original wrapped int() twice; once suffices
# Keep max1 >= max2.  Fixed: the original "swap" read `temp` before it was
# ever assigned (NameError) and then overwrote both values with max1.
if max2>max1:
    max1, max2 = max2, max1
i=0
while i<n-2:
    x=int(input('请输入一个整数: '))
    if max2<x and x<max1:
        # x fits between the current top two: it is the new runner-up.
        max2=x
    if x>max1:
        # x is the new maximum; the old maximum becomes the runner-up.
        max2=max1
        max1=x
    i+=1  # fixed: the original incremented x, so the loop never terminated
print('第二大的数为: ',max2)
| chapter1/homework/computer/3-22/201611680805 (3).ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .r
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: R
# language: R
# name: ir
# ---
# # Tutorial 2 for R
#
# ## Make a scenario of Dantzig's Transport Problem using the *ix modeling platform* (ixmp)
#
# <img style="float: right; height: 80px;" src="_static/R_logo.png">
#
# ### Aim and scope of the tutorial
#
# This tutorial uses the transport problem scenario developed in the first tutorial and illustrates how the ``ixmp`` framework can be applied for scenario analysis in the sense often used in economic or environmental modeling: develop a baseline, create a clone from the baseline with altered parameters or assumptions, and solve the new model. Then, compare the results from the original and altered scenario versions.
#
# In particular, this tutorial will take you through the following steps:
# 0. Launch an ``ixmp.Platform`` instance and retrieve the ``ixmp.Scenario`` instance of Dantzig's transport problem
# 0. Retrieve some data from the scenario for illustration of filters
# 0. Make a clone of the baseline scenario, then check out the clone and make changes:
# in this case, add a new demand location and transport costs to that city
# 0. Solve the new scenario
# 0. Display the solution of both the baseline and the new scenario
# ### Launching the platform, loading and cloning an existing datastructure
#
# This datastructure will be used to clone an existing scenario and, after varying some input parameters, to solve again the Dantzig's transport problem as well as the solution after solving it in GAMS.
# load the rixmp package source code
library("retixmp")
ixmp <- import('ixmp')
# launch the ix modeling platform using the local default database
mp <- ixmp$Platform(dbtype="HSQLDB")
# +
scen_list <- mp$scenario_list()
scen_list
# TODO: the conversion of the Java output of the `scenario_list()` function to a clean R dataframe is not yet implemented
# -
# details for loading an existing datastructure from the IX modeling platform
model <- "transport problem"
scenario <- "standard"
# load the default version scenario from the first tutorial
scen <- mp$Scenario(model, scenario)
# ### Retrieve some data from the scenario for illustration of filters
#
# Before cloning a scenario and editing data, this section illustrates two-and-a-half methods to retrieve data for a parameter from a scenario.
# load the distance parameter
d = scen$par("d")
d
# show only the distances for connections from Seattle
d[d['i'] == "seattle",]
# +
# for faster access or more complex filtering,
# it may be easier to only load specific parameter elements using a dictionary
ele_filter = {}
ele_filter$i = c('seattle')
ele_filter$j = c('chicago', 'topeka')
d_filtered = scen$par("d", ele_filter)
d_filtered
# -
# ### Make a clone of the baseline scenario, then check out the clone and edit the scenario
#
# For illustration of a scenario analysis workflow, we add a new demand location ``detroit`` and add a demand level and transport costs to that city.
# Because the production capacity does not allow much slack for increased production, we also reduce the demand level in ``chicago``.
# create a new scenario by cloning the datastructure (without keeping the solution)
scen_detroit <- scen$clone(model, 'detroit', annotation='extend the Transport problem by a new city', keep_sol=FALSE)
# check out the datastructure to make changes
scen_detroit$check_out()
# +
# reduce demand
scen_detroit$add_par('b', 'chicago', 200, 'cases')
# add a new city with demand and distances
scen_detroit$add_set('j', 'detroit')
scen_detroit$add_par('b', 'detroit', 150, 'cases')
scen_detroit$add_par('d', c('seattle', 'detroit'), 1.7, 'cases')
scen_detroit$add_par('d', c('san-diego', 'detroit'), 1.9, 'cases')
# -
# save changes to database
comment = "add new city 'detroit' with demand, reduce demand in 'chicago'"
scen_detroit$commit(comment)
scen_detroit$set_as_default()
# ### Solve the new scenario
scen_detroit$solve(model='transport_ixmp')
# ### Display and analyze the results
#
# For comparison between the baseline scenario, i.e., the original transport problem, and the "detroit" scenario, we show the solution for both cases.
# display the objective value of the solution in the baseline scenario
scen$var("z")
# display the objective value of the solution in the "detroit" scenario
scen_detroit$var("z")
# display the quantities transported from canning plants to demand locations in the baseline scenario
scen$var("x")
# display the quantities transported from canning plants to demand locations in the "detroit" scenario
scen_detroit$var("x")
# display the quantities and marginals (=shadow prices) of the demand balance constraints in the baseline scenario
scen$equ("demand")
# display the quantities and marginals (=shadow prices) of the demand balance constraints in the "detroit" scenario
scen_detroit$equ("demand")
# ### Close the database connection of the ix modeling platform
# close the connection of the platform instance to the local ixmp database files
mp$close_db()
| tutorial/transport/R_transport_scenario_ret.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# language: python
# name: python3
# ---
# # 基本程序设计
# - 一切代码输入,请使用英文输入法
print('joker is bad man')
# ## 编写一个简单的程序
# - 圆公式面积: area = radius \* radius \* 3.1415
# ### 在Python里面不需要定义数据的类型
radius = 100 # 定义变量
area = radius * radius * 3.14 # 普通代码,* 代表乘法
print(area) # 最后打印出结果
# ## 控制台的读取与输入
# - input 输入进去的是字符串
# - eval
# - 在jupyter用shift + tab 键可以跳出解释文档
variable = input('请输入一个数字')
print(variable)
# ## 变量命名的规范
# - 由字母、数字、下划线构成
# - 不能以数字开头 \*
# - 标识符不能是关键词(实际上是可以强制改变的,但是对于代码规范而言是极其不适合)
# - 可以是任意长度
# - 驼峰式命名
print(12)
import os
def go(num):
os.system('echo hahah')
print = go
print(12)
# ## 变量、赋值语句和赋值表达式
# - 变量: 通俗理解为可以变化的量
# - x = 2 \* x + 1 在数学中是一个方程,而在语言中它是一个表达式
# - test = test + 1 \* 变量在赋值之前必须有值
x = 100
x = 2 * x + 1 # 赋值语句,在赋值之前,一定要有值
print(x)
a = eval(input('数字'))
print(type(a))
print(a * 3)
# ## 同时赋值
# var1, var2,var3... = exp1,exp2,exp3...
Joekr, Mistt,hahah,lalal = 'lalal',120,120.33333,True
print(Joekr,Mistt,hahah,lalal)
# ## 定义常量
# - 常量:表示一种定值标识符,适合于多次使用的场景。比如PI
# - 注意:在其他低级语言中如果定义了常量,那么,该常量是不可以被改变的,但是在Python中一切皆对象,常量也是可以被改变的
chart = 100.1
chart = 'hahahah'
chart = True
print(chart)
import math
print(math.pi)
# ## 数值数据类型和运算符
# - 在Python中有两种数值类型(int 和 float)适用于加减乘除、模、幂次
# <img src = "../Photo/01.jpg"></img>
# ## 运算符 /、//、**
number1 = 100
number2 = 500
print(number1 + number2)
number3 = 100.0
number4 = 500.0
print(number3 + number4)
number3 = 100.0
number4 = 500.0
print(number3 - number4)
number3 = 100.0
number4 = 500.0
print(number3 * number4)
number3 = 100.0
number4 = 500.0
print(number3 / number4)
number3 = 100.0
number4 = 500.0
print(number3 // number4)
number3 = 100.0
number4 = 2
print(number3 ** number4)
# ## 运算符 %
number3 = 100.0
number4 = 500.0
print(number3 % number4)
# ## EP:
# - 25/4 多少,如果要将其转变为整数该怎么改写
# - 输入一个数字判断是奇数还是偶数
# - 进阶: 输入一个秒数,写一个程序将其转换成分和秒:例如500秒等于8分20秒
# - 进阶: 如果今天是星期六,那么10天以后是星期几? 提示:每个星期的第0天是星期天
res = 25//4
print(res)
# Read a number and report whether it is even (偶数) or odd (奇数).
input_number = input('input number')
# NOTE(review): eval on user input is unsafe in general; kept to preserve
# the notebook's behaviour of accepting int or float literals.
input_number_int = eval(input_number)
# Fixed: the original tested `input_number_int == int`, comparing the
# value against the *type* `int` — always False, so its first branch was
# dead code.  A single `% 2` test works for both int and float inputs
# and produces the same output the live branch did.
if input_number_int % 2 == 0:
    print('偶数')
else:
    print('奇数')
time = eval(input('input'))
fen = time // 60
miao = time % 60
print(fen,'分',miao,'秒')
print('%d分%d秒'%(fen,miao))
time1 = 6
time2 = eval(input('输入'))
result = (time1 + time2) % 7
print(result)
# ## 科学计数法
# - 1.234e+2
# - 1.234e-2
num1=1.234e+2
num2 = 1.234e-2
print(num1,num2)
# ## 计算表达式和运算优先级
# <img src = "../Photo/02.png"></img>
# <img src = "../Photo/03.png"></img>
x = eval(input('x'))
y = eval(input('y'))
a = eval(input('a'))
b = eval(input('b'))
c = eval(input('c'))
part_1 = (3 + 4 * x) / 5
part_2 = (10 * (y-5)* (a+b+c))/ x
part_3 = 9*(4/x + (9+x)/y)
print(part_1 - part_2 + part_3)
# ## 增强型赋值运算
# <img src = "../Photo/04.png"></img>
a = 1
a += 100 # a = a + 100
print(a)
# ## 类型转换
# - float -> int
# - 四舍五入 round
int(25 / 4) # 转换成整型
str(25 / 4) # 转换成字符串
float(25//5) # 转换成浮点
round(25/4,1) # 四舍五入
# ## EP:
# - 如果一个年营业税为0.06%,那么对于197.55e+2的年收入,需要交税为多少?(结果保留2为小数)
# - 必须使用科学计数法
# Narcissistic ("water flower") number check for 153: a three-digit
# number that equals the sum of the cubes of its decimal digits.
water_floawer = 153
# Split 153 into hundreds / tens / units via divmod instead of the
# original chained floor divisions.
bai, remainder = divmod(153, 100)
shi, ge = divmod(remainder, 10)
if water_floawer == bai ** 3 + shi ** 3 + ge ** 3:
    print('是水仙花')
else:
    print('NO')
round(197.55e+2 * 0.06e-2,2)
# # Project
# - 用Python写一个贷款计算器程序:输入的是月供(monthlyPayment) 输出的是总还款数(totalpayment)
# 
贷款数 = eval(input('请输入贷款数'))
月利率 = eval(input('月利率'))
年限 = eval(input('年限'))
月供= ( (贷款数 * 月利率) / (1-(1/(1+月利率)**(年限*12))))
总还款数 = 月供 * 年限 * 12
print(总还款数)
import time
print(time.time())
# # Homework
# - 1
# <img src="../Photo/06.png"></img>
celsius = eval(input("Enter a degree in Celsius:"))
fahrenheit = (9 / 5)* celsius + 32
print(celsius,"Celsius is" ,fahrenheit ,"Fahrenheit")
# - 2
# <img src="../Photo/07.png"></img>
radius,length = eval(input("Enter the radius and length of a cylinder:"))
area = radius * radius * 3.14
volume = area * length
print("The area is",area)
print("The volume is",volume)
# - 3
# <img src="../Photo/08.png"></img>
feet = eval(input("Enter a value for feet: "))
meters=feet*0.305
print(feet , "feet is" , meters ,"meters")
# - 4
# <img src="../Photo/10.png"></img>
M = eval(input("Enter the amount of water in kilograms: "))
initialtemperature = eval(input("Enter the initial temperature: "))
finaltemperature = eval(input("Enter the final temperature: "))
Q = M *(finaltemperature - initialtemperature) * 4184
print("The energy needed is" , Q )
# - 5
# <img src="../Photo/11.png"></img>
chae,nianlilv =eval(input("Enter balance and interest rate(e.g., 3 for 3%): "))
lixi = chae*(nianlilv/1200)
print("The interest is" ,lixi )
# - 6
# <img src="../Photo/12.png"></img>
v0,v1,t =eval(input("Enter v0,v1,and t: "))
a=(v1-v0)/t
print("The average acceleration is" ,a)
# - 7 进阶
# <img src="../Photo/13.png"></img>
cunkuanshu =eval(input("Enter the monthly saving amount: "))
sum1 =cunkuanshu*(1+0.00417)
sum2 =(cunkuanshu+sum1)*(1+0.00417)
sum3 =(cunkuanshu+sum2)*(1+0.00417)
sum4 =(cunkuanshu+sum3)*(1+0.00417)
sum5 =(cunkuanshu+sum4)*(1+0.00417)
sum6 =(cunkuanshu+sum5)*(1+0.00417)
print("After the sixth month, the account value is" ,sum6 )
# - 8 进阶
# <img src="../Photo/14.png"></img>
a =eval(input("Enter a number between 0 and 1000: "))
bai = a //100
shi = a//10 % 10
ge = a % 10
sum=bai+shi+ge
print("The sum of the digits is" ,sum)
| 7.16.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python [Root]
# language: python
# name: Python [Root]
# ---
# +
import sys
sys.path.append('../')
sys.path.append('../Nets/')
from glob import glob
from os.path import join, isfile, basename
from multiprocessing import Pool
import pickle
from scipy.ndimage.interpolation import rotate
from keras.optimizers import adam
from keras.callbacks import ModelCheckpoint
from tqdm import tqdm
from keras.callbacks import Callback
from functools import partial
import seaborn as sns
import time
from NoduleClfOld import *
# from CancerClf import *
import pandas as pd
import csv
from paths import *
# %pylab inline
# %load_ext autoreload
# %autoreload 1
# -
sns.set_style('white')
data = pd.read_csv(join(PATH['DATA_OUT'], 'DATAFRAMES', 'top_patches'))
data['patientid'] = data.patchid.apply(lambda x: x[:32])
train = data.merge(pd.read_csv(PATH['LABELS']),
left_on='patientid',
right_on='id').drop(['patientid'], axis=1)
rint = random.randint(0, len(data))
patch = load(join(PATH['ENHANCED_CROPPED'], data.patchid.values[rint]))
print(data.patchid[rint])
imshow(patch[32]);
axis('off');
train = train.groupby('id', sort=False)#.drop('id', axis=1)
# train.patchid = train.patchid.apply(lambda x: join(PATH['ENHANCED_CROPPED'], x))
32
CPU = 24
SHIFT = 5
TRAIN_VAL_RAIT = .3
OVERSAMPLING = 30
FULL_CHANNELS_N = 10
UNDERSAMPLING = 3
LOWER = -1000
UPPER = 400
BATCH_SIZE = 32
SHAPE = (18, 48, 48)
def augment_patch(patch, shape, shift):
    """Randomly jitter/rotate a 3-D patch and return three axis-permuted crops.

    Returns a list of three crops of size ``shape``, each with a trailing
    channel axis added, intensity-clipped to [LOWER, UPPER] and rescaled
    to [0, 1].  Relies on numpy names star-imported by %pylab.
    """
    # Start from the patch centre, jittered by up to +/-`shift` voxels
    # independently per axis (numpy randint: high bound exclusive).
    center = array(patch.shape) // 2
    shift = random.randint(-shift, shift, 3)
    center += shift
    # Coin flip for a 180-degree in-plane rotation (k = 0 or 2 below).
    rotate = random.randint(0, 2)
    patch = swapaxes(patch, 0, 2)
    patch = swapaxes(patch, 1, 0)
    # x y z
    patch = rot90(patch, k = 2 * rotate)
    patch = swapaxes(patch, 0, 2)
    patch = swapaxes(patch, 1, 2)
    # Crop bounds: `left` voxels before the centre, `right` after, per axis.
    left = array(shape) // 2
    right = array(shape) - left
    # Three crops that cycle the roles of the three axes (i = 0, 1, 2),
    # presumably to present the nodule from three orientations — confirm
    # against the downstream model's input contract.
    patch = [patch[center[0] - left[(0 + i) % 3]:center[0] + right[(0 + i) % 3],
             center[1] - left[(1 + i) % 3]:center[1] + right[(1 + i) % 3],
             center[2] - left[(2 + i) % 3]:center[2] + right[(2 + i) % 3]] for i in range(3)]
    # Re-permute crops 1 and 2 so all three share the same output layout.
    patch[1] = swapaxes(patch[1], 0, 2)
    patch[2] = swapaxes(patch[2], 0, 1)
    # Add a trailing channel dimension.
    patch = [expand_dims(array(patch[i]), -1)
             for i in range(3)]
    # Window intensities to [LOWER, UPPER] (-1000..400, presumably
    # Hounsfield units — confirm) and normalise to [0, 1].
    patch = clip(patch, LOWER, UPPER)
    patch = (patch - LOWER) / float(UPPER - LOWER)
    return patch
def batch_generator(data,
                    shape=(18, 48, 48),
                    shift=SHIFT,
                    CPU=24,
                    oversampling=OVERSAMPLING):
    """Load every patch file in `data` and build `oversampling` augmented copies.

    Despite the name this is NOT a generator: it loads all patches at once,
    then returns a list of `oversampling` independently augmented versions of
    the whole set, each regrouped so axis 0 indexes the three crop views
    produced by augment_patch.

    data -- iterable of patch file paths readable by `load`.
    shape, shift -- forwarded to augment_patch.
    CPU -- worker-process count for the multiprocessing pools.
    oversampling -- number of augmented copies of the full patch set.
    """
    # read all patch volumes in parallel
    with Pool(CPU) as pool:
        patch_list = pool.map(load, data)
    # partial() keeps the augmenter picklable for Pool.map
    augment = partial(augment_patch,
                      shape=shape,
                      shift=shift)
    oversampled = list()
    for i in range(oversampling):
        with Pool(CPU) as pool:
            augmented = pool.map(augment, patch_list)
        # regroup from (sample, view, ...) to (view, sample, ...)
        oversampled.append([patch for patch in swapaxes(asarray(augmented), 0, 1)])
    return oversampled
# Earlier 80/20 split, kept for reference (the CSVs read below were produced
# by these lines):
# msk = random.rand(len(train)) < 0.8
# valid = train[msk]
# train = train[msk]
# train.to_csv(join(PATH['DATA_OUT'], 'DATAFRAMES', 'train_cancer'))
# valid.to_csv(join(PATH['DATA_OUT'], 'DATAFRAMES', 'valid_cancer'))
train = pd.read_csv(join(PATH['DATA_OUT'], 'DATAFRAMES', 'train_cancer'))
valid = pd.read_csv(join(PATH['DATA_OUT'], 'DATAFRAMES', 'valid_cancer'))
# +
# Rebuild the nodule-level network and load its trained weights; only the
# bottle_neck sub-model is used below, to embed each candidate patch.
clf_model, coders_model, bottle_neck = predictor(dropout_conv=.2,
                                                 dropout_dence=.3,
                                                 shape=(18, 48, 48, 1),
                                                 shared_layers=True
                                                 )
clf_model.compile('adam', 'categorical_crossentropy')
clf_model.load_weights(join(PATH['WEIGHTS'], '3DCNN_nodule_clf_shared_tested'))
# -
# NOTE(review): this loop unpacks (name, group) pairs, which matches the
# `train.groupby('id', ...)` assignment from an earlier cell rather than the
# read_csv above -- notebook cell-order dependent; verify which `train` is
# live at this point.
candidates = dict()
for name, group in train:
    candidates[name] = group
path = '/home/a.dobrenkii/Projects/Kaggle/DataScienceBowl2K17/data/KAGGLE_CANDIDATES/'
# For each patient: embed every candidate patch with the bottleneck model and
# cache the outputs under BENING/ or CANCER/ according to the label.
for patient, data in tqdm(candidates.items()):
    paths = list()
    for candidate in data.patchid:
        paths.append(join(PATH['ENHANCED_CROPPED'], candidate))
    batches = batch_generator(paths)
    predicted = list()
    for batch in batches:
        predicted.append(bottle_neck.predict(batch))
    fold = 'BENING'
    if data.cancer.values[0]:
        fold = 'CANCER'
        # NOTE(review): this break aborts the whole patient loop at the first
        # cancer case BEFORE the save step below runs -- looks like a
        # debugging leftover; with it in place the CANCER folder can never be
        # populated. Confirm and remove if unintended.
        break
    for i, pred in enumerate(predicted):
        save(join(path, fold, patient + '_BN_' + str(i)), pred[0])
        save(join(path, fold, patient + '_DENCE_' + str(i)), pred[1])
def apply(layers, tnsr):
    """Feed ``tnsr`` through each callable in ``layers``, in order, and
    return the final result."""
    result = tnsr
    for transform in layers:
        result = transform(result)
    return result
# +
from keras.regularizers import l1, l2
def cancer_clf(input_chanels=10,
               shape=(4, 3, 3, 256),
               dropout_conv=.2,
               dropout_dence=.2):
    """Patient-level head: average per-nodule feature maps, then conv + dense.

    NOTE(review): this definition is dead code -- it is immediately shadowed
    by the second `cancer_clf` defined below, which is the one actually used.
    Written against the Keras 1 API (merge, W_regularizer, output_dim).
    """
    # Keras 1 backend check: the model assumes TensorFlow (channels-last)
    # dimension ordering.
    if K.image_dim_ordering() != 'tf':
        print('Wrong dim ordering: should be TF')
    # one input tensor per cached nodule channel
    inputs = [Input(shape=shape)
              for i in range(input_chanels)]
    # element-wise average across the nodule channels
    x = merge(inputs, mode='ave')
    x = Convolution3D(1024, 3, 3, 3,
                      border_mode='same',
                      W_regularizer=l2(0.01)
                      # activity_regularizer=l1(0.01)
                     )(x)
    x = BatchNormalization(axis=4)(x)
    x = Activation('relu')(x)
    x = AveragePooling3D((2, 2, 2))(x)
    if dropout_conv is not None:
        x = Dropout(dropout_conv)(x)
    x = Flatten()(x)
    x = Dense(512, activation='relu',
              W_regularizer=l2(0.01))(x)
    if dropout_dence is not None:
        x = Dropout(dropout_dence)(x)
    # two-way softmax: cancer vs. benign
    x = Dense(output_dim=2,
              activation='softmax',
              name='is_cancer')(x)
    return Model(inputs, x)
# +
from keras.regularizers import l1, l2
def cancer_clf(input_chanels=10,
               shape=(4, 3, 3, 256),
               dropout_conv=.2,
               dropout_dence=.2):
    """Patient-level classifier: embed each nodule channel, concat, classify.

    Builds `input_chanels` parallel inputs of `shape`; each is run through a
    SHARED flatten -> dropout -> dense(256) stack, the embeddings are
    concatenated, and a dense head ends in a 2-way softmax ('is_cancer').
    Written against the Keras 1 API (merge, W_regularizer, output_dim).
    `dropout_conv` is accepted for interface parity but unused here.
    """
    # Keras 1 backend check: assumes TensorFlow (channels-last) ordering.
    if K.image_dim_ordering() != 'tf':
        print('Wrong dim ordering: should be TF')
    inputs = [Input(shape=shape) for _ in range(input_chanels)]
    # shared per-channel stack -- the same layer instances (and hence the
    # same weights) are applied to every input branch
    per_channel = [Flatten(),
                   Dropout(dropout_dence),
                   Dense(256, activation='relu',
                         W_regularizer=l2(0.01)),
                   ]
    embedded = [apply(per_channel, tensor) for tensor in inputs]
    head = merge(embedded, mode='concat')
    head = Dropout(dropout_dence)(head)
    head = Dense(256, activation='relu',
                 W_regularizer=l2(0.01))(head)
    head = Dropout(dropout_dence)(head)
    head = Dense(output_dim=2,
                 activation='softmax',
                 name='is_cancer')(head)
    return Model(inputs, head)
# -
def train_test_split(savef=False, prefix='BN'):
    """Split the cached embedding files into train/valid lists by PATIENT.

    Splitting is done at patient level (first 32 chars of each file's
    basename) so no patient contributes files to both sets; cancer and
    benign patients are split separately, preserving class balance.
    TRAIN_VAL_RAIT is the validation fraction. Shuffling uses the ambient
    RNG, so results differ per run unless it is seeded beforehand.

    savef  -- if True, pickle both file lists under PATH['WEIGHTS'].
    prefix -- substring selecting which cached arrays to use ('BN'/'DENCE').
    Returns (train_files, valid_files) as lists of paths.
    """
    cancer_files = glob(join(path, 'CANCER',
                             '*' + prefix + '*'))
    bening_files = glob(join(path, 'BENING',
                             '*' + prefix + '*'))
    patients_cancer = list(set([basename(patch)[:32] for patch in cancer_files]))
    random.shuffle(patients_cancer)
    patients_bening = list(set([basename(patch)[:32] for patch in bening_files]))
    random.shuffle(patients_bening)
    n_val_bening = int(len(patients_bening) * TRAIN_VAL_RAIT)
    n_val_cancer = int(len(patients_cancer) * TRAIN_VAL_RAIT)
    # sets give O(1) membership tests below (previously O(n) list scans
    # inside the comprehensions -- quadratic overall)
    train_patients = set(patients_bening[n_val_bening:]
                         + patients_cancer[n_val_cancer:])
    valid_patients = set(patients_bening[:n_val_bening]
                         + patients_cancer[:n_val_cancer])
    all_files = cancer_files + bening_files
    train = [f for f in all_files
             if basename(f)[:32] in train_patients]
    valid = [f for f in all_files
             if basename(f)[:32] in valid_patients]
    if savef:
        # `with` closes the handles (previously leaked open file objects)
        with open(join(PATH['WEIGHTS'], 'train_files_cancer_clf'), 'wb') as fh:
            pickle.dump(train, fh)
        with open(join(PATH['WEIGHTS'], 'valid_files_cancer_clf'), 'wb') as fh:
            pickle.dump(valid, fh)
    return train, valid
# +
# train, valid = train_test_split(savef=True)
# -
train = pickle.load(open(join(PATH['WEIGHTS'], 'train_files_cancer_clf'), 'rb'))
valid = pickle.load(open(join(PATH['WEIGHTS'], 'valid_files_cancer_clf'), 'rb'))
def batch_generator(patch_paths,
                    channels=9,
                    batch_size=32,
                    augment=False,
                    CPU=24):
    """Endless Keras batch generator over the cached embedding files.

    Yields (inputs, labels) forever: inputs is a list of `channels` arrays
    (one per network input branch, samples along axis 0) and labels are
    one-hot pairs -- [1, 0] if the file path contains 'CANCER', else [0, 1].

    patch_paths -- list of files loadable by `load`; each is assumed to hold
        FULL_CHANNELS_N feature rows for one patient (TODO confirm shape).
    channels -- how many of the FULL_CHANNELS_N rows to keep per patient.
    augment -- shuffle file order each epoch and randomize, per patient,
        which rows are kept and their order.
    CPU -- worker processes used to load each batch of files.
    """
    counter = 0
    if augment:
        # in-place shuffle: mutates the caller's list
        random.shuffle(patch_paths)
    number_of_batches = ceil(len(patch_paths)
                             / batch_size)
    while True:
        batch_files = patch_paths[batch_size * counter:
                                  batch_size * (counter + 1)]
        with Pool(CPU) as pool:
            patch_list = pool.map(load, batch_files)
        counter += 1
        labels = [[1, 0] if 'CANCER' in patch_path else [0, 1]
                  for patch_path in batch_files]
        # boolean row mask: keep `channels` of the FULL_CHANNELS_N rows
        mask = asarray([True] * channels + [False] * (FULL_CHANNELS_N - channels))
        for i, patient in enumerate(patch_list):
            if augment:
                # randomize WHICH rows are kept for this patient
                random.shuffle(mask)
            patch_list[i] = patient[mask]
            if augment:
                # ...and randomize their order as well
                random.shuffle(patch_list[i])
        # regroup from (sample, channel, ...) to (channel, sample, ...)
        patch_list = swapaxes(asarray(patch_list), 0, 1)
        patch_list = [nodule for nodule in patch_list]
        yield patch_list, asarray(labels)
        if counter == number_of_batches:
            # epoch finished: reshuffle (if augmenting) and start over
            if augment:
                random.shuffle(patch_paths)
            counter = 0
# Build the patient-level classifier over 7 of the 10 cached nodule channels.
model = cancer_clf(input_chanels=7, dropout_conv=.2, dropout_dence=.2)
model.compile('adam', 'categorical_crossentropy')
# model.load_weights(join(PATH['WEIGHTS'], '3D_cancer_cancer'))
train_generator = batch_generator(train, batch_size=64, channels=7, augment=True)
valid_generator = batch_generator(valid, batch_size=64, channels=7, augment=False)
# +
# Checkpoint keeps only the best weights seen during training.
checkpoint = ModelCheckpoint(filepath=join(PATH['WEIGHTS'], '3D_cancer_cancer_a'),
                             verbose=1,
                             save_best_only=True)
# NOTE(review): samples_per_epoch is derived from BATCH_SIZE (32) while the
# generators use batch_size=64 -- confirm the epoch size is intentional.
history = model.fit_generator(train_generator,
                              samples_per_epoch=(len(train) // BATCH_SIZE) * BATCH_SIZE ,
                              nb_epoch=100,
                              callbacks=[checkpoint],
                              validation_data=valid_generator,
                              nb_val_samples=len(valid),
                              nb_worker=1)
model.save_weights(join(PATH['WEIGHTS'], '3D_cancer_cancer_finala'))
# -
# NOTE(review): duplicate of the save_weights call above (same file name).
model.save_weights(join(PATH['WEIGHTS'], '3D_cancer_cancer_finala'))
| IPython/3D_Cancer.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# language: python
# name: python3
# ---
# #### Sieve
# * To form a sieve, you remove the first element, then strike out multiples of the element, and recursively invoke sieve on the remainder.
# * The first element, the one you earlier removed, is known to be prime. So you add that value to the result of the recursive call on sieve.
# * The expression a[0:1] returns a list containing the first element, as opposed to a[0], which would return the first element itself. Keeping the first element in list form allows the use of the + operator to append the two lists.
def sieve(a):
    """Return the primes in list ``a`` via a recursive Eratosthenes sieve.

    The head of the list is prime; its multiples are filtered out of the
    tail before recursing. An empty list yields an empty result.
    """
    if not a:
        return []
    head = a[0]
    survivors = [x for x in a[1:] if x % head != 0]
    return [head] + sieve(survivors)
sieve(list(range(2, 100)))
| handy_syntax/prime_sieve.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# language: python
# name: python3
# ---
# <h1 style="font-size:42px; text-align:center; margin-bottom:30px;"><span style="color:SteelBlue">Module 3:</span> Principal Component Analysis</h1>
# <hr>
#
# Welcome to <span style="color:royalblue">Module 3: Principal Component Analysis</span>!
#
# Principal Component Analysis, or PCA, is a popular dimensionality reduction technique.
#
# PCA seeks to create new features by finding linear combinations of your original ones. These new features, called **principal components**, are meant to maximize the **"explained variance,"** which we'll explain further in the module.
#
# * In this module, we'll prepare individual item features for our clustering algorithms, except this time we'll use PCA instead of thresholding.
# * PCA is especially effective when you have many correlated features.
# * Important: PCA creates new features that replace the original ones.
#
#
# <br><hr id="toc">
#
# ### In this module...
#
# In this module, we'll cover:
# 1. [Toy example: oval blob](#oval-blob)
# 2. [Principal components](#components)
# 3. [Dimensionality reduction](#reduction)
# 4. [Item data PCA](#item-data)
# 5. [Explained variance](#explained-variance)
#
# <br><hr>
# ### First, let's import libraries and load the item data.
#
# First, import libraries that you'll need.
# +
# print_function for compatibility with Python 3
from __future__ import print_function
print('Print function ready!')
# NumPy for numerical computing
import numpy as np
# Pandas for DataFrames
import pandas as pd
pd.set_option('display.max_columns', 100)
# Matplotlib for visualization
from matplotlib import pyplot as plt
# display plots in the notebook
# %matplotlib inline
# Seaborn for easier visualization
import seaborn as sns
# StandardScaler from Scikit-Learn
from sklearn.preprocessing import StandardScaler
# PCA from Scikit-Learn (added later)
from sklearn.decomposition import PCA
# -
# Next, let's import the full item data that we saved in the previous module (before applying thresholds).
# * Remember, we saved it as <code style="color:crimson">'item_data.csv'</code>.
# * This time, we'll also pass in the argument <code style="color:steelblue">index_col=0</code> to tell Pandas to treat the first column (CustomerID) as the index.
# Read item_data.csv
# index_col=0 -> treat the first column (CustomerID) as the index.
df = pd.read_csv('item_data.csv', index_col=0)
# Just to confirm, this dataset should have 2574 features:
# Display item_data's shape as (rows, columns)
df.shape
# <span id="oval-blob"></span>
# # 1. Toy example: oval blob
#
# PCA is an elegant technique that's very practical, but it can be a bit hard to try it on our larger dataset right away.
#
# Instead, let's first use one last toy example to break down the intuition behind PCA.
#
# <br>
# **First, we'll create an "oval blob" synthetic dataset.**
# * Instead of both input features and a target variable, we'll only have 2 input features.
# * $x_1$ will be random samples from a normal distribution.
# * $x_2$ will be random samples from a normal distribution **plus $x_1$**.
# +
# Set random seed so reruns of the demo produce identical samples
np.random.seed(101)
# Create first feature: x1
x1 = np.random.normal(0, 1, 100)
# Create second feature: x2 = x1 plus noise, so the two features correlate
x2 = x1 + np.random.normal(0, 1, 100)
# -
# **Let's stack them together to form <code style="color:steelblue">X</code>, our input feature matrix.**
# * We'll skip putting them in a Pandas DataFrame.
# * Instead, we'll just stack them together as two separate columns in a matrix.
# +
# Stack together as columns -> X has shape (100, 2)
X = np.stack([x1, x2], axis=1)
# Print shape of X
print( X.shape)
# -
# **Create a scaled version of your feature matrix and name it <code style="color:steelblue">X_scaled</code>.**
# * You'll want to use <code style="color:steelblue">StandardScaler</code>.
# * **Tip:** To fit and apply a transformation in one line of code, you can use the <code style="color:steelblue">.fit_transform()</code> function.
# * Then, display the first 5 rows.
# * You can't use <code style="color:steelblue">.head()</code> because it's not a dataframe.
# * You must **index/slice the array** instead.
# +
# Initialize instance of StandardScaler
scaler = StandardScaler()
# Fit and transform X in one step: each column gets zero mean, unit variance
X_scaled = scaler.fit_transform(X)
# Display first 5 rows of X_scaled (array slice -- not a DataFrame)
X_scaled[0:5]
# -
# **Plot a scatterplot of the scaled $x_1$ against the scaled $x_2$.**
# * Label your axes as <code style="color:crimson">x1 (scaled)</code> and <code style="color:crimson">x2 (scaled)</code>.
# * **Tip:** Call <code style="color:steelblue">plt.axis('equal')</code> after your scatterplot to put your plot's axes on the same scale.
# * We have a recommended plot below. See the Answer Key for how to create it.
# +
# Plot scatterplot of scaled x1 against scaled x2
plt.scatter(X_scaled[:,0], X_scaled[:,1] )
# Put plot axes on the same scale (keeps the oval's orientation undistorted)
plt.axis('equal')
# Label axes
plt.xlabel('x1 (scaled)')
plt.ylabel('x2 (scaled)')
# Clear text residue
plt.show()
# -
# <p style="text-align:center; margin: 40px 0 40px 0; font-weight:bold">
# <a href="#toc">Back to Contents</a>
# </p>
# <span id="components"></span>
# # 2. Principal components
#
# As it turns out, this type of distribution is very helpful for visualizing the mechanics behind PCA.
#
# <br>
# **First, add the following import line to your library imports above. Then, re-run that code cell.**
#
# <pre style="color:steelblue">
# from sklearn.decomposition import PCA
# </pre>
#
# <br>
# **Next, we can initialize an instance of the PCA transformation.**
# * This is just as we did for the StandardScaler transformation.
# * Then, we can fit the instance with just one line of code.
# +
# Initialize instance of PCA transformation (keeps all components by default)
pca = PCA()
# Fit the instance
pca.fit(X_scaled)
# -
# **Let's display the principal components.**
# Display principal components (one row per component, one column per feature)
pca.components_
# **Next, let's overlay the principal components on our oval blob.**
# * We'll make the oval blob semi-transparent so we can see the principal components more easily.
# * For clarity, we'll plot the first principal component in **black**, and we'll make it twice as long.
# * We'll plot the second principal component in <strong style="color:red">red</strong>.
# +
# Plot scaled dataset and make it partially transparent
plt.scatter(X_scaled[:,0], X_scaled[:,1], alpha=0.3)
# Plot first principal component in black (doubled length for visibility)
plt.plot([0, 2*pca.components_[0,0]], [0, 2*pca.components_[0,1]], 'k')
# Plot second principal component in red
plt.plot([0, pca.components_[1,0]], [0, pca.components_[1,1]], 'r')
# Set axes
plt.axis('equal')
plt.xlabel('x1 (scaled)')
plt.ylabel('x2 (scaled)')
# Clear text residue
plt.show()
# -
# **Finally, we can transform our original features using the fitted PCA instance.**
# * Remember, PCA generates new features that **replace** the original ones.
# * We'll name these new features <code style="color:crimson">PC</code> (short for "principal component") to indicate that they are generated from PCA.
# Generate new features
PC = pca.transform(X_scaled)
# +
# Plot transformed dataset (in the rotated PC coordinate system)
plt.scatter(PC[:,0], PC[:,1], alpha=0.3, color='g')
# Plot first principal component in black (now aligned with the x-axis)
plt.plot([0,2], [0,0], 'k')
# Plot second principal component in red (now aligned with the y-axis)
plt.plot([0,0], [0,1], 'r')
# Set axes
plt.axis('equal')
plt.xlabel('PC1')
plt.ylabel('PC2')
# Clear text residue
plt.show()
# -
# <p style="text-align:center; margin: 40px 0 40px 0; font-weight:bold">
# <a href="#toc">Back to Contents</a>
# </p>
# <span id="reduction"></span>
# # 3. Dimensionality reduction
#
# Here's where dimensionality reduction comes into play. You can just keep some number of the "best" components, a.k.a. the ones that explain the most variance.
#
# <br>
# **First, display the "explained variance" ratio of each component.**
# Display explained variance ratio (fraction of total variance per component)
pca.explained_variance_ratio_
# **To keep only 1 component, we can simply initialize and fit another PCA transformation.**
# * This time, we'll simply pass the argument <code style="color:steelblue">n_components=1</code>.
# Initialize and fit a PCA transformation, only keeping 1 component
pca = PCA(n_components=1)
pca.fit(X_scaled)
# Display principal components
pca.components_
# Generate new features -- a single column now
PC = pca.transform(X_scaled)
print(PC.shape)
# +
# Plot transformed dataset (1-D, so plot it along y=0)
plt.scatter(PC[:,0], len(PC)*[0], alpha=0.3, color='g')
# Plot first principal component in black
plt.plot([0, 2], [0,0], 'k')
# Set axes
plt.axis('equal')
plt.xlabel('PC1')
# Clear text residue
plt.show()
# -
# <p style="text-align:center; margin: 40px 0 40px 0; font-weight:bold">
# <a href="#toc">Back to Contents</a>
# </p>
# <span id="item-data"></span>
# # 4. Item data PCA
#
# Now let's get back to our project.
#
# <br>
# **First, scale <code style="color:crimson">item_data</code>, which you imported at the beginning of this module.**
# * Name the scaled dataset <code style="color:crimson">item_data_scaled</code>.
# * Then, display the first 5 rows.
# * You can't use <code style="color:steelblue">.head()</code> because it's not a dataframe.
# * You must **index/slice the array** instead.
# +
# Initialize instance of StandardScaler
scaler = StandardScaler()
# Fit and transform item_data (each of the 2574 columns standardized)
item_data_scaled = scaler.fit_transform(df)
# Display first 5 rows of item_data_scaled
item_data_scaled[0:5]
# -
# **Next, initialize and fit an instance of the PCA transformation.**
# * Keep all of the components for now (just don't pass in any argument).
# Initialize and fit a PCA transformation
pca = PCA()
pca.fit(item_data_scaled)
# **Finally, generate new "principal component features" from <code style="color:crimson">item_data_scaled</code>.**
# * Name the new array <code style="color:crimson">PC_items</code>.
# * Then, display the first 5 rows.
# +
# Generate new features (same number of rows, columns replaced by PCs)
PC_items = pca.transform(item_data_scaled)
# Display first 5 rows
PC_items[0:5]
# -
# <p style="text-align:center; margin: 40px 0 40px 0; font-weight:bold">
# <a href="#toc">Back to Contents</a>
# </p>
# <span id="explained-variance"></span>
# # 5. Explained variance
#
# Earlier, we already saw how to display the **explained variance ratio** for each of the principal components.
#
# <br>
# **It's very helpful to calculate and plot the <u>cumulative</u> explained variance.**
# * This will tell us the total amount of variance we'd capture if we kept up to the n-th component.
# * First, we'll use <code style="color:steelblue">np.cumsum()</code> to calculate the cumulative explained variance.
# * Then, we'll plot it so we can see how many $PC$ features we'd need to keep in order to capture most of the original variance.
# +
# Cumulative explained variance: entry i = variance captured by the first
# i+1 components together
cumulative_explained_variance = np.cumsum(pca.explained_variance_ratio_)
# Plot cumulative explained variance
plt.plot(range(len(cumulative_explained_variance)), cumulative_explained_variance)
plt.show()
# -
# How much variance we'd capture with the first 125 components
# NOTE(review): index 125 is the cumulative sum through the 126th component
# (0-based indexing) -- confirm the intended count.
cumulative_explained_variance[125]
# **Initialize and fit another PCA transformation.**
# * This time, only keep 125 components.
# * Generate the principal component features from the fitted instance and name the new matrix <code style="color:crimson">PC_items</code>.
# * Then, display the shape of <code style="color:crimson">PC_items</code> to confirm it only has 125 features.
# +
# Initialize PCA transformation, only keeping 125 components
pca = PCA(n_components=125)
# Fit and transform item_data_scaled in one step
PC_items = pca.fit_transform(item_data_scaled)
# Display shape of PC_items
PC_items.shape
# -
# **Next, for convenience, let's put <code style="color:crimson">PC_items</code> into a new dataframe.**
# * We'll also name the columns and update its index to be the same as the original <code style="color:crimson">item_data</code>'s index.
# +
# Put PC_items into a dataframe
items_pca = pd.DataFrame(PC_items)
# Name the columns PC1..PC125
items_pca.columns = ['PC{}'.format(i + 1) for i in range(PC_items.shape[1])]
# Update its index (restore the CustomerID index from the original data)
items_pca.index = df.index
# Display first 5 rows
items_pca.head()
# -
# **Finally, save this item dataframe with PCA features as <code style="color:crimson">'pca_item_data.csv'</code>.**
# * In <span style="color:royalblue">Module 4</span>, we'll compare the clusters made from using these features against those in <code style="color:crimson">'threshold_item_data.csv'</code>.
# * Do **not** set <code style="color:steelblue">index=None</code> because we want to keep the CustomerID's as the index.
# Save pca_item_data.csv
items_pca.to_csv('pca_item_data.csv')
# <p style="text-align:center; margin: 40px 0 40px 0; font-weight:bold">
# <a href="#toc">Back to Contents</a>
# </p>
# <br>
# ## Next Steps
#
# Congratulations for making it through Project 4's Principal Component Analysis!
#
# As a reminder, here are a few things you did in this module:
# * You saw how PCA works on an oval blob.
# * You contrasted plots of the original features against plots of the principal component features.
# * You learned how to interpret cumulative explained variance.
# * And you reduced the dimensionality of the item dataset using PCA.
#
# In the next module, <span style="color:royalblue">Module 4: Cluster Analysis</span>, we'll see how different feature sets lead to different clusters. We'll apply K-Means clustering to the analytical base table, the threshold item features, and the PCA item features.
#
# <p style="text-align:center; margin: 40px 0 40px 0; font-weight:bold">
# <a href="#toc">Back to Contents</a>
# </p>
| Module 3 - Principal Component Analysis.ipynb |