code stringlengths 38 801k | repo_path stringlengths 6 263 |
|---|---|
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# language: python
# name: python3
# ---
# +
# nbi:hide_in
from matplotlib import pyplot as plt
import numpy as np
from matplotlib.collections import LineCollection
import viewscad
from solid import *
# Jupyter Specifics
import matplotlib as mpl
from IPython.display import HTML
from ipywidgets.widgets import interact, Output, FloatSlider, FloatText, Button, VBox
# Module-level placeholders for the nose-cone dimensions.
# NOTE(review): these appear unused in the visible code — possibly leftovers
# from an earlier revision; confirm before removing.
radO = 0.0
radI = 0.0
lenN = 0.0
lenS = 0.0
# -
# # Elliptical
#
# This shape is popular in subsonic flight (such as model rocketry) due to its blunt nose and tangent base, and is generally considered superior for model rocketry altitude optimisation use. This is not a shape normally found in professional rocketry, which almost always flies at much higher velocities where other designs are more suitable.
#
# The profile is defined as $y=R{\sqrt {1-{x^{2} \over L^{2}}}}$
# If $R = L$, this is a hemisphere.
#
# | Description | Variable | Formula |
# | -- | -- | -- |
# | Outside radius | oRadius | $R$ |
# | Inside radius | iRadius | n/a |
# | Nosecone length | nLength | $L$ |
# | Shoulder Length | sLength | n/a |
# +
# nbi:hide_in
# OpenSCAD renderer and a placeholder solid; `p` is rebuilt by
# plot_elliptical() every time the widgets change.
r = viewscad.Renderer()
p = cylinder(r=5, h=2)
def plot_elliptical(oRadius, iRadius, nLength, sLength, faces):
    """Plot the elliptical nose-cone profile and rebuild the OpenSCAD solid.

    Profile: y = R * sqrt(1 - x^2 / L^2); if R == L this is a hemisphere.

    Parameters
    ----------
    oRadius : float
        Outside radius R of the nose cone.
    iRadius : float
        Inside (shoulder) radius.
    nLength : float
        Nose-cone length L.
    sLength : float
        Shoulder length; 0 disables the shoulder cylinder.
    faces : float
        Segment count for the rotational extrusion.

    Side effect: rebuilds the module-level solid ``p`` so the render button
    always exports the most recent geometry.
    """
    fig, axe = plt.subplots(figsize=(6, 2))
    axe.set_aspect("equal")
    # Sample the profile at roughly 5 points per unit length.
    # (The original recomputed this linspace twice and ran a no-op
    # `[line.remove() for line in axe.lines]` on the freshly created axes;
    # both removed here.)
    x = np.linspace(0, int(nLength), int(nLength * 5))
    f = lambda x: oRadius * np.sqrt(1 - (x**2 / nLength**2))
    y = f(x)
    axe.plot(x, y, color='C0')
    # Base line, shoulder rectangle and the step up to the outside radius.
    axe.plot([nLength, -sLength, -sLength, 0, 0],
             [0, 0, iRadius, iRadius, oRadius], color='C0')
    # Close the polygon (axis -> profile -> axis) and revolve it about z.
    zero = np.array([0])
    xplt = np.concatenate((zero, x, zero))
    yplt = np.concatenate((zero, y, zero))
    global p
    p = rotate_extrude(360, segments=faces)(polygon(np.vstack((yplt, xplt)).T))
    if sLength != 0:
        # Shoulder cylinder sits below the cone base (negative z).
        p += translate([0, 0, -sLength])(cylinder(r=iRadius, h=sLength, segments=faces))
# Interactive controls for the elliptical nose cone; defaults are presumably
# in millimetres for a typical model-rocket geometry — TODO confirm units.
interact(plot_elliptical,
         oRadius=FloatText(value=24.79),
         iRadius=FloatText(value=24.13),
         nLength=FloatText(value=90),
         sLength=FloatText(value=10),
         faces=FloatSlider(min=5, max=100, step=1, value=10)
         );

def render_stl(p):
    # Export the given solid to an STL file via the OpenSCAD renderer.
    r.render(p, outfile='elliptical.stl')

button = Button(description='Render STL')
out = Output()

def on_button_clicked(_):
    # what happens when we press the button
    with out:
        out.clear_output()
        button.description = "Rendering ..."  # feedback while the render runs
        render_stl(p)  # exports the module-level solid built by plot_elliptical
        button.description = "Render STL"

# linking button and function together using a button's method
button.on_click(on_button_clicked)
VBox([button, out])
# -
# # Parabolic
#
# This nose shape is not the blunt shape that is envisioned when people commonly refer to a "parabolic" nose cone. The parabolic series nose shape is generated by rotating a segment of a parabola around a line parallel to its latus rectum. This construction is similar to that of the tangent ogive, except that a parabola is the defining shape rather than a circle. Just as it does on an ogive, this construction produces a nose shape with a sharp tip. For the blunt shape typically associated with a parabolic nose, see power series below. (The parabolic shape is also often confused with the elliptical shape.)
#
# For $0 \leq K^\prime \leq 1 : y=R \Biggl({2({x \over L})-K^\prime({x \over L})^{2} \over 2-K^\prime}\Biggr)$
#
# $K^\prime$ can vary anywhere between $0$ and $1$, but the most common values used for nose cone shapes are:
#
# | Parabola Type | $K^\prime$ Value |
# | --- | --- |
# | Cone | $0$ |
# | Half | $\frac {1}{2}$ |
# | Three Quarter| $3 \over 4$ |
# | Full | $1$ |
#
# For the case of the full parabola $(K^\prime = 1)$ the shape is tangent to the body at its base, and the base is on the axis of the parabola. Values of $K^\prime \lt 1$ result in a slimmer shape, whose appearance is similar to that of the secant ogive. The shape is no longer tangent at the base, and the base is parallel to, but offset from, the axis of the parabola.
# +
# nbi:hide_in
def plot_parabolic(oRadius, iRadius, nLength, sLength, K):
    """Plot the parabolic-series nose-cone profile.

    Profile: y = R * ((2*(x/L) - K*(x/L)^2) / (2 - K)), K in [0, 1]
    (K=0 is a cone, K=1 the full tangent parabola).

    Parameters
    ----------
    oRadius : float
        Outside radius R.
    iRadius : float
        Inside (shoulder) radius.
    nLength : float
        Nose-cone length L.
    sLength : float
        Shoulder length.
    K : float
        Parabola parameter K'.
    """
    fig, axp = plt.subplots(figsize=(6, 2))
    axp.set_aspect("equal")
    x = np.linspace(0, int(nLength))
    # Sharp tip at x=0, radius R reached at x=L. (The original ran a no-op
    # `[line.remove() for line in axp.lines]` on the fresh axes; removed.)
    axp.plot(x, oRadius * (((2 * (x / nLength)) - (K * (x / nLength) ** 2)) / (2 - K)), color='C0')
    # Shoulder rectangle drawn past the cone base.
    axp.plot([0, sLength + nLength, sLength + nLength, nLength, nLength],
             [0, 0, iRadius, iRadius, oRadius], color='C0')
# Interactive controls; K defaults to the "three quarter" parabola.
interact(plot_parabolic,
         oRadius=FloatText(value=24.79),
         iRadius=FloatText(value=24.13),
         nLength=FloatText(value=90),
         sLength=FloatText(value=10),
         K=FloatSlider(min=0, max=1, step=0.05, value=.75)
         );
# -
# # Haack series
#
# Unlike all of the nose cone shapes above, the Haack Series shapes are not constructed from geometric figures. The shapes are instead mathematically derived for the purpose of minimizing drag. While the series is a continuous set of shapes determined by the value of $C$ in the equations below, two values of $C$ have particular significance: when $C = 0$, the notation $LD$ signifies minimum drag for the given length and diameter, and when $C = {1 \over 3}$, $LV$ indicates minimum drag for a given length and volume. The Haack series nose cones are not perfectly tangent to the body at their base except for the case where $C = {2 \over 3}$. However, the discontinuity is usually so slight as to be imperceptible. For $C > {2 \over 3}$, Haack nose cones bulge to a maximum diameter greater than the base diameter. Haack nose tips do not come to a sharp point, but are slightly rounded.
#
# $\theta = \arccos \Bigl(1 - {2X \over L}\Bigr)$
#
# $y = {R \over \sqrt{\pi}} \sqrt{\theta-{\sin({2\theta})\over2}+C \sin^3({\theta})}$
#
# Where:
#
# $C = {1 \over 3}$ for LV-Haack
#
# $C = 0$ for LD-Haack
# +
# nbi:hide_in
def plot_haack(oRadius, iRadius, nLength, sLength, C):
    """Plot the Haack-series nose-cone profile.

    theta = arccos(1 - 2x/L)
    y = (R/sqrt(pi)) * sqrt(theta - sin(2*theta)/2 + C*sin(theta)^3)

    C = 0 gives LD-Haack (minimum drag for length/diameter);
    C = 1/3 gives LV-Haack (minimum drag for length/volume).

    Parameters
    ----------
    oRadius : float
        Outside radius R.
    iRadius : float
        Inside (shoulder) radius.
    nLength : float
        Nose-cone length L.
    sLength : float
        Shoulder length.
    C : float
        Haack series constant.
    """
    fig, axh = plt.subplots(figsize=(6, 2))
    axh.set_aspect("equal")
    x = np.linspace(0, int(nLength))
    # Hoist the shared arccos() term: the original evaluated it three times
    # inside one expression (and ran a no-op line-removal comprehension).
    theta = np.arccos(1 - (2 * x) / nLength)
    axh.plot(x, (oRadius / np.sqrt(np.pi)) * np.sqrt(theta - np.sin(2 * theta) / 2 + C * np.sin(theta) ** 3), color='C0')
    # Shoulder rectangle drawn past the cone base.
    axh.plot([0, sLength + nLength, sLength + nLength, nLength, nLength],
             [0, 0, iRadius, iRadius, oRadius], color='C0')
# Interactive controls; C defaults to ~1/3 (LV-Haack).
interact(plot_haack,
         oRadius=FloatText(value=24.79),
         iRadius=FloatText(value=24.13),
         nLength=FloatText(value=90),
         sLength=FloatText(value=10),
         C=FloatSlider(min=0, max=1, step=0.01, value=.33)
         );
# -
| elliptical.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# language: python
# name: python3
# ---
# %matplotlib inline
# %config InlineBackend.figure_format = 'retina'
import matplotlib.pyplot as plt
import numpy as np
import torch
from torch import nn
from torch import optim
import torch.nn.functional as F
import torchvision
from torchvision import datasets, transforms, models
print("PyTorch Version: ",torch.__version__)
print("Torchvision Version: ",torchvision.__version__)
from PIL import Image
path = "../BelgiumTS/Training/00056/00303_00000.ppm"
path = "../BelgiumTS/Training/00056/00297_00002.ppm"
path = "../BelgiumTS/Training/00056/01200_00000.ppm"
display(Image.open(path))
data_dir = '/home/jzornig/devel/BelgiumTS/Training/'
def load_split_train_test(datadir, valid_size=.2):
    """Load an ImageFolder dataset and split it into train/test loaders.

    Parameters
    ----------
    datadir : str
        Root directory in torchvision ImageFolder layout (one subdirectory
        per class).
    valid_size : float
        Fraction of samples held out for the test loader.

    Returns
    -------
    tuple of DataLoader
        (trainloader, testloader). Both wrap the same image tree; disjoint
        index samplers keep the two subsets separate.
    """
    # Hoisted to the top of the function (the original imported this
    # mid-body, between the shuffle and the split).
    from torch.utils.data.sampler import SubsetRandomSampler

    # Identical transforms for train and test: resize to 57x57, no augmentation.
    train_transforms = transforms.Compose([transforms.Resize((57, 57)),
                                           transforms.ToTensor(),
                                           ])
    test_transforms = transforms.Compose([transforms.Resize((57, 57)),
                                          transforms.ToTensor(),
                                          ])
    train_data = datasets.ImageFolder(datadir,
                                      transform=train_transforms)
    test_data = datasets.ImageFolder(datadir,
                                     transform=test_transforms)

    # Shuffle indices and carve off the first `valid_size` fraction for test.
    # NOTE(review): np.random.shuffle is unseeded, so the split differs per run.
    num_train = len(train_data)
    indices = list(range(num_train))
    split = int(np.floor(valid_size * num_train))
    np.random.shuffle(indices)
    train_idx, test_idx = indices[split:], indices[:split]
    train_sampler = SubsetRandomSampler(train_idx)
    test_sampler = SubsetRandomSampler(test_idx)

    trainloader = torch.utils.data.DataLoader(train_data,
                                              sampler=train_sampler, batch_size=64)
    testloader = torch.utils.data.DataLoader(test_data,
                                             sampler=test_sampler, batch_size=64)
    return trainloader, testloader
# Build the loaders and report the discovered class labels.
trainloader, testloader = load_split_train_test(data_dir, .2)
print(trainloader.dataset.classes)
print(len(trainloader.dataset.classes))
# +
# Walk the dataset tree and print every file path (sanity check of the data).
import os
rootdir = '/home/jzornig/devel/BelgiumTS/Training/'
for subdir, dirs, files in os.walk(rootdir):
    for file in files:
        print(os.path.join(subdir, file))
# -
# Prefer the GPU when available.
device = torch.device("cuda" if torch.cuda.is_available()
                      else "cpu")
#model = models.vgg19(pretrained=True)
model = models.resnet50(pretrained=True)  # ImageNet-pretrained backbone
print(model)
# +
# Freeze the backbone so only the new classifier head is trained.
for param in model.parameters():
    #param.requires_grad = True # VGG19
    param.requires_grad = False # ResNet50
# Replace the final fully-connected layer of the ResNet.
# NOTE(review): the head outputs 80 classes while the commented-out line says
# 62 — confirm this matches len(trainloader.dataset.classes).
model.fc = nn.Sequential(nn.Linear(2048, 512),
                         nn.ReLU(),
                         nn.Dropout(0.2),
                         #nn.Linear(512, 62),
                         nn.Linear(512, 80),
                         nn.LogSoftmax(dim=1))
criterion = nn.NLLLoss()  # pairs with the LogSoftmax output above
optimizer = optim.Adam(model.fc.parameters(), lr=0.003)  # head params only
model.to(device)
# -
# Training hyper-parameters and accumulators.
epochs = 1
steps = 0
running_loss = 0
print_every = 10
train_losses, test_losses = [], []
# Training loop with a periodic evaluation pass every `print_every` steps.
for epoch in range(epochs):
    for inputs, labels in trainloader:
        steps += 1
        inputs, labels = inputs.to(device), labels.to(device)
        optimizer.zero_grad()
        logps = model.forward(inputs)
        loss = criterion(logps, labels)
        loss.backward()
        optimizer.step()
        running_loss += loss.item()
        if steps % print_every == 0:
            # Evaluation over the full held-out loader.
            test_loss = 0
            accuracy = 0
            model.eval()
            with torch.no_grad():
                for inputs, labels in testloader:
                    inputs, labels = inputs.to(device), labels.to(device)
                    logps = model.forward(inputs)
                    batch_loss = criterion(logps, labels)
                    test_loss += batch_loss.item()
                    # Top-1 accuracy from the log-probabilities.
                    ps = torch.exp(logps)
                    top_p, top_class = ps.topk(1, dim=1)
                    equals = top_class == labels.view(*top_class.shape)
                    accuracy += torch.mean(equals.type(torch.FloatTensor)).item()
            # NOTE(review): the curve appends running_loss/len(trainloader)
            # while the print below uses running_loss/print_every — the two
            # normalisations disagree; confirm which is intended.
            train_losses.append(running_loss/len(trainloader))
            test_losses.append(test_loss/len(testloader))
            print(f"Epoch {epoch+1}/{epochs}.. "
                  f"Train loss: {running_loss/print_every:.3f}.. "
                  f"Test loss: {test_loss/len(testloader):.3f}.. "
                  f"Test accuracy: {accuracy/len(testloader):.3f}")
            running_loss = 0
            model.train()  # back to training mode after the eval pass
# Persist the full model object, then plot the recorded loss curves.
torch.save(model, 'HelloPyTorch.pth')
plt.plot(train_losses, label='Training loss')
plt.plot(test_losses, label='Validation loss')
plt.legend(frameon=False)
plt.show()
## save
# State-dict checkpoint (the more portable save format).
torch.save(model.state_dict(), "./HelloPyTorch_resnet50_weights.pt")
| 01_HelloPyTorch-Copy1.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3.9.7 ('contagionMap')
# language: python
# name: python3
# ---
# ## Tutorial for the simplest use case of the truncated contagion map
# +
# %load_ext autoreload
# %autoreload 2
import cmap as conmap
import networkx as nx
import numpy as np
import matplotlib.pyplot as plt
import seaborn as sns;
sns.set_theme()
import pandas as pd
# For pretty colourmaps
import palettable
from matplotlib.colors import ListedColormap
sns.set_style("white")
from sklearn.decomposition import PCA
# -
# ## Constructing a noisy ring lattice
# Build a noisy ring lattice: 400 nodes, geometric degree 6 plus 2
# non-geometric (noise) edges per node.
noisyRL = conmap.constructNoisyRingLattice(numberNodes=400, geometricDegree=6, nongeometricDegree=2)
# ## Computing a truncated contagion map
# choose parameters
t = 0.3  # threshold for Watts' model
nStep = 30  # number of steps to include
truncatedContagionMap = conmap.runTruncatedContagionMap(noisyRL, threshold=t, numberSteps=nStep, symmetric=True)
# compute Ring stability of the contagion map with ripser
ringStabilityTruncated = conmap.callRipser(truncatedContagionMap)
ringStabilityTruncated
# ## Compare this with a full contagion map
# Use np.inf (lowercase): the capitalised alias np.Inf was removed in NumPy 2.0.
fullContagionMap = conmap.runTruncatedContagionMap(noisyRL, threshold=t, numberSteps=np.inf, symmetric=True)
# compute Ring stability of the contagion map with ripser
ringStabilityFull = conmap.callRipser(fullContagionMap)
ringStabilityFull
# We observe that the truncated contagion map recovers the ring manifold better than the full contagion map because the ring stability is larger for the truncated contagion map than for the full contagion map.
| python/tutorial.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# language: python
# name: python3
# ---
# ## Initialization
# + pycharm={"name": "#%%\n"}
# Initialise the SDK with the dataset root directory.
import st2w_lib
stw2 = st2w_lib.st2w(dataroot='../data/')
# -
# Print the labels from the three vehicles
# + pycharm={"name": "#%%\n", "is_executing": true}
print(stw2.getLabel('bike'))
print(stw2.getLabel('motorbike'))
print(stw2.getLabel('scooter'))
# + [markdown] pycharm={"name": "#%% md\n"}
# Print only the labels from the three vehicles for an explicit step
# + pycharm={"name": "#%%\n"}
step = 10 # with 2 Hz step = 10 is equivalent to 5 seconds
print(stw2.getLabel('bike', step))
print(stw2.getLabel('motorbike', step))
print(stw2.getLabel('scooter', step))
# -
# ## Visualize the dataset
# plot the trajectories with the velocity colored
# + pycharm={"name": "#%%\n"}
stw2.Visualizer.plot_trajectory_velocity(stw2.getLabel('bike'), 'Cyclist', 'bike_velocity')
# -
# plot the trajectories with the roll angle colored
# + pycharm={"name": "#%%\n"}
stw2.Visualizer.plot_trajectory_rollangle(stw2.getLabel('bike'), 'Cyclist', 'bike_rollangle')
# + [markdown] pycharm={"name": "#%% md\n"}
# Visualize the lidar-data with the labeled bounding-box
# + pycharm={"name": "#%%\n", "is_executing": true}
stw2.Visualizer.view_pointcloud(stw2.pcds['bike'][step], stw2.bboxes['bike'][step])
| python-sdk/tutorial.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# language: python
# name: python3
# ---
# # Retinal Warping
# +
import matplotlib
# %matplotlib inline
import matplotlib.pyplot as plt
import numpy as np
from scipy import optimize
npa = np.array
import sys
sys.path.insert(0, '..')
# -
import os
print(os.getcwd())
# NOTE(review): the original cell called os.chdir() with no argument, which
# always raises TypeError (a path argument is required). The call is disabled
# until the intended working directory is known.
# os.chdir()
# The delta_lookup function simulates cone density as a function of eccentricity.
#
# > delta_lookup: $r \rightarrow r'$
#
# > $d[i] = 1 / \sqrt{\pi \cdot \rho_{fovea}} \cdot exp(a/2 * r[i]) $
#
# > $r'[i] = r[i-1] + d[i-1]$
#
# where
#
# $r$: cone position along radial axis in input space (same as eccentricity)
#
# $r^\prime$: cone position along radial axis in output space (can be considered as the index of cones)
#
# $d$: distance between cones
#
# $\rho_{fovea}$: cones density at fovea
#
# $a: \frac{log(\text{max ratio})}{r_{max}}$
#
# max_ratio: (cone density @ fovea) / (cone density @ periphery). This ratio equal 10 for 4 degrees of visual field in primates.
#
# c.f. Watson, A. B. (2014). A formula for human retinal ganglion cell receptive field density as a function of visual field location. Journal of Vision, 14(7):15, 1–17. https://doi.org/10.1167/14.7.15
#
# ## Determine the cone density at fovea
# The value of rho_fovea needs to be tuned such that the computed r covers the entire input space.
# This number depends on both input and output sizes. The retinawarp function warps the image data within the image of size "input_size" into an image of size "output_size".
#
# `get_rf_value` function could be called to find the optimal $\rho_{fovea}$ value.
# +
from retina.common import get_rf_value
# Use Newton's method to find the solution instead of the manual approach.
# The last r should equal half the in_size.
in_size = 440
out_size = 220
optimal_rf = get_rf_value(in_size, out_size)
print('Optimal RF for input size [{0}x{0}] and output size [{1}x{1}]: {2:.2f}'.format(in_size, out_size, optimal_rf))
# -
# +
from retina.common import delta_lookup, fit_func
# delta_lookup returns sampled (r', r) pairs — presumably output- vs
# input-space radii per the markdown above; confirm against retina.common.
rprime, r = delta_lookup(in_size, out_size, max_ratio=10.)
# find_retina_mapping(fit_mode='quad')
# Fit a quadratic through the origin to the sampled mapping.
func = lambda x, a, b: a * x ** 2 + b * x
retina_func = func
popt, pcov = fit_func(func, rprime, r)
retina_pars = popt
# simulated version: regular polar grid in output space, warped to input space
r_s = np.arange(out_size/2+1, step=16)
r_simulated = np.tile(r_s, (20, 1)).T.flatten()
theta = np.tile(np.linspace(-np.pi, np.pi, 20), (1, len(r_s)))
r_simulated = retina_func(r_simulated, *retina_pars)
x_simulated = in_size/2. + r_simulated*np.cos(theta)
y_simulated = in_size/2. + r_simulated*np.sin(theta)
# real sampling version: warp every 16th output pixel back to input space
#xy = warp_func(xy_out, in_size, retina_func, retina_pars, shift=None)
xy_out = np.indices((out_size, out_size))[:, ::16, ::16][:, 1:, 1:].reshape(2, -1)
xy_out = xy_out - out_size/2.  # centre the output coordinates
r_out = np.linalg.norm(xy_out, axis=0)
theta = np.arctan2(xy_out[1], xy_out[0])
r = retina_func(r_out, *retina_pars)
x = in_size/2. + r*np.cos(theta)
y = in_size/2. + r*np.sin(theta)
# Plot both sampling patterns side by side inside the input-image frame.
fig, ax = plt.subplots(1, 2, figsize=(10, 5))
ax[0].add_patch(matplotlib.patches.Rectangle((0, 0), in_size, in_size, fill=False))
ax[0].scatter(x_simulated, y_simulated, color='r')
ax[0].axvline(in_size/2., ls='--', c='b')
ax[0].axhline(in_size/2., ls='--', c='b')
ax[0].axis('equal')
ax[0].set_title('simulated cones \n(for visualization)')
ax[1].add_patch(matplotlib.patches.Rectangle((0, 0), in_size, in_size, fill=False))
ax[1].scatter(x, y, color='r')
ax[1].axvline(in_size/2., ls='--', c='b')
ax[1].axhline(in_size/2., ls='--', c='b')
ax[1].axis('equal')
ax[1].set_title('simulated sampling')
plt.show()
# -
# # Retina Transform With Numpy
# +
from retina.retina import warp_image
import imageio
from skimage.transform import resize
import matplotlib.pyplot as plt
# %matplotlib inline
RESIZE_SCALE = 0.75
img_orig = imageio.imread('../data/homer.jpg')
# Downscale before warping; cast the target shape to int for skimage.
img = resize(img_orig, np.array(RESIZE_SCALE*npa(img_orig.shape[:2]), dtype=int))
# Warp with the NumPy implementation.
# NOTE(review): output_size=700 > input_size=350 — confirm the argument
# semantics against retina.retina.warp_image.
ret_img = warp_image(img, output_size=700, input_size=350)
fig, axes = plt.subplots(ncols=2, figsize=(10, 10))
axes[0].imshow(img_orig)
axes[1].imshow(ret_img)
plt.show()
# -
# # Retina Transform with Tensorflow
# +
import os
# Pin the visible GPU before TensorFlow initialises.
os.environ['CUDA_VISIBLE_DEVICES'] = '1'
from retina.retina_tf import warp_image
import imageio
from skimage.transform import resize
import matplotlib.pyplot as plt
# %matplotlib inline
RESIZE_SCALE = .75
import tensorflow as tf
# NOTE(review): tf.Session is TF1.x-only API; this cell will not run under
# TF2 without the tf.compat.v1 shim.
with tf.Session() as sess:
    img_orig = imageio.imread('../data/homer.jpg')
    img = resize(img_orig, RESIZE_SCALE*npa(img_orig.shape[:2]))
    retina_img = warp_image(img, output_size=700, input_size=350)
    retina_img = retina_img.eval()  # materialise the tensor inside the session
fig, axes = plt.subplots(ncols=2, figsize=(10, 10))
axes[0].imshow(img_orig)
axes[1].imshow(retina_img)
plt.show()
| dev/2019-12-11_RetinaWarp.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# language: python
# name: python3
# ---
import sys
sys.path.append('/home/jovyan/rocketry/open_rocketry/library')
from nosecone.shear_nosecone import *
from nosecone.nosecone_threaded_bases import *
from nosecone.nosecone_bases import *
from nosecone.standard_nosecones import EllipticalNoseCone
from bodytubes.semroc import bt5
from nosecone_library.specific_noses import BNC5V
from bodytubes.modelrockets_us import _3_00
from misc.utils import render as rd
import viewscad
vr = viewscad.Renderer()
# Elliptical nose cone sized for a BT-5 body tube (units presumably inches —
# confirm against the library's conventions).
enc = EllipticalNoseCone(0.75, bodytube=bt5, thickness=1 / 16.0, base_height=0.25, blunt_radius=0.125,
                         mid_diameter=.3)
# Render each base variant (cone plus mating part where one exists) at $fn=200.
nc = ThreadedBaseOutset(enc, shoulder=0.5)
vr.render(nc.cone, dollar_sign_vars=dict(fn=200))
vr.render(nc.mate, dollar_sign_vars=dict(fn=200))
nc = ThreadedBaseFlat(enc, shoulder=0.5)
vr.render(nc.cone, dollar_sign_vars=dict(fn=200))
vr.render(nc.mate, dollar_sign_vars=dict(fn=200))
nc = ThreadedBaseInset(enc, shoulder=0.5)
vr.render(nc.cone, dollar_sign_vars=dict(fn=200))
vr.render(nc.mate, dollar_sign_vars=dict(fn=200))
nc = OpenBase(enc, shoulder=0.5)
vr.render(nc.cone, dollar_sign_vars=dict(fn=200))
nc = BaseWithRing(HollowBaseCutout(enc, shoulder=0.5))
vr.render(nc.cone, dollar_sign_vars=dict(fn=200))
nc = BaseWithElbow(HollowBaseCutout(enc, shoulder=0.5))
vr.render(nc.cone, dollar_sign_vars=dict(fn=200))
nc = SolidBaseCutout(enc, shoulder=0.5)
vr.render(nc.cone, dollar_sign_vars=dict(fn=200))
nc = HollowBaseCutout(enc, shoulder=0.5)
vr.render(nc.cone, dollar_sign_vars=dict(fn=200))
nc = SolidBaseWithScrewHole(enc,
                            shoulder=0.5,
                            screw_diameter=1 / 16.,
                            screw_length=0.25)
vr.render(nc.cone, dollar_sign_vars=dict(fn=200))
nc = HollowBaseWithScrewHole(enc,
                             shoulder=0.5,
                             screw_diameter=1 / 16.,
                             screw_length=0.25)
vr.render(nc.cone, dollar_sign_vars=dict(fn=200))
# +
nc = HollowBase(enc, shoulder=0.5)
vr.render(nc.cone, dollar_sign_vars=dict(fn=200))
# -
#enc = EllipticalNoseCone(2.75, bodytube=bt55, thickness=1 / 16.0)
enc = BNC5V(scale_bodytube=_3_00, thickness = 3/32.)
# NOTE(review): ScrewInBase is built from `nc` (the last HollowBase above),
# not the freshly created `enc` — confirm this is intentional.
sib = ScrewInBase(nc, shoulder=0.5, thread_height=0.75, thread_diameter=1.2)
vr.render(sib.cone, dollar_sign_vars=dict(fn=200))
# +
""" Generate Examples"""
# NOTE(review): these imports use a different (top-level) module layout than
# the `nosecone.*` package imports above — confirm which layout is current.
from standard_nosecones import EllipticalNoseCone
from nosecone_threaded_bases import *
from bodytubes.semroc import bt55
from bodytubes.modelrockets_us import _3_00
from nosecone_library.specific_noses import BNC5V
from misc import utils
nc = EllipticalNoseCone(2.75, bodytube=bt55, thickness=1 / 16.0)
nc = BNC5V(scale_bodytube=_3_00, thickness = 3/32.)  # overwrites the cone above
sib = ScrewInBase(nc, shoulder=0.5, thread_height=0.75, thread_diameter=1.2)
sib = ScrewInBaseWithScrewHole(nc, shoulder=0.5, thread_height=0.75, thread_diameter=1.2, screw_diameter=1 / 16., screw_length=0.25)
sib = ThreadedBaseOutsetScrewInBase(nc, shoulder=1.5, thread_height=1.0, thread_diameter=2.8)
# Lay out the cross-sections in a 4-column array spaced to_mm(5) apart,
# then export the result to a .scad file.
array = utils.array(4, to_mm(5), [
    sib.crosssection(),
    nc.crosssection(sib.center_mate),
    nc.crosssection(sib.mate)])
#ThreadedBaseFlat(nc, shoulder=0.5).cone_section1,
# ThreadedBaseFlat(nc, shoulder=0.5).cone_section2])
utils.render_to_file(array, "test.scad")
| notebooks/nose_bases.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# language: python
# name: python3
# ---
# **Входные данные**: два массива (списка) x, u задающие кривую.
#
# **Ожидаемый результат**: значение x0, в котором необходимо нарушить непрерывность кривой, чтобы сохранить однозначность u = u(x).
#
# **Метод решения**: найти значение x0 исходя из предположения о равенстве отсекаемых площадей
x = [1.49066127e-06, 1.00024454e-02, 2.00039718e-02, 3.00063867e-02, 4.00101677e-02, 5.00160261e-02, 6.00250086e-02, 7.00386374e-02, 8.00590993e-02, 9.00894983e-02, 1.00134185e-01, 1.10199182e-01, 1.20292721e-01, 1.30425906e-01, 1.40613524e-01, 1.50874996e-01, 1.61235486e-01, 1.71727136e-01, 1.82390409e-01, 1.93275480e-01, 2.04443599e-01, 2.15968314e-01, 2.27936438e-01, 2.40448564e-01, 2.53618982e-01, 2.67574773e-01, 2.82453905e-01, 2.98402141e-01, 3.15568647e-01, 3.34100210e-01, 3.54134113e-01, 3.75789783e-01, 3.99159480e-01, 4.24298431e-01, 4.51214920e-01, 4.79860987e-01, 5.10124440e-01, 5.41822943e-01, 5.74700902e-01, 6.08429771e-01, 6.42612264e-01, 6.76790724e-01, 7.10459615e-01, 7.43081815e-01, 7.74108085e-01, 8.02998761e-01, 8.29246539e-01, 8.52398993e-01, 8.72079469e-01, 8.88004992e-01, 9.00000000e-01, 9.08004992e-01, 9.12079469e-01, 9.12398993e-01, 9.09246539e-01, 9.02998761e-01, 8.94108085e-01, 8.83081815e-01, 8.70459615e-01, 8.56790724e-01, 8.42612264e-01, 8.28429771e-01, 8.14700902e-01, 8.01822943e-01, 7.90124440e-01, 7.79860987e-01, 7.71214920e-01, 7.64298431e-01, 7.59159480e-01, 7.55789783e-01, 7.54134113e-01, 7.54100210e-01, 7.55568647e-01, 7.58402141e-01, 7.62453905e-01, 7.67574773e-01, 7.73618982e-01, 7.80448564e-01, 7.87936438e-01, 7.95968314e-01, 8.04443599e-01, 8.13275480e-01, 8.22390409e-01, 8.31727136e-01, 8.41235486e-01, 8.50874996e-01, 8.60613524e-01, 8.70425906e-01, 8.80292721e-01, 8.90199182e-01, 9.00134185e-01, 9.10089498e-01, 9.20059099e-01, 9.30038637e-01, 9.40025009e-01, 9.50016026e-01, 9.60010168e-01, 9.70006387e-01, 9.80003972e-01, 9.90002445e-01]
u = [3.72665317e-06, 6.11356797e-06, 9.92950431e-06, 1.59667839e-05, 2.54193465e-05, 4.00652974e-05, 6.25215038e-05, 9.65934137e-05, 1.47748360e-04, 2.23745794e-04, 3.35462628e-04, 4.97955422e-04, 7.31802419e-04, 1.06476624e-03, 1.53381068e-03, 2.18749112e-03, 3.08871541e-03, 4.31784001e-03, 5.97602290e-03, 8.18870101e-03, 1.11089965e-02, 1.49207861e-02, 1.98410947e-02, 2.61214099e-02, 3.40474547e-02, 4.39369336e-02, 5.61347628e-02, 7.10053537e-02, 8.89216175e-02, 1.10250525e-01, 1.35335283e-01, 1.64474457e-01, 1.97898699e-01, 2.35746077e-01, 2.78037300e-01, 3.24652467e-01, 3.75311099e-01, 4.29557358e-01, 4.86752256e-01, 5.46074427e-01, 6.06530660e-01, 6.66976811e-01, 7.26149037e-01, 7.82704538e-01, 8.35270211e-01, 8.82496903e-01, 9.23116346e-01, 9.55997482e-01, 9.80198673e-01, 9.95012479e-01, 1.00000000e+00, 9.95012479e-01, 9.80198673e-01, 9.55997482e-01, 9.23116346e-01, 8.82496903e-01, 8.35270211e-01, 7.82704538e-01, 7.26149037e-01, 6.66976811e-01, 6.06530660e-01, 5.46074427e-01, 4.86752256e-01, 4.29557358e-01, 3.75311099e-01, 3.24652467e-01, 2.78037300e-01, 2.35746077e-01, 1.97898699e-01, 1.64474457e-01, 1.35335283e-01, 1.10250525e-01, 8.89216175e-02, 7.10053537e-02, 5.61347628e-02, 4.39369336e-02, 3.40474547e-02, 2.61214099e-02, 1.98410947e-02, 1.49207861e-02, 1.11089965e-02, 8.18870101e-03, 5.97602290e-03, 4.31784001e-03, 3.08871541e-03, 2.18749112e-03, 1.53381068e-03, 1.06476624e-03, 7.31802419e-04, 4.97955422e-04, 3.35462628e-04, 2.23745794e-04, 1.47748360e-04, 9.65934137e-05, 6.25215038e-05, 4.00652974e-05, 2.54193465e-05, 1.59667839e-05, 9.92950431e-06, 6.11356797e-06]
# +
import matplotlib.pyplot as plt
plt.plot(x, u) # plot u=u(x)
# Bounds of the multi-valued region, read off the data arrays above.
x_start = 7.54100210e-01 # x[71]
x_end = 9.12398993e-01 # x[53]
plt.axvspan(x_start, x_end, facecolor='r', alpha=0.3) # ambiguous region
plt.show()
# -
# Область неоднозначности ограничена справа величиной x_end, в которой значения x начинают убывать, а слева величиной x_start, в которой значения x начинают возрастать. Внутри этой области существует точка x0, слева и справа от которой отсекаемые площади равны. Для примера рассмотрим 3 произвольные точки отсечения.
# +
# Try three candidate cut positions x0 and shade the areas they cut off.
_, axs = plt.subplots(1, 3, figsize=(15, 5))
for ax in axs:
    ax.plot(x, u)
# if one set x0 = 0.785
axs[0].axvline(x=0.785, color='r', linestyle='dashed')
axs[0].fill(x[45:78], u[45:78], color='r', alpha = 0.5)
# if one set x0 = 0.82
axs[1].axvline(x=0.82, color='r', linestyle='dashed')
axs[1].fill(x[46:82], u[46:82], color='r', alpha = 0.5)
# if one set x0 = 0.870
axs[2].axvline(x=0.87, color='r', linestyle='dashed')
axs[2].fill(x[48:87], u[48:87], color='r', alpha = 0.5)
plt.show()
# -
# -
# **Пояснение, где возникает подобная задача**
#
# При решении уравнения в частных производных u<sub>t</sub> + u u<sub>x</sub> = 0 возникает необходимость избавления неоднозначности в решении
# +
import numpy as np
import matplotlib.pyplot as plt
x0 = np.arange(0, 1, 0.01) # uniform split on x axis
u0 = np.exp(-(x0-0.5)**2/(2*0.1**2)) # Gaussian initial condition (t=0)
# Advect along characteristics: each point moves right at speed u0.
# NOTE(review): the 0.4 factor, the inline comment ("t=0.5") and the plot
# label ("t=1") disagree on the time value — confirm which is intended.
x1 = x0 + u0*0.4 # solution for t=0.5
u1 = u0
plt.plot(x0, u0, label='u(t=0, x)')
plt.plot(x1, u1, label='u(t=1, x)')
plt.legend()
plt.show()
# -
u1
| task1.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# name: python3
# ---
# + [markdown] id="view-in-github" colab_type="text"
# <a href="https://colab.research.google.com/github/blauveltmr/Data-Analysis/blob/master/DetailedSignals_StrategyAnalyzer.ipynb" target="_parent"><img src="https://colab.research.google.com/assets/colab-badge.svg" alt="Open In Colab"/></a>
# + id="EaKP0V4gECXY"
# Backtest window and ticker universe (leveraged/inverse ETFs).
start_date = '2020-01-01'
end_date = '2021-02-17'
date_fmt = '%Y-%m-%d'
etflist = ['agq','bib','bis','boil','brzu','bzq','drn','drv','dust','edc','edz','erx','ery','fas','faz','fngd','fngu','gll','jdst','jnug','jpnl','kold','labd','labu','nrgd','nrgu',
'nugt','rusl','russ','sdow','soxl','soxs','spxl','spxs','sqqq','svxy','tecl','tecs','tmf','tmv','tna','tqqq','tza','udow','ugl','uvxy','yang','yinn','zsl']
init_balance = 15000.00  # starting cash per backtest
#etflist = ['sdow']
# Wall-clock timing for the whole run.
import time
tic = time.time()
# + id="WxAJDE42fRYG"
# !pip install yfinance
# !pip install ta
# + id="jW6S8Tqpfj34"
import yfinance as yf
import ta
import pandas as pd
from datetime import date, timedelta, datetime
from IPython.display import clear_output
# + [markdown] id="q3j0VwoRftGV"
# **Compute Buffer**
#
#
# + id="ZADlnMVKfxIK"
# Start the data download one year early so indicators have warm-up history.
start_date_buffer = datetime.strptime(start_date, date_fmt) - timedelta(days=365)
start_date_buffer = start_date_buffer.strftime(date_fmt)
start_date_buffer
# + id="dbqMN7UtgPdx"
class StockBacktestData:
    """Downloads and caches price history for a single ticker.

    The download spans the requested backtest window plus buffers (365 days
    before for indicator warm-up, 90 days on both sides), so repeated
    get_stock_backtest_data() calls slice the cached frame instead of
    hitting the network again.
    """

    def __init__(self, ticker, start_date, end_date):
        # Dates are '%Y-%m-%d' strings.
        self._ticker = ticker
        self._backtest_start_buffer_days = 365  # indicator warm-up window
        self._buffer_days = 90  # extra margin on both ends of the download
        init_start_date, init_end_date = self._get_buffer_start_end_dates(start_date, end_date)
        self._data = self._download_stock_backtest_data(self._ticker, init_start_date, init_end_date)

    def _get_buffer_start_end_dates(self, start_date, end_date):
        # Widen [start_date, end_date] by the buffers; clamp the end to today.
        date_fmt = '%Y-%m-%d'
        init_start_date = datetime.strptime(start_date, date_fmt) - timedelta(
            days=(self._backtest_start_buffer_days + self._buffer_days)
        )
        init_start_date = init_start_date.strftime(date_fmt)
        init_end_date = datetime.strptime(end_date, date_fmt) + timedelta(days=self._buffer_days)
        if init_end_date > datetime.today():
            init_end_date = datetime.today()
        init_end_date = init_end_date.strftime(date_fmt)
        return init_start_date, init_end_date

    def _get_backtest_start_date(self, start_date):
        # Start of the slice handed back to callers: start_date minus warm-up.
        date_fmt = '%Y-%m-%d'
        start_date_buffer = datetime.strptime(start_date, date_fmt) - timedelta(
            days=self._backtest_start_buffer_days
        )
        start_date_buffer = start_date_buffer.strftime(date_fmt)
        return start_date_buffer

    def _download_stock_backtest_data(self, ticker, start_date, end_date):
        # Single network call via yfinance.
        df = yf.download(ticker, start=start_date, end=end_date)
        return df

    def get_stock_backtest_data(self, start_date, end_date):
        """Return a copy of the cached frame from (start_date - warm-up) to end_date."""
        start_date_buffer = self._get_backtest_start_date(start_date)
        df = self._data[(self._data.index >= start_date_buffer) & (self._data.index <= end_date)]
        return df.copy()
# + [markdown] id="uKmJrLkagdIW"
# **Strategies**
# + id="uEh73LTusU3r"
def strategy_KeltnerChannel_origin(df, **kwargs):
    """Mean-reversion Keltner Channel strategy (long and short).

    Signals (close vs. the previous close):
      LONG       - close crosses down through the lower band
      EXIT_LONG  - close crosses up through the upper band
      SHORT      - same trigger as EXIT_LONG
      EXIT_SHORT - same trigger as LONG

    Keyword args:
      n - Keltner Channel window (default 10)

    Returns a copy of ``df`` with the band and boolean signal columns added.
    """
    n = kwargs.get('n', 10)
    data = df.copy()
    k_band = ta.volatility.KeltnerChannel(data.High, data.Low, data.Close, n)
    data['K_BAND_UB'] = k_band.keltner_channel_hband().round(4)
    data['K_BAND_LB'] = k_band.keltner_channel_lband().round(4)
    data['CLOSE_PREV'] = data.Close.shift(1)
    data['LONG'] = (data.Close <= data.K_BAND_LB) & (data.CLOSE_PREV > data.K_BAND_LB)
    data['EXIT_LONG'] = (data.Close >= data.K_BAND_UB) & (data.CLOSE_PREV < data.K_BAND_UB)
    data['SHORT'] = (data.Close >= data.K_BAND_UB) & (data.CLOSE_PREV < data.K_BAND_UB)
    data['EXIT_SHORT'] = (data.Close <= data.K_BAND_LB) & (data.CLOSE_PREV > data.K_BAND_LB)
    # The original applied .shift(0) to each signal column — a no-op, removed
    # here; use .shift(1) if a one-bar execution lag is desired.
    return data
def strategy_KeltnerChannel_origin_long(df, **kwargs):
    """Long-only variant of the Keltner Channel mean-reversion strategy.

    Signals (close vs. the previous close):
      LONG       - close crosses down through the lower band
      EXIT_LONG  - close crosses up through the upper band
      SHORT / EXIT_SHORT - always False (shorting disabled)

    Keyword args:
      n - Keltner Channel window (default 10)

    Returns a copy of ``df`` with the band and boolean signal columns added.
    """
    n = kwargs.get('n', 10)
    data = df.copy()
    k_band = ta.volatility.KeltnerChannel(data.High, data.Low, data.Close, n)
    data['K_BAND_UB'] = k_band.keltner_channel_hband().round(4)
    data['K_BAND_LB'] = k_band.keltner_channel_lband().round(4)
    data['CLOSE_PREV'] = data.Close.shift(1)
    data['LONG'] = (data.Close <= data.K_BAND_LB) & (data.CLOSE_PREV > data.K_BAND_LB)
    data['EXIT_LONG'] = (data.Close >= data.K_BAND_UB) & (data.CLOSE_PREV < data.K_BAND_UB)
    data['SHORT'] = False
    data['EXIT_SHORT'] = False
    # The original applied .shift(0) to each signal column — a no-op, removed
    # here; use .shift(1) if a one-bar execution lag is desired.
    return data
# + id="MktGZihTsi_r"
def strategy_BollingerBands(df, **kwargs):
    """Bollinger Band touch signals, long and short sides.

    kwargs: n (window, default 10), n_rng (std-dev multiplier, default 2).
    """
    window = kwargs.get('n', 10)
    n_std = kwargs.get('n_rng', 2)
    out = df.copy()
    bands = ta.volatility.BollingerBands(out.Close, window, n_std)
    out['BOLL_LBAND_INDI'] = bands.bollinger_lband_indicator()
    out['BOLL_UBAND_INDI'] = bands.bollinger_hband_indicator()
    out['CLOSE_PREV'] = out.Close.shift(1)
    touched_lower = out.BOLL_LBAND_INDI == 1
    touched_upper = out.BOLL_UBAND_INDI == 1
    out['LONG'] = touched_lower
    out['EXIT_LONG'] = touched_upper
    out['SHORT'] = touched_upper
    out['EXIT_SHORT'] = touched_lower
    return out
def strategy_BollingerBands_long(df, **kwargs):
    """Long-only Bollinger Band variant: SHORT/EXIT_SHORT are always False."""
    window = kwargs.get('n', 10)
    n_std = kwargs.get('n_rng', 2)
    out = df.copy()
    bands = ta.volatility.BollingerBands(out.Close, window, n_std)
    out['BOLL_LBAND_INDI'] = bands.bollinger_lband_indicator()
    out['BOLL_UBAND_INDI'] = bands.bollinger_hband_indicator()
    out['CLOSE_PREV'] = out.Close.shift(1)
    out['LONG'] = out.BOLL_LBAND_INDI == 1
    out['EXIT_LONG'] = out.BOLL_UBAND_INDI == 1
    out['SHORT'] = False
    out['EXIT_SHORT'] = False
    return out
# + id="LnqEV38FszDC"
def strategy_MA(df, **kwargs):
    """Price/moving-average cross signals, long and short sides.

    kwargs: n (window, default 50), ma_type ('sma' or 'ema', default 'sma').
    Any other ma_type leaves the MA column unset, as in the original.
    """
    window = kwargs.get('n', 50)
    kind = kwargs.get('ma_type', 'sma').strip().lower()
    out = df.copy()
    if kind == 'sma':
        out['MA'] = ta.trend.SMAIndicator(out.Close, window).sma_indicator().round(4)
    elif kind == 'ema':
        out['MA'] = ta.trend.EMAIndicator(out.Close, window).ema_indicator().round(4)
    out['CLOSE_PREV'] = out.Close.shift(1)
    cross_above = (out.Close > out.MA) & (out.CLOSE_PREV <= out.MA)
    cross_below = (out.Close < out.MA) & (out.CLOSE_PREV >= out.MA)
    out['LONG'] = cross_above
    out['EXIT_LONG'] = cross_below
    out['SHORT'] = cross_below
    out['EXIT_SHORT'] = cross_above
    return out
def strategy_MACrossover(df, **kwargs):
    """Triple-SMA alignment signals (fast/middle/slow), long and short sides.

    Enter long when the three SMAs are fully ordered fast > middle > slow
    and were NOT fully ordered on the previous bar; mirror logic for short.
    """
    window_slow = kwargs.get('n_slow', 13)
    window_fast = kwargs.get('n_fast', 5)
    window_mid = kwargs.get('n_middle', 8)
    out = df.copy()
    out['SMA_FAST'] = ta.trend.SMAIndicator(out.Close, window_fast).sma_indicator().round(4)
    out['SMA_SLOW'] = ta.trend.SMAIndicator(out.Close, window_slow).sma_indicator().round(4)
    out['SMA_MIDDLE'] = ta.trend.SMAIndicator(out.Close, window_mid).sma_indicator().round(4)
    out['SMA_FAST_PREV'] = out.SMA_FAST.shift(1)
    out['SMA_MIDDLE_PREV'] = out.SMA_MIDDLE.shift(1)
    out['SMA_SLOW_PREV'] = out.SMA_SLOW.shift(1)
    aligned_up = (out.SMA_FAST > out.SMA_MIDDLE) & (out.SMA_FAST > out.SMA_SLOW) & (out.SMA_MIDDLE > out.SMA_SLOW)
    not_aligned_up_prev = (out.SMA_FAST_PREV < out.SMA_MIDDLE_PREV) | (out.SMA_FAST_PREV < out.SMA_SLOW_PREV) | (out.SMA_MIDDLE_PREV < out.SMA_SLOW_PREV)
    aligned_down = (out.SMA_FAST < out.SMA_MIDDLE) & (out.SMA_FAST < out.SMA_SLOW) & (out.SMA_MIDDLE < out.SMA_SLOW)
    not_aligned_down_prev = (out.SMA_FAST_PREV > out.SMA_MIDDLE_PREV) | (out.SMA_FAST_PREV > out.SMA_SLOW_PREV) | (out.SMA_MIDDLE_PREV > out.SMA_SLOW_PREV)
    out['LONG'] = aligned_up & not_aligned_up_prev
    out['EXIT_LONG'] = aligned_down & not_aligned_down_prev
    out['SHORT'] = aligned_down & not_aligned_down_prev
    out['EXIT_SHORT'] = aligned_up & not_aligned_up_prev
    return out
def strategy_MA_long(df, **kwargs):
    """Long-only MA-cross variant: SHORT/EXIT_SHORT are always False."""
    window = kwargs.get('n', 50)
    kind = kwargs.get('ma_type', 'sma').strip().lower()
    out = df.copy()
    if kind == 'sma':
        out['MA'] = ta.trend.SMAIndicator(out.Close, window).sma_indicator().round(4)
    elif kind == 'ema':
        out['MA'] = ta.trend.EMAIndicator(out.Close, window).ema_indicator().round(4)
    out['CLOSE_PREV'] = out.Close.shift(1)
    out['LONG'] = (out.Close > out.MA) & (out.CLOSE_PREV <= out.MA)
    out['EXIT_LONG'] = (out.Close < out.MA) & (out.CLOSE_PREV >= out.MA)
    out['SHORT'] = False
    out['EXIT_SHORT'] = False
    return out
# + id="-DqJY60gs6W2"
def strategy_MACD(df, **kwargs):
    """MACD-histogram zero-cross signals, long and short sides.

    kwargs: n_slow (default 26), n_fast (default 12), n_sign (default 9).
    """
    window_slow = kwargs.get('n_slow', 26)
    window_fast = kwargs.get('n_fast', 12)
    window_sign = kwargs.get('n_sign', 9)
    out = df.copy()
    macd_ind = ta.trend.MACD(out.Close, window_slow, window_fast, window_sign)
    out['MACD_DIFF'] = macd_ind.macd_diff().round(4)
    out['MACD_DIFF_PREV'] = out.MACD_DIFF.shift(1)
    hist_cross_up = (out.MACD_DIFF > 0) & (out.MACD_DIFF_PREV <= 0)
    hist_cross_down = (out.MACD_DIFF < 0) & (out.MACD_DIFF_PREV >= 0)
    out['LONG'] = hist_cross_up
    out['EXIT_LONG'] = hist_cross_down
    out['SHORT'] = hist_cross_down
    out['EXIT_SHORT'] = hist_cross_up
    return out
def strategy_MACD_long(df, **kwargs):
    """Long-only MACD variant: SHORT/EXIT_SHORT are always False."""
    window_slow = kwargs.get('n_slow', 26)
    window_fast = kwargs.get('n_fast', 12)
    window_sign = kwargs.get('n_sign', 9)
    out = df.copy()
    macd_ind = ta.trend.MACD(out.Close, window_slow, window_fast, window_sign)
    out['MACD_DIFF'] = macd_ind.macd_diff().round(4)
    out['MACD_DIFF_PREV'] = out.MACD_DIFF.shift(1)
    out['LONG'] = (out.MACD_DIFF > 0) & (out.MACD_DIFF_PREV <= 0)
    out['EXIT_LONG'] = (out.MACD_DIFF < 0) & (out.MACD_DIFF_PREV >= 0)
    out['SHORT'] = False
    out['EXIT_SHORT'] = False
    return out
# + id="MJhTIE6us8QK"
def strategy_RSI(df, **kwargs):
    """RSI 30/70 threshold-cross signals, long and short sides."""
    window = kwargs.get('n', 14)
    out = df.copy()
    out['RSI'] = ta.momentum.RSIIndicator(out.Close, window).rsi().round(4)
    out['RSI_PREV'] = out.RSI.shift(1)
    # Rebound up through 30 (oversold exit) / fall down through 70 (overbought exit).
    rebound_30 = (out.RSI > 30) & (out.RSI_PREV <= 30)
    falloff_70 = (out.RSI < 70) & (out.RSI_PREV >= 70)
    out['LONG'] = rebound_30
    out['EXIT_LONG'] = falloff_70
    out['SHORT'] = falloff_70
    out['EXIT_SHORT'] = rebound_30
    return out
def strategy_RSI_long(df, **kwargs):
    """Long-only RSI variant: SHORT/EXIT_SHORT are always False."""
    window = kwargs.get('n', 14)
    out = df.copy()
    out['RSI'] = ta.momentum.RSIIndicator(out.Close, window).rsi().round(4)
    out['RSI_PREV'] = out.RSI.shift(1)
    out['LONG'] = (out.RSI > 30) & (out.RSI_PREV <= 30)
    out['EXIT_LONG'] = (out.RSI < 70) & (out.RSI_PREV >= 70)
    out['SHORT'] = False
    out['EXIT_SHORT'] = False
    return out
# + id="TezLN0fQtCZ-"
def strategy_WR(df, **kwargs):
    """Williams %R -80/-20 threshold-cross signals, long and short sides."""
    window = kwargs.get('n', 14)
    out = df.copy()
    wr_ind = ta.momentum.WilliamsRIndicator(out.High, out.Low, out.Close, window)
    out['WR'] = wr_ind.williams_r().round(4)
    out['WR_PREV'] = out.WR.shift(1)
    rebound_80 = (out.WR > -80) & (out.WR_PREV <= -80)
    falloff_20 = (out.WR < -20) & (out.WR_PREV >= -20)
    out['LONG'] = rebound_80
    out['EXIT_LONG'] = falloff_20
    out['SHORT'] = falloff_20
    out['EXIT_SHORT'] = rebound_80
    return out
def strategy_WR_long(df, **kwargs):
    """Long-only Williams %R variant: SHORT/EXIT_SHORT are always False."""
    window = kwargs.get('n', 14)
    out = df.copy()
    wr_ind = ta.momentum.WilliamsRIndicator(out.High, out.Low, out.Close, window)
    out['WR'] = wr_ind.williams_r().round(4)
    out['WR_PREV'] = out.WR.shift(1)
    out['LONG'] = (out.WR > -80) & (out.WR_PREV <= -80)
    out['EXIT_LONG'] = (out.WR < -20) & (out.WR_PREV >= -20)
    out['SHORT'] = False
    out['EXIT_SHORT'] = False
    return out
# + id="cljNrJtAtKkA"
def strategy_Stochastic_fast(df, **kwargs):
    """Fast stochastic %K/%D cross signals, long and short sides."""
    k_window = kwargs.get('k', 20)
    d_window = kwargs.get('d', 5)
    out = df.copy()
    osc = ta.momentum.StochasticOscillator(out.High, out.Low, out.Close, k_window, d_window)
    out['K'] = osc.stoch().round(4)
    out['D'] = osc.stoch_signal().round(4)
    out['DIFF'] = out['K'] - out['D']
    out['DIFF_PREV'] = out.DIFF.shift(1)
    crossed_up = (out.DIFF > 0) & (out.DIFF_PREV <= 0)
    crossed_down = (out.DIFF < 0) & (out.DIFF_PREV >= 0)
    out['LONG'] = crossed_up
    out['EXIT_LONG'] = crossed_down
    out['SHORT'] = crossed_down
    out['EXIT_SHORT'] = crossed_up
    return out
def strategy_Stochastic_fast_long(df, **kwargs):
    """Long-only fast stochastic variant: SHORT/EXIT_SHORT are always False."""
    k_window = kwargs.get('k', 20)
    d_window = kwargs.get('d', 5)
    out = df.copy()
    osc = ta.momentum.StochasticOscillator(out.High, out.Low, out.Close, k_window, d_window)
    out['K'] = osc.stoch().round(4)
    out['D'] = osc.stoch_signal().round(4)
    out['DIFF'] = out['K'] - out['D']
    out['DIFF_PREV'] = out.DIFF.shift(1)
    out['LONG'] = (out.DIFF > 0) & (out.DIFF_PREV <= 0)
    out['EXIT_LONG'] = (out.DIFF < 0) & (out.DIFF_PREV >= 0)
    out['SHORT'] = False
    out['EXIT_SHORT'] = False
    return out
# + id="-urCVDkKtQBG"
def strategy_Stochastic_slow(df, **kwargs):
    """Slow stochastic signals: %D vs its own SMA(dd), long and short sides."""
    k_window = kwargs.get('k', 20)
    d_window = kwargs.get('d', 5)
    dd_window = kwargs.get('dd', 3)
    out = df.copy()
    osc = ta.momentum.StochasticOscillator(out.High, out.Low, out.Close, k_window, d_window)
    out['K'] = osc.stoch().round(4)
    out['D'] = osc.stoch_signal().round(4)
    # Slow line: SMA of %D over dd bars.
    out['DD'] = ta.trend.SMAIndicator(out.D, dd_window).sma_indicator().round(4)
    out['DIFF'] = out['D'] - out['DD']
    out['DIFF_PREV'] = out.DIFF.shift(1)
    crossed_up = (out.DIFF > 0) & (out.DIFF_PREV <= 0)
    crossed_down = (out.DIFF < 0) & (out.DIFF_PREV >= 0)
    out['LONG'] = crossed_up
    out['EXIT_LONG'] = crossed_down
    out['SHORT'] = crossed_down
    out['EXIT_SHORT'] = crossed_up
    return out
def strategy_Stochastic_slow_long(df, **kwargs):
    """Long-only slow stochastic variant: SHORT/EXIT_SHORT are always False."""
    k_window = kwargs.get('k', 20)
    d_window = kwargs.get('d', 5)
    dd_window = kwargs.get('dd', 3)
    out = df.copy()
    osc = ta.momentum.StochasticOscillator(out.High, out.Low, out.Close, k_window, d_window)
    out['K'] = osc.stoch().round(4)
    out['D'] = osc.stoch_signal().round(4)
    out['DD'] = ta.trend.SMAIndicator(out.D, dd_window).sma_indicator().round(4)
    out['DIFF'] = out['D'] - out['DD']
    out['DIFF_PREV'] = out.DIFF.shift(1)
    out['LONG'] = (out.DIFF > 0) & (out.DIFF_PREV <= 0)
    out['EXIT_LONG'] = (out.DIFF < 0) & (out.DIFF_PREV >= 0)
    out['SHORT'] = False
    out['EXIT_SHORT'] = False
    return out
# + id="SmeFE6xGtVte"
def strategy_Ichmoku(df, **kwargs):
    """Ichimoku conversion/base line cross signals, long and short sides.

    (Function name keeps the original 'Ichmoku' spelling -- callers use it.)
    """
    conv_window = kwargs.get('n_conv', 9)
    base_window = kwargs.get('n_base', 26)
    span_b_window = kwargs.get('n_span_b', 26)
    out = df.copy()
    ich = ta.trend.IchimokuIndicator(out.High, out.Low, conv_window, base_window, span_b_window)
    out['BASE'] = ich.ichimoku_base_line().round(4)
    out['CONV'] = ich.ichimoku_conversion_line().round(4)
    out['DIFF'] = out['CONV'] - out['BASE']
    out['DIFF_PREV'] = out.DIFF.shift(1)
    crossed_up = (out.DIFF > 0) & (out.DIFF_PREV <= 0)
    crossed_down = (out.DIFF < 0) & (out.DIFF_PREV >= 0)
    out['LONG'] = crossed_up
    out['EXIT_LONG'] = crossed_down
    out['SHORT'] = crossed_down
    out['EXIT_SHORT'] = crossed_up
    return out
def strategy_Ichmoku_long(df, **kwargs):
    """Long-only Ichimoku variant: SHORT/EXIT_SHORT are always False."""
    conv_window = kwargs.get('n_conv', 9)
    base_window = kwargs.get('n_base', 26)
    span_b_window = kwargs.get('n_span_b', 26)
    out = df.copy()
    ich = ta.trend.IchimokuIndicator(out.High, out.Low, conv_window, base_window, span_b_window)
    out['BASE'] = ich.ichimoku_base_line().round(4)
    out['CONV'] = ich.ichimoku_conversion_line().round(4)
    out['DIFF'] = out['CONV'] - out['BASE']
    out['DIFF_PREV'] = out.DIFF.shift(1)
    out['LONG'] = (out.DIFF > 0) & (out.DIFF_PREV <= 0)
    out['EXIT_LONG'] = (out.DIFF < 0) & (out.DIFF_PREV >= 0)
    out['SHORT'] = False
    out['EXIT_SHORT'] = False
    return out
# + id="OVPfeZp2Gjiu"
def get_stock_backtest_data(ticker, start_date, end_date):
    """Download daily OHLCV for *ticker*, starting 365 days before start_date.

    The one-year buffer gives long-window indicators enough warm-up history.
    Dates are 'YYYY-MM-DD' strings.
    """
    date_fmt = '%Y-%m-%d'
    buffered = datetime.strptime(start_date, date_fmt) - timedelta(days=365)
    return yf.download(ticker, start=buffered.strftime(date_fmt), end=end_date)
# + id="mimRJtMrG6ka"
def prepare_stock_ta_backtest_data(df, start_date, end_date, strategy, **strategy_params):
    """Apply *strategy* (a strategy_* function) to df, then trim the result
    to the inclusive [start_date, end_date] window of its index."""
    with_signals = strategy(df, **strategy_params)
    in_window = (with_signals.index >= start_date) & (with_signals.index <= end_date)
    return with_signals[in_window]
# + id="QxxfFaMnBod1"
def run_stock_ta_backtest(bt_df, stop_loss_lvl=None, etf_type=None, benchmark=None, strategy=None, params=None):
    """Simulate trading the boolean LONG/EXIT_LONG signal columns of bt_df.

    Only long positions are ever opened; a SHORT signal merely resets the
    state to 'hold'.  A second, parallel "inc" account mirrors the same
    trades but only participates from 2020-11-09 onward (hard-coded below).

    Parameters:
        bt_df -- DataFrame with Open/Close/Low prices plus the
            LONG/EXIT_LONG/SHORT columns produced by a strategy_* function.
        stop_loss_lvl -- negative percentage triggering an intraday stop
            exit, or None to disable stop-loss handling.
        etf_type, benchmark, strategy, params -- pass-through labels
            recorded on every closed-trade row for later reporting.

    Returns a dict with 'cum_ret_df' (per-day frame), 'max_drawdown'
    (value/pct), 'trade_stats' (per-side aggregates) and 'trade_details'
    (one row per closed trade).
    """
    # NOTE(review): `init_balance` is a module-level global defined elsewhere
    # in the notebook -- confirm it is set before this function runs.
    balance = init_balance
    inc_balance = init_balance
    market_value = init_balance
    inc_market_value = init_balance
    pnl = 0.00
    position = 0        # shares held by the main account
    inc_position = 0    # shares held by the post-2020-11-09 "inc" account
    stopthresh = 'no'   # NOTE(review): never read again -- dead variable
    last_signal = 'hold'
    last_price = 0.00   # entry price of the currently open position
    c = 0               # bars the current position has been held
    # One entry per CLOSED trade in each of the following parallel lists.
    trade_date_start = []
    trade_date_end = []
    trade_days = []
    trade_side = []
    trade_pnl = []
    trade_ret = []
    trade_balance = []
    trade_stop = []
    trade_type = []
    trade_benchmark = []
    trade_inc_ret = []
    trade_strategy = []
    trade_params = []
    cum_value = []      # daily total market value, main account
    cum_incValue = []   # daily total market value, "inc" account
    for index, row in bt_df.iterrows():
        # check and close any positions
        if row.EXIT_LONG and last_signal == 'long' and position > 0:
            trade_date_end.append(row.name)
            trade_days.append(c)
            pnl = (row.Close - last_price) * position
            trade_pnl.append(pnl)
            trade_ret.append((row.Close / last_price - 1) * 100)
            # The "inc" account only books results from 2020-11-09 onward.
            if row.name >= datetime.strptime('2020-11-9 00:00:00','%Y-%m-%d %H:%M:%S') :
                trade_inc_ret.append((row.Close / last_price - 1) * 100)
                inc_balance = inc_balance + (row.Close * inc_position)
            else:
                trade_inc_ret.append(0)
            balance = balance + (row.Close * position)
            trade_balance.append(balance)
            trade_stop.append(stop_loss_lvl)
            trade_type.append(etf_type)
            trade_benchmark.append(benchmark)
            trade_strategy.append(strategy)
            trade_params.append(params)
            pnl = 0
            position = 0
            last_signal = 'hold'
            c = 0
        # check signal and enter any possible position
        if row.LONG and last_signal != 'long':
            last_signal = 'long'
            last_price = row.Close
            trade_date_start.append(row.name)
            trade_side.append('long')
            # Invest the whole balance in whole shares at today's close.
            position = int(balance / row.Close)
            cost = position * row.Close
            balance = balance - cost
            if row.name >= datetime.strptime('2020-11-9 00:00:00','%Y-%m-%d %H:%M:%S') :
                inc_position = int(inc_balance / row.Close)
                inc_cost = inc_position * row.Close
                inc_balance = inc_balance - inc_cost
            c = 0
        # A SHORT signal never opens a short position; it only resets state.
        if row.SHORT and last_signal != 'short':
            last_signal = 'hold'
            c = 0
        if stop_loss_lvl:
            # check stop loss
            if position > 0 and ((row.Low / last_price)- 1) * 100 <= stop_loss_lvl and row.LONG == False:
                c = c + 1
                trade_date_end.append(row.name)
                trade_days.append(c)
                # Exit at the stop price (stop_loss_lvl is negative).
                stop_loss_price = last_price + round(last_price * (stop_loss_lvl / 100), 4)
                pnl = (stop_loss_price - last_price) * position
                trade_pnl.append(pnl)
                trade_ret.append(((stop_loss_price / last_price) - 1) * 100)
                if row.name >= datetime.strptime('2020-11-9 00:00:00','%Y-%m-%d %H:%M:%S') :
                    # NOTE(review): books the Close-based return here, not the
                    # stop-price return used for trade_ret -- confirm intended.
                    trade_inc_ret.append((row.Close / last_price - 1) * 100)
                else:
                    trade_inc_ret.append(0)
                balance = balance + (stop_loss_price * position)
                if row.name >= datetime.strptime('2020-11-9 00:00:00','%Y-%m-%d %H:%M:%S') :
                    inc_balance = inc_balance + (stop_loss_price * inc_position)
                trade_balance.append(balance)
                trade_stop.append(stop_loss_lvl)
                trade_type.append(etf_type)
                trade_benchmark.append(benchmark)
                trade_strategy.append(strategy)
                trade_params.append(params)
                pnl = 0
                position = 0
                inc_position = 0
                last_signal = 'hold'
                c = 0
        # compute market value and count days for any possible poisition
        if last_signal == 'hold':
            market_value = balance
            inc_market_value = inc_balance
        elif last_signal == 'long':
            c = c + 1
            market_value = (position * row.Close) + balance
            if row.name >= datetime.strptime('2020-11-9 00:00:00','%Y-%m-%d %H:%M:%S') :
                inc_market_value = (inc_position * row.Close) + inc_balance
        else:
            c = c + 1
            #market_value = (row.Close - last_price) * position + balance
            #market_value = balance
        cum_value.append(market_value)
        cum_incValue.append(inc_market_value)
    # generate analysis
    # performance over time
    cum_ret_df = pd.DataFrame(cum_value, index=bt_df.index, columns=['CUM_RET'])
    # NOTE(review): the next line overwrites the per-day series with the
    # scalar FINAL return, so CUM_RET is constant -- looks unintentional.
    cum_ret_df['CUM_RET'] = (market_value / init_balance - 1) * 100
    cum_ret_df['BUY_HOLD'] = (bt_df.Close / bt_df.Open.iloc[0] - 1) * 100
    cum_ret_df['ZERO'] = 0
    cum_ret_df['Market_Value'] = market_value
    cum_ret_df['INC_CUM_RET'] = (inc_market_value / init_balance - 1) * 100
    # trade stats
    # A still-open trade has a start but no end; truncate to matched pairs.
    size = min(len(trade_date_start), len(trade_date_end))
    tarde_dict = {  # (sic: "tarde" typo kept -- it is a local name only)
        'START': trade_date_start[:size],
        'END': trade_date_end[:size],
        'SIDE': trade_side[:size],
        'DAYS': trade_days[:size],
        'PNL': trade_pnl[:size],
        'RET': trade_ret[:size],
        'BAL': trade_balance[:size],
        'STOP':trade_stop[:size],
        'TYPE':trade_type[:size],
        'BENCH':trade_benchmark[:size],
        'INC_RET':trade_inc_ret[:size],
        'STRATEGY':trade_strategy[:size],
        'PARAMS':trade_params[:size]
    }
    trade_df = pd.DataFrame(tarde_dict)
    # Per-side aggregates ('long' is the only side ever recorded above).
    num_trades = trade_df.groupby('SIDE').count()[['START']]
    num_trades_win = trade_df[trade_df.PNL > 0].groupby('SIDE').count()[['START']]
    avg_days = trade_df.groupby('SIDE').mean()[['DAYS']]
    avg_ret = trade_df.groupby('SIDE').mean()[['RET']]
    avg_ret_win = trade_df[trade_df.PNL > 0].groupby('SIDE').mean()[['RET']]
    avg_ret_loss = trade_df[trade_df.PNL < 0].groupby('SIDE').mean()[['RET']]
    std_ret = trade_df.groupby('SIDE').std()[['RET']]
    detail_df = pd.concat([
        num_trades, num_trades_win, avg_days,
        avg_ret, avg_ret_win, avg_ret_loss, std_ret
    ], axis=1, sort=False)
    detail_df.columns = [
        'NUM_TRADES', 'NUM_TRADES_WIN', 'AVG_DAYS',
        'AVG_RET', 'AVG_RET_WIN', 'AVG_RET_LOSS', 'STD_RET'
    ]
    # NOTE(review): round() returns a new frame; this result is discarded.
    detail_df.round(2)
    # max drawdown
    mv_df = pd.DataFrame(cum_value, index=bt_df.index, columns=['MV'])
    days = len(mv_df)
    # Running maximum over the entire history (window spans the full length).
    roll_max = mv_df.MV.rolling(window=days, min_periods=1).max()
    drawdown_val = mv_df.MV - roll_max
    drawdown_pct = (mv_df.MV / roll_max - 1) * 100
    # return all stats
    return {
        'cum_ret_df': cum_ret_df,
        'max_drawdown': {
            'value': round(drawdown_val.min(), 0),
            'pct': round(drawdown_pct.min(), 2)
        },
        'trade_stats': detail_df,
        'trade_details': trade_df
    }
# + id="V347xLr9ueTW"
from pandas import ExcelWriter

writer = ExcelWriter("DetailedSignals:" + str(end_date) + ".xlsx")
pd.set_option('display.max_columns', None)
pd.set_option('display.width', 1000)

# Backtest configuration per leveraged ETF:
#   ticker -> (benchmark ETF, 'bull'/'bear', stop-loss %, strategy name, strategy kwargs)
# This lookup table replaces the original ~450-line if-chain, which also
# silently reused the PREVIOUS ticker's settings for any unknown ticker.
ETF_CONFIG = {
    'sdow': ('dia', 'bear', -2, 'strategy_MACD', {'n_slow': 2, 'n_fast': 3, 'n_sign': 2}),
    'udow': ('dia', 'bull', -4, 'strategy_BollingerBands', {'n': 34, 'n_rng': 1}),
    'edz': ('eem', 'bear', -2, 'strategy_BollingerBands', {'n': 3, 'n_rng': 1}),
    'edc': ('eem', 'bull', -3, 'strategy_BollingerBands', {'n': 5, 'n_rng': 1}),
    'jpnl': ('ewj', 'bull', -2, 'strategy_BollingerBands', {'n': 5, 'n_rng': 1}),
    'bzq': ('ewz', 'bear', -2, 'strategy_MACD', {'n_slow': 3, 'n_fast': 4, 'n_sign': 3}),
    'brzu': ('ewz', 'bull', -2, 'strategy_BollingerBands', {'n': 3, 'n_rng': 1}),
    'fngd': ('dia', 'bear', -2, 'strategy_MACD', {'n_slow': 3, 'n_fast': 5, 'n_sign': 3}),
    'fngu': ('fang', 'bull', -3, 'strategy_MACD', {'n_slow': 5, 'n_fast': 3, 'n_sign': 3}),
    'yang': ('fxi', 'bear', -2, 'strategy_BollingerBands', {'n': 3, 'n_rng': 1}),
    'yinn': ('fxi', 'bull', -2, 'strategy_BollingerBands', {'n': 5, 'n_rng': 1}),
    'dust': ('gdx', 'bear', -3, 'strategy_BollingerBands', {'n': 3, 'n_rng': 1}),
    'nugt': ('gdx', 'bull', -3, 'strategy_BollingerBands', {'n': 3, 'n_rng': 1}),
    'jdst': ('gdxj', 'bear', -2, 'strategy_BollingerBands', {'n': 10, 'n_rng': 1}),
    'jnug': ('gdxj', 'bull', -2, 'strategy_BollingerBands', {'n': 3, 'n_rng': 1}),
    'gll': ('gld', 'bear', -2, 'strategy_BollingerBands', {'n': 15, 'n_rng': 1}),
    'ugl': ('gld', 'bull', -2, 'strategy_BollingerBands', {'n': 3, 'n_rng': 1}),
    'tza': ('iwm', 'bear', -2, 'strategy_MACD', {'n_slow': 2, 'n_fast': 7, 'n_sign': 2}),
    'tna': ('iwm', 'bull', -4, 'strategy_BollingerBands', {'n': 33, 'n_rng': 1}),
    'sqqq': ('qqq', 'bear', -2, 'strategy_BollingerBands', {'n': 4, 'n_rng': 1}),
    'tqqq': ('qqq', 'bull', -5, 'strategy_BollingerBands', {'n': 10, 'n_rng': 1}),
    'russ': ('rsx', 'bear', -2, 'strategy_MACD', {'n_slow': 2, 'n_fast': 6, 'n_sign': 2}),
    'rusl': ('rsx', 'bull', -2, 'strategy_MACD', {'n_slow': 2, 'n_fast': 3, 'n_sign': 2}),
    'drv': ('rwr', 'bear', -2, 'strategy_BollingerBands', {'n': 3, 'n_rng': 1}),
    'drn': ('rwr', 'bull', -5, 'strategy_BollingerBands', {'n': 34, 'n_rng': 1}),
    'zsl': ('slv', 'bear', -2, 'strategy_BollingerBands', {'n': 3, 'n_rng': 1}),
    'agq': ('slv', 'bull', -2, 'strategy_BollingerBands', {'n': 3, 'n_rng': 1}),
    'soxs': ('smh', 'bear', -3, 'strategy_MACD', {'n_slow': 2, 'n_fast': 3, 'n_sign': 2}),
    'soxl': ('smh', 'bull', -5, 'strategy_BollingerBands', {'n': 8, 'n_rng': 1}),
    'spxs': ('spy', 'bear', -2, 'strategy_MACD', {'n_slow': 2, 'n_fast': 3, 'n_sign': 2}),
    'spxl': ('spy', 'bull', -3, 'strategy_BollingerBands', {'n': 5, 'n_rng': 1}),
    'tmv': ('tlt', 'bear', -2, 'strategy_BollingerBands', {'n': 8, 'n_rng': 1}),
    'tmf': ('tlt', 'bull', -2, 'strategy_BollingerBands', {'n': 3, 'n_rng': 1}),
    'kold': ('ung', 'bear', -5, 'strategy_BollingerBands', {'n': 16, 'n_rng': 1}),
    'boil': ('ung', 'bull', -2, 'strategy_BollingerBands', {'n': 3, 'n_rng': 1}),
    'svxy': ('vxxb', 'bear', -2, 'strategy_BollingerBands', {'n': 7, 'n_rng': 2}),
    'uvxy': ('vxxb', 'bull', -2, 'strategy_MACD', {'n_slow': 2, 'n_fast': 3, 'n_sign': 2}),
    'labd': ('xbi', 'bear', -2, 'strategy_MACD', {'n_slow': 2, 'n_fast': 3, 'n_sign': 2}),
    'labu': ('xbi', 'bull', -2, 'strategy_MACD', {'n_slow': 2, 'n_fast': 7, 'n_sign': 2}),
    'bis': ('xbi', 'bear', -2, 'strategy_BollingerBands', {'n': 3, 'n_rng': 1}),
    'bib': ('xbi', 'bull', -5, 'strategy_BollingerBands', {'n': 27, 'n_rng': 1}),
    'ery': ('xle', 'bear', -2, 'strategy_MACD', {'n_slow': 2, 'n_fast': 4, 'n_sign': 2}),
    'erx': ('xle', 'bull', -2, 'strategy_BollingerBands', {'n': 6, 'n_rng': 1}),
    'faz': ('xlf', 'bear', -2, 'strategy_BollingerBands', {'n': 3, 'n_rng': 1}),
    'fas': ('xlf', 'bull', -5, 'strategy_BollingerBands', {'n': 27, 'n_rng': 1}),
    'tecs': ('xlk', 'bear', -2, 'strategy_BollingerBands', {'n': 7, 'n_rng': 1}),
    'tecl': ('xlk', 'bull', -4, 'strategy_BollingerBands', {'n': 7, 'n_rng': 1}),
    'nrgd': ('uso', 'bear', -4, 'strategy_Stochastic_slow', {'k': 25, 'd': 8, 'dd': 2}),
    'nrgu': ('uso', 'bull', -5, 'strategy_BollingerBands', {'n': 5, 'n_rng': 1}),
}

for elt_id, elt in enumerate(etflist):
    ticker = elt
    if ticker not in ETF_CONFIG:
        # The original if-chain fell through here and reused stale settings.
        print('No backtest configuration for ' + ticker + ', skipping')
        continue
    benchmark, etf_type, stop_loss_lvl, strategy, strategy_kwargs = ETF_CONFIG[ticker]
    # Human-readable parameter label recorded in the output workbook.
    params = "(" + ", ".join(key + "=" + str(val) for key, val in strategy_kwargs.items()) + ")"
    # Get the OHLC and Volume.  The original downloaded the data TWICE per
    # ticker (a direct yf.download immediately overwritten by
    # get_stock_backtest_data); one buffered download is sufficient.
    df = get_stock_backtest_data(ticker, start_date, end_date)
    # Dispatch by name to the matching strategy_* function defined above.
    strategy_func = globals()[strategy]
    bt_df = prepare_stock_ta_backtest_data(df, start_date, end_date, strategy_func, **strategy_kwargs)
    print(ticker)
    # Save after every sheet so a crash mid-run keeps partial output.
    bt_df.to_excel(writer, ticker)
    writer.save()
    result = run_stock_ta_backtest(bt_df, stop_loss_lvl, etf_type, benchmark, strategy, params)
    df_details = result['trade_details']
    df_details.to_excel(writer, ticker + '_det')
    writer.save()
    cum_ret_df = result['cum_ret_df']
    cum_ret_df.to_excel(writer, ticker + '_cum')
    writer.save()
    print(cum_ret_df.tail(1))
    print('End ' + ticker)
    print('')
# + id="OF7Ow6H7GQMA"
# Report total wall-clock runtime since `tic` was captured earlier.
toc = time.time()
elapsed_minutes = (toc - tic) / 60.0
print("Minutes taken = " + str(elapsed_minutes))
| DetailedSignals_StrategyAnalyzer.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# name: python3
# ---
# + id="H_BDOe3HYd6t" colab_type="code" colab={"base_uri": "https://localhost:8080/", "height": 470} outputId="f2601ab2-614c-4e1d-8ec2-943d352838ad" executionInfo={"status": "ok", "timestamp": 1583512131323, "user_tz": -60, "elapsed": 14069, "user": {"displayName": "<NAME>", "photoUrl": "", "userId": "10838340120858483589"}}
# !pip install --upgrade tables
# !pip install eli5
# !pip install xgboost
# !pip install hyperopt
# + id="LuP-2BL0Y4S3" colab_type="code" colab={}
import pandas as pd
import numpy as np
import xgboost as xgb
from sklearn.metrics import mean_absolute_error as mae
from sklearn.model_selection import cross_val_score
from hyperopt import hp, fmin, tpe, STATUS_OK
import eli5
from eli5.sklearn import PermutationImportance
# + id="13LG6lvIZ8Qn" colab_type="code" colab={"base_uri": "https://localhost:8080/", "height": 33} outputId="fd92a76f-0905-4a45-e5e3-87b3a4de8dee" executionInfo={"status": "ok", "timestamp": 1583512153242, "user_tz": -60, "elapsed": 650, "user": {"displayName": "<NAME>", "photoUrl": "", "userId": "10838340120858483589"}}
# cd "/content/drive/My Drive/Colab Notebooks/matrix/matrix_two/dw_matrix_car"
# + id="CaAG3FmgaDkK" colab_type="code" colab={"base_uri": "https://localhost:8080/", "height": 33} outputId="591a9fa7-1d5f-431d-d910-80f02978660f" executionInfo={"status": "ok", "timestamp": 1583512158596, "user_tz": -60, "elapsed": 3673, "user": {"displayName": "<NAME>", "photoUrl": "", "userId": "10838340120858483589"}}
df = pd.read_hdf('data/car.h5')
df.shape
# + [markdown] id="FohTjEcpaQ3w" colab_type="text"
# ## Feature Engineering
# + id="2qOLLjd7aJhC" colab_type="code" colab={}
SUFFIX_CAT = '__cat'

# Label-encode every column of the data frame: columns that already carry
# the '__cat' suffix are overwritten in place with their integer codes,
# every other column gets a new '<name>__cat' companion column.
for feat in df.columns:
    # Columns whose cells hold Python lists cannot be factorized; skip them.
    if isinstance(df[feat][0], list):
        continue
    codes = df[feat].factorize()[0]
    target = feat if SUFFIX_CAT in feat else feat + SUFFIX_CAT
    df[target] = codes
# + id="9VsfubE4aUwc" colab_type="code" colab={}
# Production year: the string 'None' becomes the sentinel -1, otherwise parse as int.
df['param_rok-produkcji'] = df['param_rok-produkcji'].map(lambda x: -1 if str(x) == 'None' else int(x))
# Engine power: keep only the number before the first space (unit suffix dropped).
df['param_moc'] = df['param_moc'].map(lambda x: -1 if str(x) == 'None' else int(str(x).split(' ')[0]) )
# Displacement: take everything before 'cm', remove inner spaces, parse as int.
df['param_pojemność-skokowa'] = df['param_pojemność-skokowa'].map(lambda x: -1 if str(x) == 'None' else int(str(x).split('cm')[0].replace(' ','')) )
# + id="euIGhGmPbE7i" colab_type="code" colab={}
def run_model(model, feats):
    """Cross-validate *model* on the chosen feature columns of the global df.

    Args:
        model: any scikit-learn-compatible regressor.
        feats: list of column names of df used as predictors; the target
            is always df['price_value'].

    Returns:
        (mean, std) of the 3-fold negative-MAE scores.
    """
    X = df[feats].values
    y = df['price_value'].values
    cv_scores = cross_val_score(model, X, y, cv=3, scoring='neg_mean_absolute_error' )
    return cv_scores.mean(), cv_scores.std()
# + id="tHyqfeh4ckFJ" colab_type="code" colab={"base_uri": "https://localhost:8080/", "height": 83} outputId="0f1575dd-cfd2-41da-b244-193e913493be" executionInfo={"status": "ok", "timestamp": 1583512198714, "user_tz": -60, "elapsed": 13432, "user": {"displayName": "Wkosz w", "photoUrl": "", "userId": "10838340120858483589"}}
# Feature columns for the model: raw numeric params plus the factorized
# '__cat' encodings produced in the feature-engineering step above.
feats = ['param_napęd__cat',
         'param_rok-produkcji',
         'param_stan__cat',
         'param_skrzynia-biegów__cat',
         'param_faktura-vat__cat',
         'param_moc',
         'param_marka-pojazdu__cat',
         'feature_kamera-cofania__cat',
         'param_typ__cat',
         'param_pojemność-skokowa',
         'seller_name__cat',
         'feature_wspomaganie-kierownicy__cat',
         'param_model-pojazdu__cat',
         'param_wersja__cat',
         'param_kod-silnika__cat',
         'feature_system-start-stop__cat',
         'feature_asystent-pasa-ruchu__cat',
         'feature_czujniki-parkowania-przednie__cat',
         'feature_łopatki-zmiany-biegów__cat',
         'feature_regulowane-zawieszenie__cat']

# Fixed XGBoost configuration for the baseline run.
xgb_params = {
    'max_depth': 5,
    'n_estimators' : 50,
    'learning_rate' : 0.1,
    'seed' : 0
}
# Baseline score: (mean, std) of the 3-fold negative-MAE estimates.
run_model(xgb.XGBRegressor(**xgb_params), feats)
# + [markdown] id="e9Ks8PxEetjw" colab_type="text"
# ## Hyperopt
# + id="EQKzPGrijtPt" colab_type="code" colab={}
def obj_func(params):
    """Hyperopt objective: train with the sampled params and return the
    absolute mean cross-validated MAE as the loss to minimize."""
    print("Training with params: ")
    print(params)
    mean_mae, _ = run_model(xgb.XGBRegressor(**params), feats)
    loss = np.abs(mean_mae)
    return {'loss': loss, 'status': STATUS_OK }
# + id="GnRzXXetctI8" colab_type="code" colab={}
# space
# Hyperopt search space: tunable distributions plus fixed settings
# (objective, n_estimators and seed are held constant across trials).
xgb_reg_params = {
    'learning_rate': hp.choice('learning_rate', np.arange(0.05, 0.31, 0.05)),
    'max_depth': hp.choice('max_depth', np.arange(5, 16, 1, dtype=int)),
    'subsample': hp.quniform('subsample', 0.5, 1, 0.1),
    'colsample_bytree': hp.quniform('colsample_bytree', 0.5, 1, 0.1),
    'objective': 'reg:squarederror',
    'n_estimators': 100,
    'seed': 0,
}
# + id="qJ-asDfqlnfs" colab_type="code" colab={"base_uri": "https://localhost:8080/", "height": 953} outputId="0d2f2f8d-34cc-4ea1-b170-a3ef0fe48e12" executionInfo={"status": "ok", "timestamp": 1583513890712, "user_tz": -60, "elapsed": 1397080, "user": {"displayName": "Wkosz w", "photoUrl": "", "userId": "10838340120858483589"}}
# run
# Minimize obj_func over 25 TPE-guided trials; `best` holds the chosen settings.
best = fmin(obj_func, xgb_reg_params, algo=tpe.suggest, max_evals=25)
best
# + id="I-a7V7s3ln7v" colab_type="code" colab={}
| day5.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# language: python
# name: python3
# ---
# + [markdown] deletable=true editable=true
# ## The Exit Building Problem
# In this problem, there are 6 rooms and our agent starts out in a random room. The doors between rooms can be bidirectional or directional. All transitions between rooms give 0 reward except transitions to room 5 (terminal state) which gives a reward of 100. The objective is to learn a sequence of steps to take us to the exit (room 5). Naturally, the problem can be modelled using a directed graph as shown below. We apply Q-learning to learn action-values and solve the problem.
# + deletable=true editable=true
import matplotlib.pyplot as plt
import matplotlib.image as mpimg
import numpy as np
# %matplotlib inline
# + deletable=true editable=true
img = mpimg.imread('exit_building.png');
plt.axis('off')
plt.imshow(img)
# + deletable=true editable=true
class BuildingEnv:
    """Six-room building from the classic Q-learning exercise.

    Rooms are states 0..5 and an action is simply the room moved into.
    Every transition pays 0 reward except entering room 5 (the exit),
    which pays 100 and terminates the episode.
    """

    def __init__(self):
        self.nS = 6  # no. of states
        self.nA = 6  # no. of actions

        # self.A[s] lists the rooms reachable from room s (legal actions).
        doors = {
            0: [4],
            1: [3, 5],
            2: [3],
            3: [1, 4],
            4: [0, 3, 5],
            5: [5],
        }
        self.A = doors

        # self.T[s][a] -> (s', r, done): taking action a in state s moves
        # the agent into room a; reward/termination only at the exit.
        exit_room = self.nS - 1
        transitions = {}
        for room in range(self.nS):
            transitions[room] = {}
            for nxt in doors[room]:
                reached_exit = nxt == exit_room
                transitions[room][nxt] = (nxt, 100 if reached_exit else 0, reached_exit)
        self.T = transitions

    def reset(self):
        """Drop the agent into a uniformly random room and return it."""
        self.s = np.random.choice(self.nS, 1)[0]
        return self.s

    def step(self, action):
        """Execute *action* from the current room.

        Returns (s', reward, done); raises ValueError when there is no
        door between the current room and the requested one.
        """
        if not action in self.A[self.s]:
            raise ValueError('Cannot execute action %d in state %d' % (action, self.s))
        obs = self.T[self.s][action]
        self.s = obs[0]
        return obs
# + deletable=true editable=true
def q_learning(env, discount_factor = 1, num_episodes=100):
    """Tabular Q-learning with a uniformly random behaviour policy.

    Args:
        env: environment exposing nS, nA, A (legal actions per state),
            reset() and step(action) -> (s', reward, done).
        discount_factor: weight of future reward in the update target;
            smaller values favour instantaneous reward.
        num_episodes: number of episodes to sample before returning.

    Returns:
        Q: (nS, nA) array of learned action values.
    """
    Q = np.zeros((env.nS, env.nA))
    for _ in range(num_episodes):
        state = env.reset()
        done = False
        while not done:
            # Behaviour policy: pick uniformly among the legal actions.
            action = np.random.choice(env.A[state])
            next_state, reward, done = env.step(action)
            # Deterministic-MDP update: target is the reward plus the
            # discounted best value attainable from the successor state.
            best_next = max(Q[next_state, a] for a in env.A[next_state])
            Q[state, action] = reward + discount_factor * best_next
            state = next_state
    return Q
# + deletable=true editable=true
env = BuildingEnv()
Q = q_learning(env, discount_factor=0.5, num_episodes=500)
print(Q)
# + [markdown] deletable=true editable=true
# Since we use a random policy to learn the action-values, we run the environment several times to ensure that most transitions are executed. The task is also episodic (eventually ends, as opposed to continuous) and I have used a discount factor of 0.5. You can experiment with different values.
#
# The results are what we expect. The best (and only) move to take from 0 is to go to 4. The best move from 1 is to go directly to 5 (notice how we can also go to 3, but that has less value). From 3, going to 1 or 4 has equal value (so we could pick randomly) and so on.
#
# Here is an example run to see how an agent can make decisions using these action-values:
# + deletable=true editable=true
# Greedy rollout: starting from a random room, repeatedly take the
# highest-valued action until the exit is reached, then print the path.
s = env.reset()
visited = [s]
done = False
while not done:
    best_action = np.argmax(Q[s])
    s, _, done = env.step(best_action)
    visited.append(s)
print(" -> ".join(str(room) for room in visited))
# + [markdown] deletable=true editable=true
# Although we learned the values of actions, we can also use Q-learning to learn the values of states. At each step, we would then select action that takes us to the state with the highest value. The GridWorld problem in Example 4.1 of [1] can be modeled and solved in a similar fashion.
#
# ## References
# 1. <NAME>, <NAME> (1998). Reinforcement Learning: An Introduction. MIT Press.
| temporal difference/exit_building.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# language: python
# name: python3
# ---
import plotly.graph_objects as go
import pandas as pd
# Mt. Bruno elevation samples on a rectangular grid, one height per cell.
data = pd.read_csv('./data/mt_bruno_elevation.csv')
del data['index']  # drop the stored index column; it is not part of the grid
data.head()
height = data.values
# Render the elevation matrix as a 3-D surface (x/y default to cell indices).
surface = go.Surface(z = height)
fig = go.Figure(surface)
fig.show()
# ## customized surface
import numpy as np
# Build an 11x11 grid over [-5, 5] x [-5, 5].
x = np.arange(-5, 6)
y = np.arange(-5, 6)
xv, yv = np.meshgrid(x, y)
xv
yv
# Paraboloid z = x^2 + y^2 evaluated on the grid.
z = xv**2 + yv**2
z
# This time pass explicit x/y coordinates along with the heights.
surface = go.Surface(x = xv, y = yv, z = z)
fig = go.Figure(surface)
fig.show()
| plotly/example4.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# language: python
# name: python3
# ---
# # Problema de Classificação: Usando Regressão Logística
# #### Prevendo se os microchips de uma planta de fabricação passam na garantia de qualidade
# > "*Suponha que você seja o gerente de produto da fábrica e tenha os resultados dos testes de alguns microchips em dois testes diferentes. A partir desses dois testes, você gostaria de determinar se os microchips devem ser aceitos ou rejeitados. Para ajudá-lo a tomar a decisão, você tem um conjunto de dados de resultados de teste em microchips anteriores, a partir do qual pode construir um modelo de regressão logística.*"
import numpy as np
import pandas as pd
import matplotlib.pyplot as plt
import scipy.optimize as opt
dados = pd.read_csv('ex2data2.txt', header = None) # extraindo os dados
X = dados.iloc[:, 0:2] # recebe as duas primeiras colunas de 'dados'
y = dados.iloc[:, 2] # recebe a última coluna de dados
# visualizando os dados
dados.head() # visualiza os 5 primeiros exemplos nos dados
# visualizando os dados
print('Atributos previsores:\n', X)
print('\nClasses dos dados:\n',y)
# - Temos dois dados independentes em **X** (X[:, 0] e X[:, 1]) e um dado dependente **y**.
# ### Visualizando Graficamente os Dados
#definindo a dimensão do gráfico
plt.figure(figsize = (15, 10))
# preparando os dados no gráfico
auxiliar = y == 1 # define True para 1 e False para 0
aprovado = plt.scatter(X[auxiliar][0].values, X[auxiliar][1].values) # todos os exemplos em X cujo y indica classe 1
reprovado = plt.scatter(X[~auxiliar][0].values, X[~auxiliar][1].values) # todos os exemplos em X cujo y indica classe 0
# definindo legendaas ao gráfico
plt.title('Análise da qualidade de microchips')
plt.xlabel('Pontuação no primeiro teste')
plt.ylabel('Pontuação no segundo teste')
plt.legend((aprovado, reprovado), ('Aprovado', 'Reprovado'))
# ### Mapa de Características
# 
def mapaCaracteristica(X1, X2):
    '''
    Map two feature vectors onto all polynomial terms of X1 and X2 up to
    the sixth power, prefixed with a bias column of ones.

    Parameters
    ----------
    X1, X2 : 1-D array-likes (numpy arrays or pandas Series) of equal length.

    Returns
    -------
    ndarray of shape (len(X1), 28); columns ordered as
    1, X1, X2, X1^2, X1*X2, X2^2, ..., X2^6.
    '''
    grau = 6
    # Accept Series as well as arrays; guarantees [:, np.newaxis] works below.
    X1 = np.asarray(X1)
    X2 = np.asarray(X2)
    # Bug fix: the bias column must be sized from the input X1, not from
    # the global variable X the original accidentally referenced.
    saida = np.ones(X1.shape[0])[:, np.newaxis]
    for i in range(1, grau + 1):
        for j in range(i + 1):
            termo = np.multiply(np.power(X1, i - j), np.power(X2, j))
            saida = np.hstack((saida, termo[:, np.newaxis]))
    return saida
# 
# visualizando os passos da potência (2 + 3 + 4 + 5 + 6 + 7) + bias = 28
for i in range(1, 6+1):
for j in range(i+1):
print(i-j, j)
X = mapaCaracteristica(X.iloc[:,0], X.iloc[:,1])
X.shape # observando a dimensão dos meus novos dados
# ### CUIDADO COM OS AJUSTES EXCESSIVOS!
# ### Implementação da Regressão Logística
def sigmoide(X):
    '''
    Element-wise logistic (sigmoid) function.

    Maps any real input into the open interval (0, 1); accepts scalars
    or numpy arrays.
    '''
    denominador = 1 + np.exp(-X)
    return 1 / denominador
# 
# 
def funcaoCusto(teta, X, y, lmbd):
    '''
    Regularized logistic-regression cost (mean cross-entropy plus an
    L2 penalty).  The penalty weighs lmbd/(2m) and deliberately skips
    the bias term teta[0].
    '''
    m = len(y)
    h = sigmoide(X.dot(teta))
    entropia = (y.T).dot(np.log(h)) + (1 - y.T).dot(np.log(1 - h))
    penalidade = (lmbd/(2*m)) * ((teta[1:].T).dot(teta[1:]))
    return (-1/m) * entropia + penalidade
# 
def gradiente(teta, X, y, lmbd):
    '''
    Gradient of the regularized logistic-regression cost with respect
    to teta.  The bias component grad[0] is left unregularized.
    '''
    m = len(y)
    erro = sigmoide(X.dot(teta)) - y
    grad = (X.T).dot(erro) * (1/m)
    # Add the L2 term for every weight except the bias.
    grad[1:] = grad[1:] + (lmbd / m) * teta[1:]
    return grad
# X is the mapped feature matrix produced above; make y a column vector.
(m, n) = X.shape
y = y[:, np.newaxis]
teta = np.zeros((n,1))
lmbd = 1  # regularization strength
J = funcaoCusto(teta, X, y, lmbd)
print(J)
# use scipy's truncated-Newton optimizer to minimize the cost function
output = opt.fmin_tnc(func = funcaoCusto, x0 = teta.flatten(), fprime = gradiente, args = (X, y.flatten(), lmbd))
lmbd
teta = output[0]
print(teta)  # inspect the optimized theta values
# ### Acurácia do Modelo
pred = [sigmoide(np.dot(X, teta)) >= 0.5] # realizando a predição com os dados
np.mean(pred == y.flatten()) * 100 # visualizando a média de acertos
# ### Visualizando o Modelo Treinando
# +
u = np.linspace(-1, 1.5, 50) # 50 valores randômicos para u de [-1, 1.5]
v = np.linspace(-1, 1.5, 50) # 50 valores randômicos para v de [-1, 1.5]
z = np.zeros((len(u), len(v))) # matriz de zeros [50,50]
def mapaCaracteristicaPlotar(X1, X2):
    '''
    Scalar version of the degree-6 polynomial feature map, used to
    evaluate the decision boundary at one (X1, X2) point.

    Returns a 1-D array with the 28 polynomial terms (bias first).
    '''
    grau = 6
    termos = [np.ones(1)]
    for i in range(1, grau + 1):
        for j in range(i + 1):
            termos.append(np.multiply(np.power(X1, i-j), np.power(X2, j)))
    return np.hstack(termos)
for i in range(len(u)):
for j in range(len(v)):
z[i,j] = np.dot(mapaCaracteristicaPlotar(u[i], v[j]), teta)
plt.figure(figsize = (15, 10))
# preparando os dados no gráfico
auxiliar = y.flatten() == 1 # define True para 1 e False para 0
X = dados.iloc[:,:-1]
aprovado = plt.scatter(X[auxiliar][0], X[auxiliar][1]) # todos os exemplos em X cujo y indica classe 1
reprovado = plt.scatter(X[~auxiliar][0], X[~auxiliar][1]) # todos os exemplos em X cujo y indica classe 0
plt.contour(u,v,z, 0) # visualizando dados em 3-D usando um gráfico 2-D (usando curvas de nível)
# definindo legendaas ao gráfico
plt.title('Análise da qualidade de microchips')
plt.xlabel('Pontuação no primeiro teste')
plt.ylabel('Pontuação no segundo teste')
plt.legend((aprovado, reprovado), ('Aprovado', 'Reprovado'))
| semana06-23-10-2020/.ipynb_checkpoints/parte02-regressao-logistica-checkpoint.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3 (ipykernel)
# language: python
# name: python3
# ---
# + colab={"base_uri": "https://localhost:8080/"} id="BisMznacjFmQ" executionInfo={"status": "ok", "timestamp": 1633680414757, "user_tz": -330, "elapsed": 20390, "user": {"displayName": "CE092_Nevil_Parmar", "photoUrl": "https://lh3.googleusercontent.com/a-/AOh14Gg9T81yfbDRxuhI7y-Gst_G2LV8s-TlxcR_gwbLFg=s64", "userId": "04067904006451101897"}} outputId="333ac1eb-fb29-48ef-cbdd-eca46918ed12"
from google.colab import drive
drive.mount('/content/drive')
# + id="mXswlC4NhpKa" executionInfo={"status": "ok", "timestamp": 1633680575368, "user_tz": -330, "elapsed": 26825, "user": {"displayName": "CE092_Nevil_Parmar", "photoUrl": "https://lh3.googleusercontent.com/a-/AOh14Gg9T81yfbDRxuhI7y-Gst_G2LV8s-TlxcR_gwbLFg=s64", "userId": "04067904006451101897"}}
import os
os.environ['KMP_DUPLICATE_LIB_OK']='True'
import matplotlib.pyplot as plt
import torch
import torch.nn as nn
from torch.utils.data import DataLoader
from torchvision.datasets import ImageFolder
import torchvision.transforms as tt
# + colab={"base_uri": "https://localhost:8080/"} id="ba6W87V3hpKf" executionInfo={"status": "ok", "timestamp": 1633680584339, "user_tz": -330, "elapsed": 991, "user": {"displayName": "CE092_Nevil_Parmar", "photoUrl": "https://lh3.googleusercontent.com/a-/AOh14Gg9T81yfbDRxuhI7y-Gst_G2LV8s-TlxcR_gwbLFg=s64", "userId": "04067904006451101897"}} outputId="4d5bea0a-7a0c-4e74-de98-dbace74fa7c3"
root_path = '/content/drive/MyDrive/Colab Notebooks/ML LAB 10'
classes = os.listdir(root_path + '/Training')
print(classes)
# + colab={"base_uri": "https://localhost:8080/"} id="-Vz0L_J4hpKi" executionInfo={"status": "ok", "timestamp": 1633680594284, "user_tz": -330, "elapsed": 4732, "user": {"displayName": "CE092_Nevil_Parmar", "photoUrl": "https://lh3.googleusercontent.com/a-/AOh14Gg9T81yfbDRxuhI7y-Gst_G2LV8s-TlxcR_gwbLFg=s64", "userId": "04067904006451101897"}} outputId="a85ffe39-1d01-4d6c-f91e-2e5f3d8bfd93"
train_dataset = ImageFolder(root_path + "/Training", transform=tt.ToTensor())
print(train_dataset)
test_dataset = ImageFolder(root_path + "/Test", transform=tt.ToTensor())
print(test_dataset)
# + colab={"base_uri": "https://localhost:8080/", "height": 318} id="YndHzBiOhpKj" executionInfo={"status": "ok", "timestamp": 1633680634930, "user_tz": -330, "elapsed": 23903, "user": {"displayName": "CE092_Nevil_Parmar", "photoUrl": "https://lh3.googleusercontent.com/a-/AOh14Gg9T81yfbDRxuhI7y-Gst_G2LV8s-TlxcR_gwbLFg=s64", "userId": "04067904006451101897"}} outputId="f4e26de6-f2f8-4cc5-8838-2042dc27ccf8"
dataloader = torch.utils.data.DataLoader(train_dataset, batch_size=100, shuffle=True)
for images, labels in dataloader:
print(labels[0])
print(train_dataset.class_to_idx)
print(train_dataset.classes[labels[0]])
plt.imshow(images[0].permute(1, 2, 0))
break
# + id="a0nSTSaThpKj" executionInfo={"status": "ok", "timestamp": 1633680646467, "user_tz": -330, "elapsed": 383, "user": {"displayName": "CE092_Nevil_Parmar", "photoUrl": "https://lh3.googleusercontent.com/a-/AOh14Gg9T81yfbDRxuhI7y-Gst_G2LV8s-TlxcR_gwbLFg=s64", "userId": "04067904006451101897"}}
# Create CNN Model
class CNNModel(nn.Module):
    """Small two-stage CNN classifier for the fruit images.

    Layout: conv(3->16, 5x5) -> ReLU -> maxpool(2),
            conv(16->32, 5x5) -> ReLU -> maxpool(4),
            flatten -> linear(32*11*11 -> len(classes)).
    """

    def __init__(self):
        super(CNNModel, self).__init__()
        # First convolution block.
        self.layer1 = nn.Conv2d(in_channels=3, out_channels=16, kernel_size=5)
        self.relu = nn.ReLU()
        self.pool1 = nn.MaxPool2d(2)
        # Second convolution block with a larger pooling window.
        self.layer2 = nn.Conv2d(in_channels=16, out_channels=32, kernel_size=5)
        self.pool2 = nn.MaxPool2d(4)
        # 32 feature maps of 11x11 feed the classifier (consistent with
        # 100x100 RGB inputs — confirm against the dataset).
        self.layer3 = nn.Linear(32 * 11 * 11, len(classes))

    def forward(self, x):
        # Stage 1: conv -> relu -> pool.
        out = self.pool1(self.relu(self.layer1(x)))
        # Stage 2: conv -> relu -> pool.
        out = self.pool2(self.relu(self.layer2(out)))
        # Flatten each sample's feature maps into a single vector.
        out = out.view(out.size(0), -1)
        # Fully connected projection to the class scores.
        return self.layer3(out)
# batch_size, epoch and iteration
batch_size = 100
num_epochs = (len(train_dataset.samples) / batch_size)
num_epochs = int(num_epochs)
# data loader
train_loader = torch.utils.data.DataLoader(train_dataset, batch_size=batch_size, shuffle=True)
test_loader = torch.utils.data.DataLoader(test_dataset, batch_size=batch_size, shuffle=True)
# Create CNN
model = CNNModel()
# Cross Entropy Loss
error = nn.CrossEntropyLoss()
# SGD Optimizer
learning_rate = 0.1
optimizer = torch.optim.SGD(model.parameters(), lr=learning_rate)
# + colab={"base_uri": "https://localhost:8080/"} id="mpBRVa8yhpKm" executionInfo={"status": "ok", "timestamp": 1633681088838, "user_tz": -330, "elapsed": 146056, "user": {"displayName": "CE092_Nevil_Parmar", "photoUrl": "https://lh3.googleusercontent.com/a-/AOh14Gg9T81yfbDRxuhI7y-Gst_G2LV8s-TlxcR_gwbLFg=s64", "userId": "04067904006451101897"}} outputId="9eb19cc6-1bf9-4fbc-8fd9-d0ed2e7e5490"
# CNN model training
count = 0
loss_list = []
iteration_list = []
accuracy_list = []
for i, (images, labels) in enumerate(train_loader):
train = images
# Clear gradients
optimizer.zero_grad()
# Forward propagation
outputs = model(train)
# Calculate softmax and ross entropy loss
loss = error(outputs, labels)
# Calculating gradients
loss.backward()
# Update parameters
optimizer.step()
count += 1
if count % 10 == 0:
# Calculate Accuracy
correct = 0
total = 0
# Iterate through test dataset
for images, labels in test_loader:
test = images
# Forward propagation
outputs = model(test)
# Get predictions from the maximum value
predicted = torch.max(outputs.data, 1)[1]
# Total number of labels
total += len(labels)
correct += (predicted == labels).sum()
accuracy = 100 * correct / float(total)
# store loss and iteration
loss_list.append(loss.data)
iteration_list.append(count)
accuracy_list.append(accuracy)
print('Iteration: {} Loss: {} Accuracy: {} %'.format(count, loss.data, accuracy))
| LAB 10/CNN_Fruit.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# language: python
# name: python3
# ---
# +
import pandas as pd
import numpy as np
import matplotlib.pyplot as plt
import seaborn as sns
import iirsBenchmark.regressors as regressors
import Auxiliary
import re
from IPython.display import display
from CriticalDiagrams import draw_cd_diagram
# Setting everything up
Auxiliary.create_global_file_path_variables(results_path='../../results')
Auxiliary.set_mpl_sns_params(abnt=False)
# %matplotlib inline
analyzed_path = f'../post hoc analysis files/' # Where to save the plots
# -
# ## Post processing local explanation robustness
def post_processing_local_explanation_robustness(*, train_or_test, metric):
    """Return a dataframe where each row is an explainer and each column is a
    regression method; each cell holds the median (over all data sets) of the
    mean local-explanation metric value.

    Args (keyword-only):
        train_or_test: 'train' or 'test' — which result files to read.
        metric: one of ['stability', 'infidelity', 'jaccard'].

    NOTE(review): a later cell calls this routine under the name
    `digest_traintest_results_local_explanations` — confirm which name is
    the current one.
    """
    data = []
    for regressor in regressors.__all__ + ['Feynman_regressor', 'Operon_regressor']:
        # One CSV of per-observation results per (regressor, metric) pair.
        if train_or_test == 'train':
            df = pd.read_csv(f'{Auxiliary.local_path_train}{regressor}_{metric}.csv')
        else:
            df = pd.read_csv(f'{Auxiliary.local_path_test}{regressor}_{metric}.csv')

        # Converting necessary data. Each obs_<i> column holds the metric
        # evaluated for observation i, serialized as text.
        for col_name in df.columns:
            if col_name.startswith('obs_'):
                df[col_name] = df[col_name].map(Auxiliary.convert_to_array_of_floats)
        df.dropna(inplace=True)

        # The robustness metrics can only have positive values. Here we
        # calculate the mean metric value of the local explanations for a
        # given data set/repetition.
        mean_metric = df.loc[:, [c for c in df.columns if c.startswith('obs_')]].mean(axis=1, skipna=True)
        mean_metric.name = 'mean_metric_value_in_local_expl'

        # Creating a column with the mean.
        new_df = df[['dataset', 'explainer']].join(mean_metric)

        # Improving the name of explainers.
        new_df['explainer'] = new_df['explainer'].map(lambda x: x.replace('_explainer', ''))

        # Mean over the repetitions of each (dataset, explainer) pair.
        df_metric = new_df.groupby(['dataset', 'explainer']).mean().reset_index(level=[0,1])

        # Median over the data sets, one value per explainer.
        df_metric = df_metric.groupby(['explainer']).median()['mean_metric_value_in_local_expl']

        # Filter extreme outliers.  Note: boolean indexing *drops* these
        # entries (they come back as missing after concat), it does not
        # replace them with NaN.
        df_metric = df_metric[df_metric < 5000]

        df_metric.name = regressor.replace('_regressor', '')
        data.append(df_metric)

    return pd.concat(data, axis=1)
# +
colormaps = {
'jaccard' : sns.color_palette("rocket", as_cmap=True),
'infidelity' : sns.color_palette("rocket_r", as_cmap=True),
'stability' : sns.color_palette("rocket_r", as_cmap=True)
}
for metric in ['jaccard', 'infidelity', 'stability']:
print(metric)
all_regressors = digest_traintest_results_local_explanations(
train_or_test='test', metric=metric).T
all_regressors = all_regressors[
['PartialEffects', 'SHAP', 'IntegratedGradients', 'LIME', 'RandomImportance']
]
all_regressors.columns = ['\n'.join(re.findall('[A-Z]+[a-z]*', col)) for col in all_regressors.columns]
all_regressors = (all_regressors.T)[
['KNN', 'Linear', 'Lasso', 'DecisionTree', 'RF', 'MLP', 'SVM', 'XGB', 'Operon', 'ITEA', 'Feynman']
]
fig, axs = plt.subplots(1, 1, figsize=(10, 2.75))
sns.heatmap(all_regressors, linewidths=.5, ax=axs,
annot=True, fmt=".2f", #english format
#annot=all_regressors.applymap(lambda x: f"{x:.2f}".replace('.',',')).values, fmt='', # pt-br format
cmap = colormaps[metric],
)
axs.set_xticklabels(
[s.get_text() for s in axs.get_xticklabels()],
rotation = 30, fontsize = 12, ha='right')
plt.savefig(f'{analyzed_path}{metric}_heatmap.pdf', bbox_inches='tight')
plt.show()
# -
# ## More detailed information about the metrics
def boxplot_cds_explainer(metric, explainer, train_or_test='test'):
    """Generate a boxplot and a critical-difference diagram for one heatmap
    row, so the whole distribution over data sets can be compared instead of
    only the median value.

    Args:
        metric: one of 'jaccard', 'stability', 'infidelity'.
        explainer: explainer name (e.g. 'SHAP'); 'PartialEffects' is only
            applicable to a restricted set of regressors.
        train_or_test: which result split to read.
    """
    # Partial Effects only applies to these regressors; every other
    # explainer is evaluated on all regressors (and needs taller figures).
    applicable_regressors = ['ITEA_regressor', 'Feynman_regressor', 'Operon_regressor', 'Linear_regressor', 'Lasso_regressor']
    y_size = 2.0
    cd_size = y_size

    # Bug fix: the original used `explainer is not 'PartialEffects'`.
    # Identity comparison against a string literal is implementation
    # dependent (SyntaxWarning since Python 3.8); use inequality.
    if explainer != 'PartialEffects':
        applicable_regressors = regressors.__all__ + ['Feynman_regressor', 'Operon_regressor']
        y_size = 4
        cd_size = 2.15

    data = []
    for regressor in applicable_regressors:
        if train_or_test == 'train':
            df = pd.read_csv(f'{Auxiliary.local_path_train}{regressor}_{metric}.csv')
        else:
            df = pd.read_csv(f'{Auxiliary.local_path_test}{regressor}_{metric}.csv')

        # Each obs_<i> column holds the metric for observation i,
        # serialized as text; parse into float arrays.
        for col_name in df.columns:
            if col_name.startswith('obs_'):
                df[col_name] = df[col_name].map(Auxiliary.convert_to_array_of_floats)
        df.dropna(inplace=True)

        # Mean metric value of the local explanations for one data set/rep.
        mean_metric = df.loc[:, [c for c in df.columns if c.startswith('obs_')]].mean(axis=1, skipna=True)
        mean_metric.name = 'mean_metric_value_in_local_expl'
        new_df = df[['dataset', 'explainer']].join(mean_metric)
        new_df['explainer'] = new_df['explainer'].map(lambda x: x.replace('_explainer', ''))

        # Mean over the repetitions of each (dataset, explainer) pair.
        df_metric = new_df.groupby(['dataset', 'explainer']).mean().reset_index(level=[0,1])

        # Keep only the rows for the requested explainer.
        df_metric_pivoted = df_metric.loc[df_metric['explainer']==explainer,
                                          ['dataset', 'mean_metric_value_in_local_expl']]
        # Drop (not NaN-mask) extreme outliers.
        df_metric_pivoted = df_metric_pivoted[df_metric_pivoted['mean_metric_value_in_local_expl'] < 5000]
        df_metric_pivoted = df_metric_pivoted.set_index('dataset')['mean_metric_value_in_local_expl']
        df_metric_pivoted.name = regressor.replace('_regressor', '')
        data.append(df_metric_pivoted)
    data = pd.concat(data, axis=1)

    # Order the columns by median value; for jaccard larger is better,
    # for the other metrics smaller is better.
    # Bug fix: `metric is 'jaccard'` replaced by a proper equality test.
    data = data[data.median().sort_values(
        ascending=(False if metric == 'jaccard' else True)).index]

    # Boxplot ----------------------------------------------------------------
    fig, axs = plt.subplots(1, 1, figsize=(6, y_size))
    flierprops = dict(marker='o', markerfacecolor='black', markersize=2, markeredgecolor='black')
    sns.boxplot(data=data, orient="h", medianprops={'color': 'k'},
                showfliers=False, flierprops=flierprops, ax=axs)
    for box, color in zip(axs.artists, sns.color_palette("Blues", len(axs.artists))):
        box.set_color(color)
    for spine in ['right', 'top', 'bottom']:
        axs.spines[spine].set_visible(False)
    axs.grid()
    plt.tight_layout()
    plt.savefig(f'{analyzed_path}{metric}_{explainer}_boxplot.pdf', bbox_inches='tight')
    plt.show()

    # Critical diagrams ----------------------------------
    fig, axs = plt.subplots(1, 1, figsize=(8, cd_size))
    melted_res = pd.melt(data.reset_index(), id_vars=['dataset'])
    melted_res.columns = ['dataset_name', 'classifier_name', 'accuracy']
    # The CD diagram assumes higher = better, so invert cost-like metrics.
    if not metric == 'jaccard':
        melted_res['accuracy'] = np.max(melted_res['accuracy']) - melted_res['accuracy']
    draw_cd_diagram(
        df_perf=melted_res,
        labels=False, ax=axs, width=8, textspace=1.0, reverse=False)
    plt.tight_layout()
    plt.savefig(f'{analyzed_path}{metric}_{explainer}_criticaldiagram.pdf', bbox_inches='tight')
    plt.show()
for metric in ['jaccard', 'stability', 'infidelity']:
for explainer in ['PartialEffects', 'SHAP', 'IntegratedGradients', 'LIME']:
print(metric, explainer)
boxplot_cds_explainer(metric=metric, explainer=explainer, train_or_test='test')
| post hoc analysis/scripts and notebooks/Local explanations robustness.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# language: python
# name: python3
# ---
# +
# coding: utf-8
import pandas as pd
import numpy as np
from keras.preprocessing import sequence
from keras.models import Sequential
from keras.layers import Dense, Dropout, Activation, Lambda, Flatten
from keras.layers import Embedding
from keras.layers import Convolution1D, MaxPooling1D
from keras.datasets import imdb
from keras import backend as K
import re
from keras.utils import np_utils
from keras.preprocessing import text
from keras.callbacks import ModelCheckpoint
from keras.regularizers import l2
# 生成的 word vector 的 dimension
maxlen = 1041
alphabet = 'abcdefghijklmnopqrstuvwxyz0123456789,.!? '
datatrain = pd.read_csv("train.csv", header=0)
datatest = pd.read_csv("test.csv", header=0)
chars = set(alphabet)
print('total chars:', len(chars))
char_indices = dict((c, i) for i, c in enumerate(chars))
indices_char = dict((i, c) for i, c in enumerate(chars))
# 创建 len(docs)个, 1 * maxlen 的矩阵
X_train = np.ones((datatrain.shape[0], maxlen), dtype = np.int64) * 0
docs = []
labels = []
print('zipping the data:')
epoch = 0
for cont,title,label in zip(datatrain.content, datatrain.title, datatrain.classes):
content = title + cont
content = re.sub("[^a-z0-9\,\.\!\?]", " ", content)
docs.append(content)
label = label - 1
labels.append(label)
epoch = epoch + 1
if (epoch % 20000 == 0):
print('zipping the training data:', epoch)
print('Success!')
print('There are training set:', datatrain.shape[0])
print('Doing one hot encoding:')
# One-Hot encoding 另外应该是反过来进行 encode 的,,稀疏部分用0代替
for i, doc in enumerate(docs):
# 倒着数后面的maxlen个数字,但是输出顺序不变
for t, char in enumerate(doc[-maxlen:]):
X_train[i, (maxlen-1-t)] = char_indices[char]
print('Success!')
Y_train = np.array(labels)
print('Convert class vector to binary class matrix (for use with categorical_crossentropy)')
nb_classes = 5
print(nb_classes, 'classes in the dataset')
Y_train = np_utils.to_categorical(Y_train, nb_classes)
print('Success!')
X_test = np.ones((datatest.shape[0], maxlen), dtype = np.int64) * 0
docs = []
labels = []
print('zipping the test data:')
epoch = 0
for cont,title,label in zip(datatest.content, datatest.title, datatest.classes):
content = title + cont
content = re.sub("[^a-z0-9\,\.\!\?]", " ", content)
docs.append(content)
label = label - 1
labels.append(label)
epoch = epoch + 1
if (epoch % 20000 == 0):
print('zipping the test data:', epoch)
print('Success!')
print('There are test set:', datatest.shape[0])
print('Doing one hot encoding:')
# One-Hot encoding 另外应该是反过来进行 encode 的,,稀疏部分用-1代替
for i, doc in enumerate(docs):
# 倒着数后面的maxlen个数字,但是输出顺序不变
for t, char in enumerate(doc[-maxlen:]):
X_test[i, (maxlen-1-t)] = char_indices[char]
print('Success!')
Y_test = np.array(labels)
print('Convert class vector to binary class matrix (for use with categorical_crossentropy)')
nb_classes = 5
print(nb_classes, 'classes in the dataset')
Y_test = np_utils.to_categorical(Y_test, nb_classes)
print('Success!')
print("All of the pre-processde work is done.")
# NOTE: this cell targets the Keras 1.x API (init=, W_regularizer=,
# nb_filter=, border_mode=, nb_epoch=).
model = Sequential()
# we start off with an efficient embedding layer which maps
# our vocab indices into embedding_dims dimensions
model.add(Embedding(input_dim = 41, output_dim = 50, input_length = maxlen, init = 'he_normal', W_regularizer=l2(0.01)) )
# we add a Convolution1D, which will learn nb_filter
# word group filters of size filter_length:
model.add(Convolution1D(nb_filter = 128, filter_length = 3, W_regularizer=l2(0.01), init = 'he_normal', border_mode='same', activation='relu', subsample_length=1))
model.add(Convolution1D(nb_filter = 128, filter_length = 3, W_regularizer=l2(0.01), init = 'he_normal', border_mode='same', activation='relu', subsample_length=1))
# we use max pooling over the full sequence length, collapsing each
# filter's response to a single value:
model.add(MaxPooling1D(pool_length = model.output_shape[1]))
#model.add(MaxPooling1D(pool_length = 2))
#print(model.output_shape[1], "pooling shape")
# We flatten the output of the conv layer,
# so that we can add a vanilla dense layer:
model.add(Flatten())
# We add a vanilla hidden layer:
model.add(Dense(100))
#model.add(Dropout(0.1))
model.add(Activation('relu'))
# Project onto the 5 class scores and squash with a softmax:
model.add(Dense(5))
model.add(Activation('softmax'))
# Save weights after every epoch, tagged with epoch number and val accuracy.
checkpointers = ModelCheckpoint("parameters/weights.{epoch:02d}-{val_acc:.4f}.hdf5", monitor='val_acc', verbose=0, save_best_only=False, mode='auto')
#model.load_weights("parameters/weights.39-0.32.hdf5")
model.summary()
model.compile(loss='categorical_crossentropy', optimizer='adam', metrics=['accuracy'])
model.fit(X_train, Y_train, batch_size = 128, nb_epoch = 20, validation_data=(X_test, Y_test), callbacks = [checkpointers])
| Models/2008_models/char-models(CNN2 Full dataset).ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# language: python
# name: python3
# ---
# # TCGAのRNA-seqデータのグラフをプロットする
# ## データの取得
#
# TCGAの腎明細胞癌(Kidney Clear Cell Carcinoma(KIRC))の遺伝子発現データを扱うため、まずデータをダウンロードします。
#
# ### RNA-seqデータの取得
# RNA-seqデータは https://xenabrowser.net/datapages/ からダウンロードします。
#
# 1. 上のURLにアクセスし、`GDC TCGA Kidney Clear Cell Carcinoma (KIRC) (15 datasets)`をクリックします。
# 2. 'gene expression RNAseq'の 'HTSeq - FPKM-UQ' をクリックすると、ダウンロードが始まります。
# 3. `.gz`という圧縮ファイルですが、解凍せずにそのままにします。
#
# ### サンプルのメタデータの取得
# サンプルのメタデータは、Genomic Data Commons (GDC) Data Portal https://portal.gdc.cancer.gov/ からダウンロードします。
#
# 1. 上のURLにアクセスし、
#
# ## データ読み込み
# まずはpandasでRNA-seqデータを読み込みましょう。
import pandas as pd
# NOTE: change this file name/path as appropriate for your environment
df = pd.read_csv('Downloads/TCGA-KIRC.htseq_fpkm-uq.tsv.gz', sep='\t')
# Check the column names.
#
# `Ensembl_ID` is the Ensembl gene ID.
#
# The remaining columns, whose names start with `TCGA-`, are sample IDs.
df.columns
# Use `head()` to get a quick overview of the table contents.
df.head()
# ## Data preprocessing
# Make Ensembl_ID the index
df.set_index('Ensembl_ID', inplace=True)
# We want to flip the table from "rows = genes, columns = samples" to
# "rows = samples, columns = genes", so transpose it with `transpose()`.
df_t = df.transpose()
# Check that it worked.
df_t.head()
# NOTE(review): after the transpose 'Ensembl_ID' is the columns-axis name,
# not a column label, so this rename is likely a no-op — verify.
df_t.rename(columns={'Ensembl_ID': 'Sample_ID'}, inplace=True)
# Clean up the gene IDs used as column names.
#
# Each gene ID ends in `.<integer>`, which is the gene ID version.
# That version suffix often gets in the way of joining/extracting data,
# so we strip it here.
#
# Below we use `sub()` from the `re` library for a regex substitution.
import re
# `sub()` replaces, inside its 3rd argument, whatever matches the regex in
# the 1st argument with the 2nd argument:
x = 'ENSG00000242268.2'
print(x)
print(re.sub('\\.[^-]+$', '', x))
# Swap in the stripped column names.
df_t.columns = [re.sub('\\.[^-]+$', '',x) for x in df_t.columns]
# + active=""
# Check that the column names changed.
# -
df_t.head()
# ## Preprocessing part 2: matching samples to their metadata
# The KIRC data can contain several RNA-seq samples per patient
# (replicates = repeated experiments, etc.).
#
# There are several ways to remove such duplicates; here we keep the first
# occurrence.
# A sample ID is actually "patient ID + extra ID", which is how data from
# the same patient is tracked.
#
# To map samples to patients, strip the trailing "extra ID" part.
x = "TCGA-B2-4101-B32"
print(x)
print(re.sub('-[^-]+$', '',x))
# Remove the "extra ID" from the index.
df_t.index = [re.sub('-[^-]+$', '',x) for x in df_t.index]
# For duplicated sample IDs, keep only the first row in which the ID appears.
df_t_uniq = df_t.reset_index().drop_duplicates(subset='index',keep='first').set_index('index')
# Down to 530 samples.
df_t_uniq.shape
df_t_uniq.head()
# ## Loading the sample metadata
df_clinical = pd.read_csv("clinical.cases_selection.2019-09-03/clinical.tsv", sep='\t')
df_clinical.shape
df_clinical.head()
# Tally the tumor stages.
df_clinical['tumor_stage'].value_counts()
# Build a frame with only submitter_id (patient ID) and tumor_stage.
df_clinical_stage = df_clinical[['submitter_id', 'tumor_stage']].set_index('submitter_id')
# ## Plotting the data
# Focus on one gene and examine how its expression relates to tumor stage.
#
# First merge the two data frames, keyed on patient ID.
df_t_uniq_merge = pd.merge(df_clinical_stage,
                          df_t_uniq,
                          left_index=True, right_index=True)
df_t_uniq_merge.shape
# Drop rows whose tumor_stage is 'not reported'.
df_t_uniq_merge = df_t_uniq_merge[df_t_uniq_merge.tumor_stage != 'not reported']
df_t_uniq_merge.shape
df_t_uniq_merge.head()  # BUG FIX: was `df_merge.head()` — `df_merge` is never defined (NameError)
# Draw violin plots with matplotlib and seaborn.
#
# About violin plots, see:
# https://ja.wikipedia.org/wiki/%E3%83%90%E3%82%A4%E3%82%AA%E3%83%AA%E3%83%B3%E5%9B%B3
import matplotlib.pyplot as plt
import seaborn as sns
# +
# Expression of ENSG00000141510 by tumor stage.
# NOTE(review): `jitter` is a stripplot option, not a violinplot one —
# confirm the installed seaborn version accepts it.
fig = plt.figure()
ax = fig.add_subplot(1, 1, 1)
sns.violinplot(x='tumor_stage', y='ENSG00000141510', data=df_t_uniq_merge, hue='tumor_stage', dodge=True,
               jitter=True, color='black', palette='Set3', ax=ax)
ax.set_xlabel('stage')
ax.set_ylabel('log2(FPKM)')
ax.set_ylim(0, 25)
ax.legend()
plt.show()
# -
# ## Finding differences
# Convert (the expression part of) the pandas data frame to a numpy array.
import numpy as np
X = df_t_uniq_merge.drop('tumor_stage', axis=1).to_numpy()
# Use the stage as the target variable.
#
# Map the categorical values to numbers (stages i/ii -> 0, iii/iv -> 1).
Y = df_t_uniq_merge['tumor_stage'].map({'stage i': 0, 'stage ii': 0, 'stage iii': 1, 'stage iv':1})
# ### Running LASSO
#
# LASSO is a supervised feature-selection method.
#
# First run cross-validation to choose the LASSO hyperparameter.
# +
from sklearn.preprocessing import StandardScaler
from sklearn.linear_model import LassoCV
scaler = StandardScaler()
clf = LassoCV(alphas=10 ** np.arange(-6, 1, 0.1), cv=5)
scaler.fit(X)
clf.fit(scaler.transform(X), Y)
print(clf.alpha_)
print(clf.coef_)
print(clf.intercept_)
# -
# Then run LASSO itself
# +
from sklearn.preprocessing import StandardScaler
from sklearn.linear_model import Lasso
scaler = StandardScaler()
clf = Lasso(alpha=clf.alpha_)  # reuse the CV-selected alpha
scaler.fit(X)
clf.fit(scaler.transform(X), Y)
print(clf.coef_)
print(clf.intercept_)
# -
# ### Inspect the gene with the largest coefficient
max_index = np.argmax(clf.coef_)
print(max_index)
df_t_uniq_merge.columns[max_index]
# Visualise that gene's expression.
# +
fig = plt.figure()
ax = fig.add_subplot(1, 1, 1)
sns.violinplot(x='tumor_stage', y=df_t_uniq_merge.columns[max_index], data=df_t_uniq_merge,
               hue='tumor_stage', dodge=True,
               jitter=True, color='black', palette='Set3', ax=ax)
ax.set_xlabel('stage')
ax.set_ylabel('log2(FPKM+1)')
ax.set_ylim(0, 25)
ax.legend()
plt.show()
# -
# ### Visualise the expression of the top-10 coefficient genes
df_t_uniq_merge.columns[np.argsort(-clf.coef_)[:10]]
for i in np.argsort(-clf.coef_)[:10]:
    fig = plt.figure()
    ax = fig.add_subplot(1, 1, 1)
    sns.violinplot(x='tumor_stage', y=df_t_uniq_merge.columns[i], data=df_t_uniq_merge,
                   hue='tumor_stage', dodge=True,
                   jitter=True, color='black', palette='Set3', ax=ax)
    ax.set_xlabel('stage')
    ax.set_ylabel('log2(FPKM+1)')
    ax.set_ylim(0, 25)
    ax.legend()
    plt.show()
# Show summary statistics of expression per gene and per stage.
df_t_uniq_merge[['tumor_stage', 'ENSG00000083896']].groupby('tumor_stage').describe().unstack(1)
# +
# Hold out a test set and evaluate the CV-LASSO with mean squared error.
from sklearn.preprocessing import StandardScaler
from sklearn.linear_model import LassoCV
from sklearn.metrics import mean_squared_error
from sklearn.model_selection import train_test_split
x_train, x_test, y_train, y_test = train_test_split(X, Y, test_size=0.2, random_state=0)
scaler = StandardScaler()
clf = LassoCV(alphas=10 ** np.arange(-6, 1, 0.1), cv=5)
scaler.fit(x_train)
clf.fit(scaler.transform(x_train), y_train)
y_pred = clf.predict(scaler.transform(x_test))
mse = mean_squared_error(y_test, y_pred)
print(mse)
# +
# Predicted vs. true scatter plot.
from matplotlib import pyplot as plt
fig = plt.figure()
ax = fig.add_subplot(1, 1, 1)
ax.scatter(y_pred, y_test)
ax.set_xlabel('predicted')
ax.set_ylabel('true')
ax.set_aspect('equal')
fig.show()
# -
| notebooks/5_.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# language: python
# name: python3
# ---
import pandas as pd
import numpy as np
import pymongo
import matplotlib.pyplot as plt
from bson.objectid import ObjectId
import temas
from jupyterthemes import jtplot
jtplot.style()
# Connect to the local MongoDB instance and open the posts collection.
client = pymongo.MongoClient()
db = client["mediaNet"]
collection = db["posts"]
doc = collection.find({})
# Project each post to id / source / link / title; "titulo" falls back to ""
# when post_message is missing.
# NOTE(review): assumes every document has "from", "name" and "link" keys —
# a missing one would raise KeyError; confirm against the collection schema.
doc = ({"id": x["_id"], "fuente": x["from"]["name"], "enlace": x["link"], "titulo": x["post_message"] if "post_message" in x else ""} for x in doc)
df = pd.DataFrame(doc)
df.head()
# Export the projection for downstream use.
df.to_csv("fuente.csv")
| Resumen.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: 'Python 3.9.7 64-bit (''sktimeenv'': conda)'
# language: python
# name: python3
# ---
# # Model Building
#
# +
import pandas as pd
import numpy as np
import matplotlib.pyplot as plt
import seaborn as sns
from sklearn.model_selection import train_test_split, GridSearchCV, RandomizedSearchCV
from sklearn.linear_model import LogisticRegression
from sklearn.neighbors import KNeighborsClassifier
from sklearn.compose import make_column_selector, make_column_transformer
from sklearn.preprocessing import StandardScaler, OneHotEncoder
from sklearn.metrics import accuracy_score, balanced_accuracy_score, ConfusionMatrixDisplay
from sklearn.ensemble import RandomForestClassifier, GradientBoostingClassifier, ExtraTreesClassifier
from sklearn.tree import DecisionTreeClassifier
import tensorflow as tf
from tensorflow.keras.models import Sequential
from tensorflow.keras.layers import Dense, Dropout
from tensorflow.keras.callbacks import EarlyStopping
from tensorflow.keras.regularizers import l2
from tensorflow.keras import utils
from tensorflow.keras.losses import CategoricalCrossentropy, SparseCategoricalCrossentropy
# -
# #### Data Reading
#read in data
df = pd.read_csv("../data/classification_data.csv")
df_demo = pd.read_csv("../data/demographics-data/classification_data_demographics.csv")
# drop pct_Asian from the demographics features
df_demo = df_demo.drop(columns='pct_Asian')
#move the target column ('labels') to the end of the frame
df_demo = df_demo[[col for col in df_demo if col != 'labels']+ ['labels']]
df_demo.head()
df.head()
# ## Set X and y
# +
# Features are every column but the last; the target is the last column.
X = df.iloc[:,:-1]
y = df.iloc[:,-1]
X_demo = df_demo.iloc[:,:-1]
y_demo = df_demo.iloc[:,-1]
# +
#Encode Features: one-hot the object (categorical) columns, pass the rest through
ct = make_column_transformer(
    (OneHotEncoder(sparse=False, handle_unknown='ignore'), make_column_selector(dtype_include=object)),
    remainder='passthrough',
    verbose_feature_names_out=False
)
X_encoded = ct.fit_transform(X)
X_encoded = pd.DataFrame(X_encoded, columns=ct.get_feature_names_out())
# NOTE: the same transformer object is re-fit on the demographics features
X_encoded_demo = ct.fit_transform(X_demo)
X_encoded_demo = pd.DataFrame(X_encoded_demo, columns=ct.get_feature_names_out())
#Scale X (zero mean / unit variance)
X_ss = StandardScaler().fit_transform(X_encoded)
X_ss_demo = StandardScaler().fit_transform(X_encoded_demo)
# -
# ### Train test split
# Stratified splits with a fixed random_state for reproducibility.
X_train, X_test, y_train, y_test = train_test_split(X_ss, y, stratify=y, random_state=13)
X_train_demo, X_test_demo, y_train_demo, y_test_demo = train_test_split(X_ss_demo, y_demo, stratify=y_demo, random_state=13)
print(X_train.shape, X_test.shape, y_train.shape, y_test.shape)
print(X_train_demo.shape, X_test_demo.shape, y_train_demo.shape, y_test_demo.shape)
# ## Model Building
# ### Baseline Model
#
# We have a very skewed dataset so our null baseline accuracy is 72.84%, however our null balanced accuracy score is 33.33%
y_train.value_counts(normalize=True)
# Null model: always predict class 1 (presumably the majority class, given
# the 72.84% baseline accuracy above — confirm against value_counts).
null_preds = np.full_like(y_test, 1)
balanced_accuracy_score(y_test, null_preds)
#Create Arrays of Model Scores, seeded with the null-model baseline
model_scores = [['Null Model', '.728433', '.33333','.728433', '.33333']]
demo_model_scores = [['Null Model', '.728433', '.33333','.728433', '.33333']]
#column names for the score tables
cols = ["model", 'train_acc', 'train_balanced_acc', 'test_acc', 'test_balanced_acc']
# ### Logistic Regression
# #### Without Demographic Data
# +
# Small grid over the inverse-regularisation strength C.
params = {
    'C' : [.005,.01,.05],
    'class_weight' : [None]
}
log_reg = GridSearchCV(LogisticRegression(max_iter=10_000), params, n_jobs=-1, scoring='balanced_accuracy')
log_reg.fit(X_train, y_train)
print(log_reg.best_params_)
# +
#Add scores to array
# Note: best_score_ is the CV balanced accuracy, and GridSearchCV.score
# uses scoring='balanced_accuracy', so the last entry is test balanced acc.
model_scores.append([
    'Logistic Regression',
    accuracy_score(y_train, log_reg.predict(X_train)),
    log_reg.best_score_,
    accuracy_score(y_test, log_reg.predict(X_test)),
    log_reg.score(X_test, y_test),
])
pd.DataFrame(model_scores, columns=cols)
# -
# #### With Demographic Data
# +
params = {
    'C' : [200, 250, 300],
    'class_weight' : [None]
}
log_reg_demo = GridSearchCV(LogisticRegression(max_iter=10_000), params, n_jobs=-1, scoring='balanced_accuracy')
log_reg_demo.fit(X_train_demo, y_train_demo)
print(log_reg_demo.best_params_)
# +
#Add scores to array
# BUG FIX: evaluate the demographics model against the demographics split
# (y_train_demo / y_test_demo); the original passed y_train / y_test from
# the other dataset, unlike every later *_demo section.
demo_model_scores.append([
    'Logistic Regression',
    accuracy_score(y_train_demo, log_reg_demo.predict(X_train_demo)),
    log_reg_demo.best_score_,
    accuracy_score(y_test_demo, log_reg_demo.predict(X_test_demo)),
    log_reg_demo.score(X_test_demo, y_test_demo),
])
pd.DataFrame(demo_model_scores, columns=cols)
# -
# #### Compare Confusion Matrix
# +
# Side-by-side confusion matrices for the two logistic-regression fits.
fig, ax = plt.subplots(1,2, figsize=(10,5))
ax[0].set_title("Logistic Regression w/o Demographics")
ConfusionMatrixDisplay.from_estimator(log_reg, X_test, y_test, cmap='Blues', ax=ax[0])
ax[1].set_title("Logistic Regression with Demographics")
ConfusionMatrixDisplay.from_estimator(log_reg_demo, X_test_demo, y_test_demo, cmap='Purples', ax=ax[1])
plt.tight_layout()
# -
# ### KNeighbors
# KNN Models take a comparatively long time to predict and score
# #### Without Demographic Data
# +
params = {
    'n_neighbors' : [7],
    'weights' : ['uniform', 'distance'],
}
knn = GridSearchCV(KNeighborsClassifier(), params, n_jobs=-1, scoring='balanced_accuracy')
knn.fit(X_train, y_train)
print(knn.best_params_)
# +
#Add scores to array
model_scores.append([
    'KNeighborsClassifier',
    accuracy_score(y_train, knn.predict(X_train)),
    knn.best_score_,
    accuracy_score(y_test, knn.predict(X_test)),
    knn.score(X_test, y_test)
])
pd.DataFrame(model_scores, columns=cols)
# -
# #### With Demographic Data
# +
# Wider neighbour grid for the demographics feature set.
params = {
    'n_neighbors' : [3,5,7, 9],
    'weights' : ['uniform', 'distance'],
}
knn_demo = GridSearchCV(KNeighborsClassifier(), params, n_jobs=-1, scoring='balanced_accuracy')
knn_demo.fit(X_train_demo, y_train_demo)
print(knn_demo.best_params_)
# +
#Add scores to array
demo_model_scores.append([
    'KNeighborsClassifier',
    accuracy_score(y_train_demo, knn_demo.predict(X_train_demo)),
    knn_demo.best_score_,
    accuracy_score(y_test_demo, knn_demo.predict(X_test_demo)),
    knn_demo.score(X_test_demo, y_test_demo),
])
pd.DataFrame(demo_model_scores, columns=cols)
# -
# #### Compare Confusion Matrix
# +
fig, ax = plt.subplots(1,2, figsize=(10,5))
ax[0].set_title("KNN w/o Demographics")
ConfusionMatrixDisplay.from_estimator(knn, X_test, y_test, cmap='Blues', ax=ax[0])
ax[1].set_title("KNN with Demographics")
ConfusionMatrixDisplay.from_estimator(knn_demo, X_test_demo, y_test_demo, cmap='Purples', ax=ax[1])
plt.tight_layout()
# -
# ### Decision Tree
# #### Without Demographics Data
# +
# Grid over tree depth only; None means fully-grown trees.
params = {
    'max_depth' : [5, 10, 20, 50, None]
}
dt = GridSearchCV(DecisionTreeClassifier(), params, n_jobs=-1, scoring='balanced_accuracy')
dt.fit(X_train, y_train)
print(dt.best_params_)
# -
#Add scores to array
model_scores.append([
    'Decision Tree',
    accuracy_score(y_train, dt.predict(X_train)),
    dt.best_score_,
    accuracy_score(y_test, dt.predict(X_test)),
    dt.score(X_test, y_test)
])
pd.DataFrame(model_scores, columns=cols)
# #### With Demographics Data
# +
# Same depth grid as the non-demographics tree.
params = {
    'max_depth' : [5, 10, 20, 50, None]
}
dt_demo = GridSearchCV(DecisionTreeClassifier(), params, n_jobs=-1, scoring='balanced_accuracy')
dt_demo.fit(X_train_demo, y_train_demo)
print(dt_demo.best_params_)
# +
#Add scores to array
demo_model_scores.append([
    'Decision Tree',
    accuracy_score(y_train_demo, dt_demo.predict(X_train_demo)),
    dt_demo.best_score_,
    accuracy_score(y_test_demo, dt_demo.predict(X_test_demo)),
    dt_demo.score(X_test_demo, y_test_demo)
])
pd.DataFrame(demo_model_scores, columns=cols)
# -
# #### Compare Confusion Matrix
# +
fig, ax = plt.subplots(1,2, figsize=(10,5))
ax[0].set_title("Decision Tree w/o Demographics")
ConfusionMatrixDisplay.from_estimator(dt, X_test, y_test, cmap='Blues', ax=ax[0])
ax[1].set_title("Decision Tree with Demographics")
ConfusionMatrixDisplay.from_estimator(dt_demo, X_test_demo, y_test_demo, cmap='Purples', ax=ax[1])
plt.tight_layout()
# -
# ### RandomForest
# #### Without Demographics Data
# +
# Grid over forest size, depth and split size; sqrt features per split.
params = {
    'n_estimators' :[50, 100, 150],
    'max_depth' : [10,20,30],
    'min_samples_split' : [2,3,5],
    'max_features' : ['sqrt']
}
rf = GridSearchCV(RandomForestClassifier(), params, n_jobs=-1, scoring='balanced_accuracy')
rf.fit(X_train, y_train)
print(rf.best_params_)
# -
#Add scores to array
model_scores.append([
    'RandomForest',
    accuracy_score(y_train, rf.predict(X_train)),
    rf.best_score_,
    accuracy_score(y_test, rf.predict(X_test)),
    rf.score(X_test, y_test)
])
pd.DataFrame(model_scores, columns=cols)
# #### With Demographics Data
# +
params = {
    'n_estimators' :[50, 100, 150, 200],
    'max_depth' : [5,10,15,20],
    'min_samples_split' : [2,3,4,5]
}
rf_demo = GridSearchCV(RandomForestClassifier(), params, n_jobs=-1, scoring='balanced_accuracy')
rf_demo.fit(X_train_demo, y_train_demo)
# BUG FIX: report this grid-search's parameters (was `rf.best_params_`,
# which printed the non-demographics model's result)
print(rf_demo.best_params_)
# -
#Add score to array
demo_model_scores.append([
    'RandomForest',
    accuracy_score(y_train_demo, rf_demo.predict(X_train_demo)),
    rf_demo.best_score_,
    accuracy_score(y_test_demo, rf_demo.predict(X_test_demo)),
    rf_demo.score(X_test_demo, y_test_demo)
])
pd.DataFrame(demo_model_scores, columns=cols)
# #### Compare Confusion Matrix
# +
fig, ax = plt.subplots(1,2, figsize=(10,5))
ax[0].set_title("RandomForest w/o Demographics")
ConfusionMatrixDisplay.from_estimator(rf, X_test, y_test, cmap='Blues', ax=ax[0])
# BUG FIX: this panel plots the RandomForest — the original title said
# "ExtraTrees with Demographics" (copy-paste from another section)
ax[1].set_title("RandomForest with Demographics")
ConfusionMatrixDisplay.from_estimator(rf_demo, X_test_demo, y_test_demo, cmap='Purples', ax=ax[1])
plt.tight_layout()
# -
# ### Model Score Analysis
df_demo_scores = pd.DataFrame(demo_model_scores, columns=cols)
df_scores = pd.DataFrame(model_scores, columns=cols)
# #### Without Demographics Data
df_scores
df_scores = df_scores.set_index("model")
df_scores = df_scores.astype('float')
df_scores.sort_values(by='test_balanced_acc')
# The models that had the best balanced accuracy severely sacrificed their test accuracy score.
# #### With Demographics Data
df_demo_scores
#Manually add the scores from the Neural Net (hard-coded values; the model
#itself is not trained in this notebook)
df_demo_scores.loc[7] = ['Neural Net', .9957, .9895, .9797, .9591]
df_demo_scores.iloc[:,1:] = df_demo_scores.iloc[:,1:].astype('float')
df_demo_scores.sort_values('test_balanced_acc', inplace=True)
df_demo_scores.set_index('model', inplace=True)
df_demo_scores
# Grouped bar chart: train/test accuracy and balanced accuracy per model.
plt.figure(figsize=(20,8))
n = np.arange(len(df_demo_scores.index))
w = .2  # bar width
plt.bar(n,df_demo_scores['train_acc'], width=w, label='Training Accuracy',color=sns.color_palette("Paired")[1])
plt.bar(n+w+w,df_demo_scores['train_balanced_acc'], width=w, label='Training Balanced Accuracy', color=sns.color_palette("Paired")[0])
plt.bar(n+w,df_demo_scores['test_acc'], width=w, label='Testing Accuracy', color='darkorange')
plt.bar(n+w+w+w,df_demo_scores['test_balanced_acc'], width=w, label='Testing Balanced Accuracy', color=sns.color_palette("Paired")[6])
plt.xticks(n+.3, df_demo_scores.index, fontsize=15)
plt.ylabel("Accuracy", fontsize=17)
plt.legend()
plt.savefig("../plots/model_scores.png")
| code/04-models.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# language: python
# name: python3
# ---
# +
import numpy as np
def getChoice(V,e):
    """Epsilon-greedy action selection over the 4-entry value vector V.

    With probability e — or when the best value is -1, which means every
    entry of V is -1 — pick one of the four actions uniformly at random;
    otherwise return the greedy action from the global ``act`` list.
    """
    best = int(np.argmax(V))
    explore = np.random.rand() < e  # drawn on every call, as in the original
    if explore or V[best] == -1:
        return act[np.random.randint(0, 4)]
    return act[best]
act=["up","down","left","right"]
Qtable=np.zeros((4,4,4))#container for the action values (row, col, action)
alpha=0.1   # learning rate
gamma=0.99  # discount factor
e=0.1       # epsilon for epsilon-greedy exploration
ite=50 #number of episodes
reward=np.zeros(ite)#records the total reward per episode
episode=np.zeros(ite)#records how many steps each episode needed to reach the goal
# Q-learning on a 4x4 grid world: start at (0,0), goal at (3,3),
# -1 reward per step and +101 on the goal step (+100 net).
for i in range(ite):
    current=[0,0]#start position
    goal=0
    r=0
    while goal==0:#until the goal is reached
        #print(current)
        a=getChoice(Qtable[current[0],current[1],:],e)
        #print("a: ",a)
        pre=current.copy() #remember the position before moving; copy() is essential
        if a==act[0]:
            if current[0]!=0:#unless at the edge
                current[0]-=1#move one cell
            t=0
        elif a==act[1]:
            if current[0]!=3:
                current[0]+=1
            t=1
        elif a==act[2]:
            if current[1]!=0:
                current[1]-=1
            t=2
        else:
            if current[1]!=3:
                current[1]+=1
            t=3
        r=-1 #-1 per step
        if current==[3,3]:
            goal=1 #reached the goal
            r+=101
        Qtable[pre[0],pre[1],t]=Qtable[pre[0],pre[1],t]+alpha*(r+gamma*np.max(Qtable[current[0],current[1],:])-Qtable[pre[0],pre[1],t])#Q-learning update
        reward[i]+=r
        episode[i]+=1
    print("goalしました。",i)
# -
Qvalue=np.max(Qtable,axis=2) # final state values (max over actions)
print(Qvalue)
import matplotlib.pyplot as plt
Qvalue[3,3]=100 #the goal state's value is never updated, so set it to 100 for display; not essential
plt.pcolor(Qvalue)
plt.colorbar()
# Steps-per-episode and total-reward-per-episode learning curves.
# NOTE(review): no plt.figure() between plots — in the original notebook
# each call lived in its own cell, so each produced its own figure.
plt.plot(episode)
print(episode)
plt.plot(reward)
| src/assignment/201111/small_grid_world_Q.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# language: python
# name: python3
# ---
# # Inversion of a 3D cube
# In this notebook we show how to use a configuration file to run Hazel in a 3D cube, both in serial and parallel modes.
# ## Serial mode
# Let's first a set of observations obtained from the GREGOR telescope as example. The observations consisted of a scan of an active region in which filaments are seen when observed in the core of the He I 10830 A line.
# %matplotlib inline
import numpy as np
import matplotlib.pyplot as pl
import hazel
import h5py
import scipy.io as io
print(hazel.__version__)
label = ['I', 'Q', 'U', 'V']
# First read the observations and do some plots. The wavelength axis in the save file is given in displacement with respect to some reference wavelength, in this case 10830.0911 A.
# +
tmp = io.readsav('/scratch/Dropbox/test/test_hazel2/orozco/gregor_spot.sav')
print(tmp.keys())
f, ax = pl.subplots(nrows=1, ncols=2, figsize=(10,6))
ax[0].imshow(tmp['heperf'][:,0,:,0])
ax[1].imshow(tmp['heperf'][:,0,:,181])
f, ax = pl.subplots(nrows=2, ncols=2, figsize=(10,10))
stokes = np.zeros((4,210))
stokes[0,:] = tmp['heperf'][160,0,130,0:-40] / np.max(tmp['heperf'][160,0,130,:])
stokes[1,:] = tmp['heperf'][160,1,130,0:-40] / np.max(tmp['heperf'][160,0,130,:])
stokes[2,:] = tmp['heperf'][160,2,130,0:-40] / np.max(tmp['heperf'][160,0,130,:])
stokes[3,:] = tmp['heperf'][160,3,130,0:-40] / np.max(tmp['heperf'][160,0,130,:])
ax[0,0].plot(tmp['lambda'][0:-40] + 10830.0911, stokes[0,:])
ax[0,1].plot(tmp['lambda'][0:-40] + 10830.0911, stokes[1,:])
ax[1,0].plot(tmp['lambda'][0:-40] + 10830.0911, stokes[2,:])
ax[1,1].plot(tmp['lambda'][0:-40] + 10830.0911, stokes[3,:])
wvl = tmp['lambda'][0:-40]
stokes = stokes[:,:]
n_lambda = len(wvl)
print(n_lambda)
# -
# Now we want to prepare all files for a 2D inversion. First, like in 1D inversions, save the wavelength axis:
np.savetxt('10830_spot.wavelength', wvl+10830.0911, header='lambda')
# Then, let's assume that we weight all wavelengths equally:
f = open('10830_spot.weights', 'w')
f.write('# WeightI WeightQ WeightU WeightV\n')
for i in range(n_lambda):
f.write('1.0 1.0 1.0 1.0\n')
f.close()
stokes.shape
# As an example, let's work only with a few pixels, but what I show in the following can be scaled to any size of the input observations. So, let's fix the number of pixels to be 10 (a small piece of 5x2 pixels in the map):
nx = 5
ny = 2
n_pixel = nx * ny
stokes_3d = np.zeros((n_pixel,n_lambda,4), dtype=np.float64)
sigma_3d = np.zeros((n_pixel,n_lambda,4), dtype=np.float64)
los_3d = np.zeros((n_pixel,3), dtype=np.float64)
boundary_3d = np.zeros((n_pixel,n_lambda,4), dtype=np.float64)
stokes = tmp['heperf'][160:160+nx,:,130:130+ny,0:-40] / np.max(tmp['heperf'][160,0,130,:])
stokes = np.transpose(stokes, axes=(0,2,3,1)).reshape((n_pixel,210,4))
print(stokes.shape)
# Now we fill all arrays with information from the osbervations, including, like in the 1D model, a very rough estimation of the noise standard deviation:
# +
boundary = np.array([1.0,0.0,0.0,0.0])
for i in range(n_pixel):
noise = np.std(stokes[i,0:20,1])
stokes_3d[i,:,:] = stokes[i,:,:]
sigma_3d[i,:,:] = noise*np.ones((210,4))
los_3d[i,:] = np.array([0.0,0.0,90.0])
boundary_3d[i,:,:] = np.repeat(np.atleast_2d(boundary), n_lambda, axis=0)
f = h5py.File('10830_spot_stokes.h5', 'w')
db_stokes = f.create_dataset('stokes', stokes_3d.shape, dtype=np.float64)
db_sigma = f.create_dataset('sigma', sigma_3d.shape, dtype=np.float64)
db_los = f.create_dataset('LOS', los_3d.shape, dtype=np.float64)
db_boundary = f.create_dataset('boundary', boundary_3d.shape, dtype=np.float64)
db_stokes[:] = stokes_3d
db_sigma[:] = sigma_3d
db_los[:] = los_3d
db_boundary[:] = boundary_3d
f.close()
# -
# So we are now ready for the inversion. Let's print first the configuration file and then do a simple inversion for a 1D input file. You can see that we are including two atmospheres, a photosphere to explain the Si I line and a chromosphere to explain the He I multiplet. We also give some rough intervals for the parameters.
# %cat conf_spot_3d.ini
# Let's invert these profiles in a non-MPI mode, which can be done directly in Python:
iterator = hazel.Iterator(use_mpi=False)
mod = hazel.Model('conf_spot_3d.ini', working_mode='inversion')
iterator.use_model(model=mod)
iterator.run_all_pixels()
# We see that we found a solution with a relatively good $\chi^2$ and now let's analyze the results. For your specific case, you probably need some trial and error on the Stokes weights and range of parameters to find a reliable solution.
# +
f = h5py.File('output.h5', 'r')
print('(npix,nrand,ncycle,nstokes,nlambda) -> {0}'.format(f['spec1']['stokes'].shape))
for k in range(2):
fig, ax = pl.subplots(nrows=2, ncols=2, figsize=(10,10))
ax = ax.flatten()
for i in range(4):
ax[i].plot(f['spec1']['wavelength'][:] - 10830, stokes[k,:,i])
for j in range(2):
ax[i].plot(f['spec1']['wavelength'][:] - 10830, f['spec1']['stokes'][k,0,j,i,:])
for i in range(4):
ax[i].set_xlabel('Wavelength - 10830[$\AA$]')
ax[i].set_ylabel('{0}/Ic'.format(label[i]))
ax[i].set_xlim([-7,3])
pl.tight_layout()
f.close()
# -
# Then do some 2D plots. However, they are not very representative for such a small FOV.
# +
f = h5py.File('output.h5', 'r')
print(list(f['ch1'].keys()))
tau = np.squeeze(f['ch1']['tau'][:,:,-1,:])
v = np.squeeze(f['ch1']['v'][:,:,-1,:])
Bz = np.squeeze(f['ch1']['Bz'][:,:,-1,:])
fig, ax = pl.subplots(figsize=(10,6), nrows=1, ncols=3)
im = ax[0].imshow(tau.reshape((nx,ny)), cmap=pl.cm.viridis)
pl.colorbar(im, ax=ax[0])
ax[0].set_title(r'$\tau$')
im = ax[1].imshow(v.reshape((nx,ny)), cmap=pl.cm.viridis)
pl.colorbar(im, ax=ax[1])
ax[1].set_title('v')
im = ax[2].imshow(Bz.reshape((nx,ny)), cmap=pl.cm.viridis)
pl.colorbar(im, ax=ax[2])
ax[2].set_title(r'B$_z$')
print(f['ch1']['tau'].shape)
f.close()
# -
# ## Parallel mode
# For inverting the profiles in a multi-core machine, you need to create a Python file (e.g., script.py) with the following content:
#
# iterator = hazel.Iterator(use_mpi=True)
# mod = hazel.Model('conf_spot_3d.ini', rank=iterator.get_rank())
# iterator.use_model(model=mod)
# iterator.run_all_pixels()
#
# and run it with
#
# mpiexec -n n_cpu python script.py
| docs/notebooks/parallel.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python [conda env:carnd-term1]
# language: python
# name: conda-env-carnd-term1-py
# ---
# Import
import matplotlib.image as mpimg
import matplotlib.pyplot as plt
import numpy as np
import cv2
import glob
from skimage.feature import hog
from sklearn.preprocessing import StandardScaler
from sklearn.cross_validation import train_test_split
from sklearn.svm import LinearSVC
import time
from scipy.ndimage.measurements import label
from sklearn.svm import SVC
def plot_2(img1,img2):
    """Display two images side by side in a single 10x9 figure."""
    fig, (left_ax, right_ax) = plt.subplots(1, 2, figsize=(10, 9))
    fig.tight_layout()
    left_ax.imshow(img1)
    right_ax.imshow(img2)
    plt.subplots_adjust(left=0., right=1, top=0.9, bottom=0.)
# ## Load Images
# +
# Collect training image paths: vehicle images -> cars, the rest -> notcars.
cars = []
notcars = []
images = glob.glob('vehicles/*/*.png')
for image in images:
    cars.append(image)
images = glob.glob('non-vehicles/*/*.png')
for image in images:
    notcars.append(image)
#images2 = glob.glob('vehicles/vehicles/KITTI_*/*.png')
#cars.append(images)
print("cars ",len(cars))
print("non_cars ", len(notcars))
# Sanity-check the pixel range of the first car image (cv2 loads BGR uint8).
image = np.uint8(cv2.imread(cars[0]))
print("MAx ",np.max(image))
print("MIN",np.min(image))
image2 = cv2.cvtColor(image, cv2.COLOR_BGR2RGB)
#plot_2(image, image2)
# -
# ## Feature Extraction
# +
# Spatial-binning color feature extractor.
def bin_spatial(img, size=(32, 32)):
    """Resize `img` down to `size` and return the flattened pixels
    as a 1-D feature vector."""
    return cv2.resize(img, size).ravel()
# Color-histogram feature extractor.
def color_hist(img, nbins=32, bins_range=(0, 256)):
    """Return the concatenated per-channel histograms of a 3-channel image.

    NEED TO CHANGE bins_range if reading .png files with mpimg
    (mpimg scales PNGs to [0, 1]).
    """
    per_channel = [
        np.histogram(img[:, :, ch], bins=nbins, range=bins_range)[0]
        for ch in range(3)
    ]
    return np.concatenate(per_channel)
# Define a function to return HOG features and visualization
def get_hog_features(img, orient, pix_per_cell, cell_per_block,
                        vis=False, feature_vec=True):
    """Histogram-of-oriented-gradients features for a single-channel image.

    Returns the feature array, plus the HOG visualization image when
    vis is True.
    NOTE(review): skimage renamed `visualise` to `visualize` in newer
    releases — confirm the installed scikit-image version accepts it.
    """
    # Call with two outputs if vis==True
    if vis == True:
        features, hog_image = hog(img, orientations=orient,
                                  pixels_per_cell=(pix_per_cell, pix_per_cell),
                                  block_norm= 'L2-Hys',
                                  cells_per_block=(cell_per_block, cell_per_block),
                                  transform_sqrt=True,
                                  visualise=vis, feature_vector=feature_vec)
        return features, hog_image
    # Otherwise call with one output
    else:
        features = hog(img, orientations=orient,
                       pixels_per_cell=(pix_per_cell, pix_per_cell),
                       cells_per_block=(cell_per_block, cell_per_block),
                       block_norm= 'L2-Hys',
                       transform_sqrt=True,
                       visualise=vis, feature_vector=feature_vec)
        return features
# -
# Define a function to extract features from a list of images
# Have this function call bin_spatial() and color_hist()
def extract_features(imgs, color_space='RGB', spatial_size=(32, 32),
                        hist_bins=32, orient=9,
                        pix_per_cell=8, cell_per_block=2, hog_channel=0,
                        spatial_feat=True, hist_feat=True, hog_feat=True):
    """Build one concatenated feature vector (spatial + histogram + HOG,
    each toggled by its *_feat flag) per image file path in `imgs`.

    NOTE(review): an unrecognised color_space other than 'RGB' leaves
    feature_image unbound and raises NameError — confirm callers only pass
    the listed spaces.
    """
    # Create a list to append feature vectors to
    features = []
    # Iterate through the list of images
    for file in imgs:
        file_features = []
        # Read in each one by one (cv2 loads BGR; convert to RGB first)
        image = cv2.imread(file)
        image = cv2.cvtColor(image, cv2.COLOR_BGR2RGB)
        # apply color conversion if other than 'RGB'
        if color_space != 'RGB':
            if color_space == 'HSV':
                feature_image = cv2.cvtColor(image, cv2.COLOR_RGB2HSV)
            elif color_space == 'LUV':
                feature_image = cv2.cvtColor(image, cv2.COLOR_RGB2LUV)
            elif color_space == 'HLS':
                feature_image = cv2.cvtColor(image, cv2.COLOR_RGB2HLS)
            elif color_space == 'YUV':
                feature_image = cv2.cvtColor(image, cv2.COLOR_RGB2YUV)
            elif color_space == 'YCrCb':
                feature_image = cv2.cvtColor(image, cv2.COLOR_RGB2YCrCb)
        else: feature_image = np.copy(image)
        if spatial_feat == True:
            spatial_features = bin_spatial(feature_image, size=spatial_size)
            file_features.append(spatial_features)
        if hist_feat == True:
            # Apply color_hist()
            hist_features = color_hist(feature_image, nbins=hist_bins)
            file_features.append(hist_features)
        if hog_feat == True:
            # Call get_hog_features() with vis=False, feature_vec=True
            if hog_channel == 'ALL':
                hog_features = []
                for channel in range(feature_image.shape[2]):
                    hog_features.append(get_hog_features(feature_image[:,:,channel],
                                        orient, pix_per_cell, cell_per_block,
                                        vis=False, feature_vec=True))
                hog_features = np.ravel(hog_features)
            else:
                hog_features = get_hog_features(feature_image[:,:,hog_channel], orient,
                            pix_per_cell, cell_per_block, vis=False, feature_vec=True)
            # Append the new feature vector to the features list
            file_features.append(hog_features)
        features.append(np.concatenate(file_features))
    # Return list of feature vectors
    return features
# Define a function to extract features from a single image window
# This function is very similar to extract_features()
# just for a single image rather than list of images
def single_img_features(img, color_space='RGB', spatial_size=(32, 32),
                        hist_bins=32, orient=9,
                        pix_per_cell=8, cell_per_block=2, hog_channel=0,
                        spatial_feat=True, hist_feat=True, hog_feat=True):
    """Extract the feature vector for a single, already-loaded RGB image.

    Same feature layout as extract_features(), but for one in-memory image
    (used per search window).
    Raises ValueError for an unsupported color_space (the original code
    crashed with an unbound-variable NameError in that case).
    """
    conversions = {
        'HSV': cv2.COLOR_RGB2HSV,
        'LUV': cv2.COLOR_RGB2LUV,
        'HLS': cv2.COLOR_RGB2HLS,
        'YUV': cv2.COLOR_RGB2YUV,
        'YCrCb': cv2.COLOR_RGB2YCrCb,
    }
    if color_space == 'RGB':
        feature_image = np.copy(img)
    elif color_space in conversions:
        feature_image = cv2.cvtColor(img, conversions[color_space])
    else:
        raise ValueError('unsupported color_space: %r' % (color_space,))
    img_features = []
    if spatial_feat:
        img_features.append(bin_spatial(feature_image, size=spatial_size))
    if hist_feat:
        img_features.append(color_hist(feature_image, nbins=hist_bins))
    if hog_feat:
        if hog_channel == 'ALL':
            hog_features = []
            for channel in range(feature_image.shape[2]):
                hog_features.extend(get_hog_features(feature_image[:, :, channel],
                                                     orient, pix_per_cell, cell_per_block,
                                                     vis=False, feature_vec=True))
        else:
            hog_features = get_hog_features(feature_image[:, :, hog_channel], orient,
                                            pix_per_cell, cell_per_block,
                                            vis=False, feature_vec=True)
        img_features.append(hog_features)
    return np.concatenate(img_features)
# Define a function you will pass an image
# and the list of windows to be searched (output of slide_windows())
def search_windows(img, windows, clf, scaler, color_space='RGB',
                    spatial_size=(32, 32), hist_bins=32,
                    hist_range=(0, 256), orient=9,
                    pix_per_cell=8, cell_per_block=2,
                    hog_channel=0, spatial_feat=True,
                    hist_feat=True, hog_feat=True):
    """Classify every candidate window and return those predicted positive.

    Each window ((x1, y1), (x2, y2)) is cropped from `img`, resized to the
    64x64 training resolution, featurised with single_img_features() and
    run through `scaler` + `clf`. `hist_range` is accepted for interface
    compatibility but is not used.
    """
    on_windows = []
    for window in windows:
        (x1, y1), (x2, y2) = window
        # Crop the patch and bring it to the classifier's input size.
        test_img = cv2.resize(img[y1:y2, x1:x2], (64, 64))
        features = single_img_features(test_img, color_space=color_space,
                            spatial_size=spatial_size, hist_bins=hist_bins,
                            orient=orient, pix_per_cell=pix_per_cell,
                            cell_per_block=cell_per_block,
                            hog_channel=hog_channel, spatial_feat=spatial_feat,
                            hist_feat=hist_feat, hog_feat=hog_feat)
        # The scaler expects a 2-D (1, n_features) array.
        test_features = scaler.transform(np.array(features).reshape(1, -1))
        if clf.predict(test_features) == 1:
            on_windows.append(window)
    return on_windows
# ## Split into Training and validation
# ## Linear SVM
# Sanity check on globals defined in other cells: size of the training set
# and the shape of the most recently loaded image.
print(len(X_train))
print(image.shape)
# ## Sliding Window
# Define a function that takes an image,
# start and stop positions in both x and y,
# window size (x and y dimensions),
# and overlap fraction (for both x and y)
def slide_window(img, x_start_stop=[None, None], y_start_stop=[None, None],
                    xy_window=(64, 64), xy_overlap=(0.5, 0.5)):
    """Build the list of sliding-window corner pairs over a region of `img`.

    x_start_stop / y_start_stop give the search region ([None, None] means
    the full image), xy_window the window size in pixels and xy_overlap the
    fractional overlap per axis.

    Returns a list of ((startx, starty), (endx, endy)) tuples.
    """
    # Resolve None bounds into local variables instead of writing back into
    # the argument lists: the original mutated the caller's lists AND the
    # shared mutable default arguments.
    x_start = 0 if x_start_stop[0] is None else x_start_stop[0]
    x_stop = img.shape[1] if x_start_stop[1] is None else x_start_stop[1]
    y_start = 0 if y_start_stop[0] is None else y_start_stop[0]
    y_stop = img.shape[0] if y_start_stop[1] is None else y_start_stop[1]
    # Span of the region to be searched.
    xspan = x_stop - x_start
    yspan = y_stop - y_start
    # Pixels per step in x/y. np.int was removed in NumPy 1.24; the builtin
    # int() truncates the same way.
    nx_pix_per_step = int(xy_window[0] * (1 - xy_overlap[0]))
    ny_pix_per_step = int(xy_window[1] * (1 - xy_overlap[1]))
    # Number of windows that fit in each direction.
    nx_buffer = int(xy_window[0] * xy_overlap[0])
    ny_buffer = int(xy_window[1] * xy_overlap[1])
    nx_windows = int((xspan - nx_buffer) / nx_pix_per_step)
    ny_windows = int((yspan - ny_buffer) / ny_pix_per_step)
    window_list = []
    # Looping (rather than vectorizing) is fine here: downstream code
    # classifies windows one by one anyway.
    for ys in range(ny_windows):
        for xs in range(nx_windows):
            startx = xs * nx_pix_per_step + x_start
            endx = startx + xy_window[0]
            starty = ys * ny_pix_per_step + y_start
            endy = starty + xy_window[1]
            window_list.append(((startx, starty), (endx, endy)))
    return window_list
# Define a function to draw bounding boxes
def draw_boxes(img, bboxes, color=(0, 0, 255), thick=6):
    """Return a copy of `img` with one rectangle drawn per bounding box."""
    annotated = np.copy(img)
    for corner_a, corner_b in bboxes:
        cv2.rectangle(annotated, corner_a, corner_b, color, thick)
    # The input image itself is never modified.
    return annotated
# +
### TODO: Tweak these parameters and see how the results change.
color_space = 'YCrCb' # Can be RGB, HSV, LUV, HLS, YUV, YCrCb
orient = 11 # HOG orientations
pix_per_cell = 16 # HOG pixels per cell
cell_per_block = 2 # HOG cells per block
hog_channel = 'ALL' # Can be 0, 1, 2, or "ALL"
spatial_size = (16, 16) # Spatial binning dimensions
hist_bins = 8 # Number of histogram bins
spatial_feat = True # Spatial features on or off
hist_feat = True # Histogram features on or off
hog_feat = True # HOG features on or off
y_start_stop = [370, 640] # Min and max in y to search in slide_window()
x_start_stop = [640, 1279] # Min and max in x to search in slide_window()
MIN_WX = 40 # Minimum detection width (px) accepted by draw_labeled_bboxes()
MIN_WY = 30 # Minimum detection height (px) accepted by draw_labeled_bboxes()
# +
# Featurise the positive (car) and negative (notcar) training images with
# the tuning parameters defined above. `cars`/`notcars` are file lists
# built in an earlier cell.
car_features = extract_features(cars, color_space=color_space,
                        spatial_size=spatial_size, hist_bins=hist_bins,
                        orient=orient, pix_per_cell=pix_per_cell,
                        cell_per_block=cell_per_block,
                        hog_channel=hog_channel, spatial_feat=spatial_feat,
                        hist_feat=hist_feat, hog_feat=hog_feat)
notcar_features = extract_features(notcars, color_space=color_space,
                        spatial_size=spatial_size, hist_bins=hist_bins,
                        orient=orient, pix_per_cell=pix_per_cell,
                        cell_per_block=cell_per_block,
                        hog_channel=hog_channel, spatial_feat=spatial_feat,
                        hist_feat=hist_feat, hog_feat=hog_feat)
#car_features = np.hstack((car_features_rgb,car_features_yuv,car_features_hls))
#notcar_features = np.hstack((notcar_features_rgb, notcar_features_yuv, notcar_features_hls))
#print("car_features SHape = ",car_features.shape)
#print("notcar_features SHape = ",notcar_features.shape)
# Create an array stack of feature vectors
X = np.vstack((car_features, notcar_features)).astype(np.float64)
# Define the labels vector: 1 = car, 0 = not car.
y = np.hstack((np.ones(len(car_features)), np.zeros(len(notcar_features))))
# Split up data into randomized training and test sets
rand_state = np.random.randint(0, 100)
X_train, X_test, y_train, y_test = train_test_split(X, y, test_size=0.2, random_state=rand_state)
# Fit a per-column scaler on the training split only (avoids test leakage).
X_scaler = StandardScaler().fit(X_train)
# Apply the scaler to X
X_train = X_scaler.transform(X_train)
X_test = X_scaler.transform(X_test)
print('Using:',orient,'orientations',pix_per_cell,
    'pixels per cell and', cell_per_block,'cells per block')
print('Feature vector length:', len(X_train[0]))
# +
# Use a linear SVC
#svc = LinearSVC()
# RBF kernel chosen over the (commented-out) LinearSVC: slower to train
# and to predict, but used here for accuracy.
svc = SVC(kernel = 'rbf')
# Check the training time for the SVC
t=time.time()
svc.fit(X_train, y_train)
t2 = time.time()
print(round(t2-t, 2), 'Seconds to train SVC...')
# Check the score of the SVC
print('Test Accuracy of SVC = ', round(svc.score(X_test, y_test), 4))
# +
# Check the prediction time for a single sample
t=time.time()
image = mpimg.imread('./test_images/test4.jpg')
draw_image = np.copy(image)
# Uncomment the following line if you extracted training
# data from .png images (scaled 0 to 1 by mpimg) and the
# image you are searching is a .jpg (scaled 0 to 255)
#image = image.astype(np.float32)/255
# Search at three window scales; overlap tuned per scale.
windows = slide_window(image, x_start_stop=x_start_stop, y_start_stop=y_start_stop,xy_window=(96, 96), xy_overlap=(0.6, 0.5))
print("Windows :",len(windows))
windows.extend(slide_window(image, x_start_stop=x_start_stop, y_start_stop=y_start_stop,xy_window=(64, 64), xy_overlap=(0.5, 0.5)))
windows.extend(slide_window(image, x_start_stop=x_start_stop, y_start_stop=y_start_stop,xy_window=(128, 128), xy_overlap=(0.8, 0.5)))
print("Windows :",len(windows))
# Classify every window; hot_windows keeps only positive detections.
hot_windows = search_windows(image, windows, svc, X_scaler, color_space=color_space,
                        spatial_size=spatial_size, hist_bins=hist_bins,
                        orient=orient, pix_per_cell=pix_per_cell,
                        cell_per_block=cell_per_block,
                        hog_channel=hog_channel, spatial_feat=spatial_feat,
                        hist_feat=hist_feat, hog_feat=hog_feat)
window_img = draw_boxes(draw_image, hot_windows, color=(0, 0, 255), thick=6)
plt.imshow(window_img)
# +
def add_heat(heatmap, bbox_list):
    """Add 1 (in place) to every heatmap pixel covered by each box.

    Boxes take the form ((x1, y1), (x2, y2)); the (mutated) heatmap is
    also returned for convenience.
    """
    for (x1, y1), (x2, y2) in bbox_list:
        heatmap[y1:y2, x1:x2] += 1
    return heatmap
def apply_threshold(heatmap, threshold):
    """Zero out (in place) every pixel whose heat is <= threshold."""
    below = heatmap <= threshold
    heatmap[below] = 0
    # Return the same (now thresholded) array.
    return heatmap
def draw_labeled_bboxes(img, labels):
    """Draw one box per labelled region from scipy's label() output.

    `labels` is the (label_map, n_regions) tuple. Regions narrower than the
    module-level MIN_WX or shorter than MIN_WY are treated as false
    positives and skipped. Draws on `img` in place and returns it.
    """
    label_map, n_regions = labels[0], labels[1]
    for car_number in range(1, n_regions + 1):
        # Pixel coordinates belonging to this region.
        ys, xs = (label_map == car_number).nonzero()
        nonzeroy = np.array(ys)
        nonzerox = np.array(xs)
        x_lo, x_hi = np.min(nonzerox), np.max(nonzerox)
        y_lo, y_hi = np.min(nonzeroy), np.max(nonzeroy)
        # Size filter against spurious tiny detections.
        if (x_hi - x_lo) > MIN_WX and (y_hi - y_lo) > MIN_WY:
            cv2.rectangle(img, (x_lo, y_lo), (x_hi, y_hi), (0, 0, 255), 6)
    return img
# -
# ## PIPELINE
# +
def pipeline(image):
    """Full vehicle-detection pipeline for one RGB frame.

    Slides windows at three scales over the configured search region,
    classifies each with the module-level `svc`/`X_scaler`, accumulates a
    heatmap, thresholds it and draws the surviving labelled boxes.
    Relies on the module-level tuning globals (color_space, orient, ...).
    Returns the annotated copy of the frame.
    """
    # Uncomment if the training data came from .png (0..1) images and the
    # searched image is a .jpg (0..255):
    #image = image.astype(np.float32)/255
    windows = slide_window(image, x_start_stop=x_start_stop, y_start_stop=y_start_stop,xy_window=(96, 96), xy_overlap=(0.6, 0.5))
    windows.extend(slide_window(image, x_start_stop=x_start_stop, y_start_stop=y_start_stop,xy_window=(64, 64), xy_overlap=(0.5, 0.5)))
    windows.extend(slide_window(image, x_start_stop=x_start_stop, y_start_stop=y_start_stop,xy_window=(128, 128), xy_overlap=(0.8, 0.5)))
    hot_windows = search_windows(image, windows, svc, X_scaler, color_space=color_space,
                            spatial_size=spatial_size, hist_bins=hist_bins,
                            orient=orient, pix_per_cell=pix_per_cell,
                            cell_per_block=cell_per_block,
                            hog_channel=hog_channel, spatial_feat=spatial_feat,
                            hist_feat=hist_feat, hog_feat=hog_feat)
    # np.float was removed in NumPy 1.24; the builtin float is equivalent.
    heat = np.zeros_like(image[:, :, 0]).astype(float)
    # Add heat to each box in box list
    heat = add_heat(heat, hot_windows)
    # Keep only pixels covered by more than THRESHOLD windows (removes
    # false positives that fire in a single window).
    THRESHOLD = 1
    heat = apply_threshold(heat, THRESHOLD)
    # Visualize the heatmap when displaying
    heatmap = np.clip(heat, 0, 255)
    # Find final boxes from heatmap using label function
    labels = label(heatmap)
    draw_img = draw_labeled_bboxes(np.copy(image), labels)
    # Diagnostic side-by-side figure; note this runs (and overwrites the
    # same output file) for every frame when used as a video callback.
    fig = plt.figure()
    plt.subplot(121)
    plt.imshow(draw_img)
    plt.title('Car Positions')
    plt.subplot(122)
    plt.imshow(heatmap, cmap='hot')
    plt.title('Heat Map')
    fig.tight_layout()
    plt.savefig("./output_images/out_1.jpg")
    return draw_img
# +
# Import everything needed to edit/save/watch video clips
from moviepy.editor import VideoFileClip
from IPython.display import HTML
video_output = './project_video_output_final_submitted_1.mp4'
#video_output = './project_video_output_test_th_6.mp4'
## To speed up the testing process you may want to try your pipeline on a shorter subclip of the video
## To do so add .subclip(start_second,end_second) to the end of the line below
## Where start_second and end_second are integer values representing the start and end of the subclip
## You may also uncomment the following line for a subclip of the first 5 seconds
##clip1 = VideoFileClip("test_videos/solidWhiteRight.mp4").subclip(0,5)
clip1 = VideoFileClip("project_video.mp4")
#clip1 = VideoFileClip("test_video.mp4")
# `pipeline` is applied frame by frame.
easy_clip = clip1.fl_image(pipeline) #NOTE: this function expects color images!!
# %time easy_clip.write_videofile(video_output, audio=False)
# +
# Run the pipeline on a single still image as a spot check.
images = glob.glob('./test_images/test1.jpg')
for image in images:
    temp_image = mpimg.imread(image)
    temp_draw_image = np.copy(temp_image)
    result_image = pipeline(temp_draw_image)
# -
len(windows)
test_img = mpimg.imread('./test_images/test6.jpg')
out_img = pipeline(test_img)
plt.imshow(out_img)
plt.imsave('./output_images/out_test6.jpg', out_img)
# +
# Visualize the full sliding-window grid on a test image.
test_img = mpimg.imread('./test_images/test6.jpg')
for window in windows:
    cv2.rectangle(test_img, window[0], window[1], (0,0,255), 6)
# -
| P5_Vehicle_detection.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# language: python
# name: python3
# ---
# +
import glob
import random as rn
import numpy as np
import pandas as pd
import matplotlib.pyplot as plt
import seaborn as sns
import cv2
from tensorflow.keras.models import Sequential
from tensorflow.keras.layers import Dense, Conv2D, MaxPooling2D, Flatten, Dropout, BatchNormalization
from tensorflow.keras.callbacks import EarlyStopping, ReduceLROnPlateau
from tensorflow.keras.preprocessing.image import ImageDataGenerator
from tensorflow.keras.optimizers import RMSprop, Adam
from tensorflow.keras.utils import to_categorical
from sklearn.metrics import classification_report, confusion_matrix
# %matplotlib inline
import warnings
warnings.filterwarnings('ignore')
import numpy as np
import os
import cv2
import matplotlib.pyplot as plt
# %matplotlib inline
import operator
import tensorflow as tf
import random
from keras.preprocessing.image import ImageDataGenerator
from keras.models import Model
import numpy as np
import os
import cv2
import matplotlib.pyplot as plt
from keras.models import load_model
import numpy as np
import pandas as pd
import os
import glob
import cv2
import random
import matplotlib.pyplot as plt
# %matplotlib inline
import matplotlib.pyplot as plt
import matplotlib.image as mpimg
from keras.preprocessing.image import ImageDataGenerator
from keras.models import Sequential
from keras.optimizers import RMSprop
from keras.layers import Conv2D, MaxPooling2D
from keras.layers import Activation, Dropout, Flatten, Dense
from keras import regularizers
from keras.callbacks import CSVLogger
#from livelossplot import PlotLossesKeras
import os
import numpy as np
#from imgaug import augmenters as iaa
#import cv2
from keras.layers.normalization import BatchNormalization
#import seaborn as sns
import pandas as pd
from keras import initializers
from keras import optimizers
import keras.backend as K
import tensorflow as tf
from keras.models import Model
# +
path = '../input/d/mashfiqrizvee/sampledataset2/'

# Class sub-directory names; the position in this list is the integer label
# (0..6) used everywhere below.
class_dirs = [
    'Air_trapping',         # 0
    'Aortic_elongation',    # 1
    'Calcified_granuloma',  # 2
    'Callus_rib_fracture',  # 3
    'Hiatal_hernia',        # 4
    'Laminar_atelectasis',  # 5
    'Pleural_effusion',     # 6
]

def _collect_split(split):
    """Return shuffled [[image_path, label], ...] pairs for one split folder.

    `split` is the on-disk folder name ('Train', 'Test' or 'val' — note the
    inconsistent capitalisation of the validation folder in the dataset).
    """
    cases = []
    for label, class_dir in enumerate(class_dirs):
        # Images are stored as .png files.
        for img_path in glob.glob(path + split + '/' + class_dir + '/' + '*png'):
            cases.append([img_path, label])
    # Shuffle so classes are interleaved instead of grouped by label.
    rn.shuffle(cases)
    return cases

# The original cell spelled out 21 directory variables and 21 copy-pasted
# loops, and contained a copy-paste bug (the calcified-granuloma train
# directory was first assigned the air-trapping path before being
# overwritten). The data-driven loops above build the same lists.
train_list = _collect_split('Train')
test_list = _collect_split('Test')
val_list = _collect_split('val')

# create dataframes
train_df = pd.DataFrame(train_list, columns=['image', 'label'])
test_df = pd.DataFrame(test_list, columns=['image', 'label'])
val_df = pd.DataFrame(val_list, columns=['image', 'label'])
# +
# Class balance per split; the x axis is the integer label (0..6).
plt.figure(figsize=(20,5))
plt.subplot(1,3,1)
sns.countplot(train_df['label'])
plt.title('Train data')
plt.subplot(1,3,2)
sns.countplot(test_df['label'])
plt.title('Test data')
plt.subplot(1,3,3)
sns.countplot(val_df['label'])
plt.title('Validation data')
plt.show()
# +
def process_data(img_path):
    """Load one image: resize to 196x196, grayscale, scale to [0,1], add a
    trailing channel axis so the result is shaped (196, 196, 1)."""
    raw = cv2.imread(img_path)
    resized = cv2.resize(raw, (196, 196))
    gray = cv2.cvtColor(resized, cv2.COLOR_BGR2GRAY)
    scaled = gray / 255.0
    return np.reshape(scaled, (196, 196, 1))
def compose_dataset(df):
    """Turn an (image, label) dataframe into (X, y) numpy arrays, loading
    and preprocessing every image with process_data()."""
    samples = [process_data(img_path) for img_path, label in df.values]
    targets = [label for img_path, label in df.values]
    return np.array(samples), np.array(targets)
# +
# Materialise all three splits as numpy arrays (loads every image).
X_train, y_train = compose_dataset(train_df)
X_test, y_test = compose_dataset(test_df)
X_val, y_val = compose_dataset(val_df)
print('Train data shape: {}, Labels shape: {}'.format(X_train.shape, y_train.shape))
print('Test data shape: {}, Labels shape: {}'.format(X_test.shape, y_test.shape))
print('Validation data shape: {}, Labels shape: {}'.format(X_val.shape, y_val.shape))
# +
# define generator: light augmentation only (X-rays should not be flipped).
datagen = ImageDataGenerator(
    rotation_range=10,
    zoom_range = 0.1,
    width_shift_range=0.1,
    height_shift_range=0.1,
    horizontal_flip=False,
    vertical_flip=False
)
# fit generator on our train features
datagen.fit(X_train)
# -
# One-hot encode the integer labels (7 classes).
y_train = to_categorical(y_train)
y_test = to_categorical(y_test)
y_val = to_categorical(y_val)
# +
from keras.applications.vgg16 import VGG16
from keras.applications.vgg19 import VGG19
# -
'''
def vgg():
base_model = ResNet50(weights=None,include_top=False,pooling='avg',input_shape=(196,196,1))
predictions=Dense(7,activation='softmax',trainable=True)(base_model.output)
for layer in base_model.layers:
layer.trainable=True
model=Model(inputs=[base_model.input], outputs=[predictions])
optim = tf.keras.optimizers.Adam(lr=1e-5,decay=1e-5)
#loss_func = 'mae'
model.compile(loss='categorical_crossentropy', optimizer=optim, metrics=['accuracy'])
return model
model=None
model = vgg()
model.summary()
'''
# +
from keras.optimizers import Adam, RMSprop
# -
# def build_model(pretrained):
# model = Sequential([
# pretrained,
# layers.GlobalAveragePooling2D(),
# layers.Dense(7, activation='softmax')
# ])
#
# model.compile(
# loss='caegorical_crossentropy',
# optimizer=Adam(lr=1e-5),
# metrics=['accuracy']
# )
#
# return model
from tensorflow.keras import layers
# +
# Backbone: ResNet50 without its ImageNet classification head.
# NOTE(review): the data pipeline produces grayscale (196,196,1) tensors
# while this backbone expects 3-channel input — confirm the intended
# preprocessing. NOTE(review): `Model` is imported from standalone keras
# while the backbone is tf.keras — confirm both resolve to the same Keras.
base_model = tf.keras.applications.ResNet50(include_top=False, weights='imagenet',
                                            input_tensor=None, input_shape=(196, 196, 3),
                                            pooling=None)
x = Flatten()(base_model.output)
# Hidden layers use ReLU: the original referenced the undefined name
# `softmax` here (NameError), and softmax on hidden layers would squash
# activations anyway.
x = Dense(256, activation='relu')(x)
x = Dense(512, activation='relu')(x)
x = Dense(1024, activation='relu')(x)
prediction = Dense(7, activation=tf.nn.softmax)(x)
# Wire the new head to the backbone. The original never connected them
# (it referenced an undefined `base_model` and compiled the bare ResNet).
model = Model(inputs=base_model.input, outputs=prediction)
optimizer = Adam(lr=0.0001, decay=1e-5)
model.compile(loss='categorical_crossentropy', optimizer=optimizer, metrics=['accuracy'])
callback = EarlyStopping(monitor='loss', patience=10)
# Class weights compensate for the imbalanced label distribution.
history = model.fit(datagen.flow(X_train, y_train, batch_size=64),
                    validation_data=(X_test, y_test), epochs=100, verbose=2,
                    callbacks=[callback],
                    class_weight={0: 0.677839851, 1: 0.6265060241, 2: 1.373584906,
                                  3: 1.394636015, 4: 1.662100457, 5: 0.9680851064,
                                  6: 1.177993528})
#,
# -
# Summarise the best values reached over all epochs.
print('Lowest training loss: ', min(history.history['loss']))
print('Lowest validation loss: ', min(history.history['val_loss']))
print('Highest training accuracy: ', max(history.history['accuracy']))
print('Highest validation accuracy: ', max(history.history['val_accuracy']))
model.save("ResNet50.h5")
# # Evaluation
# First, we will quickly check evolution of loss and accuracy over epochs and then draw confusion matrix on test data. Then how our validation set (16 cases) will work with trained model and compare real vs predicted label
# ## Draw loss on train vs test data evolution
# +
plt.figure(figsize=(20,5))
# plot loss & val loss
plt.subplot(1,2,1)
sns.lineplot(x=history.epoch, y=history.history['loss'], color='red', label='Loss')
sns.lineplot(x=history.epoch, y=history.history['val_loss'], color='orange', label='Val Loss')
plt.title('Loss on train vs test')
plt.legend(loc='best')
# plot accuracy and val accuracy
plt.subplot(1,2,2)
sns.lineplot(x=history.epoch, y=history.history['accuracy'], color='blue', label='Accuracy')
sns.lineplot(x=history.epoch, y=history.history['val_accuracy'], color='green', label='Val Accuracy')
plt.title('Accuracy on train vs test')
plt.legend(loc='best')
plt.show()
# -
# ## Confusion matrix on test data
# Predicted class = argmax over the 7 softmax outputs. NOTE: this cell
# overwrites the one-hot y_test with integer labels; re-running it without
# re-running the encoding cell will misbehave.
y_test_hat = model.predict(X_test, batch_size=4)
y_test_hat = np.argmax(y_test_hat, axis=1)
y_test = np.argmax(y_test, axis=1)
# +
# calculate confusion matrix & classification report
conf_m = confusion_matrix(y_test, y_test_hat)
clas_r = classification_report(y_test, y_test_hat)
print(conf_m)
# print classification report
print('Classification report on test data')
print(clas_r)
| Notebooks/All notebooks/ResNet50/ResNet50.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# language: python
# name: python3
# ---
import numpy as np
import pandas as pd
from sklearn.neighbors import KDTree as skKNN
from cuML import KNN as cumlKNN
import pygdf
import os
# # Helper Functions
# +
from timeit import default_timer
class Timer(object):
    """Context-manager stopwatch; elapsed seconds end up in ``self.interval``.

    Fixes the original implementation, in which ``self.start = self._timer()``
    overwrote the ``start`` method with a float, making every Timer
    instance single-use.
    """

    def __init__(self):
        self._timer = default_timer

    def __enter__(self):
        self.start()
        return self

    def __exit__(self, *args):
        self.stop()

    def start(self):
        """Start (or restart) the timer."""
        # Store the timestamp under a name that does not shadow this method.
        self._start_time = self._timer()

    def stop(self):
        """Stop the timer. Calculate the interval in seconds."""
        self.end = self._timer()
        self.interval = self.end - self._start_time
# -
import gzip
def load_data(nrows, ncols, cached = 'data/mortgage.npy.gz',source='mortgage'):
    """Return an nrows x ncols feature DataFrame (columns fea0..feaN).

    Samples rows from the gzipped mortgage array when it exists and
    source == 'mortgage'; otherwise falls back to uniform random float32
    data. NaNs are replaced with 0.
    """
    use_mortgage = source == 'mortgage' and os.path.exists(cached)
    if use_mortgage:
        print('use mortgage data')
        with gzip.open(cached) as f:
            full = np.load(f)
            # Sample `nrows` rows (with replacement) and the first ncols columns.
            X = full[np.random.randint(0, full.shape[0] - 1, nrows), :ncols]
    else:
        print('use random data')
        X = np.random.random((nrows, ncols)).astype('float32')
    columns = {'fea%d' % i: X[:, i] for i in range(X.shape[1])}
    return pd.DataFrame(columns).fillna(0)
# +
from sklearn.metrics import mean_squared_error
def array_equal(a,b,threshold=1e-2,with_sign=True,metric='mse'):
    """Return True when the error between two array-likes is below threshold.

    metric='mse' compares by mean squared error; any other value compares
    by the fraction of mismatching elements. with_sign=False compares
    absolute values.
    """
    a, b = to_nparray(a), to_nparray(b)
    if not with_sign:
        a, b = np.abs(a), np.abs(b)
    if metric == 'mse':
        error = mean_squared_error(a, b)
    else:
        # Fraction of element-wise mismatches.
        error = np.sum(a != b) / (a.shape[0] * a.shape[1])
    return error < threshold

def to_nparray(x):
    """Coerce supported containers (pandas/pygdf/np scalar) to np.ndarray."""
    if isinstance(x, np.ndarray) or isinstance(x, pd.DataFrame):
        return np.array(x)
    if isinstance(x, np.float64):
        return np.array([x])
    if isinstance(x, pygdf.DataFrame) or isinstance(x, pygdf.Series):
        return x.to_pandas().values
    return x
# -
# # Run tests
# +
# %%time
# Benchmark setup: 65k rows of 40 features.
nrows = 2**16
ncols = 40
X = load_data(nrows,ncols)
print('data',X.shape)
# -
n_neighbors = 10
# %%time
# CPU baseline: scikit-learn KD-tree exact nearest neighbours.
knn_sk = skKNN(X)
D_sk,I_sk = knn_sk.query(X,n_neighbors)
# %%time
# Move the data into the GPU dataframe format expected by cuML.
X = pygdf.DataFrame.from_pandas(X)
# %%time
knn_cuml = cumlKNN(n_gpus=1)
knn_cuml.fit(X)
D_cuml,I_cuml = knn_cuml.query(X,n_neighbors)
# Check the cuML results against the scikit-learn baseline.
passed = array_equal(D_sk,D_cuml)
message = 'compare knn: cuml vs sklearn distances %s'%('equal'if passed else 'NOT equal')
print(message)
passed = array_equal(I_sk,I_cuml)
message = 'compare knn: cuml vs sklearn indexes %s'%('equal'if passed else 'NOT equal')
print(message)
| python/notebooks/knn_demo.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# language: python
# name: python3
# ---
# # Lista 1 de EDA 2
#
# ## <NAME> Isaque
# ## Conjunto de dados
#
# Os dados utilizados são do ministério da Saúde e foram obtidos no [portal brasileiro de dados abertos](http://dados.gov.br/dataset).
#
# ### Cadastro Nacional de Estabelecimentos de Saúde - CNES - http://dados.gov.br/dataset/cnes
#
# Armazena a localização dos estabelecimentos registrados no Cadastro Nacional de Estabelecimentos de Saúde.
# Cada dado conta com:
# - co_cnes (Código único do estabelecimento)
# - co_ibge (Código do IBGE)
# - origem_dado (Origem das informações)
# - data_atualizacao (Data da ultima atualização do dado)
# - lat (Coordenada da localização geográfica)
# - long (Coordenada da localização geográfica)
#
#
# ## Tratamento dos dados
# Função que carrega, trata e ordena os dados.
# +
import matplotlib.pyplot as plt
import load_and_prepare_data as ld
import search_methods as sm
# Load the CNES dataset (sorted by co_cnes by the helper module).
data = ld.load_and_prepare_data()
data[-1]
# -
# ## Data distribution
co_cnes = [row[0] for row in data]
plt.plot(co_cnes)
# # Benchmark of the search methods
#
# Value to be searched for
value = 6651321
# ## Indexed sequential search
index_list = sm.create_index_list(data, 10000)
# %timeit -n10 -r1 sm.indexed_sequential_search(data, index_list, value)
# ## Sequential search
# %timeit -n10 -r1 sm.sequential_search(data, value)
# ## Interpolation search
# %timeit -n10 -r1 sm.interpolation_search(data, value)
# ## Binary search
# %timeit -n10 -r1 sm.binary_search(data, value)
# ## Recursive binary search
# %timeit -n10 -r1 sm.recursive_binary_search(data, 0, len(data), value)
| Lista1.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# language: python
# name: python3
# ---
# <hr style="height:2px;">
#
# # Demo: Training data generation for combined denoising and upsamling of synthetic 3D data
#
# This notebook demonstrates training data generation for a combined denoising and upsampling task of synthetic 3D data, where corresponding pairs of isotropic low and high quality stacks can be acquired.
# Anisotropic distortions along the Z axis will be simulated for the low quality stack, such that a CARE model trained on this data can be applied to images with anisotropic resolution along Z.
#
# We will use only a few synthetically generated stacks for training data generation, whereas in your application you should aim to use stacks from different developmental timepoints to ensure a well trained model.
#
# More documentation is available at http://csbdeep.bioimagecomputing.com/doc/.
# +
from __future__ import print_function, unicode_literals, absolute_import, division
import numpy as np
import matplotlib.pyplot as plt
# %matplotlib inline
# %config InlineBackend.figure_format = 'retina'
from tifffile import imread
from csbdeep.utils import download_and_extract_zip_file, plot_some, axes_dict
from csbdeep.io import save_training_data
from csbdeep.data import RawData, create_patches
from csbdeep.data.transform import anisotropic_distortions
# -
# <hr style="height:2px;">
#
# # Download example data
#
# First we download some example data, consisting of a synthetic 3D stacks with membrane-like structures.
# Download and unpack the example stacks into ./data (network access required).
download_and_extract_zip_file (
    url       = 'http://csbdeep.bioimagecomputing.com/example_data/synthetic_upsampling.zip',
    targetdir = 'data',
)
# We plot XY and XZ slices of a training stack pair:
# +
# Load one corresponding pair of training stacks (axes ZYX, see below).
y = imread('data/synthetic_upsampling/training_stacks/high/stack_00.tif')  # high-quality target
x = imread('data/synthetic_upsampling/training_stacks/low/stack_00.tif')   # low-quality source
print('image size =', x.shape)
# XY slices at Z index 5.
plt.figure(figsize=(16,15))
plot_some(np.stack([x[5],y[5]]),
          title_list=[['XY slice (low)','XY slice (high)']],
          pmin=2,pmax=99.8);
# XZ slices: move the Y axis to the front, then take Y index 50.
plt.figure(figsize=(16,15))
plot_some(np.stack([np.moveaxis(x,1,0)[50],np.moveaxis(y,1,0)[50]]),
          title_list=[['XZ slice (low)','XZ slice (high)']],
          pmin=2,pmax=99.8);
# -
# <hr style="height:2px;">
#
# # Generate training data for upsampling CARE
#
# We first need to create a `RawData` object, which defines how to get the pairs of low/high SNR stacks and the semantics of each axis (e.g. which one is considered a color channel, etc.).
#
# Here we have two folders "low" and "high", where corresponding low and high-SNR stacks are TIFF images with identical filenames.
# For this case, we can simply use `RawData.from_folder` and set `axes = 'ZYX'` to indicate the semantic order of the image axes.
# Pair low/high stacks by identical filenames in the two folders; axes are ZYX.
raw_data = RawData.from_folder (
    basepath    = 'data/synthetic_upsampling/training_stacks',
    source_dirs = ['low'],
    target_dir  = 'high',
    axes        = 'ZYX',
)
# Furthermore, we must define how to modify the Z axis to mimic a real microscope as closely as possible if data along this axis is acquired with reduced resolution. To that end, we define a `Transform` object that will take our `RawData` as input and return the modified image. Here, we use `anisotropic_distortions` to accomplish this.
#
# The most important parameter is the subsampling factor along Z, which should for example be chosen as 4 if it is planned to later acquire (low-SNR) images with 4 times reduced axial resolution.
# Simulate 4x undersampling along Z on the source stack; the target keeps its
# original isotropic resolution (no PSF blurring applied here).
anisotropic_transform = anisotropic_distortions (
    subsample      = 4,
    psf            = None,
    subsample_axis = 'Z',
    yield_target   = 'target',
)
# From the synthetically undersampled low quality input stack and its corresponding high quality stack, we now generate some 3D patches. As a general rule, use a patch size that is a power of two along XYZT, or at least divisible by 8.
# Typically, you should use more patches the more trainings stacks you have. By default, patches are sampled from non-background regions (i.e. that are above a relative threshold), see the documentation of `create_patches` for details.
#
# Note that returned values `(X, Y, XY_axes)` by `create_patches` are not to be confused with the image axes X and Y.
# By convention, the variable name `X` (or `x`) refers to an input variable for a machine learning model, whereas `Y` (or `y`) indicates an output variable.
# Sample 3D patch pairs (the source side gets the anisotropic distortion applied)
# and also save them to an .npz file for the training notebook.
X, Y, XY_axes = create_patches (
    raw_data            = raw_data,
    patch_size          = (32,64,64),
    n_patches_per_image = 512,
    transforms          = [anisotropic_transform],
    save_file           = 'data/my_training_data.npz',
)
assert X.shape == Y.shape
print("shape of X,Y =", X.shape)
print("axes of X,Y =", XY_axes)
# ## Show
#
# This shows a ZY slice of some of the generated patch pairs (odd rows: *source*, even rows: *target*)
# Odd rows show source patches, even rows the corresponding target patches.
for i in range(2):
    plt.figure(figsize=(16,2))
    # 8 patches per figure; the trailing 0 presumably selects the singleton channel axis — confirm against XY_axes.
    sl = slice(8*i, 8*(i+1)), slice(None), slice(None), 0
    plot_some(X[sl],Y[sl],title_list=[np.arange(sl[0].start,sl[0].stop)])
    plt.show()
None;
| examples/upsampling3D/1_datagen.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# language: python
# name: python3
# ---
# + [markdown] colab_type="text" id="view-in-github"
# <a href="https://colab.research.google.com/github/hannanabdul55/seldonian-fairness/blob/master/logistic_regression_seldonian.ipynb" target="_parent"><img src="https://colab.research.google.com/assets/colab-badge.svg" alt="Open In Colab"/></a>
# + [markdown] pycharm={"name": "#%% md\n"}
# # Seldonian Model example usage notebook
# This notebook shows the usage of the `seldonian` library to train a `LogisticRegression` classifier on the [UCI Adult Income Dataset](https://archive.ics.uci.edu/ml/datasets/adult)
# + id="hdWz82x7LuPa"
try:
import seldonian
except:
# !pip install seldonian
import torch
import numpy as np
try:
import shap
except:
# !pip install shap
from sklearn.model_selection import train_test_split
import matplotlib.pyplot as plt
from sklearn.metrics import accuracy_score
from seldonian.seldonian import *
from seldonian.objectives import ghat_tpr_diff, ghat_recall_rate
# + [markdown] pycharm={"name": "#%% md\n"}
# # Dataset
# We use the `shap` library to import the preprocessed version of the Adult data.
# + id="miqDzE-GN1NV"
X, y = shap.datasets.adult()                  # numeric feature matrix and labels for training
X_d, y_d = shap.datasets.adult(display=True)  # human-readable (categorical) version for plotting
# -
# # Dataset statistics
# Here, we plot the counts of each category of a column that you can specify in the cell below. This column will be used as the sensitive attribute in the rest of the notebook.
# + pycharm={"name": "#%%\n"}
A = 'Sex'
# + pycharm={"name": "#%%\n"}
# Count the number of rows in each category of the sensitive column A.
grps = X_d.groupby(A)
counts = {}
for k,v in grps:
    counts[k] = v.shape[0]
plt.bar(counts.keys(), counts.values())
plt.title(f"Counts of number of samples of each category in column {A}")
plt.show()
# -
# # Constraint definition
#
# In the Seldonian Approach, the safety constraints are defined as a tuple $(g_i, \delta_i) \forall i \in m$ , where there are $m$ safety constraints. $g: \Theta \to \mathbb{R}$ is a function that quantifies the desirability of a solution and $\delta$ is the permissive probability of returning an undesirable/unsafe solution.
#
# Hence, if $f(\theta, D)$ is the original minimization optimization objective where $D$ is the data, then the new Seldonian objective function is:
# $$\arg_{\theta}\min f(\theta, D) \quad \quad \quad \text{s.t. } \Pr(g_i(\theta, D) \le 0 ) \ge 1- \delta_i \quad \forall i \in m $$
#
# The training is a 3-step process:
# - **Dataset split**: The input training data, $D$, is split into 2 sets - the candidate set, $D_{c}$, and the safety set, $D_{s}$.
# - **Candidate selection** - This is the step executed when the `fit` method is called on the Seldonian Model. This method runs the optimization objective with the safety constraints using _only_ the data $D_c$. Since $D_s$ is not available at this step, we _predict_ the upper bound on $g(\theta, D_s)$, let's call it $\hat{g}(\theta, D_c)$ using concentration inequalities like $ttest$ or _Hoeffding's_ bounds.
# - **Safety test** - This step is used to run the trained parameters, $\theta_c$ from the candidate selection step and calculate $g(\theta_c, D_s)$.
#
# Here we set `g_hats` as a list of $g(\theta)$'s where each item is a dictionary with the `fn` key is assigned a function callable that calculates $g_i(\theta)$ and the $\delta_i$.
#
# In this case, we have only one constraint which is the recall constraint on the `Sex` category in the dataset. This constraint is also known as `EqualizedOdds` constraint from [Hardt et.al 2016](https://proceedings.neurips.cc/paper/2016/file/9d2682367c3935defcb1f9e247a97c0d-Paper.pdf). The constraint is written as
#
# $$g(\theta) = |\Pr(f(\theta, X)=y |Y=y, A = Male) - \Pr(f(\theta, X)=y |Y=y, A = Female)| - 0.05 $$
#
# where `X` is the input features, `A` is the sensitive feature (`Sex` in this case) and `Y` is the target/prediction. The `0.05` is the maximum value that is permissible.
#
# Hence, this constraint upper bounds the absolute difference between the individual accuracy for each category of the sensitive attributes to `0.05`.
#
# We also split the entire dataset to a trainj and test set. Note that the test set here is _different_ from the safety set used within the Seldonian Algorithm.
#
# + id="sZQ3yM1kMb0L"
A_idx = list(X.columns).index(A)  # column index of the sensitive attribute (taken before to_numpy)
X = X.to_numpy()
X_tr, X_te, y_tr,y_te = train_test_split(X, y, test_size=0.3, random_state=42)
# Single safety constraint: recall-rate difference across A bounded by 0.05, with delta = 0.05.
g_hats = [{
    'fn': ghat_recall_rate(A_idx, threshold=0.05),
    'delta': 0.05
}]
# -
# ## Training the Seldonian Logistic Regression Model
# The call to `model.fit()` run the candidate selection step and get the $\theta_c$ parameter.
# + colab={"base_uri": "https://localhost:8080/"} id="QhRS04sDOb2U" outputId="d469095a-1f47-4d4e-9494-b1614b395326"
# Candidate selection: `fit` optimizes theta on the candidate split only.
# NOTE(review): test_size=0.2 presumably sets the internal safety-set fraction — confirm in the seldonian docs.
model = LogisticRegressionSeldonianModel(X_tr, y_tr, g_hats=g_hats, test_size=0.2, stratify=True)
model.fit()
# -
# # Safety test
# The call to `model._safetyTest()` runs the next step i.e. the _safety test_ on the candidate model $\theta_c$.
# + colab={"base_uri": "https://localhost:8080/"} id="komHZE_5Q0cm" outputId="045ca488-b551-47cc-d12a-05ea1aaad6b6"
safe = model._safetyTest()  # a value > 0 indicates the constraint was violated (see print below)
print(f"The trained model {'failed' if safe>0 else 'passed'} the safety test.")
# -
# # Seldonian Model metrics
# Here, we report the Seldonian Model's accuracy and the violation i.e. the $g(\theta_c)$ on the test set `X_te`. We can see that the $g(\theta_c) < 0$. This means the candidate $\theta_c$ _passed_ the safety test.
# + colab={"base_uri": "https://localhost:8080/"} id="XbEkNNqhtMGc" outputId="08490545-bbf6-474b-ce88-2651893366de"
print(f"Constrained model accuracy: {accuracy_score(y_te, model.predict(X_te))}")
print(f"Constrained model violation: {ghat_recall_rate(A_idx, threshold=0.05)(X_te, y_te, model.predict(X_te), 0.05, ub=False)} ")
# -
# # Unconstrained model metrics
# Now, we compare this model to an unconstrained model trained on the same dataset using scikit learns `LogisticRegression` and the same metrics are then calculated on the test set `X_te`.
# + colab={"base_uri": "https://localhost:8080/"} id="6_17W9SytQjg" outputId="8449a432-5f0e-49a0-f292-6ab189d5a40e"
from sklearn.linear_model import LogisticRegression
# Baseline: plain (unconstrained) logistic regression on the same training split.
uc_model = LogisticRegression(solver='liblinear', random_state=42).fit(X_tr, y_tr)
# -
# We can see that the unconstrained model _fails_ the safety test.
# + colab={"base_uri": "https://localhost:8080/"} id="cKVy7eiLti5r" outputId="d9633473-404b-4cef-d818-7105bc8fe31f"
# Evaluate the unconstrained baseline on the held-out test set.
uc_pred = uc_model.predict(X_te)  # predict once; reused for both metrics below
g_theta_test = ghat_recall_rate(A_idx, threshold=0.05)(X_te, y_te, uc_pred, 0.05, ub=False)
# Fixed typo in user-facing output ("Uncontrained" -> "Unconstrained").
print(f"Unconstrained model accuracy: {accuracy_score(y_te, uc_pred)}")
print(f"Unconstrained model value of g(\\theta): {g_theta_test}")
print(f"Hence, this model {'failed' if g_theta_test >0 else 'passed'} the safety test")
| logistic_regression_seldonian.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3 (ipykernel)
# language: python
# name: python3
# ---
# %load_ext autoreload
# %autoreload 2
import molsysmt as msm
# # Has small molecules
# Load the demo TcTIM structure (1tcd, MMTF file) and convert it to molsysmt's native form.
molecular_system = msm.demo['TcTIM']['1tcd.mmtf']
molecular_system = msm.convert(molecular_system, to_form='molsysmt.MolSys')
# Query whether the system contains small molecules (presumably returns a bool — confirm in molsysmt docs).
msm.build.has_small_molecules(molecular_system)
| docs/contents/build/has_small_molecules.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# language: python
# name: python3
# ---
# + id="3elkSd8yRyr5" outputId="a9763050-c88b-431f-c381-fd4d05e71d22" colab={"base_uri": "https://localhost:8080/", "height": 122}
from google.colab import drive
drive.mount('/content/drive')
import sys
sys.path.append('/content/drive/My Drive/pendulo_nn')
# + id="hD7RwDat6CSg" outputId="0d374b3c-9542-492b-f88c-0ac1c9f75505" colab={"base_uri": "https://localhost:8080/", "height": 54}
import time
import numpy as np
import h5py
import matplotlib.pyplot as plt
import scipy
from dnn_utils_v2 import sigmoid, sigmoid_backward, relu, relu_backward, predict
from building_your_deep_neural_network_step_by_step_v8a import *
np.random.seed(1)
# + [markdown] id="VPbSMKod6CSl"
# ## 2 - Dataset
#
# You will use the same "Cat vs non-Cat" dataset as in "Logistic Regression as a Neural Network" (Assignment 2). The model you had built had 70% test accuracy on classifying cats vs non-cats images. Hopefully, your new model will perform better!
#
# **Problem Statement**: You are given a dataset ("data.h5") containing:
# - a training set of m_train images labelled as cat (1) or non-cat (0)
# - a test set of m_test images labelled as cat and non-cat
# - each image is of shape (num_px, num_px, 3) where 3 is for the 3 channels (RGB).
#
# Let's get more familiar with the dataset. Load the data by running the cell below.
# + id="doOtTRTT7PEu"
# Pendulum training set: state samples (presumably angle and angular velocity rows — confirm) and applied torques.
training_data = np.loadtxt('/content/drive/My Drive/pendulo_nn/training_data.txt', dtype=float)
training_torques = np.loadtxt('/content/drive/My Drive/pendulo_nn/training_torques.txt', dtype=float)
training_torques = training_torques.reshape(1,-1)  # row vector: shape (1, number of examples)
# + [markdown] id="c5UzCH_I6CSu"
# The following code will show you an image in the dataset. Feel free to change the index and re-run the cell multiple times to see other images.
# + id="CpSLn8YU6CSu" outputId="413ae5cf-4918-4d74-875c-593486d8e483" colab={"base_uri": "https://localhost:8080/", "height": 51}
# Sanity check: shapes of the loaded arrays (the original "picture" comment was stale, copied from the image assignment).
print (training_data.shape)
print (training_torques.shape)
# + id="GJcgC3we6CS7"
# Standardize data to have feature values between 0 and 1.
#training_data[0] = training_data[0]/training_data[0].max()
#training_data[1] = training_data[1]/training_data[1].max()
# NOTE(review): divides by a fixed 10 instead of the per-feature maximum, so values
# only land in [0, 1] if both features are bounded by 10 — confirm against the data.
training_data[0] = training_data[0]/10
training_data[1] = training_data[1]/10
#training_torques = training_torques/training_torques.max()
# + id="H8JpjBK6bQV0" outputId="fb2f11d3-c31f-472e-c3c9-e1754703b586" colab={"base_uri": "https://localhost:8080/", "height": 68}
# Verify the scaling: maxima of the two (scaled) features and of the (unscaled) torques.
print(training_data[0].max())
print(training_data[1].max())
print(training_torques.max())
# + id="E-sSrHBg6CTE"
### CONSTANTS DEFINING THE MODEL ####
n_x = 2  # two input features (the old "num_px * num_px * 3" comment was stale, from the image assignment)
n_h = 1  # hidden-layer size
n_y = 1  # single torque output
layers_dims = (n_x, n_h, n_y)
# + id="T20kufF96CTJ"
# GRADED FUNCTION: two_layer_model
def two_layer_model(X, Y, layers_dims, learning_rate = 0.0075, num_iterations = 3000, print_cost=False):
    """
    Train a two-layer network: LINEAR -> RELU -> LINEAR (purely linear output unit).

    Arguments:
    X -- input data, of shape (n_x, number of examples)
    Y -- target vector, of shape (1, number of examples)
    layers_dims -- dimensions of the layers (n_x, n_h, n_y)
    learning_rate -- step size of the gradient-descent update rule
    num_iterations -- number of gradient-descent iterations
    print_cost -- if True, print the cost every 100 iterations

    Returns:
    parameters -- a dictionary containing the trained W1, b1, W2 and b2
    """
    np.random.seed(1)                 # reproducible initialisation
    costs = []                        # cost history, sampled every 100 iterations
    n_x, n_h, n_y = layers_dims

    # Random initialisation of both layers.
    parameters = initialize_parameters(n_x, n_h, n_y)

    for it in range(num_iterations):
        # Current weights and biases.
        W1, b1 = parameters["W1"], parameters["b1"]
        W2, b2 = parameters["W2"], parameters["b2"]

        # Forward pass: hidden ReLU layer followed by a linear output layer.
        A1, cache1 = linear_activation_forward(X, W1, b1, "relu")
        A2, cache2 = linear_forward(A1, W2, b2)

        cost = compute_cost(A2, Y)

        # Backward pass, seeded with the squared-error gradient d(cost)/dA2.
        dA2 = -2*(Y - A2)
        dA1, dW2, db2 = linear_backward(dA2, cache2)
        dA0, dW1, db1 = linear_activation_backward(dA1, cache1, "relu")

        grads = {"dW1": dW1, "db1": db1, "dW2": dW2, "db2": db2}

        # Gradient-descent step.
        parameters = update_parameters(parameters, grads, learning_rate)

        # Report and record the cost every 100 iterations.
        if print_cost and it % 100 == 0:
            print("Cost after iteration {}: {}".format(it, np.squeeze(cost)))
            costs.append(cost)

    # Learning curve.
    plt.plot(np.squeeze(costs))
    plt.ylabel('cost')
    plt.xlabel('iterations (per hundreds)')
    plt.title("Learning rate =" + str(learning_rate))
    plt.show()

    return parameters
# + [markdown] id="4_tNOmuK6CTN"
# Run the cell below to train your parameters. See if your model runs. The cost should be decreasing. It may take up to 5 minutes to run 2500 iterations. Check if the "Cost after iteration 0" matches the expected output below, if not click on the square (⬛) on the upper bar of the notebook to stop the cell and try to find your error.
# + id="yPsKKoXZ6CTO" outputId="1a6d17f2-71fa-4c02-d536-fb42b1e626b6" colab={"base_uri": "https://localhost:8080/", "height": 1000}
parameters = two_layer_model(training_data, training_torques, layers_dims = (n_x, n_h, n_y), num_iterations = 23000, print_cost=True)
# + [markdown] id="4M-E4RFe6CTU"
# Good thing you built a vectorized implementation! Otherwise it might have taken 10 times longer to train this.
#
# Now, you can use the trained parameters to classify images from the dataset. To see your predictions on the training and test sets, run the cell below.
# + id="tdM7bLsq0YUD"
np.savetxt('/content/drive/My Drive/pendulo_nn/W1.txt', parameters["W1"], fmt='%f')
# + id="MY-Kqf2xFWVl" outputId="56dad4af-df32-4fbe-ec6a-94017844e956" colab={"base_uri": "https://localhost:8080/", "height": 34}
print(parameters["W2"].shape)
# + id="xEoaBY1o-BL_"
np.savetxt('/content/drive/My Drive/pendulo_nn/b1.txt', parameters["b1"] , fmt='%f')
# + id="5hSYQjnE-EUl"
np.savetxt('/content/drive/My Drive/pendulo_nn/W2.txt', parameters["W2"] , fmt='%f')
# + id="3-mVanTy-E9u"
np.savetxt('/content/drive/My Drive/pendulo_nn/b2.txt', parameters["b2"] , fmt='%f')
# + id="bSKibRCQ6CTV" outputId="82504a26-7dd0-4b66-e46f-ea2ec8592c41" colab={"base_uri": "https://localhost:8080/", "height": 34}
predictions_train = predict(training_data, training_torques, parameters)
| pendulo_nn.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# language: python
# name: python3
# ---
# #### - Write a program that prints the numbers from 1 to n. For multiples of ‘3’ print “Fizz” instead of the number and for the multiples of ‘5’ print “Buzz” instead of the number itself. If a number is a multiple of 3 and 5 both, print "FizzBuzz".
# +
n = int(input("Enter a number: "))
if n ==0:
exit()
seperator = "," # Expected Output format: Comma seperated numbers with "Fizz" and "Buzz" inserted in between
for num in range(1,n+1):
if num==n:
seperator="\n"
if num%15==0:
print("FizzBuzz",end=seperator)
continue
elif num%5 ==0:
print("Buzz",end=seperator)
continue
elif num%3 == 0:
print("Fizz",end=seperator)
else:
print(num,end=seperator)
# -
| Fizz Buzz.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# language: python
# name: python3
# ---
# # Linear Regression
from sklearn.linear_model import LinearRegression
# Consider a linear regression model with an input vector $x^i = (X_1 , X_2 , \dots , X_n )$, and a real-valued output $y$. The linear regression model has the form:
#
# $$\large f_\theta(X) = \theta_0 + \sum_{i=1}^mx^i\theta^i$$
#
# Here:
# - $[\theta_1, \theta_2, ... , \theta_n]$ are called **model weights** (generally) OR **coefficients** (in linear regression)
# - $\theta_0$ is called **bias** (generally) OR **intercept** (in linear regression)
#
# We consider a set of training data $(x_1 , y_1 ) \dots (x_m , y_m )$ from which to estimate the parameters $\theta$. Here m is the no of training examples we have.
# The regression model relies on several assumptions:
#
# - The independent variable is not random.
# - The variance of the error term is constant across observations. This is important for evaluating the goodness of the fit.
# - The errors are not autocorrelated. The Durbin-Watson statistic detects this; if it is close to 2, there is no autocorrelation.
# - The errors are normally distributed. If this does not hold, we cannot use some of the statistics, such as the F-test.
#
# If we confirm that the necessary assumptions of the regression model are satisfied, we can safely use the statistics reported to analyze the fit.
# Linear Regression Loss Function (Ordinary Least Squares)
# -----
#
# <center><img src="images/loss.png" width="50%"/></center>
# - The coefficient estimates for Ordinary Least Squares rely on the independence of the features.
# - When features are correlated and the columns of the design matrix \(X\) have an approximate linear dependence, the design matrix becomes close to singular and as a result, the least-squares estimate becomes highly sensitive to random errors in the observed target, producing a large variance.
# - This situation of multicollinearity can arise, for example, when data are collected without an experimental design.
# ## Some other loss function for Linear Regression
# ### Absolute or Laplace or L1 loss:
# $$J_\theta = \sum_{i=1}^m|e_i| = \sum_{i=1}^m|y_t - \theta_0 - \theta_1X_1 - \theta_2X_2|$$
# ### Residual sum of squares or L2 loss:
# $$J_\theta = \sum_{i=1}^me_i^2 = \sum_{i=1}^m(y_t - \theta_0 - \theta_1X_1 - \theta_2X_2)^2$$
# ### Huber loss:
#
# It is between L1 and L2 loss.
# # Linear Models with Regularisation
# - Regularization is the process of introducing additional information to minimize overfitting.
# - Regularization discourages unnecessary complexity.
# - In regularisation, we try to shrink the regression coefficients by imposing a penalty on their size.
# #### One Way to Regularize: Add a constraint to the loss function
#
# Regularized Loss = Loss Function + Constraint
#
# Here constraint = $\large \sum_{j=1}^n |\theta_j|^p$ .
# - Depending upon the value of the parameter $p$ in the constraint, we can have different types of regularisation like L1 (p=1) and L2 (p=2).
#
# - This is more generally known as Lp regularizer.
#
# - For making visualization easy, let us plot them in 2D space. For that we suppose that we just have two parameters.
#
# <center><img src="images/lp_reg.png" width="80%"/></center>
# ## Lasso (Least Absolute Shrinkage Selector Operator) Regression
from sklearn.linear_model import Lasso
# #### Linear Regression Loss Function + L1 Regularization
#
# Here we see that the loss function is modified with an extra term called the penalty term.
#
# <center><img src="images/l1.png" width="50%"/></center>
#
# $$ L1 = \large \lambda \sum_{j=1}^n|\theta_j| $$
#
# Considering two independent variables, the penalty can be given as = $|\theta_1| + |\theta_2|$ .
#
# Notice that the intercept $\theta_0$ has been left out of the penalty term. Penalization of the intercept would make the procedure depend on the origin chosen for y.
#
# - Here $\lambda$ is a **hyperparameter** and should be _set at the time of model training_. Higher the values of lambda, bigger is the penalty.
# - L1 shrinks the weights using the absolute values of the weight coefficients (i.e., the weight vector).
# - Penalize the model by the absolute weight coefficients.
# <center><img src="images/lasso.png" width="50%"/></center>
# #### The effect of L1 loss
# - Penalize large coefficients - Large coefficients will increase the size of the total error functions.
# - L1 regularization induces sparsity. Sparsity means as many values will be zero as possible.
#
# #### Why Lasso?
# - Lasso selects only some features while reducing the coefficients of the others to zero. This property is known as **feature selection**, which is absent in ridge.
# - It is generally used when we have more number of features, because it automatically does feature selection.
# ## Ridge Regression
from sklearn.linear_model import Ridge
# #### Linear Regression Loss Function + L2 Regularization
# <center><img src="images/l2.png" width="50%"/></center>
# $$ \large L2 = \lambda \sum_{j=1}^n\theta_j^2$$
#
# The ridge coefficients minimize a penalized residual sum of squares
#
# - Here again $\lambda$ is a hyperparameter
# - L2 shrinks the weights by computing the Euclidean norm of the weight coefficients (the weight vector )
# - it is mostly used to prevent multicollinearity.
# <center><img src="images/ridge.png" width="50%"/></center>
# For both Lasso and Ridge: λ -> Regularization parameter
# -----
#
# λ parameter controls the regularization strength, aka the size of the shaded area.
#
# The larger the value of the stronger the regularization of the model.
# ## Elastic Net
#
# Elastic net is basically a combination of both L1 and L2 regularization. So if you know elastic net, you can implement both Ridge and Lasso by tuning the parameters. So it uses both L1 and L2 penality term
# <center><img src="images/elastic.png" width="75%"/></center>
| src/week4/day2_linear-models.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# language: python
# name: python3
# ---
# # Lesson 33 - Grid Search
#
# ### The following topis are discussed in this notebook:
# * Using `GridSearchCV` for hyperparameter selection.
#
#
# ## Grid Search
#
# Grid search is a method for performing hyperparameter tuning for a model. This technique involves identifying one or more hyperparameters that you would like to tune, and then selecting some number of values to consider for each hyperparameter. We then evaluate each possible set of hyperparameters by performing some type of validation. Typically, this will involve performing cross-validation to generate an out-of-sample performance estimate for each set of hyperparameters. We then typically select the model that has the highest cross-validation score.
# ## Import Packages
#
# We will illustrate how to perform grid search in Scikit-Learn in this lesson. We begin by importing a few packages and tools that are not directly related to grid search.
# +
import numpy as np
import matplotlib.pyplot as plt
import pandas as pd
from sklearn.linear_model import LogisticRegression
from sklearn.tree import DecisionTreeClassifier
from sklearn.ensemble import RandomForestClassifier
from sklearn.datasets import make_classification
# -
# ## Generate Data
#
# In this lesson, we will work with a synthetic dataset created for a classification problem. The dataset contains 400 observations, each of which will have 6 features, and will be assigned one of 10 possible classes. The features are stored in an array named `X`, while the labels are stored in an array named `y`. We start by viewing the contents of `X` in a DataFrame format.
#
# +
# Synthetic 400x6 dataset with 10 classes; class_sep=2 makes the classes reasonably separable.
np.random.seed(1)
X, y = make_classification(n_samples=400, n_features=6, n_informative=6,
                           n_redundant=0, n_classes=10, class_sep=2)
pd.DataFrame(X)
# -
# Let's now view the first few elements of `y`.
print(y[:20])
# ## Grid Search in Scikit-Learn
#
# GridSearch is performed in Scikit-Learn using the `GridSearchCV` class. We will import this class in the cell below.
from sklearn.model_selection import GridSearchCV
# ## Grid Search with Logistic Regression
#
# We will illustrate the usage of `GridSearchCV` by first performing hyperparameter tuning to select the optimal value of the regularization parameter `C` in a logistic regression model.
#
# We start by defining a parameter grid. This is a dictionary containing keys for any hyperparameters we wish to tune over. The values associated with each key should be a list or array of values to consider for that hyperparameter.
# 20 candidate values of the regularization parameter C, log-spaced over [1e-3, 1e3].
param_grid = [
    {'C': 10**np.linspace(-3,3,20)}
]
# We then create an instance of the estimate that we wish to tune over. In this case, that is the `LogisticRegression` class. Note that we do not fit the model to the training data yet.
lin_reg = LogisticRegression(solver='lbfgs', multi_class='multinomial', max_iter=1000)
# We then create an instance of the `GridSearchCv` class. When creating this instance, we must provide an estimator, a parameter grid, a number of folds to use in cross-validation, an evaluation metric to use in cross-validation. If we specify that `refit=True` (which is the default value), then `GridSearchCV` will automatically fit the best model found to the entire data set. We will discuss this more later.
#
# After creating an instance of `GridSearchCV`, we train it using the `fit` method.
#
# A trained `GridSearchCV` obtain has many attributes and methods that we might be interested in. We will explore this in more detail later, but for now, the most important attributes are `best_score_` and `best_params_`. The `best_score_` attribute will contain the cross-validation score for the best model found, while `best_params_` will be a dictionary of the hyperparameter values that generated the optimal cross-validation score.
# +
# Exhaustive search over C with 10-fold CV; refit=True retrains the best model on the full dataset.
# NOTE(review): the `iid` parameter was removed in scikit-learn 0.24 — this cell requires an older sklearn; confirm.
lr_gridsearch = GridSearchCV(lin_reg, param_grid, cv=10, scoring='accuracy',
                             refit=True, iid=False)
lr_gridsearch.fit(X, y)
print(lr_gridsearch.best_score_)
print(lr_gridsearch.best_params_)
# -
# We see that the highest cross-validation score obtain for any of the values of `C` considered was 62.7%. This was obtained by using `C = 0.16237767`.
# ### Obtaining the Best Model
#
# When trained, `GridSearchCV` class will automatically refit a final model to the full training set using the optimal hyperparameter values found. This model is stored in the attribute `best_estimator_`.
#
# In the cell below, we extract the best model from our `GridSearchCV` object and use it to calculate the training accuracy for this model.
# The best model was already refit on the full dataset (refit=True above).
lr_model = lr_gridsearch.best_estimator_
print('Training Score:', lr_model.score(X, y))
# ## Grid Search with Decision Trees
#
# We will now illustrate how to use `GridSearchCV` to perform hyperparameter tuning for a decision tree. We will tune over two hyperparameters: `max_depth` and `min_samples_leaf`.
# +
# 6 depths x 4 leaf sizes = 24 candidate trees, each scored with 10-fold CV.
param_grid = [{
    'max_depth': [2, 4, 8, 16, 32, 64],
    'min_samples_leaf': [2, 4, 8, 16]
}]
tree = DecisionTreeClassifier()
np.random.seed(1)  # fixed seed for reproducible tree fitting
dt_gridsearch = GridSearchCV(tree, param_grid, cv=10, scoring='accuracy',
                             refit=True, iid=False)
dt_gridsearch.fit(X, y)
print(dt_gridsearch.best_score_)
print(dt_gridsearch.best_params_)
# -
# The decision tree with the highest cross-validation score had a `max_depth` of 32 and a `min_samples_leaf` of 8. Notice that this model outperforms the best logistic regression model that we found above. In the cell below, we extract the best model from the `GridSearchCV` object, and calculate its score on the training set.
#
# Refit best decision tree, evaluated on the (optimistic) training set.
dt_model = dt_gridsearch.best_estimator_
print('Training Score:', dt_model.score(X, y))
# ## Grid Search with Random Forests
#
# We will now illustrate how to use `GridSearchCV` to perform hyperparameter tuning for a random forest. We will tune over two hyperparameters: `max_depth` and `min_samples_leaf`. We will set the `n_estimators` hyperparameter to 200.
# +
# Same 24-point grid as the tree search, but over 200-tree random forests (slower to fit).
param_grid = [{
    'max_depth':[2, 4, 8, 16, 32, 64],
    'min_samples_leaf':[2, 4, 8, 16]
}]
forest = RandomForestClassifier(n_estimators=200)
np.random.seed(1)  # fixed seed for reproducible forest fitting
rf_gridsearch = GridSearchCV(forest, param_grid, cv=10, scoring='accuracy',
                             refit=True, iid=False)
rf_gridsearch.fit(X, y)
print(rf_gridsearch.best_score_)
print(rf_gridsearch.best_params_)
# -
# The random forest with the highest cross-validation score had a `max_depth` of 8 and a `min_samples_leaf` of 4. This model outperforms either of our previous two models. In the cell below, we extract the best model from the `GridSearchCV` object, and calculate its score on the training set.
# Refit best random forest, evaluated on the (optimistic) training set.
rf_model = rf_gridsearch.best_estimator_
print('Training Score:', rf_model.score(X, y))
# ## Exploring Grid Search Results
#
# If we would like to see more detailed results pertaining to the results of the grid seach process, more information can be found in the `cv_results` attribute of a trained instance of the `GridSearchCV` class. This attribute contains a dictionary with several pieces of information pertaining to the results of the cross-validation steps. We will start by looking at the keys of the items stored in this dictionary.
# Detailed cross-validation bookkeeping for the random-forest search.
cv_res = rf_gridsearch.cv_results_
print(cv_res.keys())
# The items `split0_test_score` through `split9_test_score` each contain the validation score for each of the models considered on one particular fold. The average validation scores for each individual model can be found in the `mean_test_score` item.
print(cv_res['mean_test_score'])
# In the cell below, we print the average test scores along with the hyperparameter values for the models that generated them.
# Pair each mean CV score with the hyperparameters that produced it.
for score, params in zip(cv_res['mean_test_score'], cv_res['params']):
    print(score, params)
# We see that although the `max_depth=8`, `min_samples_leaf=4` model performed the best, there were a few other models that had very similar results.
| files/notebooks/dsci_303_503/32 - Grid Search.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# name: python3
# ---
# + [markdown] id="XfPGWnNOCc8P"
# ## Boolean Operations
# + colab={"base_uri": "https://localhost:8080/"} id="oyGQ50P9CiFb" outputId="c645ee3e-a2ba-40e3-92f1-1ff9318751b2"
# Booleans represent one of two values: True or False
print(10>9)
print(10==9)
print(9>10)
# + colab={"base_uri": "https://localhost:8080/"} id="qK2_f2k1C7XX" outputId="8f212dc1-bbd9-492e-952e-832c3108e361"
a=10
b=9
print(a>b)
print(a==a)
print(b>a)
b>a
# + colab={"base_uri": "https://localhost:8080/"} id="qlE7iEOIDxLB" outputId="dd6600f2-0f05-43b9-a4bf-f06347af30c9"
print(bool("hello"))
print(bool(15))
print(bool(True))
# + colab={"base_uri": "https://localhost:8080/"} id="3IeHnkNhEBMq" outputId="f010238b-5960-4a3d-9f65-cc46ebf2c898"
print(bool(False))
print(bool(None))
print(bool(0))
print(bool([])) #allows you to evaluate and gives False in return
# + colab={"base_uri": "https://localhost:8080/"} id="25Vs8ddDEsfL" outputId="83217178-d78a-4601-e429-32e957838827"
# A predicate that always succeeds; the print below branches on its result.
def myFunction():
    return True

print("Yes" if myFunction() else "No")
# + colab={"base_uri": "https://localhost:8080/"} id="T4RDoL-0GH77" outputId="18bd21b3-7e88-40e9-f471-cde7427db9e9"
# A predicate that always fails; the print below branches on its result.
def myFunction():
    return False

print("Yes" if myFunction() else "No")
# + [markdown] id="8MNngkOeGasN"
# # You Try!
# + colab={"base_uri": "https://localhost:8080/"} id="KxmkKtE-GSVP" outputId="4ecea8dd-c8f1-4c4a-a56f-c712eeb00bf6"
a=6
b=7
print(a==b)
print(a!=a)
# + colab={"base_uri": "https://localhost:8080/"} id="pB4I1IGFG1nQ" outputId="c28d9bfc-1f71-4260-a39f-e75d508d450d"
print(10+5)
print(10-5)
print(10*5)
print(10/5) #true (float) division - quotient 2.0
print(10%5) #modulo - remainder of 10/5, i.e. 0
print(10%3) #modulo - remainder of 10/3, i.e. 1
print(10//3) # floor division - integer quotient, i.e. 3
print(10**2) #exponentiation (10 squared = 100), not concatenation
# + colab={"base_uri": "https://localhost:8080/"} id="Yh4BTbkPJVVs" outputId="ae98937b-7918-4dc6-9bc1-9277391a2ca6"
a=60 #0011 1100
b=13 #0000 1101
print(a & b) #AND: 0000 1100 -> 12
print(a|b) #OR: 0011 1101 -> 61
print(a^b) #XOR: 0011 0001 -> 49
print(a<<2) #left shift by 2 bits: 1111 0000 -> 240
print(a>>2) #right shift by 2 bits: 0000 1111 -> 15
# + colab={"base_uri": "https://localhost:8080/"} id="qI4WZUR6MzRM" outputId="9e1e3a45-1927-4aa9-d408-bdcfd17ef13f"
x=6
x+=3 #same as x=x+3, so x is now 9
print(x)
x%=3 #same as x=x%3, i.e. 9 % 3 -> remainder 0
print(x)
# + colab={"base_uri": "https://localhost:8080/"} id="c92wWA3-NjeX" outputId="28d2543e-f244-40b5-b7bb-f365aae856da"
a=True
b= False
a and b
a or b
# + colab={"base_uri": "https://localhost:8080/"} id="_Dt2V5YQNrTn" outputId="9f5d0a34-91fb-40f1-c2d8-434f64561d87"
a=True
b= False
print(a and b)
print(a or b)
print(not(a and b))
print(not(a or b)) #negation
# + colab={"base_uri": "https://localhost:8080/"} id="Plle4V2vOKYG" outputId="5d13885b-b9a0-486d-d4a5-2eb71c097b9d"
a is b
a is not b
# + colab={"base_uri": "https://localhost:8080/"} id="rTo--BeOOSsP" outputId="c7f2f9e8-5a59-44f2-ced0-a6a1690f6e16"
print(a is b)
print(a is not b)
| Operations_and_expressions.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# language: python
# name: python3
# ---
import pandas as pd
# +
# We load Google stock data in a DataFrame
Google_stock = pd.read_csv('./GOOG.csv')
# We print some information about Google_stock
print('Google_stock is of type:', type(Google_stock))
print('Google_stock has shape:', Google_stock.shape)
# -
Google_stock
Google_stock.head()
Google_stock.tail()
Google_stock.isnull().any()
# We get descriptive statistics on our stock data
Google_stock.describe()
# We get descriptive statistics on a single column of our DataFrame
Google_stock['Adj Close'].describe()
# We print information about our DataFrame
print()
print('Maximum values of each column:\n', Google_stock.max())
print()
print('Minimum Close value:', Google_stock['Close'].min())
print()
# numeric_only=True excludes the non-numeric Date column; pandas >= 2.0
# raises a TypeError instead of silently dropping non-numeric columns here.
print('Average value of each column:\n', Google_stock.mean(numeric_only=True))
# We display the correlation between the numeric columns
Google_stock.corr(numeric_only=True)
# We load fake Company data in a DataFrame
data = pd.read_csv("./fake_company.csv")
data
# Display the total amount of money spent on salaries each year
data.groupby(['Year'])['Salary'].sum()
# Display the average salary per year
data.groupby(['Year'])['Salary'].mean()
# Total salary each employee received over all the years
data.groupby(['Name'])['Salary'].sum()
# Salary by department by year (two-part grouping)
data.groupby(['Year', 'Department'])['Salary'].sum()
| scratch/lesson_3/pandas/Pandas_load-data.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# language: python
# name: python3
# ---
import math
from tensorboardX import SummaryWriter
import env_configurations
sess = None
# +
import tensorflow as tf
import collections
from a2c_continuous import A2CAgent
import common.tr_helpers
import networks
# Let TensorFlow grow GPU memory on demand, capped at 80% of the card, so
# the notebook can coexist with other GPU processes (TF1.x-style session).
gpu_options = tf.GPUOptions(allow_growth=True, per_process_gpu_memory_fraction=0.8)
config = tf.ConfigProto(gpu_options=gpu_options)
sess = tf.InteractiveSession(config=config)
# +
import gym
import wrappers
import games_configurations
a2c_config = games_configurations.bipedalwalkerhardcore__config
env_name = a2c_config['ENV_NAME']
#'RoboschoolHalfCheetah-v1'#'BipedalWalker-v2'#'Pendulum-v0'#'QuadruppedWalk-v1' #'QuadruppedWalk-v1' #$'BipedalWalker-v2'#'RoboschoolAnt-v1'
#'BipedalWalker-v2'#'RoboschoolAnt-v1' #'LunarLanderContinuous-v2'#'BipedalWalker-v2'#'LunarLander-v2'
#'MountainCarContinuous-v0'#'CarRacing-v0'#'CartPole-v1' #'RoboschoolAnt-v1' #'CarRacing-v0' #'LunarLander-v2' #'Acrobot-v1' #
#a2c_config = games_configurations.pendulum_lstm_config
#a2c_config = games_configurations.halfcheetah_lstm_config_v2
#a2c_config = games_configurations.roboschoolant_config
#a2c_config = a2c_games_configurations.pendulum_config
#a2c_config = games_configurations.bipedalwalker_config
#a2c_config = games_configurations.roboschoolhumanoid_lstm_config
obs_space, action_space = env_configurations.get_obs_and_action_spaces(env_name)
print(obs_space)
print(action_space)
print(action_space.low)
print(action_space.high)
# -
import ray
ray.init(redis_max_memory=1024*1024*100, object_store_memory=1024*1024*100)
agent = A2CAgent(sess,'run', obs_space, False, action_space, a2c_config)
#agent.restore('nn/humanoid_best')
agent.train()
#agent.save('nn/latest')
agent.save('nn/latest_run2')
ray.shutdown()
import gym
gym.envs.registry.all()
| test_a2c_continuous.ipynb |
# -*- coding: utf-8 -*-
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .jl
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernel_info:
# name: julia-1.1
# kernelspec:
# display_name: Julia 1.1.1
# language: julia
# name: julia-1.1
# ---
# # Cellular Automaton
# Port of [Think Complexity chapter 5](http://greenteapress.com/complexity2/html/index.html) by <NAME>.
# + inputHidden=false jupyter={"outputs_hidden": false} outputExpanded=true outputHidden=true
using Pkg # Only first time
pkg"add Luxor" # Only first time
using Luxor
# -
# Cellular Automaton = discrete space (cells) as input for a calculation in discrete time
# ## A Trivial Example
# 0 dimensional CA, inverting its cell at each timestep (2 state values only)
#
# rule:
# + inputHidden=false jupyter={"outputs_hidden": false} outputHidden=false
# Rule for the 0-dimensional CA: the single cell simply inverts each step.
rule0dim(x::Bool) = !x
# -
# time evolution:
# + inputHidden=false jupyter={"outputs_hidden": false} outputHidden=false
# Evolve the 0-dimensional CA for `steps` time steps starting from `x₀`,
# returning the full history (initial state included, length steps + 1).
function step0dim(x₀::Bool, steps::Int64)
    history = [x₀]
    for _ in 1:steps
        push!(history, rule0dim(last(history)))
    end
    history
end
# -
# visualisation:
# + inputHidden=false jupyter={"outputs_hidden": false} outputHidden=false
res = step0dim(false, 10)
Drawing(50, 120, "out.svg")
for (i, val) in enumerate(res)
if val
sethue("grey")
else
sethue("lightgrey")
end
box(5, 5+i*10, 10, 10, :fill)
end
finish()
preview()
# -
# ## Wolfram's Experiment
# 1 dimensional CA with 2 state values, new value of a cell depends only on state of neighbouring cells.
#
# rule can be expressed as an integer
#
# |prev|111|110|101|100|011|010|001|000|
# |---|---|---|---|---|---|---|---|
# |next|b7|b6|b5|b4|b3|b2|b1|b0|
# |rule50|0|0|1|1|0|0|1|0|
#
# b can be converted to an integer
#
# get a rule from an integer:
# + inputHidden=false jupyter={"outputs_hidden": false} outputHidden=false
# Decode a Wolfram rule number into its 8-entry lookup table.
# Bits are stored least-significant first and padded with `false` up to
# length 8, so entry i is the next state for neighbourhood value i - 1.
function inttorule1dim(val::UInt8)
    bits = BitArray(digits(val, base=2))
    while length(bits) < 8
        push!(bits, false)
    end
    bits
end
# -
# Apply rule to a cell knowing its own previous state and the previous state of his left and right neighbour
# + inputHidden=false jupyter={"outputs_hidden": false} outputHidden=false
# Apply a 1-dimensional CA rule to one cell. `bits` holds the previous
# states (left neighbour, cell, right neighbour); they are read as a 3-bit
# number, most significant bit first, and used (1-based) to index `rule`.
function applyrule1dim(rule::BitArray{1}, bits::BitArray{1})
    idx = 4*bits[1] + 2*bits[2] + bits[3] + 1
    rule[idx]
end
# -
# time evolution:
# + inputHidden=false jupyter={"outputs_hidden": false} outputHidden=false
# Evolve a 1-dimensional CA for `steps` generations and return the history
# as a vector of rows (initial row first). The two boundary cells are never
# updated: each new row starts as a copy of x₀, so they keep their initial
# values, while interior cells are computed from the previous row.
function step1dim(x₀::BitArray{1}, rule::BitArray{1}, steps::Int64)
    history = [x₀]
    width = length(x₀)
    for _ in 1:steps
        row = copy(x₀)
        prev = history[end]
        for j in 2:width-1
            row[j] = applyrule1dim(rule, prev[j-1:j+1])
        end
        push!(history, row)
    end
    history
end
# -
# initialisation:
# + inputHidden=false jupyter={"outputs_hidden": false} outputHidden=false
x₀ = falses(21)
x₀[11] = true
res = step1dim(x₀, inttorule1dim(UInt8(50)), 9);
# -
# visualisation:
# + inputHidden=false jupyter={"outputs_hidden": false} outputHidden=false
# Render a 1-dimensional CA history as a grid of squares using Luxor.
# `res` is a vector of rows (one BitArray per generation); `dim` is the
# square size in pixels. Live cells are drawn "grey", dead cells
# "lightgrey"; the drawing is written to "out.svg" and previewed inline.
function visualize1dim(res, dim)
    width = dim * (length(res[1]) + 1)
    height = dim * (length(res) + 1)
    Drawing(width, height, "out.svg")
    for (i, arr) in enumerate(res)
        for (j, val) in enumerate(arr)
            if val
                sethue("grey")
            else
                sethue("lightgrey")
            end
            # box(center_x, center_y, width, height) filled with current hue
            box(j*dim, i*dim, dim, dim, :fill)
        end
    end
    finish()
    preview()
end
# + inputHidden=false jupyter={"outputs_hidden": false} outputHidden=false
visualize1dim(res, 10)
# -
# ## Classifying CAs
# ### Class 1
#
# Evolution from any starting condition to the same uniform pattern, eg. rule0
# + inputHidden=false jupyter={"outputs_hidden": false} outputHidden=false
using Random
x₀ = bitrand(21)
x₀[1] = false; x₀[end] = false;
res = step1dim(x₀, inttorule1dim(UInt8(0)), 1)
visualize1dim(res, 10)
# -
# ### Class 2
#
# Generation of a simple pattern with nested structure, i.e. a pattern that contains many smaller versions of itself, eg. rule50.
#
# Example that looks like a Sierpinski triangle (fractal): rule18.
# + inputHidden=false jupyter={"outputs_hidden": false} outputHidden=false
x₀ = falses(129)
x₀[65] = true
res = step1dim(x₀, inttorule1dim(UInt8(18)), 63);
visualize1dim(res, 4)
# -
# ### Class 3
#
# CAs that generate randomness, eg. rule30.
# + inputHidden=false jupyter={"outputs_hidden": false} outputHidden=false
x₀ = falses(201)
x₀[101] = true
res = step1dim(x₀, inttorule1dim(UInt8(30)), 99);
visualize1dim(res, 2.5)
# -
# Center column as a sequence of bits, is hard to distinguish from a truly random sequence: pseudo-random number generators (PRNGs).
#
# - regularities can be detected statistically
# - a PRNG with finite amount of state will eventually repeat itself (period)
# - underlying process is fundamentally deterministic (unlike some physical processes: thermodynamics or quantum mechanics)
#
# This complex behavior is surprising (chaos is often associated with non-linear behavior of continuous time and space processes).
# ### Class 4
#
# CAs that are Turing complete or universal, which means that they can compute any computable function, eg. rule110.
# + inputHidden=false jupyter={"outputs_hidden": false} outputHidden=false
x₀ = bitrand(600)
res = step1dim(x₀, inttorule1dim(UInt8(110)), 599);
visualize1dim(res, 0.85)
# -
# - After about 100 steps, simple repeating patterns emerge, but there are a number of persistent structures that appear as disturbances. Some are vertical, other are diagonal and are called spaceships.
#
# - Collisions between spaceships yield different results depending on their type and their phase. Some collisions annihilate both ships; others leave one ship unchanged; still others yield one or more ships of different types.
#
# - The collisions are the basis of computation in a rule110 CA. You can think of spaceships as signals that propagate through space, and collisions as gates that compute logical operations like AND and OR.
# ## Turing State-Machines
#
# Based on [wikipedia: Turing Machine](https://en.wikipedia.org/wiki/Turing_machine).
#
# A Turing machine is a mathematical model of computation that defines an abstract machine, which manipulates symbols on a tape according to a table of rules. Despite the model's simplicity, given any computer algorithm, a Turing machine capable of simulating that algorithm's logic can be constructed.
#
# - A tape divided into cells, one next to the other. Each cell contains a symbol from some finite alphabet. The alphabet contains a special blank symbol (here written as '0') and one or more other symbols. The tape is assumed to be arbitrarily extendable to the left and to the right, i.e., the Turing machine is always supplied with as much tape as it needs for its computation. Cells that have not been written before are assumed to be filled with the blank symbol. In some models the tape has a left end marked with a special symbol; the tape extends or is indefinitely extensible to the right.
# - A head that can read and write symbols on the tape and move the tape left and right one (and only one) cell at a time. In some models the head moves and the tape is stationary.
# - A state register that stores the state of the Turing machine, one of finitely many. Among these is the special start state with which the state register is initialized. These states, writes Turing, replace the "state of mind" a person performing computations would ordinarily be in.
# - A finite table of instructions that, given the state the machine is currently in and the symbol it is reading on the tape (symbol currently under the head), tells the machine to do the following in sequence:
# - Erase or write a symbol.
# - Move the head ( 'L' for one step left or 'R' for one step right or 'N' for staying in the same place).
# - Assume the same or a new state as prescribed.
# Table of rules:
#
# | Tape Symbol | State A | State B | State C |
# |:-----------:|-----------|-----------|-----------|
# | 0 | 1 - R - B | 1 - L - A | 1 - L - B |
# | 1 | 1 - L - C | 1 - R - B | 1 - R - H |
#
# + inputHidden=false jupyter={"outputs_hidden": false} outputHidden=false
# Transition function for the 3-state busy beaver machine.
# Maps (state, symbol under the head) to (symbol to write, head move, next
# state); returns `nothing` for any pair not in the table (e.g. state 'H').
function applyrulebusybeaver(state, read)
    table = Dict(
        ('A', 0) => (1, 'R', 'B'),
        ('A', 1) => (1, 'L', 'C'),
        ('B', 0) => (1, 'L', 'A'),
        ('B', 1) => (1, 'R', 'B'),
        ('C', 0) => (1, 'L', 'B'),
        ('C', 1) => (1, 'R', 'H'),
    )
    get(table, (state, read), nothing)
end
# -
# struct to represent the Turing State-Machine:
# + inputHidden=false jupyter={"outputs_hidden": false} outputHidden=false
# Mutable state of the Turing machine:
#   tape     - the finite, growable tape of symbols (0/1 here)
#   position - 1-based index of the head on `tape`
#   state    - current machine state ('A', 'B', 'C', ...; 'H' means halted)
mutable struct Turing
    tape :: Array{Int64}
    position :: Int64
    state :: Char
end
# + inputHidden=false jupyter={"outputs_hidden": false} outputHidden=false
# Compact textual representation: "<head position> - <state>: <tape>".
function Base.show(io::IO, turing::Turing)
    print(io, turing.position, " - ", turing.state, ": ", turing.tape)
end
# -
# implementation of a step:
# + inputHidden=false jupyter={"outputs_hidden": false} outputHidden=false
# Advance the Turing machine one step using the transition function
# `applyrule`, mutating `turing` in place. Errors once the halt state 'H'
# has been reached. The tape grows automatically at either end as needed.
function stepturing(turing, applyrule)
    if turing.state == 'H'
        error("Machine has stopped!")
    end
    read = turing.tape[turing.position]
    (write, dir, turing.state) = applyrule(turing.state, read)
    turing.tape[turing.position] = write
    # NOTE(review): 'L' increments the index and everything else decrements
    # it, i.e. the tape appears to be stored with "left" towards higher
    # indices — confirm this convention before reusing this code elsewhere.
    if dir == 'L'
        if turing.position == length(turing.tape)
            push!(turing.tape, false)
        end
        turing.position += 1
    else
        if turing.position == 1
            # Growing at the front shifts every cell right by one, so
            # keeping position == 1 effectively moves the head onto the
            # newly added cell.
            pushfirst!(turing.tape, false)
        else
            turing.position -= 1
        end
    end
    nothing
end
# -
# output:
# + inputHidden=false jupyter={"outputs_hidden": false} outputHidden=false
turing = Turing(zeros(Int64, 11), 6, 'A')
println(turing)
try
while true
stepturing(turing, applyrulebusybeaver)
println(turing)
end
catch
end
| Lectures/Lecture 1.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# language: python
# name: python3
# ---
import pandas as pd
import numpy as np
import matplotlib.pyplot as plt
import seaborn as sns
import warnings
warnings.filterwarnings("ignore")
from sklearn.cluster import DBSCAN
from sklearn.preprocessing import StandardScaler
from sklearn.metrics import silhouette_score
WS_data = pd.read_csv("G:/data sceince/Python/Clustering/DBSCAN/Wholesale customers data.csv")
WS_data.head()
# +
#In order to determine the no. of rows and columns we will use shape method
# -
WS_data.shape
WS_data.info() # using info function we will come to now about the null values
# +
# As channel and region columns are not useful because anyhow they are not going to contribute in determining noisy points so we can drop them.
# -
WS_data.drop(['Channel','Region'],axis=1,inplace = True)
WS_data.head()
# +
# Now we will standardize our data, in order to standardize the data we will use standard scaler as our complete data set is numeric
# For standardizing the data using standard scaler we will convert our data into array
# -
WS_data.values
scaler = StandardScaler().fit_transform(WS_data)
scaler
# +
# Final model building using dbscan function
# -
dbscan = DBSCAN(eps = 2 , min_samples = 7) # our data set has D = 6 columns, so we use the rule of thumb min_samples >= D + 1 = 7
dbscan.fit(scaler)
# Cluster labels per point; DBSCAN marks noise points with the label -1
labels = dbscan.labels_
# Silhouette score over all points (noise points are included, which
# typically lowers the score); values closer to 1 indicate better clustering
sil_s = silhouette_score(scaler, labels = labels, metric='euclidean')
sil_s
cl = pd.DataFrame(dbscan.labels_,columns = ['Cluster'])
cl
pd.concat([WS_data,cl],axis = 1)
| DBSCAN_clustering .ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3.7.5 64-bit
# language: python
# name: python37564bitaf41ac0548f5488ebcd4c7467e3de1ec
# ---
# # DeepAR implementations in GluonTS and PyTorchTS
#
# @author: <NAME>, https://github.com/kkuusisto
#
# This notebook served as complementary material for the [Berlin Time Series Meetup](https://www.meetup.com/Berlin-Time-Series-Analysis-Meetup/events/273248909/) hosted on Tue, Oct 13 7:00 PM GMT+2.
#
# The topic can be found in the event and was organised as a book club like session to explore Amazon's DeepAR model.
#
# This notebook combines two example notebooks from the GluonTS and PyTorchTS repositories, makes them consistent in order to compare both frameworks.
#
# - GluonTS: https://github.com/awslabs/gluon-ts/blob/master/examples/m5_gluonts_template.ipynb
# - PyTorchTS: https://github.com/zalandoresearch/pytorch-ts/blob/master/examples/m5.ipynb
#
# We are using the [M5 forecasting competition](https://www.kaggle.com/c/m5-forecasting-accuracy/overview) data, which is available on Kaggle (https://www.kaggle.com/c/m5-forecasting-accuracy/data). You first need to agree to the terms of the competition before being able to download the data. After you have done that, please copy the files into `../../data/m5-forecasting-accuracy`.
#
#
# ## Imports
# +
import mxnet as mx
from mxnet import gluon
import torch
import numpy as np
import pandas as pd
# %matplotlib inline
import matplotlib.pyplot as plt
import json
import os
from tqdm.autonotebook import tqdm
from pathlib import Path
# -
# We also define globally accessible variables, such as the prediction length and the input path for the M5 data. Note that `prediction_length` corresponds to the length of the validation/evaluation periods.
prediction_length = 28
context_length = prediction_length * 2
m5_input_path="../../data/m5-forecasting-accuracy"
# ## Reading the M5 data and transforming it to ListDataset
#
# First we need to convert the provided M5 data into a format that is readable by GluonTS and PyTorchTS. At this point we assume that the M5 data, which can be downloaded from Kaggle, is present under `m5_input_path`.
calendar = pd.read_csv(f'{m5_input_path}/calendar.csv')
sales_train_validation = pd.read_csv(f'{m5_input_path}/sales_train_validation.csv')
# ### Building dynamic features
# We start the data conversion process by building dynamic features (features that change over time, just like the target values). Here, we are mainly interested in the event indicators `event_type_1` and `event_type_2`. We will mostly drop dynamic time features as GluonTS will automatically add some of these as part of many models' transformation chains.
# +
cal_features = calendar.drop(
['date', 'wm_yr_wk', 'weekday', 'wday', 'month', 'year', 'event_name_1', 'event_name_2', 'd'],
axis=1
)
cal_features['event_type_1'] = cal_features['event_type_1'].apply(lambda x: 0 if str(x)=="nan" else 1)
cal_features['event_type_2'] = cal_features['event_type_2'].apply(lambda x: 0 if str(x)=="nan" else 1)
cal_features = cal_features.values.T
train_cal_features = cal_features[:,:-context_length-prediction_length]
test_cal_features = cal_features[:,:-context_length]
# We replicate the dynamic features for each sample in our training set
test_cal_features_list = [test_cal_features] * len(sales_train_validation)
train_cal_features_list = [train_cal_features] * len(sales_train_validation)
# -
# ### Building static features
# We then go on to build static features (features which are constant and series-specific). Here, we make use of all categorical features that are provided to us as part of the M5 data.
# +
state_ids = sales_train_validation["state_id"].astype('category').cat.codes.values
state_ids_un , state_ids_counts = np.unique(state_ids, return_counts=True)
store_ids = sales_train_validation["store_id"].astype('category').cat.codes.values
store_ids_un , store_ids_counts = np.unique(store_ids, return_counts=True)
cat_ids = sales_train_validation["cat_id"].astype('category').cat.codes.values
cat_ids_un , cat_ids_counts = np.unique(cat_ids, return_counts=True)
dept_ids = sales_train_validation["dept_id"].astype('category').cat.codes.values
dept_ids_un , dept_ids_counts = np.unique(dept_ids, return_counts=True)
item_ids = sales_train_validation["item_id"].astype('category').cat.codes.values
item_ids_un , item_ids_counts = np.unique(item_ids, return_counts=True)
stat_cat_list = [item_ids, dept_ids, cat_ids, store_ids, state_ids]
stat_cat = np.concatenate(stat_cat_list)
stat_cat = stat_cat.reshape(len(stat_cat_list), len(item_ids)).T
stat_cat_cardinalities = [len(item_ids_un), len(dept_ids_un), len(cat_ids_un), len(store_ids_un), len(state_ids_un)]
# -
# ### Transforming to ListDataset
# Finally, we can build both the training and the testing set from target values and both static and dynamic features. We implicitly "train_test_split" by including the window of length predition_length only in the test set.
# +
from gluonts.dataset.common import load_datasets, ListDataset
from gluonts.dataset.field_names import FieldName
num_test_samples = 1000 # For the purpose of demonstration ;)
df = sales_train_validation.drop(["id","item_id","dept_id","cat_id","store_id","state_id"], axis=1)
target_values = df.values
test_target_values = target_values.copy()
train_target_values = [ts[:-prediction_length] for ts in df.values]
m5_dates = [pd.Timestamp("2011-01-29", freq='1D') for _ in range(len(df))]
train_ds = ListDataset([
{
FieldName.TARGET: target,
FieldName.START: start,
FieldName.FEAT_DYNAMIC_REAL: fdr,
FieldName.FEAT_STATIC_CAT: fsc
}
for (target, start, fdr, fsc) in zip(train_target_values,
m5_dates,
train_cal_features_list,
stat_cat)
], freq="D")
test_ds = ListDataset([
{
FieldName.TARGET: target,
FieldName.START: start,
FieldName.FEAT_DYNAMIC_REAL: fdr,
FieldName.FEAT_STATIC_CAT: fsc
}
for (target, start, fdr, fsc) in zip(test_target_values[:num_test_samples],
m5_dates[:num_test_samples],
test_cal_features_list[:num_test_samples],
stat_cat[:num_test_samples])
], freq="D")
# -
# Just to be sure, we quickly verify that dataset format is correct and that our dataset does indeed contain the correct target values as well as dynamic and static features.
next(iter(train_ds))
# # Gluon-TS
# ### Estimator with Gluon-TS
#
# Having obtained our training and testing data, we can now create a GluonTS estimator. In our example we will use the `DeepAREstimator`, an autoregressive RNN which was developed primarily for the purpose of time series forecasting. Note however that you can use a variety of different estimators. Also, since GluonTS is mainly target at probabilistic time series forecasting, lots of different output distributions can be specified. In the M5 case, we think that the `NegativeBinomialOutput` distribution best describes the output.
#
# For a full list of available estimators and possible initialization arguments see https://gluon-ts.mxnet.io/api/gluonts/gluonts.model.html.
#
# For a full list of available output distributions and possible initialization arguments see https://gluon-ts.mxnet.io/api/gluonts/gluonts.distribution.html.
# +
from gluonts.model.deepar import DeepAREstimator
from gluonts.distribution.neg_binomial import NegativeBinomialOutput
from gluonts.trainer import Trainer
epochs = 2 # For the purpose of demonstration ;)
estimator = DeepAREstimator(
prediction_length=prediction_length,
freq="D",
distr_output = NegativeBinomialOutput(),
use_feat_dynamic_real=True,
use_feat_static_cat=True,
cardinality=stat_cat_cardinalities,
context_length=context_length,
trainer=Trainer(
learning_rate=1e-3,
epochs=epochs,
num_batches_per_epoch=50,
batch_size=32
)
)
predictor = estimator.train(train_ds)
# -
# ### Generating forecasts
#
# Once the estimator is fully trained, we can generate predictions for the test values. GluonTS comes with the make_evaluation_predictions function that automates all this procedure. Roughly, this module performs the following steps:
#
# - Removes the final window of length prediction_length of the dataset that we want to predict
# - The estimator uses the remaining dataset to predict (in the form of sample paths) the "future" window that was just removed
# - The module outputs a generator over the forecasted sample paths and a generator over the dataset
# +
from gluonts.evaluation.backtest import make_evaluation_predictions
forecast_it, ts_it = make_evaluation_predictions(
dataset=test_ds,
predictor=predictor,
num_samples=100
)
print("Obtaining time series conditioning values ...")
tss = list(tqdm(ts_it, total=len(test_ds)))
print("Obtaining time series predictions ...")
forecasts = list(tqdm(forecast_it, total=len(test_ds)))
# -
# ### Calculate performance metrics
#
# Using the test data, we can calculate a set of evaluation metrics
# +
from gluonts.evaluation import Evaluator
evaluator = Evaluator(quantiles=[0.5, 0.67, 0.95, 0.99])
agg_metrics, item_metrics = evaluator(iter(tss), iter(forecasts), num_series=len(test_ds))
pd.DataFrame.from_dict(agg_metrics, columns=['Metric'], orient='index').head(10)
# -
# ### Plotting sample predictions
#
# Finally, we can also visualize our predictions for some of the time series.
# +
num_plots = 3
def plot_prob_forecasts(ts_entry, forecast_entry, sample_id):
    """Plot the tail of one series with its probabilistic forecast.

    Shows the last 150 observations, the median prediction and the 90%/95%
    prediction intervals, with a vertical line marking the forecast start.
    `sample_id` is accepted for API compatibility but not used.
    """
    window = 150
    intervals = (90, 95)
    labels = ["observations", "median prediction"]
    labels += [f"{k}% prediction interval" for k in intervals][::-1]
    _, ax = plt.subplots(1, 1, figsize=(10, 7))
    ts_entry[-window:].plot(ax=ax)
    forecast_entry.plot(prediction_intervals=intervals, color='g')
    # Mark where the conditioning range ends and the forecast begins.
    ax.axvline(ts_entry.index[-prediction_length], color='b')
    plt.legend(labels, loc="upper left")
    plt.show()
    plt.clf()
print("Plotting time series predictions ...")
for i in range(len(test_ds)):
if i > num_plots - 1:
break
ts_entry = tss[i]
forecast_entry = forecasts[i]
plot_prob_forecasts(ts_entry, forecast_entry, i)
# -
# # PyTorch-TS
#
# Same as with Gluon-TS, we define an Estimator and train the model on our training data. The syntax is almost the same compared to Gluon-TS. However, under the hood, the Estimator is using PyTorch.
# +
from pts.model.deepar import DeepAREstimator
from pts.modules import NegativeBinomialOutput
from pts import Trainer
device = torch.device("cuda" if torch.cuda.is_available() else "cpu") # Set GPU usage here directly
epochs = 2 # For the purpose of demonstration ;)
estimator = DeepAREstimator(
prediction_length=prediction_length,
freq="D",
distr_output=NegativeBinomialOutput(),
use_feat_dynamic_real=True,
use_feat_static_cat=True,
cardinality=stat_cat_cardinalities,
context_length=context_length,
input_size=104,
trainer=Trainer(
learning_rate=1e-3,
epochs=2,
num_batches_per_epoch=50,
batch_size=32,
device=device,
)
)
predictor = estimator.train(train_ds)
# -
# ### Calculate performance metrics
#
# Same as for Gluon-TS, we can calculate a set of evaluation metrics
# +
from pts.evaluation import make_evaluation_predictions
forecast_it, ts_it = make_evaluation_predictions(
dataset=test_ds,
predictor=predictor,
num_samples=100,
)
print("Obtaining time series conditioning values ...")
tss = list(tqdm(ts_it, total=len(test_ds)))
print("Obtaining time series predictions ...")
forecasts = list(tqdm(forecast_it, total=len(test_ds)))
# -
# ### Calculate performance metrics
#
# Using the test data, we can calculate a set of evaluation metrics
# +
from pts.evaluation import Evaluator
evaluator = Evaluator()
agg_metrics, item_metrics = evaluator(iter(tss), iter(forecasts), num_series=len(test_ds))
pd.DataFrame.from_dict(agg_metrics, columns=['Metric'], orient='index').head(10)
# -
# ### Plotting sample predictions
# +
num_plots = 3
print("Plotting time series predictions ...")
for i in range(len(test_ds)):
if i > num_plots - 1:
break
ts_entry = tss[i]
forecast_entry = forecasts[i]
plot_prob_forecasts(ts_entry, forecast_entry, i)
# -
| python/deepAR/DeepAR GlueonTS PyTorchTS comparison.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# language: python
# name: python3
# ---
# # Yolo auf der Webcam
# ## 1. Import
import sys
import os, platform
import json
import numpy as np
import cv2
import ctypes
from matplotlib import pyplot as plt
from PIL import Image
from datetime import datetime
from qnn import TinierYolo
from qnn import utils
sys.path.append("/opt/darknet/python/")
from darknet import *
# %matplotlib inline
import IPython
# ## 2. Initialisierung
# +
classifier = TinierYolo()
classifier.init_accelerator()
net = classifier.load_network(json_layer="/usr/local/lib/python3.6/dist-packages/qnn/params/tinier-yolo-layers.json")
conv0_weights = np.load('/usr/local/lib/python3.6/dist-packages/qnn/params/tinier-yolo-conv0-W.npy', encoding="latin1")
conv0_weights_correct = np.transpose(conv0_weights, axes=(3, 2, 1, 0))
conv8_weights = np.load('/usr/local/lib/python3.6/dist-packages/qnn/params/tinier-yolo-conv8-W.npy', encoding="latin1")
conv8_weights_correct = np.transpose(conv8_weights, axes=(3, 2, 1, 0))
conv0_bias = np.load('/usr/local/lib/python3.6/dist-packages/qnn/params/tinier-yolo-conv0-bias.npy', encoding="latin1")
conv0_bias_broadcast = np.broadcast_to(conv0_bias[:,np.newaxis], (net['conv1']['input'][0],net['conv1']['input'][1]*net['conv1']['input'][1]))
conv8_bias = np.load('/usr/local/lib/python3.6/dist-packages/qnn/params/tinier-yolo-conv8-bias.npy', encoding="latin1")
conv8_bias_broadcast = np.broadcast_to(conv8_bias[:,np.newaxis], (125,13*13))
file_name_cfg = c_char_p("/usr/local/lib/python3.6/dist-packages/qnn/params/tinier-yolo-bwn-3bit-relu-nomaxpool.cfg".encode())
net_darknet = lib.parse_network_cfg(file_name_cfg)
# +
out_dim = net['conv7']['output'][1]
out_ch = net['conv7']['output'][0]
img_folder = './yoloimages/'
file_name_out = c_char_p("/home/xilinx/jupyter_notebooks/qnn/detection".encode())
file_name_probs = c_char_p("/home/xilinx/jupyter_notebooks/qnn/probabilities.txt".encode())
file_names_voc = c_char_p("/opt/darknet/data/voc.names".encode())
tresh = c_float(0.3)
tresh_hier = c_float(0.5)
darknet_path = c_char_p("/opt/darknet/".encode())
conv_output = classifier.get_accel_buffer(out_ch, out_dim)
# -
# ## 3. Loop
# +
# Main capture/inference loop: grab a webcam frame, run conv0 in software,
# offload the middle layers to the FPGA, run conv8 in software, then let
# darknet draw and print detections. NOTE(review): `while(1)` has no break —
# interrupting the kernel is the only way out.
vc = cv2.VideoCapture(0)
vc.set(cv2.CAP_PROP_FPS, 1)
while(1):
    start = time.time()
    #vc = cv2.VideoCapture(0)
    if vc.isOpened(): # try to get the first frame
        # drain a few buffered frames so we read the most recent one
        for i in range(7):
            vc.grab()
        is_capturing, frame_in = vc.read()
    # NOTE(review): if the camera is not opened, frame_in is never assigned
    # and the next line raises NameError — confirm the camera is present.
    frame = cv2.cvtColor(frame_in, cv2.COLOR_BGR2RGB) # makes the blues image look real colored
    cv2.imwrite('frame.jpg', frame_in)
    #vc.release()
    end = time.time()
    #print(end-start)
    start = time.time()
    # Preprocess: round-trip through darknet's loader to letterbox to 416x416
    img_file = 'frame.jpg'#os.path.join(img_folder, image_name)
    file_name = c_char_p(img_file.encode())
    img = load_image(file_name,0,0)
    img_letterbox = letterbox_image(img,416,416)
    # copy out of the C buffer before freeing the darknet images
    img_copy = np.copy(np.ctypeslib.as_array(img_letterbox.data, (3,416,416)))
    img_copy = np.swapaxes(img_copy, 0,2)
    free_image(img)
    free_image(img_letterbox)
    #print('Preprocessing')
    end = time.time()
    #print (end-start)
    start = time.time()
    #First convolution layer in sw
    if len(img_copy.shape)<4:
        img_copy = img_copy[np.newaxis, :, :, :]
    conv0_ouput = utils.conv_layer(img_copy,conv0_weights_correct,b=conv0_bias_broadcast,stride=2,padding=1)
    # clip to [0, 4] then quantize to 3 bits, matching the accelerator's input format
    conv0_output_quant = conv0_ouput.clip(0.0,4.0)
    conv0_output_quant = utils.quantize(conv0_output_quant/4,3)
    #print('First Conv')
    #Offload to hardware
    conv_input = classifier.prepare_buffer(conv0_output_quant*7);
    classifier.inference(conv_input, conv_output)
    conv7_out = classifier.postprocess_buffer(conv_output)
    #Last convolution layer in sw
    conv7_out = conv7_out.reshape(out_dim,out_dim,out_ch)
    conv7_out = np.swapaxes(conv7_out, 0, 1) # exp 1
    if len(conv7_out.shape)<4:
        conv7_out = conv7_out[np.newaxis, :, :, :]
    conv8_output = utils.conv_layer(conv7_out,conv8_weights_correct,b=conv8_bias_broadcast,stride=1)
    # hand darknet a raw float* view of the conv8 activations
    conv8_out = conv8_output.ctypes.data_as(ctypes.POINTER(ctypes.c_float))
    #print('done conv')
    end = time.time()
    #print (end-start)
    start = time.time()
    #Draw detection boxes
    lib.forward_region_layer_pointer_nolayer(net_darknet,conv8_out)
    lib.draw_detection_python(net_darknet, file_name, tresh, tresh_hier,file_names_voc, darknet_path, file_name_out, file_name_probs);
    #print('drawn boxes')
    #Display result: read back the probabilities file darknet wrote and show them sorted
    IPython.display.clear_output(1)
    file_content = open(file_name_probs.value,"r").read().splitlines()
    detections = []
    for line in file_content[0:]:
        name, probability = line.split(": ")
        detections.append((probability, name))
    # NOTE(review): probabilities are sorted as strings, not numbers — confirm
    # darknet writes fixed-width values if ordering matters.
    for det in sorted(detections, key=lambda tup: tup[0], reverse=True):
        print("class: {}\tprobability: {}".format(det[1], det[0]))
    res = Image.open(file_name_out.value.decode() + ".png")
    display(res)
    #print(detections)
    end = time.time()
    #print(end-start)
# -
# ## 4. Deinitialisierung
# +
# Release the accelerator and reset the PYNQ contiguous-memory allocator,
# then release the webcam handle opened in the loop cell above.
classifier.deinit_accelerator()
from pynq import Xlnk
xlnk = Xlnk();
xlnk.xlnk_reset()
# -
vc.release()
| notebooks/yolo-webcam.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: rocketPy
# language: python
# name: rocketpy
# ---
# # Example Usage of Simulation
#
# This file demonstrates the use of rocketPy's simulation environment.
#
# First we import some useful packages
# +
import numpy as np
import scipy as sp
import scipy.integrate as spint
import matplotlib.pyplot as plt
# and from rocket py we need simulation and solutions
from rocketPy.simulation import Simulation as Sim
from rocketPy.solution import Solution
# -
# We need a dynamic object to simulate, and we create this using a small class.
#
# This rocket is a one dimensional object.
# We define a few useful properties at creation, but then the functions take over.
#
# For any dynamic object you need the function ```dynamics```
# This function takes in a current time, state, and stage number and returns the rate of change of the state
#
# In addition to this, you can (optionally) define some staging functions. These staging functions define how the dynamic object can change between stages.
#
# For this example, a simple rocket is modelled. It will thrust upwards, coast, and then descend under a parachute. For simplicity, we only consider the rocket as a one dimensional object. The rocket will return to the ground using dual deployment, ie both a drogue chute and a main chute, each triggered at a different time.
#
# The drogue chute is deployed 7 seconds after apogee, and (to demonstrate the usage) jumps the position up by 1000m when it happens. This is a very powerful tool, since when staging a rocket you can imagine the mass of rocket to decrease by a step change, which would be difficult to model using other methods.
#
# The main chute will deploy at an altitude of 2500 m.
#
# Each of the staging functions have additional properties we need to specify.
#
# - terminal (boolean): Should the simulation run stop when this event triggers
# - direction (1 or -1): which way must the 0-crossing be for the trigger to occur
# - etc
class VerySimpleRocket():
    """A one-dimensional demo rocket: thrust for 4 s, coast to apogee, then
    descend under a drogue (stage 1) and a main chute (stage 2).

    The state vector is ``[altitude, velocity]``. Staging-event functions
    carry solver metadata (``terminal``, ``direction``, etc.) as attributes.
    """

    def __init__(self):
        self.m = 40      # mass [kg]
        self.T = 4000    # thrust [N], applied for the first 4 s of stage 0
        self.g = 9.81    # gravitational acceleration [m/s^2]
        self.y0 = np.array([0., 0.])  # initial [altitude, velocity]
        # combined drag terms (rho*Cd*A) for drogue and main chute
        self.rhoCDA1 = 0.05
        self.rhoCDA2 = 0.1
        self.stage_list = [0, 1, 2]
        self.staging_functions = [self.staging_deploy_drogue,
                                  self.staging_deploy_main,
                                  self.staging_landing]
        self.nominal_stages = [0, 1, 2]  # defines this as a nominal flight

    def staging_deploy_drogue(self, t, y, stage=0):
        """Event: apogee — vertical velocity crossing zero from above."""
        return y[1]
    staging_deploy_drogue.terminal = False
    staging_deploy_drogue.direction = -1
    staging_deploy_drogue.trigger_if_stage_in = [0]
    staging_deploy_drogue.possible_next_stages = [1, 2]
    staging_deploy_drogue.nominal_next_stage = 1
    staging_deploy_drogue.t_offset = 7  # stages 7 seconds after the apogee is detected
    staging_deploy_drogue.modify_state = lambda self, state: self.modify_state_drogue_deployed(state)

    def staging_deploy_main(self, t, y, stage=0):
        """Event: altitude falling through 2500 m."""
        return y[0] - 2500
    staging_deploy_main.terminal = False
    staging_deploy_main.direction = -1
    staging_deploy_main.trigger_if_stage_in = [0, 1]
    staging_deploy_main.possible_next_stages = [2]
    staging_deploy_main.nominal_next_stage = 2
    staging_deploy_main.t_offset = 0
    staging_deploy_main.modify_state = None

    def staging_landing(self, t, y, stage=0):
        """Event: touchdown — altitude falling through zero; ends the run."""
        return y[0]
    staging_landing.terminal = True
    staging_landing.direction = -1
    staging_landing.trigger_if_stage_in = [0, 1, 2]
    staging_landing.possible_next_stages = []
    staging_landing.nominal_next_stage = None
    staging_landing.t_offset = 0
    staging_landing.modify_state = None

    def modify_state_drogue_deployed(self, state):
        """Jump the altitude up by 1000 m when the drogue branch is taken
        (demonstrates step changes in state at a staging event)."""
        state[0] += 1000
        return state

    def dynamics(self, t, y, stage=0):
        """Return d/dt of ``[altitude, velocity]`` for the given stage."""
        if stage == 0:
            # powered ascent for the first 4 s, ballistic coast afterwards
            accel = self.T / self.m - self.g if t < 4 else -self.g
        elif stage in (1, 2):
            # descent under drogue (1) or main (2): quadratic drag opposing motion
            rhoCDA = self.rhoCDA1 if stage == 1 else self.rhoCDA2
            accel = -0.5 * rhoCDA * y[1] * abs(y[1]) / self.m - self.g
        else:
            raise ValueError
        return np.array([y[1], accel])
# Instantiate the rocket and the sim
r = VerySimpleRocket()
# BUG FIX: the import above is `from rocketPy.simulation import Simulation as Sim`,
# so the bare name `Simulation` is undefined here and raised a NameError.
s = Sim(r)
# Do a very simple sim, starting at stage 0.
sol=s.solve([0,600], r.y0, 0, user_events=r.staging_functions)
# The result object (from scipy.solve_ivp) is stored in sol.sols, as a list
sol
# Now simulate the nominal trajectory
nominal_sol = s.nominal_solve([0,6000], r.y0, 0)
# You can ask for the solution at some time, for instance at $$t = 5$$
nominal_sol.sol(5)
# so it's 1085 m up, with a speed of 358 m/s.
# Or you can plot it
# +
# t_min()/t_max() give the time bounds of the nominal solution; sample 500 points
t_range = np.linspace(nominal_sol.t_min(), nominal_sol.t_max(), 500)
# altitude vs time
plt.plot(t_range, nominal_sol.sol(t_range)[0])
plt.xlabel('t')
plt.ylabel('y')
plt.grid()
# velocity vs time on a second figure
plt.figure()
plt.plot(t_range, nominal_sol.sol(t_range)[1])
plt.xlabel('t')
plt.ylabel('v')
plt.grid()
# -
# The real magic is in simulating all possible outcomes
full_sol = s.full_solve([0,6000], r.y0, 0)
# full solve gives a list of all the possible simulations
full_sol
# number of possible outcomes
len(full_sol)
# Plot the solutions
# +
# Altitude vs time: nominal flight in black dots, every branch dashed.
t_range = np.linspace(nominal_sol.t_min(),nominal_sol.t_max(), 500)
plt.plot(t_range,nominal_sol.sol(t_range)[0], '.-k', label='Nominal')
for i, sol in enumerate(full_sol):
    # each branch has its own time span, so resample per solution
    t_range = np.linspace(sol.t_min(),sol.t_max(), 500)
    plt.plot(t_range,sol.sol(t_range)[0], '--',label=i)
plt.grid()
plt.xlabel('t')
plt.ylabel('y')
plt.legend()
# +
# Same comparison for velocity vs time.
t_range = np.linspace(nominal_sol.t_min(),nominal_sol.t_max(), 500)
plt.plot(t_range,nominal_sol.sol(t_range)[1], '.-k', label='Nominal')
for i, sol in enumerate(full_sol):
    t_range = np.linspace(sol.t_min(),sol.t_max(), 500)
    plt.plot(t_range,sol.sol(t_range)[1], '--',label=i)
plt.grid()
plt.xlabel('t')
plt.ylabel('v')
plt.legend()
# -
# sometimes it's easier to see it in the state space
# +
# Phase-space (altitude vs velocity) view: nominal flight plus every branch.
t_range = np.linspace(nominal_sol.t_min(), nominal_sol.t_max(), 500)
plt.plot(nominal_sol.sol(t_range)[0], nominal_sol.sol(t_range)[1], '.-k', label='Nominal')
# enumerate replaces the manual counter (i = 0; ...; i += 1)
for i, sol in enumerate(full_sol):
    t_range = np.linspace(sol.t_min(), sol.t_max(), 500)
    plt.plot(sol.sol(t_range)[0], sol.sol(t_range)[1], label=i)
#plt.xlim([0,50])
plt.grid()
plt.xlabel('y')
plt.ylabel('v')
plt.legend()
# -
# or as a list to see what is happening in each
# +
# One row of (altitude, velocity) axes per branch, with the nominal flight
# overlaid on every row for reference.
fig, axes = plt.subplots(len(full_sol),2, sharex='col', sharey='col', figsize=(10,15), squeeze=False)
# nominal sampling does not change per branch — hoisted out of the loop
t_range_nom = np.linspace(nominal_sol.t_min(), nominal_sol.t_max(), 500)
# enumerate replaces the manual counter (i = 0; ...; i += 1)
for i, sol in enumerate(full_sol):
    axes[i][0].plot(t_range_nom, nominal_sol.sol(t_range_nom)[0], '--k', label='Nominal')
    axes[i][1].plot(t_range_nom, nominal_sol.sol(t_range_nom)[1], '--k', label='Nominal')
    t_range = np.linspace(sol.t_min(), sol.t_max(), 500)
    axes[i][0].plot(t_range, sol.sol(t_range)[0], label=i)
    axes[i][1].plot(t_range, sol.sol(t_range)[1], label=i)
    # identical decoration for both columns, only the y-label differs
    for col, ylab in ((0, 'y'), (1, 'v')):
        axes[i][col].grid(True)
        axes[i][col].set_xlabel('t')
        axes[i][col].set_ylabel(ylab)
        axes[i][col].legend()
plt.tight_layout()
| docs/examples/staging_demonstrator-test.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# language: python
# name: python3
# ---
# # Learning Rate Finder
# + hide_input=true
# %matplotlib inline
from fastai.gen_doc.nbdoc import *
from fastai import *
from fastai.vision import *
from fastai.callbacks import *
# -
# Learning rate finder plots lr vs loss relationship for a [`Learner`](/basic_train.html#Learner). The idea is to reduce the amount of guesswork on picking a good starting learning rate.
#
# **Overview:**
# 1. First run lr_find `learn.lr_find()`
# 2. Plot the learning rate vs loss `learn.recorder.plot()`
# 3. Pick a learning rate before it diverges then start training
#
# **Technical Details:** (first [described](https://arxiv.org/abs/1506.01186) by Leslie N. Smith)
# >Train [`Learner`](/basic_train.html#Learner) over a few iterations. Start with a very low `start_lr` and change it at each mini-batch until it reaches a very high `end_lr`. [`Recorder`](/basic_train.html#Recorder) will record the loss at each iteration. Plot those losses against the learning rate to find the optimal value before it diverges.
# ## Choosing a good learning rate
# For a more intuitive explanation, please check out [Sylvain Gugger's post](https://sgugger.github.io/how-do-you-find-a-good-learning-rate.html)
# Download the MNIST sample and build a data bunch for the demo.
path = untar_data(URLs.MNIST_SAMPLE)
data = ImageDataBunch.from_folder(path)
# A fresh, untrained Learner on each call, so every .fit() below starts from scratch.
def simple_learner(): return Learner(data, simple_cnn((3,16,16,2)), metrics=[accuracy])
learn = simple_learner()
# First we run this command to launch the search:
# + hide_input=true
show_doc(Learner.lr_find)
# -
# stop_div=False: run all 200 iterations even if the loss diverges
learn.lr_find(stop_div=False, num_it=200)
# Then we plot the loss versus the learning rates. We're interested in finding a good order of magnitude of learning rate, so we plot with a log scale.
learn.recorder.plot()
# Then, we choose a value that is approximately in the middle of the sharpest downward slope. In this case, training with 3e-2 looks like it should work well:
simple_learner().fit(2, 3e-2)
# Don't just pick the minimum value from the plot!:
# NOTE(review): `learn` is rebound but the next line fits a *separate* fresh
# learner — presumably intentional so each demo fit starts untrained; confirm.
learn = simple_learner()
simple_learner().fit(2, 1e-0)
# Picking a value before the downward slope results in slow training:
learn = simple_learner()
simple_learner().fit(2, 1e-3)
# + hide_input=true
show_doc(LRFinder)
# + hide_input=true
show_doc(LRFinder.on_train_end)
# + hide_input=true
show_doc(LRFinder.on_batch_end)
# + hide_input=true
show_doc(LRFinder.on_train_begin)
# + hide_input=true
show_doc(LRFinder.on_epoch_end)
| docs_src/callbacks.lr_finder.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# language: python
# name: python3
# ---
# ## Mean and median imputation
#
# Imputation is the act of replacing missing data with statistical estimates of the missing values. The goal of any imputation technique is to produce a **complete dataset** that can then be then used for machine learning.
#
# Mean/median imputation consists of replacing all occurrences of missing values (NA) within a variable by the mean (if the variable has a Gaussian distribution) or median (if the variable has a skewed distribution).
#
#
# ### Assumptions
#
# Mean/median imputation has the assumption that the data are missing completely at random (MCAR). If this is the case, we can think of replacing the NA with the most frequent occurrence of the variable, which is the mean if the variable has a Gaussian distribution, or the median otherwise.
#
# The rationale is to replace the population of missing values with the most frequent value, since this is the most likely occurrence.
#
# ### Advantages
#
# - Easy to implement
# - Fast way of obtaining complete datasets
#
# ### Limitations
#
# - Distortion of original variance
# - Distortion of covariance with remaining variables within the dataset
#
# When replacing NA with the mean or median, the variance of the variable will be distorted if the number of NA is large with respect to the total number of observations (since the imputed values do not differ from the mean or from each other), therefore leading to underestimation of the variance.
#
# In addition, estimates of covariance and correlations with other variables in the dataset may also be affected. This is because we may be destroying intrinsic correlations since the mean/median that now replace NA will not preserve the relation with the remaining variables.
#
#
# ### Final note
# Replacement of NA with mean/median is widely used in the data science community and in various data science competitions. See for example the winning solution of the KDD 2009 cup: "Winning the KDD Cup Orange Challenge with Ensemble Selection" (http://www.mtome.com/Publications/CiML/CiML-v3-book.pdf).
#
# Typically, mean/median imputation is done together with adding a variable to capture those observations where the data was missing (see lecture "Creating a new variable with the missing data"), thus covering 2 angles: if the data was missing completely at random, this would be contemplated by the mean imputation, and if it wasn't this would be captured by the additional variable.
#
# In addition, both methods are extremely straightforward to implement, and therefore are a top choice in data science competitions.
# ===============================================================================
#
# ## Real Life example:
#
# ### Predicting Survival on the Titanic: understanding society behaviour and beliefs
#
# Perhaps one of the most infamous shipwrecks in history, the Titanic sank after colliding with an iceberg, killing 1502 out of 2224 people on board. Interestingly, by analysing the probability of survival based on few attributes like gender, age, and social status, we can make very accurate predictions on which passengers would survive. Some groups of people were more likely to survive than others, such as women, children, and the upper-class. Therefore, we can learn about the society priorities and privileges at the time.
#
# ### Predicting Sale Price of Houses
#
# The problem at hand aims to predict the final sale price of homes based on different explanatory variables describing aspects of residential homes. Predicting house prices is useful to identify fruitful investments, or to determine whether the price advertised for a house is over or underestimated, before making a buying judgment.
#
# =====================================================================================
#
# In the following cells, I will demonstrate mean/median imputation using the Titanic and House Price datasets from Kaggle.
#
# If you haven't downloaded the datasets yet, in the lecture "Guide to setting up your computer" in section 1, you can find the details on how to do so.
# +
import pandas as pd
import numpy as np
import matplotlib.pyplot as plt
# BUG FIX: a bare `% matplotlib inline` is a SyntaxError when this jupytext
# script is executed as Python; jupytext's convention comments magics out.
# %matplotlib inline
# for regression problems
from sklearn.linear_model import LinearRegression
from sklearn.ensemble import RandomForestRegressor
# for classification
from sklearn.linear_model import LogisticRegression
from sklearn.ensemble import RandomForestClassifier
from sklearn.svm import SVC
# to split and standardize the datasets
from sklearn.model_selection import train_test_split
from sklearn.preprocessing import StandardScaler
# to evaluate regression models
from sklearn.metrics import mean_squared_error
# to evaluate classification models
from sklearn.metrics import roc_auc_score
import warnings
warnings.filterwarnings('ignore')
# -
# ## Titanic
# +
# load the Titanic Dataset with a few variables for demonstration
data = pd.read_csv('titanic.csv', usecols = ['Age', 'Fare','Survived'])
data.head()
# +
# let's look at the percentage of NA (fraction of missing values per column)
data.isnull().mean()
# -
# ### Imputation important
#
# Imputation should be done over the training set, and then propagated to the test set. This means that the mean/median to be used to fill missing values both in train and test set, should be extracted from the train set only. And this is to avoid overfitting.
# +
# let's separate into training and testing set
# NOTE(review): the full frame (including Survived) is passed as X — the
# target stays among the features; downstream cells select columns explicitly.
X_train, X_test, y_train, y_test = train_test_split(data, data.Survived, test_size=0.3,
                                                    random_state=0)
X_train.shape, X_test.shape
# +
# let's make a function to create 2 variables from Age:
# one filling NA with median, and another one filling NA with zeroes
def impute_na(df, variable, median):
    """Add two imputed copies of *variable* to *df* (modified in place):
    '<variable>_median' with NA replaced by *median*, and
    '<variable>_zero' with NA replaced by 0."""
    for suffix, fill_value in (('_median', median), ('_zero', 0)):
        df[variable + suffix] = df[variable].fillna(fill_value)
# -
# Impute Age using the *training-set* median, then propagate to the test set.
median = X_train.Age.median()
median
impute_na(X_train, 'Age', median)
X_train.head(15)
impute_na(X_test, 'Age', median)
# #### Mean/median imputation alters the variance of the original distribution of the variable
# +
# we can see a change in the variance after imputation
# NOTE(review): .std() is the standard deviation, not the variance — either
# the labels or the method should change; left as-is to preserve output.
print('Original Variance: ', X_train['Age'].std())
print('Variance after median imputation: ', X_train['Age_median'].std())
# -
# we can see that the distribution has changed slightly with now more values accumulating towards the median
fig = plt.figure()
ax = fig.add_subplot(111)
X_train['Age'].plot(kind='kde', ax=ax)
X_train.Age_median.plot(kind='kde', ax=ax, color='red')
lines, labels = ax.get_legend_handles_labels()
ax.legend(lines, labels, loc='best')
# As mentioned above, the median imputation distorts the original distribution of the variable Age. The transformed variable shows more values around the median value.
# filling NA with zeroes creates a peak of population around 0, as expected
fig = plt.figure()
ax = fig.add_subplot(111)
X_train['Age'].plot(kind='kde', ax=ax)
X_train.Age_zero.plot(kind='kde', ax=ax, color='red')
lines, labels = ax.get_legend_handles_labels()
ax.legend(lines, labels, loc='best')
# Filling NA with 0s also distorts the distribution of the original variable, generating an accumulation of values around 0. We will see in the next lecture a method of NA imputation that preserves variable distribution.
# ### Machine learning model performance on different imputation methods
#
# #### Logistic Regression
# +
# Let's compare the performance of Logistic Regression using Age filled with zeros or alternatively the median
# model on NA imputed with zeroes
logit = LogisticRegression(random_state=44, C=1000) # c big to avoid regularization
logit.fit(X_train[['Age_zero', 'Fare']], y_train)
print('Train set zero imputation')
pred = logit.predict_proba(X_train[['Age_zero', 'Fare']])
print('Logistic Regression roc-auc: {}'.format(roc_auc_score(y_train, pred[:,1])))
print('Test set zero imputation')
pred = logit.predict_proba(X_test[['Age_zero', 'Fare']])
print('Logistic Regression roc-auc: {}'.format(roc_auc_score(y_test, pred[:,1])))
print()
# model on NA imputed with median
logit = LogisticRegression(random_state=44, C=1000) # c big to avoid regularization
logit.fit(X_train[['Age_median', 'Fare']], y_train)
print('Train set median imputation')
pred = logit.predict_proba(X_train[['Age_median', 'Fare']])
print('Logistic Regression roc-auc: {}'.format(roc_auc_score(y_train, pred[:,1])))
print('Test set median imputation')
pred = logit.predict_proba(X_test[['Age_median', 'Fare']])
print('Logistic Regression roc-auc: {}'.format(roc_auc_score(y_test, pred[:,1])))
# -
# We see that median imputation leads to better performance of the logistic regression. Why?
# Compare survival rates: small Age values (children) predict survival, which
# zero-imputation masks by mixing imputed adults in with real children.
print('Average total survival:', X_train.Survived.mean())
print('Average real survival of children: ', X_train[X_train.Age<15].Survived.mean())
print('Average survival of children when using Age imputed with zeroes: ', X_train[X_train.Age_zero<15].Survived.mean())
print('Average survival of children when using Age imputed with median: ', X_train[X_train.Age_median<15].Survived.mean())
# Children were more likely to survive the catastrophe (0.57 for children vs 0.38 for the entire Titanic). Thus, smaller values of Age are a good indicator of survival.
#
# When we replace NA with zeroes, we are masking the predictive power of Age. After zero imputation it looks like children did not have a greater chance of survival, and therefore the model loses predictive power.
#
# On the other hand, replacing NA with the median, preserves the predictive power of the variable Age, as smaller Age values will favour survival.
# #### Support Vector Machine
# +
# Let's compare the performance of SVM using Age filled with zeros or alternatively the median
SVM_model = SVC(random_state=44, probability=True, max_iter=-1, kernel='linear',)
SVM_model.fit(X_train[['Age_zero', 'Fare']], y_train)
print('Train set zero imputation')
pred = SVM_model.predict_proba(X_train[['Age_zero', 'Fare']])
# BUG FIX: these four labels said "Logistic Regression" (copy-paste from the
# previous cell) although the scores are for the SVM model.
print('SVM roc-auc: {}'.format(roc_auc_score(y_train, pred[:,1])))
print('Test set zero imputation')
pred = SVM_model.predict_proba(X_test[['Age_zero', 'Fare']])
print('SVM roc-auc: {}'.format(roc_auc_score(y_test, pred[:,1])))
print()
SVM_model = SVC(random_state=44, probability=True, max_iter=-1, kernel='linear')
SVM_model.fit(X_train[['Age_median', 'Fare']], y_train)
print('Train set median imputation')
pred = SVM_model.predict_proba(X_train[['Age_median', 'Fare']])
print('SVM roc-auc: {}'.format(roc_auc_score(y_train, pred[:,1])))
print('Test set median imputation')
pred = SVM_model.predict_proba(X_test[['Age_median', 'Fare']])
print('SVM roc-auc: {}'.format(roc_auc_score(y_test, pred[:,1])))
print()
# -
# For SVM as well, median imputation leads to better performance of the model, compared to replacing NA with zeroes.
#
# #### Random Forests
# +
# Let's compare the performance of Random Forests using Age filled with zeros or alternatively the median
rf = RandomForestClassifier(n_estimators=100, random_state=39, max_depth=3)
rf.fit(X_train[['Age_zero', 'Fare']], y_train)
print('Train set zero imputation')
pred = rf.predict_proba(X_train[['Age_zero', 'Fare']])
print('Random Forests roc-auc: {}'.format(roc_auc_score(y_train, pred[:,1])))
print('Test set zero imputation')
pred = rf.predict_proba(X_test[['Age_zero', 'Fare']])
# NOTE(review): this label has extra words ("zero imputation") compared with
# the other three — cosmetic inconsistency, kept to preserve output.
print('Random Forests zero imputation roc-auc: {}'.format(roc_auc_score(y_test, pred[:,1])))
print()
rf = RandomForestClassifier(n_estimators=100, random_state=39, max_depth=3)
rf.fit(X_train[['Age_median', 'Fare']], y_train)
print('Train set median imputation')
pred = rf.predict_proba(X_train[['Age_median', 'Fare']])
print('Random Forests roc-auc: {}'.format(roc_auc_score(y_train, pred[:,1])))
print('Test set median imputation')
pred = rf.predict_proba(X_test[['Age_median', 'Fare']])
print('Random Forests roc-auc: {}'.format(roc_auc_score(y_test, pred[:,1])))
print()
# -
# Random Forests, as well as SVMs, perform better with median imputation, compared with replacing with zeroes.
# ## House Sale Dataset
# +
# we are going to train a model on the following variables,
# they contain the 3 numerical variables with NA
# (LotFrontage, MasVnrArea, GarageYrBlt) plus the target SalePrice
cols_to_use = ['OverallQual', 'TotalBsmtSF', '1stFlrSF', 'GrLivArea','WoodDeckSF', 'BsmtUnfSF',
               'LotFrontage', 'MasVnrArea', 'GarageYrBlt', 'SalePrice']
# +
# let's load the House Sale Price dataset
data = pd.read_csv('houseprice.csv', usecols=cols_to_use)
print(data.shape)
data.head()
# -
# let's look at the fraction of missing values in each variable
data[cols_to_use].isnull().mean()
# There are 4 variables with missing data.
# +
# let's separate into training and testing set
X_train, X_test, y_train, y_test = train_test_split(data, data.SalePrice, test_size=0.3,
                                                    random_state=0)
X_train.shape, X_test.shape
# +
# let's impute the NA with the median or comparatively, zeros
# remember that we need to impute with the median for the train set, and then propagate to test set
# we use the function that we made a few cells ago.
median = X_train.LotFrontage.median()
impute_na(X_train, 'LotFrontage', median)
impute_na(X_test, 'LotFrontage', median)
# -
median = X_train.MasVnrArea.median()
impute_na(X_train, 'MasVnrArea', median)
impute_na(X_test, 'MasVnrArea', median)
median = X_train.GarageYrBlt.median()
impute_na(X_train, 'GarageYrBlt', median)
impute_na(X_test, 'GarageYrBlt', median)
# create a list with the untransformed columns (drop the target first)
cols_to_use.remove('SalePrice')
cols_to_use
# create a list with the transformed columns: swap each imputed variable for
# its '_zero' / '_median' counterpart, keeping the rest unchanged
cols_zero = [col+'_zero' if col in ['LotFrontage', 'MasVnrArea', 'GarageYrBlt'] else col for col in cols_to_use]
cols_median = [col+'_median' if col in ['LotFrontage', 'MasVnrArea', 'GarageYrBlt'] else col for col in cols_to_use]
cols_median
# ### Machine learning model performance
# #### Linear Regression
# +
# Let's compare the performance of Linear Regression on zero vs median imputation of variables
linreg = LinearRegression()
linreg.fit(X_train[cols_zero], y_train)
print('Train set zero imputation')
pred = linreg.predict(X_train[cols_zero])
print('Linear Regression mse: {}'.format(mean_squared_error(y_train, pred)))
print('Test set zero imputation')
pred = linreg.predict(X_test[cols_zero])
print('Linear Regression mse: {}'.format(mean_squared_error(y_test, pred)))
print()
linreg = LinearRegression()
linreg.fit(X_train[cols_median], y_train)
print('Train set median imputation')
pred = linreg.predict(X_train[cols_median])
print('Linear Regression mse: {}'.format(mean_squared_error(y_train, pred)))
print('Test set median imputation')
pred = linreg.predict(X_test[cols_median])
print('Linear Regression mse: {}'.format(mean_squared_error(y_test, pred)))
print()
# -
# Here, models trained on the dataset in which missing data was replaced by the median show a worse predictive performance. If we look at the mean squared error of the test sets, we see that when we use median imputation, the difference between the estimated and real values is bigger (bigger mse).
#
# This suggests that the missingness of the data is a good indicator of house price.
# NOTE(review): labels say "average price" but the code computes .median() —
# left as-is to preserve output; either the labels or the method should change.
print('House average price when data is missing: ', data[(data.LotFrontage.isnull())|(data.GarageYrBlt.isnull())|(data.MasVnrArea.isnull())]['SalePrice'].median())
print('House average price on complete cases: ', data.dropna()['SalePrice'].median())
print('House average price when LotFrontage is missing: ', data[(data.LotFrontage.isnull())]['SalePrice'].median())
print('House average price when GarageYrBlt is missing: ', data[(data.GarageYrBlt.isnull())]['SalePrice'].median())
print('House average price when MasVnrArea is missing: ', data[(data.MasVnrArea.isnull())]['SalePrice'].median())
# We can see that Houses for which the LotFrontage is missing are more expensive than the mean house. Similarly, houses where MasVnrArea is missing are also more expensive than the mean house. On the other hand, houses where GarageYrBlt is missing are substantially cheaper than the mean house.
#
# Therefore, missingness in these variables is a strong indicator of price: missingness would take the price up or down, and thus replacing by zero (and thereby flagging the missingness) leads to better performance.
# **That is all for this demonstration. I hope you enjoyed the notebook, and see you in the next one.**
| feature_engineering/05.2_Mean_and_median_imputation.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: py3
# language: python
# name: py3
# ---
class Solution:
    def mySqrt(self, x: int) -> int:
        """Return floor(sqrt(x)) for x >= 0 via binary search on [0, x].

        `best` tracks the largest mid whose square does not exceed x, so the
        floor is returned even when x is not a perfect square.
        """
        lo, hi = 0, x
        best = 0
        while lo <= hi:
            mid = (lo + hi) // 2
            square = mid * mid
            if square == x:
                return mid  # exact square root found
            if square < x:
                best = mid          # mid is a valid floor candidate
                lo = mid + 1        # try larger values
            else:
                hi = mid - 1        # mid*mid overshot; search lower half
        return best
# quick sanity check on the edge case: floor(sqrt(0)) should be 0
s = Solution()
s.mySqrt(0)
| algorithms/69-sqrt(x).ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# language: python
# name: python3
# ---
# +
# Demo of the project-local stock_transformer module against the AAPL ticker.
from stock_transformer import Stock_Transformer, Standard_Indicator
st = Stock_Transformer('aapl')
company_info = st.get_company_info()
company_info
# -
# one year of price history as a pandas DataFrame
df_pd = st.get_stock_data_pandas(period='1y')
df_pd
# explicit date range instead of a trailing period
df_pd_with_defined_range = st.get_stock_data_pandas(start = '2019-01-01', end = '2019-05-30')
df_pd_with_defined_range
# same data as (header, numpy array) — presumably column names + values; confirm in module docs
header, df_np = st.get_stock_data_np()
header
df_np
# 5-sample moving average over two months of data
si = Standard_Indicator('aapl', period = '2mo')
ma = si.moving_average(5)
ma
# plot several moving-average windows at once
windows = [2,3,5,10]
si.ma_plot(windows)
| Notebooks/stock_query_example.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# language: python
# name: python3
# ---
# + [markdown] pycharm={"name": "#%% md\n"}
# # __Clustering and analysing Toronto data__
# -
# **Daggy1234**
#
# _Aspiring Data Scientist_
#
# ### Extracting data from Wikipedia
# ---
# We will use the wikipedia page
# [Here](https://en.wikipedia.org/wiki/List_of_postal_codes_of_Canada:_M) to load the tabluar data into a pandas dataframe. We will use the libraries of requests, BeautifulSoup and pandas to create a dataframe.
# + pycharm={"name": "#%%\n"}
import pandas as pd
from bs4 import BeautifulSoup
import requests
# + pycharm={"name": "#%%\n"}
# get the data: scrape the postal-code table from Wikipedia into a DataFrame
y = requests.get('https://en.wikipedia.org/wiki/List_of_postal_codes_of_Canada:_M')
text = y.text
soup = BeautifulSoup(text,'html.parser')
tb = str(soup.find('table'))
df = pd.read_html(tb)
df = df[0]
df
# we have officially obtained all the tabular data
# + pycharm={"name": "#%%\n"}
# we will now process data: drop unassigned boroughs and backfill neighborhoods
# BUG FIX: dfa was a slice of df, and the chained in-place fillna
# (dfa['Neighborhood'].fillna(..., inplace=True)) raises SettingWithCopyWarning
# and, under pandas copy-on-write, silently does nothing. Take an explicit
# copy and assign the filled column back.
dfa = df[df.Borough != 'Not assigned'].copy()
dfa['Neighborhood'] = dfa['Neighborhood'].fillna(dfa['Borough'])
dfa.reset_index(inplace=True)
dfa
# + pycharm={"name": "#%%\n"}
# latitude/longitude per postal code, hosted by the course provider
ll = pd.read_csv('https://cocl.us/Geospatial_data')
ll.head()
# + pycharm={"name": "#%%\n"}
# we will now merge the scraped table with the coordinates on 'Postal Code'
df = pd.merge(dfa,ll,on='Postal Code')
# drop the 'index' column left over from reset_index in the previous cell
df = df.drop('index',axis=1)
df
# + [markdown] pycharm={"name": "#%% md\n"}
# ### Mapping the values
# ---
# Using folium we will create a vivid map of the toronto area. marking all the areas!
#
#
#
# + pycharm={"name": "#%%\n"}
# find areas in Toronto by filtering boroughs whose name contains "Toronto"
dft = df[df['Borough'].str.contains('Toronto',regex=False)]
dft
# + pycharm={"name": "#%%\n"}
import folium
# map centred on downtown Toronto, one circle marker per neighborhood
map_toronto = folium.Map(location=[43.651070,-79.347015],zoom_start=12)
for lat,long,b,n in zip(dft['Latitude'],dft['Longitude'],dft['Borough'],dft['Neighborhood']):
    label = '{}, {}'.format(n, b)
    label = folium.Popup(label, parse_html=True)
    folium.CircleMarker(
        [lat,long],
        radius=3,
        popup=label,
        color='blue',
        fill=False,
        fill_color='#3186cc',
        fill_opacity=1.0,
        parse_html=False).add_to(map_toronto)
map_toronto
# + [markdown] pycharm={"name": "#%% md\n"}
# ### Machine Learning
# ----
# Lets use Kmeans clustering to cluster the data into similar subsets and plot them on the map
#
# we will use latitute and longitude
# + pycharm={"name": "#%%\n"}
from sklearn.cluster import KMeans
# Cluster the Toronto neighborhoods into k groups using only their
# latitude/longitude columns (all non-numeric columns are dropped).
k = 4
# axis=1 must be a keyword: positional axis in drop() is removed in pandas 2.0
toronto_clustering = dft.drop(['Postal Code','Borough','Neighborhood'],axis=1)
kmeans = KMeans(n_clusters = k,random_state=0).fit(toronto_clustering)
kmeans.labels_
# work on a copy so insert() does not warn about mutating a slice of df
dft = dft.copy()
dft.insert(0, 'Cluster Labels', kmeans.labels_)
dft
# + pycharm={"name": "#%%\n"}
import numpy as np
import matplotlib.cm as cm
import matplotlib.colors as colors
# Plot the clustered neighborhoods on a new map, one color per cluster label.
map_clusters = folium.Map(location=[43.651070,-79.347015],zoom_start=12,tiles='Stamen Toner')
x = np.arange(k)
ys = [i + x + (i*x)**2 for i in range(k)]  # only len(ys) == k is used below
colors_array = cm.rainbow(np.linspace(0, 1, len(ys)))  # k evenly spaced colors
rainbow = [colors.rgb2hex(i) for i in colors_array]
markers_colors = []
for lat, lon, neighbourhood, cluster in zip(dft['Latitude'], dft['Longitude'], dft['Neighborhood'], dft['Cluster Labels']):
    label = folium.Popup(' Cluster ' + str(cluster), parse_html=True)
    folium.CircleMarker(
        [lat, lon],
        radius=5,
        popup=label,
        # NOTE(review): cluster-1 maps label 0 to the *last* color; it works
        # because of negative indexing but looks unintended — confirm.
        color=rainbow[cluster-1],
        fill=True,
        fill_color=rainbow[cluster-1],
        fill_opacity=0.7).add_to(map_clusters)
map_clusters
# + pycharm={"name": "#%%\n"}
| analysingtorontodata.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Environment (conda_pytorch_p36)
# language: python
# name: conda_pytorch_p36
# ---
# %load_ext autoreload
# %autoreload 2
# %matplotlib inline
import matplotlib.pyplot as plt
import numpy as np
import torch
import random
device = 'cuda' if torch.cuda.is_available() else 'cpu'
import os, sys
opj = os.path.join
from tqdm import tqdm
from functools import partial
import acd
from copy import deepcopy
sys.path.append('..')
from transforms_torch import wavelet_filter
import transform_wrappers
sys.path.append('../dsets/mnist')
import dset
from model import Net, Net2c
from util import *
from torch import nn
from style import *
from pytorch_wavelets import DWTForward, DWTInverse
from captum.attr import *
from knockout import *
import pywt
from pywt._doc_utils import wavedec2_keys, draw_2d_wp_basis
import warnings
warnings.filterwarnings("ignore")
# +
# parse the default configuration/arguments for the MNIST experiment
args = dset.get_args()
# build the MNIST train/test DataLoaders on the chosen device
train_loader, test_loader = dset.load_data(args.batch_size, args.test_batch_size, device)
# -
# # load model
# wavelet transform: 3-level forward DWT (db4, symmetric padding) + inverse
xfm = DWTForward(J=3, mode='symmetric', wave='db4')
ifm = DWTInverse(mode='symmetric', wave='db4')
t = lambda x: xfm(x)
transform_i = transform_wrappers.modularize(lambda x: ifm(x))
# presumably filters coefficient band idx=2 before reconstructing — see
# transforms_torch.wavelet_filter for the exact semantics
transformer = lambda x: wavelet_filter(x, t, transform_i, idx=2)
# +
# load the pretrained 2-class network (uncomment the line below to retrain)
# train_Net2c(train_loader, args, transformer, save_path=opj('models/wt','net2c_' + str(0) + '.pth'))
model = Net2c().to(device)
model.load_state_dict(torch.load(opj('models/wt','net2c_' + str(0) + '.pth'), map_location=device))
# evaluate on the test set; the 2* factor suggests each test image appears
# twice (original + filtered) — confirm against test_Net2c
test_loss, correct = test_Net2c(test_loader, model, transformer)
print('\nTest set: Average loss: {:.4f}, Accuracy: {}/{} ({:.0f}%)'.format(
    test_loss, correct, 2*len(test_loader.dataset),
    100. * correct / (2*len(test_loader.dataset))))
# -
# # scores in wt space
# +
def transform_i_re(x):
    '''Inverse-transform a flat coefficient tuple.

    Regroups the flat (approx, d1, d2, d3) sequence into the nested
    (approx, (d1, d2, d3)) layout that transform_i expects and applies it.
    '''
    return transform_i((x[0], tuple(x[1:4])))
def wv_reshape(x):
    '''Flatten a 4-tuple of wavelet coefficient tensors into one 2-D array.

    x : list or tuple of tensors — an approximation tensor followed by three
        detail tensors. Each tensor is squeezed and moved to a numpy array;
        the detail levels are reversed to match the ordering expected by
        pywt.coeffs_to_array, which lays them out as a single image-like array.
    '''
    approx, *details = [coeff.squeeze().detach().cpu().numpy() for coeff in x]
    coeffs = [approx]
    for level in reversed(details):
        coeffs.append((level[0], level[1], level[2]))
    arr, _ = pywt.coeffs_to_array(coeffs)
    return arr
# -
# wavelet transform — same setup as above, but this time moved onto `device`
xfm = DWTForward(J=3, mode='symmetric', wave='db4').to(device)
ifm = DWTInverse(mode='symmetric', wave='db4').to(device)
t = lambda x: xfm(x)
transform_i = transform_wrappers.modularize(lambda x: ifm(x))
transformer = lambda x: wavelet_filter(x, t, transform_i, idx=2)
# grab one test image and run it through the wavelet transform
# (builtin next() replaces the Python-2-style .next(), which is not
# available on modern iterators)
x, _ = next(iter(test_loader))
x = x[0:1].to(device)  # keep batch dimension: a single 1-image batch
x_t = t(x)  # (approximation tensor, [detail tensors per level])
print('Shape of wavelet coeffs\n', x_t[0].shape, x_t[1][0].shape, x_t[1][1].shape,x_t[1][2].shape)
# +
# flatten the nested (approx, (d1, d2, d3)) coefficients into a flat 4-tuple
a, (b, c, d) = x_t
# enable gradients on each coefficient tensor so we can attribute to them
a.requires_grad, b.requires_grad, c.requires_grad, d.requires_grad = True, True, True, True
x_t_re = (a, b, c, d)
# prepend the inverse wavelet transform onto the network, so the combined
# model maps wavelet coefficients -> class logits
m_t = transform_wrappers.Net_with_transform(model=model, transform=transform_i_re).to(device)
m_t.eval()
# sanity check: m_t on coefficients should equal model on the raw image
print('Difference of the model outputs', torch.norm(m_t(x_t_re) - model(x)).item())
# +
# backprop from the class-1 logit of the (single) sample
output = m_t(x_t_re)[0][1]
output.backward()
# input * gradient attribution per coefficient tensor
scores = []
for i in range(len(x_t_re)):
    # input * gradient
    scores.append((x_t_re[i] * x_t_re[i].grad).squeeze())
# -
# compute the 2D DWT of the same image directly with PyWavelets
coef = pywt.wavedec2(x.squeeze().cpu().detach().numpy(), 'db4', mode='symmetric', level=3)
# normalize each coefficient array independently for better visibility
coef[0] /= np.abs(coef[0]).max()
for detail_level in range(3):
    coef[detail_level + 1] = [d/np.abs(d).max() for d in coef[detail_level + 1]]
# pack all coefficient arrays into one 2-D array for display
arr, slices = pywt.coeffs_to_array(coef)
plt.imshow(arr, cmap=plt.cm.gray)
plt.axis('off')
plt.show()
# +
# normalize each coefficient array independently for better visibility
# NOTE(review): a, b, c, d are leaf tensors with requires_grad=True; in-place
# division on such tensors can raise in recent PyTorch — confirm this cell
# still runs, or wrap in torch.no_grad().
a, b, c, d = x_t_re
a[0] /= abs(a[0]).max()
for i in range(3):
    b[:,:,i] /= abs(b[:,:,i]).max()
    c[:,:,i] /= abs(c[:,:,i]).max()
    d[:,:,i] /= abs(d[:,:,i]).max()
x_t_re = (a, b, c, d)
# reshape the coefficients and the input*gradient scores into 2-D layouts
arr_c = wv_reshape(x_t_re)
arr_ig = wv_reshape(scores)
# side-by-side: raw wavelet coefficients vs. their attributions
plt.figure(figsize=(10,10))
plt.subplot(1, 2, 1)
plt.imshow(arr_c, cmap=plt.cm.gray)
plt.title('Wavelet coefficients: J=3')
plt.axis('off')
plt.subplot(1, 2, 2)
plt.imshow(arr_ig, cmap=plt.cm.gray)
plt.title('InputXGradient')
plt.axis('off')
plt.show()
# +
# compare the original image with its wavelet-filtered version
# (tensor_t_augment presumably returns (original, filtered) — confirm)
im_orig = tensor_t_augment(x, transformer)[0].squeeze().detach().cpu().numpy()
im_f = tensor_t_augment(x, transformer)[1].squeeze().detach().cpu().numpy()
# shared color scale so the three panels are directly comparable
vmax = np.maximum(np.max(np.abs(im_orig)),np.max(np.abs(im_f)))
vmin = np.minimum(np.min(np.abs(im_orig)),np.min(np.abs(im_f)))
plt.figure(figsize=(10,10))
plt.subplot(1, 3, 1)
plt.imshow(im_orig, cmap=plt.cm.gray, vmax=vmax, vmin=vmin)
plt.title('Original')
plt.axis('off')
plt.subplot(1, 3, 2)
plt.imshow(im_f, cmap=plt.cm.gray, vmax=vmax, vmin=vmin)
plt.title('Filtered')
plt.axis('off')
plt.subplot(1, 3, 3)
plt.imshow(im_orig-im_f, cmap=plt.cm.gray, vmax=vmax, vmin=vmin)
plt.title('Difference')
plt.axis('off')
plt.show()
# -
# # Captum
# get interpretation scores via Captum
# attr_methods/name are informational only; the hard-coded [0] below selects
# IntegratedGradients from the parallel list of attribution classes
attr_methods = ['IG', 'DeepLift', 'SHAP', 'CD', 'InputXGradient']
name = 'IG'
func = [IntegratedGradients, DeepLift, GradientShap, None, InputXGradient][0]
attributer = func(m_t)
class_num = 1  # attribute with respect to the positive class
# deepcopy so Captum's internal perturbations cannot touch x_t_re
attributions = attributer.attribute((deepcopy(x_t_re)),target=class_num)
| ex_mnist/interpretation/interp_wavelet.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# name: python3
# ---
# + [markdown] id="view-in-github" colab_type="text"
# <a href="https://colab.research.google.com/github/raqueeb/TensorFlow2/blob/master/embedding_bangla_v1.ipynb" target="_parent"><img src="https://colab.research.google.com/assets/colab-badge.svg" alt="Open In Colab"/></a>
# + [markdown] id="Mi3C6LhrrjKx" colab_type="text"
# ## এমবেডিং, ওয়ার্ড এমবেডিং এবং বাংলায় টেক্সট অ্যানালাইসিস
#
# + [markdown] id="G4TNlkGyLz4C" colab_type="text"
# একটা জিনিস ভেবে দেখেছেন কি? আমরা এ পর্যন্ত মেশিন লার্নিং মডেলের ইনপুট হিসেবে যা দিয়েছি তা সবই সংখ্যা। মনে করে দেখুন, এপর্যন্ত সব মডেলের ক্লাসিফিকেশন অথবা রিগ্রেশন এর জন্য যা দিয়েছি সব সংখ্যায় দিয়েছি। তার পাশাপাশি ইমেজ নিয়ে যখন কাজ করেছি তখনো কিন্তু ইমেজ (সেটা গ্রেস্কেল হোক আর কালার হোক - তার জন্য গ্রেস্কেল ইনটেনসিটি অথবা কালারের আরজিবি চ্যানেলের আউটপুট), সবকিছুই সংখ্যায় গিয়েছে। এর অর্থ হচ্ছে মেশিন লার্নিং/ডিপ লার্নিং মডেল সংখ্যা ছাড়া আর কিছু বোঝেনা। আর বুঝবেই বা কিভাবে? সেতো যন্ত্র। আর মানুষের সবকিছুই কমপ্লেক্স।
#
# সেদিক থেকে মানুষের ভাষা আরো অনেক কমপ্লেক্স। আমরা একেকজন একেক ভাষায় কথা বলি, ভাষাগুলোর মধ্যে সংযোগ/সিমিলারিটি এবং কি বলতে গিয়ে কি বলে ফেললাম এবং তার ফলাফল, তার পাশাপাশি অনেক শব্দ একটা ভাষায় যা বোঝায় সেটা অন্য ভাষায় তার বৈপরীত্য দেখায়। এখন আপনি বাংলায় কথা বললেও সেটার মধ্যে ৪০% বাইরের শব্দ ব্যবহার করলে তো আরো সমস্যা। আর এই কারণে টেক্সট নিয়ে কাজ করা বেশ কমপ্লেক্স।
#
# যেকোনো ল্যাঙ্গুয়েজে তার প্রতিটা শব্দের একটা অর্থ আছে। তবে এটার অর্থ অনেক সময় নির্ভর করে কনটেক্সটে বা শব্দটা বাক্যের মধ্যে কোথায় এই মুহূর্তে আছে। একই শব্দের আবার অনেকগুলো কনটেক্সচুয়াল অর্থ থাকে সে কারণে শব্দকে শুধুমাত্র শব্দ বা অক্ষর লেভেলে কাজ করলে হবে না। কারণ, ডিপ লার্নিং মডেল যেহেতু সংখ্যা ছাড়া কিছু বোঝেনা, সে কারণে একেক ভাষার একেক বুলি এবং বকাবকি এর পাশাপাশি সেই ভাষাগুলোকে ঠিকমতো সংখ্যায় ট্রান্সফার করা একটা চ্যালেঞ্জ এর কাজ অবশ্যই।
#
# ## কেন টাইম সিরিজ নিয়ে আলাপ হয়নি?
#
# এই বইতে আমি ইচ্ছে করে ‘টাইম সিরিজ’ যোগ করিনি, কারণ সেটার এপ্লিকেশন লেভেল এখনো বেসিক লেভেলে নেই। তবে, এই রিকারেন্ট নিউরাল নেটওয়ার্ককে শিখিয়ে দিলে সে (আরএনএন) ফ্রি ফর্ম (ইচ্ছেমতো) টেক্সট জেনারেট করতে পারে। আমরা যেমন দেখেছি ‘এল এস টি এম’, (লঙ শর্ট টার্ম মেমোরি) নেটওয়ার্কে ‘শেক্সপিয়ার’ ক্লাসিক পড়তে দিলে, সে শেক্সপিয়ারের মতো আরেকটা ক্লাসিক লিখে ফেলেছে, যেখানে এই নেটওয়ার্কের মধ্যে শব্দ, বাক্য এবং ব্যাকরণ তৈরির কোন ধারণা নেই। কারণ, ডিপ লার্নিং প্রচুর ক্লাসিক বই পড়ে বুঝেছে কিভাবে শব্দ, বাক্য বা তার ব্যাকরণ ব্যবহার করতে হয় এর ভেতরে না ঢুকেই। টাইম সিরিজের ব্যাপারটা হচ্ছে সে পরের জিনিসটা প্রেডিক্ট করবে। আগের সময়ে কি ছিলো, সেটাকে ধরে এরপরে কি কি আসতে পারে সেটাই বলবে সে। না বুঝে। যদি শব্দ লেভেলে দেখি, ‘আমি’ এর পর কি আসতে পারে তাহলে ‘ভালো’ আসতে পারে, কারণ ‘আমি ভালো আছি’ একটা বহুল প্রচলিত বাক্য।
#
# বড় ব্যাপার হচ্ছে এই অক্ষর থেকেই ‘এল এস টি এম’ আস্তে আস্তে মানবিক ব্যাকরন এবং কিভাবে একটা বাক্য তৈরি করতে হয় সে ধরনের একটা ধারণা পেয়ে থাকে। এটা সে কোন কিছু বুঝে করে না। প্যাটার্ন থেকে করে। আর সে কারণেই এটার উপরে আমরা খুব একটা ভরসা করব না। আমরা চাইব মেশিনকে শেখাতে, যেভাবে মানুষ ভাষা, শব্দ, ব্যাকরণ শেখে। ওই একই কারণে অক্ষর লেভেলে টেক্সট জেনারেশন নিয়ে আমরা এই মুহূর্তে আলাপ করব না। ইন্টারনেটে দেখতে পারেন কিভাবে একেকটা ‘এল এস টি এম’ নেটওয়ার্ক শেক্সপিয়ারের মত বড় বড় নাটক লিখে ফেলছে। এটা আসলে সে একটা ক্লাসিক্যাল লেখার প্যাটার্ন দেখে তার পার্সপেক্টিভ থেকে লিখেছে। বুঝে লেখেনি।
#
# ## ন্যাচারাল ল্যাঙ্গুয়েজ প্রসেসিং
#
# সেজন্য এর মধ্যে এসে যোগ হয়েছে ‘ন্যাচারাল ল্যাঙ্গুয়েজ প্রসেসিং’। ন্যাচারাল ল্যাঙ্গুয়েজ প্রসেসিং এর মধ্যে লিঙ্গুইস্টিকস, কম্পিউটার সায়েন্স, ইনফরমেশন ইঞ্জিনিয়ারিং এবং কৃত্রিম বুদ্ধিমত্তা ব্যাপারগুলো চলে এসেছে কাজের স্বার্থে। এমনিতেই মানুষ এবং যন্ত্রের মধ্যে একটা যোগসূত্র স্থাপন করা বেশ ঝামেলার ব্যাপার। ‘স্পিচ রিকগনিশন’ এর পাশাপাশি স্বয়ংক্রিয়ভাবে একটা ভাষা বুঝতে পারা এবং তার পাশাপাশি সেই ভাষায় বুঝে টেক্সট জেনারেট করা সহজ ব্যাপার নয়, যখন সবকিছুর পেছনে কাজ করে সংখ্যা। সবচেয়ে বড় কথা হচ্ছে আপনি একটা যন্ত্রকে শেখাচ্ছেন সে আপনার সাথে ন্যাচারালি যোগসুত্র স্থাপন করতে পারে। এই ‘ন্যাচারাল ল্যাঙ্গুয়েজ প্রসেসিং’ এর শুরুটা হচ্ছে সেই টেক্সটকে ঠিকমতো পড়তে পারা, সেটাকে নিজের মতো করে বোঝা এবং মানুষ যেভাবে একটা বাক্যের ‘ইনটেন্ট’ বুঝতে পারে সেভাবে তাকে বুঝিয়ে তার কাছ থেকে উত্তর বের করা। সহজ ব্যাপার নয়। তবে শুরু করতে হবে কোথাও।
#
# যেহেতু ‘ন্যাচারাল ল্যাঙ্গুয়েজ প্রসেসিং’ একটা বিশাল সাবজেক্ট, আমি এই বইতে ব্যাপারটা আনার ব্যর্থ চেষ্টা করব না। তবে কম্পিউটার কিভাবে ‘টেক্সট’ এর সাথে ‘ইন্টারঅ্যাক্ট’ করে সেটা নিয়েই দেখব আমরা সামনের চ্যাপ্টারগুলোতে। আমরা জানি মেশিন লার্নিং মডেল ভেক্টরকে ইনপুট হিসেবে নেয়। আর এই ভেক্টরগুলো হচ্ছে সংখ্যার অ্যারে। যখন আমরা টেক্সট মানে শব্দ এবং বাক্য নিয়ে আলাপ করব, তখন আমাদের প্রথম কাজ হবে এই স্ট্রিংগুলোকে কিভাবে সংখ্যায় পাল্টানো যায়। অর্থাৎ সেই বাক্য বা শব্দটি কিভাবে ‘ভেক্টরাইজ’ করা যায় মডেলে দেবার আগে। আমাদের এই চ্যাপ্টার থেকে শুরু করব কিভাবে আস্তে আস্তে ডিপ নিউরাল নেটওয়ার্ক দিয়ে বাংলার একটা অ্যাপ্লিকেশনে যাওয়া যায়।
#
# ## ওয়ার্ড ভেক্টর কি?
#
# কম্পিউটার সাইন্সে ভেক্টর মানে হচ্ছে সংখ্যা দিয়ে একটা সারি। এমনটা দেখতে। <img src="https://raw.githubusercontent.com/raqueeb/deep_learning_book/master/assets/vector32.png"> চিত্রঃ একটা ভেক্টর, ইংরেজি সংখ্যায় লেখা
#
# প্রোগ্রামিংয়ে ভেক্টরের ইনডেক্স পজিশন শুরু হয় ০ দিয়ে। আমাদের ভেক্টরে পজিশন ০ এর ভ্যালু হচ্ছে ২৩, পজিশন ১ এর ভ্যালু ১৮, এভাবেই চলতে থাকে।
#
# ন্যাচারাল ল্যাঙ্গুয়েজ প্রসেসিং এ ওয়ার্ল্ড ভেক্টর খুবই জনপ্রিয় কারণ এই ব্যাপারটা মডেলকে শেখায় কোথায় কোন শব্দটা পাওয়া যেতে পারে ‘কন্টেক্সট’ এর ওপর ভিত্তি করে। এনএলপি মডেলে কন্টেক্সট যোগ করলে এর অ্যাকুরেসি বেড়ে যায় অনেকখানি। একটা ওয়ার্ড ভেক্টরের ভ্যালুগুলো হচ্ছে তার পজিশন, যা সাধারণত আমরা ৩০০ ডাইমেনশন স্পেসে কাজ করি। যেহেতু ডাইমেনশন বড় সেকারণে ওয়ার্ড ভেক্টরগুলোর মধ্যে অংকের সাহায্যে এর কাছাকাছি সিমান্টিক্যালি ম্যাথমেটিক্যাল অপারেশন করা যেতে পারে। বলেন কি? শব্দ দিয়ে যোগ বিয়োগ করা? সেটাও সম্ভব। দেখবো সামনে।
#
# আমরা যেহেতু মেশিন লার্নিং মডেলে বিভিন্ন ক্লাস অথবা কন্টিনিউয়াস ভ্যালু প্রেডিক্ট করতে পারি, সেখানে আমরা সিমান্টিক্যালি কোরিলেটেড কন্টেক্সট ডাটাসেট থেকে নিতে পারব না কেন?
#
#
# + id="SaDQqrJg7p68" colab_type="code" outputId="13a064eb-c89e-4644-a5d4-9789c1f0511d" colab={"base_uri": "https://localhost:8080/", "height": 34}
try:
    # Use only TensorFlow 2.x (Colab magic; a no-op outside Colab).
    # %tensorflow_version 2.x
    # The `pass` is required: a try-block whose body is only comments is a
    # SyntaxError when this notebook is executed as a plain .py script.
    pass
except Exception:
    pass
import tensorflow as tf
keras = tf.keras
# + [markdown] id="V0un3M2_MnHe" colab_type="text"
# আমরা যেহেতু ডিপ নিউরাল নেটওয়ার্কের কথা বলছি সেখানে সবকিছুতেই লেয়ার কনসেপ্ট কাজ করে। আজকে এখানে আমরা নতুন একটা লেয়ার নিয়ে কথা বলবো যেটাকে আমরা বলছি এম্বেডিং লেয়ার। শুরুতেই অনেকে বলবেন এম্বেডিং মানে কি? এর সাথে সংখ্যার সম্পর্ক কি?
# + id="E4bim_ShMw8x" colab_type="code" colab={}
from tensorflow.keras import layers
# Embedding layer: maps integer word indices in [0, 1000) to dense 5-d vectors
embedding_layer = layers.Embedding(1000, 5)
# + id="eRmBcA6iMx71" colab_type="code" outputId="f9705e19-3b1e-4671-95d0-8d00fbb22324" colab={"base_uri": "https://localhost:8080/", "height": 85}
# send three integer indices through the layer and see what comes back:
# each index selects one row of the (randomly initialized) weight matrix
result = embedding_layer(tf.constant([1,2,3]))
result.numpy()
# + id="MYLOnO5Uxmmr" colab_type="code" colab={}
# alternatively, tokenize with the TensorFlow/Keras tokenizer
from tensorflow.keras.preprocessing.text import Tokenizer
# + id="8mezzU-TxprR" colab_type="code" colab={}
tokenizer = Tokenizer()
# two short Bangla sentences ("I read books now", "This is one of my favorite books")
text = ['আমি এখন বই পড়ি', 'এটি আমার অনেক পছন্দের একটা বই']
tokenizer.fit_on_texts(text)  # build the word index from the corpus
sequences = tokenizer.texts_to_sequences(text)  # sentences -> integer id lists
# + id="MVVdIpcqxrxB" colab_type="code" outputId="626f1224-b784-4e1c-ff5a-a944ac2a5a51" colab={"base_uri": "https://localhost:8080/", "height": 34}
sequences
# + [markdown] id="XZxexs73hmRo" colab_type="text"
# ইউনিকোড স্ট্রিং দুভাবে টেন্সর-ফ্লোতে দেখানো যায়। স্ট্রিং স্কেলারে কোড পয়েন্টের সিকোয়েন্সকে এনকোড করবে জানা ক্যারেক্টার এনকোডিং দিয়ে। আমরা ভেক্টর নিয়ে দেখালে। এখানে প্রতিটা পজিশনে একটা সিঙ্গেল কোড পয়েন্ট আছে।
#
# এখানে কয়েকটা উদাহরণ দেখালাম, তবে মাথা খারাপ করার দরকার নেই।
# + id="yXxHWH-We9Vg" colab_type="code" outputId="ff33eb15-9f6b-46cf-db1e-8fe45e7d7076" colab={"base_uri": "https://localhost:8080/", "height": 68}
# each character of the Bangla sentence as its Unicode code point
text_chars = tf.constant([ord(char) for char in u"আমি এখন বই পড়ি"])
text_chars
# + [markdown] id="2dxKTGEZkRwZ" colab_type="text"
# What you see is the Unicode string shown as a vector of Unicode code points. Let's look at a batch next — it is all a game of numbers.
# + id="AutcJ9tfe9Sh" colab_type="code" outputId="9410ab79-d851-45fc-cb69-1ae2b86fd6b9" colab={"base_uri": "https://localhost:8080/", "height": 85}
# decode a batch of UTF-8 byte strings into a ragged tensor of code points
batch_utf8 = [s.encode('UTF-8') for s in
              [u'প্রিয় বন্ধু', u'কেমন চলছে তোমার?', u'আমি ভালো', u'😊']]
batch_chars_ragged = tf.strings.unicode_decode(batch_utf8,
                                               input_encoding='UTF-8')
for sentence_chars in batch_chars_ragged.to_list():
    print(sentence_chars)
# + id="89HMTwJagkYu" colab_type="code" outputId="52682e13-9c10-40a6-de78-9c71d1c4e027" colab={"base_uri": "https://localhost:8080/", "height": 153}
# pad the ragged rows to a dense tensor, filling short rows with -1
batch_chars_padded = batch_chars_ragged.to_tensor(default_value=-1)
print(batch_chars_padded.numpy())
# + [markdown] id="tuzAjhepliiv" colab_type="text"
# Let's look at it in multiple dimensions.
# + id="CY5U_UCMg8MS" colab_type="code" outputId="68ecf30e-2b61-484f-e471-9150e04f41c3" colab={"base_uri": "https://localhost:8080/", "height": 34}
print(tf.strings.unicode_script(batch_chars_ragged))
# + [markdown] id="YHqel-xSNGyu" colab_type="text"
# ## ‘এম্বেডিং’ সিমিলারিটি, একটা থেকে আরেকটা শব্দের দূরত্ব
#
# মেশিন লার্নিং এর ভাষায় ‘এম্বেডিং’ হচ্ছে সিমিলারিটি, যদি শব্দের কথা বলি তাহলে একটা শব্দ থেকে আরেকটা শব্দ কতটুকু যুক্ত বা একটা শব্দ থেকে আরেকটা শব্দ কত দূরে? তাদের মধ্যে সম্পর্ক আছে কিনা? যেমন, রাজা’র সাথে ‘রানী’ শব্দটা কিন্তু সম্পর্কযুক্ত। যেমন, ‘মা’ শব্দের সাথে ‘বাবা’ সম্পর্কযুক্ত। ‘বাংলাদেশ’ শব্দের সাথে ‘ঢাকা’ সম্পর্কযুক্ত। সেই থেকে ‘আকাশ’ শব্দের সাথে ‘টেবিল’ কিন্তু বহু দূরে মানে তাদের মধ্যে সম্পর্ক টানা বেশ কঠিন, যদি না কোনদিন ‘আকাশ’ থেকে ‘টেবিল’ পড়ে।
#
# এই এম্বেডিং ব্যাপারটা এসেছে কিছুটা ‘ওয়ান হট’ এনকোডিং থেকে। তবে, ব্যাপারটা ঠিক সেরকম নয়। শব্দকে যেহেতু আমাদেরকে সংখ্যায় পাল্টাতে হবে তাহলে আর বাকি উপায় কি? মনে আছে, আমাদের এই ‘ওয়ান হট’ এনকোডিং বা ‘ডামি ভেরিয়েবল’ ব্যাপারটা এসেছে শব্দ দিয়ে তৈরি ক্যাটাগরিকাল ভেরিয়েবল থেকে? মেশিন লার্নিং মডেলে আমরা যখন কিছু শব্দকে সংখ্যার ক্যাটাগরিতে ভাগ করতে চাই, যেটা এমুহুর্তে সংখ্যায় নেই। যেমন, আইরিশ ডাটাসেটের তিন প্রজাতির ফুলের জন্য ০,১,২, ভাগে ভাগ করতে পারছিনা, সেখানে চলে এসছে এই ডামি ভেরিয়েবল। আমরা যাকে বলছি শব্দকে দিয়ে তার জন্য একটা করে এনকোডিং। এর অর্থ হচ্ছে, আমাদের ভাষায় যতগুলো শব্দ আছে সেগুলোকে ‘ওয়ান হট’ এনকোডিং করা যেতে পারে। আর সেখানেই সমস্যা।
#
# <img src="https://raw.githubusercontent.com/raqueeb/deep_learning_book/master/assets/hot.png"> চিত্রঃ ‘ওয়ান হট’ এনকোডিং এর উদাহরণ
#
# এখানে একটা উদাহরণ দেয়া যাক। একটা বাক্য। ‘আমি এখন বই পড়ি’। এই চারটা শব্দকে যদি আমরা সংখ্যায় পাল্টাতে চাই, তাহলে আমাদের এই ভোকাবুলারির প্রতিটা শব্দকে ‘০’ ভেক্টর দিয়ে শুরু করব। আমাদের এখানে যেহেতু ৪টা ইউনিক শব্দ, সে কারণে এই শূন্য ভেক্টরের দৈর্ঘ্য হবে ৪। প্রতিটি শব্দকে ঠিকমতো রিপ্রেজেন্ট করার জন্য একেকটা শব্দের ইনডেক্সে তার করেসপন্ডিং ‘১’ বসাবো। ছবি দেখি। এখন এই টেবিল থেকে প্রতিটা শব্দের জন্য তার ভেক্টর বের করা সোজা। আমাদের এই চারটা শব্দের জন্য ভেক্টরের দৈর্ঘ্য হচ্ছে ৪ যার বাকি তিনটাই ০। এভাবে আমরা শব্দকে বিভিন্ন ক্যাটাগরিতে ভাগ করতে পারি। তবে ব্যাপারটা সেরকম এফিশিয়েন্ট নয় যখন আমাদের ভোকাবুলারিতে ১০ হাজার শব্দ থাকবে। তার মানে একেকটা ভেক্টরের দৈর্ঘ্য ১০ হাজার হবে - এর মধ্যে ৯৯.৯৯ শতাংশই হচ্ছে ০। এটা দক্ষ সিস্টেম না। আমরা যদি প্রতিটা বাংলা শব্দের জন্য ডামি ভ্যারিয়েবল বানাই, তাহলে কতো বড় ডামি ভ্যারিয়েবলের একেকটা টেবিল হবে?
#
# এই সমস্যা থেকে বের হবার উপায় কি? প্রতিটা শব্দকে একটা করে ইউনিক সংখ্যা দিয়ে দেওয়া। আমাদের আগের ভোকাবুলারিটাকে ‘আমি এখন বই পড়ি’কে আমরা একটা ‘ডেন্স’ ভেক্টরের মত আলাদা আলাদা করে সংখ্যা দিয়ে দিতে পারি। এই জিনিসটা আগের থেকে অনেকটাই এফিশিয়েন্ট। একেকটা শব্দের জন্য একেকটা আলাদা আলাদা সংখ্যা। সবচেয়ে বড় ব্যাপার হচ্ছে বিশাল ‘স্পার্স’ হাই-ডাইমেনশন স্পেস থেকে কম ডাইমেনশন স্পেসে চলে এলাম। তবে, এটার সমস্যা দুটো।
#
# * ১. আমাদের এই সংখ্যায় এনকোডিং সিস্টেমটাকে ‘ম্যানুয়ালি’ করা হয়েছে। ফলে এদের মধ্যে কোন ‘রিলেশনশিপ’ বের করা যাচ্ছে না। অংকেও এক সংখ্যা থেকে আরেক সংখ্যার মধ্যে যে রিলেশনশিপ সেটা অনুপস্থিত। ফলে আমাদের অংকের ভাষায় কো-সাইন ভেক্টরে কে কোথায় আছে সেটা বের করা মুশকিল।
#
# * ২. একটা মডেলের জন্য এই ধরনের ‘ম্যানুয়াল’ এনকোডিং শব্দগুলোর মধ্যে সম্পর্ক না বুঝলে সেটা সমস্যা হয়ে দাঁড়াবে। যেহেতু এই একেকটা ফিচারের জন্য একেকটা ‘ওয়েট’ সে কারণে একটা লিনিয়ার ক্লাসিফায়ারের পক্ষে এই ফিচার এবং ওয়েট এর কম্বিনেশন কোন সম্পর্ক দেখাবেনা। এটা বড় সমস্যা।
#
# আর সে কারণেই চলে এসেছে ওয়ার্ড এম্বেডিং।
# + [markdown] id="awPlT5SRBH2G" colab_type="text"
# ## একটা ছোট্ট উদাহরন, রেস্টুরেন্ট রিভিউ
#
# আমরা এখানে একটা এমবেডিং লেয়ারের উদাহরণ নিয়ে আসি। ইনপুট ডাইমেনশনে কতগুলো ভোকাবুলারি আছে? ধরে নিচ্ছি ৫০টা। আমি সংখ্যা না গুনেই বলছি। ৫০টা ডামি ভ্যারিয়েবল। মানে আমরা কতগুলো ক্যাটেগরিতে এনকোড করছি। আমাদের লুক আপ টেবিলে কতগুলো আইটেম আছে সেটাই এখানে আসবে।
#
# আমাদের ৫০টা ভোকাবুলারির জন্য 'ওয়ান হট' এনকোডিং ব্যবহার করছি, কেরাসের ভেতরে। এটা ব্যবহার করা ঠিক না তবে ছোটখাট উদাহরণে সেটা করা যায়।
# + id="MEOvo2pjrjKy" colab_type="code" colab={}
from numpy import array
from tensorflow.keras.preprocessing.text import one_hot
from tensorflow.keras.preprocessing.sequence import pad_sequences
from tensorflow.keras.models import Sequential
from tensorflow.keras.layers import Flatten, Embedding, Dense
# + [markdown] id="aS7ZaxjhMJ0w" colab_type="text"
# আমরা এখানে ছোট একটা দশটা রেস্টুরেন্টের রিভিউ যোগ করেছি। এর পাশাপাশি তার পাঁচটা নেগেটিভ আর পাঁচটা পজেটিভ লেবেল যোগ করেছি। এর উপর আমরা নিউরাল নেটওয়ার্কে ট্রেইন করব।
# + id="9Ecoq69LrjK0" colab_type="code" colab={}
# input: 10 restaurant reviews (in Bangla); the first 5 are negative,
# the last 5 positive
reviews = [
    'আমি আর আসছি না এখানে!',
    'একদম বাজে সার্ভিস',
    'কথা শোনে না ওয়েটার',
    'একদম ঠান্ডা খাবার',
    'বাজে খাবার!',
    'অসাধারণ',
    'অসাধারণ সার্ভিস!',
    'খুব ভালো!',
    'মোটামুটি',
    'এর থেকে ভালো হয়না']
# labels (1=negative, 0=positive)
labels = array([1,1,1,1,1,0,0,0,0,0])
# + id="-CxmAlaErjK3" colab_type="code" outputId="d5a4adf4-2198-49ed-b8eb-7d97352e3477" colab={"base_uri": "https://localhost:8080/", "height": 34}
print(reviews[0])
# + [markdown] id="59EA_3KSKOos" colab_type="text"
# শব্দের সংখ্যা না গুনেও একটা ধারনা নিয়েছি ৫০টা মানে ভোকাবুলারি সাইজ ৫০। ছোট্ট উদাহরণ বলে আমরা ‘ওয়ান হট এনকোডিং’ ব্যবহার করছি। এর কাজ হচ্ছে এই বাক্যগুলোকে শব্দে ভেঙে ফেলা। যাকে আমরা বলছি টোকেনাইজিং। এরপর তাকে নিজস্ব ইনডেক্সে পাঠাবে। শেষে আমরা যা পাব সেটা সংখ্যার সিকোয়েন্স।
# + id="JaZk5g6PrjK5" colab_type="code" outputId="b1236422-a7a6-4166-b078-4288d47297ee" colab={"base_uri": "https://localhost:8080/", "height": 34}
# one-hot encoding: hash each word to an index in [1, VOCAB_SIZE)
VOCAB_SIZE = 50
encoded_reviews = [one_hot(d, VOCAB_SIZE) for d in reviews]
print(f"Encoded reviews: {encoded_reviews}")
# + [markdown] id="Ok0ds1g-LOOl" colab_type="text"
# যতগুলো শব্দ ততগুলো করে সংখ্যা একেকটা অংশে। তবে এই সিকোয়েন্সে একটা কনসিস্টেন্সি রাখার জন্য একটা লেনথ ঠিক করে দিতে হবে। শব্দ ধরে এখন কোনটা ২ অথবা ৩ এবং ৪ লেনথে, কিন্তু প্যাডিং এর জন্য আমরা MAX_LENGTH = 4 ধরে দিচ্ছি। দেখুন এখানে বাকি অংশ ০ দিয়ে ভরে দিয়েছে।
# + id="xFLBnh2erjK8" colab_type="code" outputId="2bbaf82a-bf12-48db-e1b9-b4c10f3fbf12" colab={"base_uri": "https://localhost:8080/", "height": 187}
MAX_LENGTH = 4
# pad/truncate every review to 4 tokens; zeros are appended ('post')
padded_reviews = pad_sequences(encoded_reviews, maxlen=MAX_LENGTH, padding='post')
print(padded_reviews)
# + [markdown] id="-10aiXydLsei" colab_type="text"
# আমরা আগের মতো সিকোয়েন্সিয়াল নেটওয়ার্ক তৈরি করছি। আউটপুট ডাইমেনশনে কমিয়ে এনেছি সেটা। এখানে একটা ডেন্স লেয়ার, তার মানে একটা ওয়েট ম্যাট্রিক্স, একটা লার্নিং চলছে এখানে। এই এমবেডিং লেয়ারে যে লার্নিং চলছে সেটা শব্দগুলোকে একটা ইউক্লুডিয়ান স্পেসে সংখ্যাগুলোর মধ্যে একটা রিলেশনশিপ তৈরি হচ্ছে।
#
# মডেলটা দেখুন।
# + id="sBM4Eu7OrjLJ" colab_type="code" outputId="7fbcd3f1-2741-4830-9054-34bed275f5e6" colab={"base_uri": "https://localhost:8080/", "height": 272}
model = Sequential()
# learn an 8-d embedding per vocabulary index, 4 tokens per review
embedding_layer = Embedding(VOCAB_SIZE, 8, input_length=MAX_LENGTH)
model.add(embedding_layer)
model.add(Flatten())  # 4 x 8 -> 32 features
model.add(Dense(1, activation='sigmoid'))  # binary sentiment output
model.compile(optimizer='adam', loss='binary_crossentropy', metrics=['acc'])
print(model.summary())
# + id="MVDOD7q7rjLO" colab_type="code" outputId="b4fe0f82-e993-4eb2-ed2f-14ff08284f61" colab={"base_uri": "https://localhost:8080/", "height": 34}
# train the model
model.fit(padded_reviews, labels, epochs=100, verbose=0)
# + [markdown] id="zQpHhhrbOhdM" colab_type="text"
# ১০০ ইপকের পর ভালো করে লক্ষ্য করুন, প্রতিটা লাইন হচ্ছে একেকটা শব্দের এমবেডিং। এখানে ওয়েটগুলোর অর্থ এখন খুঁজতে যাবো না এমুহুর্তে।
# + id="hnaII1xnrjLS" colab_type="code" outputId="b5ad3ade-7909-4835-f0db-da60954d1f5b" colab={"base_uri": "https://localhost:8080/", "height": 1000}
# each row of the weight matrix is the learned embedding of one word index
print(embedding_layer.get_weights()[0].shape)
print(embedding_layer.get_weights())
# + [markdown] id="tQAiapl6OdFO" colab_type="text"
# Why did the accuracy come out as 1? Because all ten sentences are unique — none overlaps with another. And what else would you expect from such a tiny example?
# + id="dPvlpHsNrjLU" colab_type="code" outputId="69efabf8-e96f-4287-bc62-7f0f60ffd99f" colab={"base_uri": "https://localhost:8080/", "height": 34}
loss, accuracy = model.evaluate(padded_reviews, labels, verbose=0)
print(f'Accuracy: {accuracy}')
# + [markdown] id="RVYFsXuIbHK0" colab_type="text"
# আরেকটা উদাহরণ দেখি। এসেছে স্ট্যাকওভারফ্লো থেকে। আমরা যা আলাপ করেছি সেখানে ওয়ান হট এনকোডিং একদম চোখে দেখা হয়নি। খালি চোখে হট এনকোডিং এর ভেতরে দেখতে চাই।
# + id="plzrmT4xbUrL" colab_type="code" colab={}
from tensorflow.keras.preprocessing.sequence import pad_sequences
from tensorflow.keras.utils import to_categorical
from tensorflow.keras.models import Sequential
from tensorflow.keras.layers import Embedding, Dense, LSTM
import numpy as np
# + [markdown] id="TGF-jsBsbYE2" colab_type="text"
# এখানেও আমরা একটা বাক্যকে ক্লাসিফাই করবো। 'ব্যাগ অফ ওয়ার্ড' (একটা ব্যাগে যতো শব্দ আছে) হচ্ছে একটা মেকানিজম যার মাধ্যমে টেক্সট থেকে ফিচার বের করা যায়। এটা আসলে আমাদের এই তিনটা উদাহরন থেকে যতো ইউনিক শব্দ আছে সেটা থেকে সে ট্রেনিং নেয়। তবে, আমরা তার থেকেও ভালো মডেল মানে সিকোয়েন্সের ওপর জোর দেবো।
# + id="K9yE6vy9cAtl" colab_type="code" outputId="eaecc23f-4b1b-4d41-8795-05afada4277a" colab={"base_uri": "https://localhost:8080/", "height": 34}
num_classes = 5
max_words = 20
sentences = ['আমি আর আসছি না এখানে!',
             'কথা শোনে না ওয়েটার',
             'একদম ঠান্ডা খাবার']
# create random labels (this toy example only demonstrates the encoding)
labels = np.random.randint(0, num_classes, 3)
y = to_categorical(labels, num_classes=num_classes)
# build a 1-based word index over the unique words of the corpus
words = set(w for sent in sentences for w in sent.split())
word_map = {w : i+1 for (i, w) in enumerate(words)}
sent_ints = [[word_map[w] for w in sent.split()] for sent in sentences]
vocab_size = len(words)
print(vocab_size)
# + [markdown] id="PI3TxDorl5Hw" colab_type="text"
# বাক্যের শব্দের সিকোয়েন্স, ৫, ৪, ৩
# + id="O1u40lNjlztt" colab_type="code" outputId="0948b5b4-1249-4a9d-f13e-8e71e1ac27cb" colab={"base_uri": "https://localhost:8080/", "height": 34}
print(sent_ints)
# + [markdown] id="sFHbTsYHiisK" colab_type="text"
# We must pad to a length of max_words, using len(words) plus 1. The +1 reserves index 0 so nothing collides with the padding value. Here we one-hot encode and pad -
# + id="Cx50nUsufCfN" colab_type="code" outputId="a4bf0414-4a11-47c3-8779-7129a8fdec34" colab={"base_uri": "https://localhost:8080/", "height": 34}
# build X: pad each sentence to max_words, then one-hot each index
X = np.array([to_categorical((pad_sequences((sent,),
    max_words)).reshape(20,),vocab_size + 1) for sent in sent_ints])
print(X.shape)
# + [markdown] id="qi9UPNAlna11" colab_type="text"
# Now let's look at the state of our one-hot encoding: thousands of zeros.
# + id="LNGqytPrnOtT" colab_type="code" outputId="26cb07ea-1b96-4049-d583-53cd0856acc2" colab={"base_uri": "https://localhost:8080/", "height": 1000}
print(X)
# + id="ltNmRAeWp-nf" colab_type="code" outputId="db8ca373-d36b-434d-f126-bc0bc39b7d38" colab={"base_uri": "https://localhost:8080/", "height": 68}
# and inspect y
print(y)
# + [markdown] id="mtHeqlNVj7Oy" colab_type="text"
# মডেলে ডেন্স লেয়ার যোগ করছি যাতে ওয়ান হট শব্দগুলোকে ডেন্স ভেক্টরে পাল্টে নেয়া যায়। তো LSTM এর কাজ কি? আমাদের বাক্যের ভেতরে শব্দের ভেক্টরকে কনভার্ট করতে হবে ডেন্স বাক্য ভেক্টরে। একদম শেষ লাইনে সফটম্যাক্স এক্টিভেশন ফাংশন ব্যবহার করছি যাতে ক্লাসের ওপর প্রবাবিলিটি ডিস্ট্রিবিউশন চালাতে পারে।
# + id="JJgHhxWYfkFr" colab_type="code" colab={}
model = Sequential()
# Dense layer turns the one-hot word vectors into dense vectors;
# the LSTM then folds the word sequence into one dense sentence vector
model.add(Dense(512, input_shape=(max_words, vocab_size + 1)))
model.add(LSTM(128))
# softmax to get a probability distribution over the 5 classes
model.add(Dense(5, activation='softmax'))
model.compile(loss='categorical_crossentropy',
              optimizer='adam',
              metrics=['accuracy'])
# + id="wwB1YH6If5dw" colab_type="code" outputId="105bfa87-c964-4b6b-9b65-85b1c41d2655" colab={"base_uri": "https://localhost:8080/", "height": 68}
model.fit(X,y)
# + [markdown] id="9bXyPHd6Ntlo" colab_type="text"
# ## শব্দ এবং সংখ্যার কাছাকাছি এনকোডিং
#
# শব্দ যখন সংখ্যা হবে, তখন শব্দের মধ্যে যেরকম সম্পর্ক ছিলো, সেটা আরো ভালো বোঝা যাবে সংখ্যায়। শব্দে "আমি" "তুমি" যেমন কাছাকাছি, সংখ্যায় সেটা আরো পরিস্কার হবে। এমবেডিং এ এটা এমন ধরনের ‘ডেন্স’ রিপ্রেজেন্টেশন যার মধ্যে একই ধরনের শব্দের একই বা কাছাকাছি ধরনের এনকোডিং হবে। ‘পুরুষ’ এবং ‘মহিলা’ এই দুটো অক্ষরের মধ্যে আকাশ পাতাল পার্থক্য হলেও সম্পর্কের কারণে এদুটো কাছাকাছি থাকবে। সংখ্যার এনকোডিং ও একই ধরনের হবে। সবচেয়ে বড় কথা হচ্ছে এ ধরনের এনকোডিং ম্যানুয়ালি বা আমাদের হাতে লিখে দিতে হবে না। বরং যেহেতু এগুলো ট্রেনিংযোগ্য প্যারামিটার ফলে মডেলের ট্রেনিং এর সময় ওয়েটগুলো ‘এডজাস্ট’ হবে এদের সম্পর্কের ভিত্তিতে। একটা ছোট ডাটাসেটের ওয়ার্ড এম্বেডিং ৮ ডাইমেনশনাল হলেও সেটা বড় ডাটাসেটের জন্য ১০২৪ ডাইমেনশন পর্যন্ত যেতে পারে। যত বেশি ডাইমেনশনাল এম্বেডিং ততবেশি শব্দগুলোর মধ্যে সম্পর্কগুলোকে আরো ভালোভাবে বোঝা যাবে তবে তার জন্য প্রচুর ডাটা লাগবে শিখতে।
#
# ## এমবেডিং এর প্রি-ট্রেইনড মডেল
#
# আমরা যখন ‘ওয়ান হট’ এনকোডিং উদাহরণ দেখছিলাম, সেখানে প্রতিটা শব্দ একটা ৪ ডাইমেনশনাল ফ্লোটিং পয়েন্ট সংখ্যা ভেক্টর দিয়ে রিপ্রেজেন্ট করা হয়েছিল। আমরা এ ধরনের এম্বেডিংকে বলতে পারি ‘লুকআপ’ টেবিল। এই ‘লুকআপ’ টেবিলের ওয়েটগুলো শিখছে যখন আমরা ‘ডেন্স’ ভেক্টরের তার করেসপন্ডিং টেবিল দেখছি। ফলে সেটা ধরে আমরা প্রতিটা শব্দ ধরে এনকোডিং করছি। তবে, এখানে একটা বড় সমস্যা আছে। আমরা যখন এনকোডিং করবো তখন সব বাংলা শব্দকে একসাথে এনকোডিং না করলে আমরা কিভাবে একটা শব্দকে আরেকটা শব্দের সাথে সিমিলারিটি, শব্দগুলোর মধ্যে দূরত্ব বের করবো? তাদের মধ্যে সিমান্টিক এনালিসিস কে করবে? সব ডাটা একসাথে না হলে আমাদের কাজ হবে না। সে সমস্যা মেটাতে এসেছে প্রি-ট্রেইনড মডেল। কোটি কোটি শব্দকে একসাথে ট্রেইন করিয়ে তারপর এনকোডিং করেছে একসাথে। নিজে বানানো কঠিন। সেটার জন্য যে রিসোর্স দরকার সেটা নেই আমাদের কাছে। ফলে এধরনের প্রি-ট্রেইনড মডেলগুলো অসাধারণ পারদর্শী, কারণ সে ট্রেনিং এর সময় সব শব্দ হাতের কাছে পেয়েছে। শুধু নামিয়ে নিতে হবে দরকারের সময়ে। তাদের শক্তি এবং দুর্বলতা নিয়ে আলাপ করবো না এই বইয়ে।
#
# এই প্রি-ট্রেইনড মডেল হিসেবে আমার প্রিয় ফেইসবুকের ফাস্টটেক্সট। ইংরেজি ছাড়াও আরো ১৫৭টা ভাষায় এর প্রি-ট্রেইনড মডেল আছে। বাংলা তো অবশ্যই। পুরোনো লাইব্রেরি হিসেবে ওয়ার্ড২ভেক এখনো জনপ্রিয়। পাশাপাশি পাইথনের জন্য স্পেসি আমার পছন্দের।
# + [markdown] id="YPhkHnJ2Lm4o" colab_type="text"
# ধারণাগুলো এসেছে টেন্সর-ফ্লো ডকুমেন্ট, জেফ হিটনের নোটবুক এবং স্ট্যাকওভারফ্লো থেকে।
#
# https://github.com/jeffheaton/t81_558_deep_learning/blob/master/t81_558_class_11_03_embedding.ipynb
| embedding_bangla_v1.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 2
# name: python2
# ---
# + [markdown] id="view-in-github" colab_type="text"
# <a href="https://colab.research.google.com/github/PhilipeRLeal/earthengine-api/blob/master/eeus18_tf_part2.ipynb" target="_parent"><img src="https://colab.research.google.com/assets/colab-badge.svg" alt="Open In Colab"/></a>
# + [markdown] id="slnEG77OfZOd" colab_type="text"
# # Earth Engine Object Detection
# In this notebook, we'll develop a model to detect cars in 15cm aerial imagery.
#
# ## Part 1: Creating a Model
#
# Lets start by importing TensorFlow and the Colab auth library for communication with Google Cloud Storage.
# + id="aaUeCSXUAjwO" colab_type="code" colab={}
import tensorflow as tf
from google.colab import auth
# grant this Colab session access to Google Cloud Storage
auth.authenticate_user()
# + [markdown] id="V-yyJA08grF3" colab_type="text"
# Now we'll need to generate training / evaluation data. We'll start by hand annotating the outlines of cars in a roughly 1km^2 region of Mountain View, CA. [We can do this using the geometry editor](https://code.earthengine.google.com/1b573c8d1b3b4bcb9e972eb8994abc4f) in the Earth Engine Code Editor. We can use this annotated data to create a vector mask of cars/non-cars.
#
# With the car mask, [we'll generate training and evaluation FeatureCollections and export them to cloud.](https://code.earthengine.google.com/c84a1d9e610ec91044c82766e53fe48a)
# + [markdown] id="1yG71AN_WFQw" colab_type="text"
# Lets create a dataset reader in TensorFlow for training/eval data.
# + id="kVd8XwjrCSuT" colab_type="code" colab={}
# Input pipeline: yields 4 features per example ('R', 'G', 'B', 'L'), each a
# side x side patch centered on a pixel, together with that pixel's 'class'.
def input_fn(fileNames, numEpochs=None, shuffle=True, batchSize=100, side=61):
  """Build a batched (features, labels) pair from GZIP TFRecord files.

  Args:
    fileNames: TFRecord file name(s), GZIP compressed.
    numEpochs: number of passes over the data (None = repeat forever).
    shuffle: whether to shuffle examples before batching.
    batchSize: number of examples per batch.
    side: patch edge length in pixels.

  Returns:
    (features, labels): features is a dict of [batch, side, side] float
    tensors keyed by band name; labels is a [batch, 1, 1] float tensor.
  """
  dataset = tf.data.TFRecordDataset(fileNames, compression_type='GZIP')
  # Fixed-length schema: four image bands plus the scalar class label.
  schema = {band: tf.FixedLenFeature([side, side], dtype=tf.float32)
            for band in ('R', 'G', 'B', 'L')}
  schema['class'] = tf.FixedLenFeature([1, 1], dtype=tf.float32)

  def parse(example_proto):
    parsed = tf.parse_single_example(example_proto, schema)
    # Separate the class label from the training features.
    labels = parsed.pop('class')
    # Rescale [0, 255] RGBL values into [0, 1] for faster, stabler training.
    features = {band: tf.divide(values, 255.0)
                for (band, values) in parsed.items()}
    return features, labels

  dataset = dataset.map(parse, num_parallel_calls=5)
  if shuffle:
    # With a batch size of 100, 30 batches = 3000 examples buffered — the
    # full size of the training data.
    dataset = dataset.shuffle(buffer_size=batchSize * 30)
  dataset = dataset.batch(batchSize).repeat(numEpochs)
  return dataset.make_one_shot_iterator().get_next()
# + [markdown] id="sKIpdpAvXWIx" colab_type="text"
# It's time to create a model. We'll build a [Fully Convolutional NN](https://people.eecs.berkeley.edu/~jonlong/long_shelhamer_fcn.pdf) so that we can train our model on 61x61 patches, and later apply it to much larger areas for prediction. Note, using an FCNN allows us to make predictions on image data of any dimensions.
# + id="PDTrkrmySXe7" colab_type="code" colab={}
# A helper function for defining a convolutional layer. We use batch
# normalization to speed up training given our limited training data, therefore
# we can't use vanilla conv2d(activation='relu', ...)
def conv_layer(inputs, filters, kernel_size, training):
  """VALID-padded conv2d -> batch norm -> ReLU.

  Batch normalization speeds up training given the limited training data,
  which is why the activation is applied manually after the norm instead of
  using conv2d(activation='relu', ...).
  """
  return tf.nn.relu(
      tf.layers.batch_normalization(
          inputs=tf.layers.conv2d(
              inputs=inputs,
              filters=filters,
              kernel_size=kernel_size,
              data_format='channels_last'),
          training=training))
# Our model will combine convolutions of the full patch on the luminance
# channel with convolutions of the RGB channels on a smaller region of the
# patch. The model will finally scale the predicted 2D region to match the size
# of the input features minus the kernel contributions to the edges.
def fcnn(feat, mode):
  """Fully convolutional car/not-car network.

  Args:
    feat: dict of float32 tensors 'R', 'G', 'B', 'L', each [batch, H, W]
      (61x61 patches during training; larger tiles at prediction time).
    mode: a tf.estimator.ModeKeys value.

  Returns:
    Probabilities in (0, 1). During train/eval a per-patch prediction;
    during prediction a tensor resized to the input area minus the
    60-pixel kernel margin.
  """
  training = mode == tf.estimator.ModeKeys.TRAIN
  # interleave the red, green, and blue channels so that a batch is along axis=0
  rgb = tf.stack([feat['R'], feat['G'], feat['B']], axis=1)
  # Strip a 15 pixel border from the rgb channels. We'll only use the larger
  # area to provide context to the foveated rgb region.
  rgb = rgb[:, :, 15:-15, 15:-15]
  # Convert from NCHW to NHWC
  rgb = tf.transpose(rgb, [0, 2, 3, 1])
  # Add a dimension for 'channel' to make this tensor 4D
  l = tf.expand_dims(feat['L'], 3)
  # We'll get the size of the original source pixels from l minus the "kernel"
  # surrounding each pixel. We choose to build the meat of our CNN around this
  # reduced region to reduce the model size, training time, etc...
  original_dims=tf.add(tf.shape(l)[1:3], -60)
  # Perform six convolutions in a row (each 7x7 VALID conv trims 6 px),
  # reducing the luminance channel to a 25x25 region per-pixel.
  # (The original comment said "5 convolutions" but range(6) runs six.)
  for i in range(6):
    l = conv_layer(inputs=l, filters=3 + i, kernel_size=7, training=training)
  # A single 7x7 conv brings the 31x31 rgb crop to the same 25x25 extent.
  rgb = conv_layer(inputs=rgb, filters=8, kernel_size=7, training=training)
  # Combine rgb and l to form a 4D tensor with 16 filters
  rgbl = tf.concat([rgb, l], 3)
  comb1 = tf.layers.max_pooling2d(
      inputs=rgbl,
      pool_size=3,
      strides=2,
      data_format='channels_last')
  comb2 = conv_layer(inputs=comb1, filters=32, kernel_size=5, training=training)
  comb2 = tf.layers.max_pooling2d(
      inputs=comb2,
      pool_size=3,
      strides=2,
      data_format='channels_last')
  comb3 = conv_layer(inputs=comb2, filters=64, kernel_size=3, training=training)
  # We stay convolutional by substituting a conv op for a dense layer, and
  # keeping the kernel size 1x1.
  dense = conv_layer(
      inputs=comb3,
      filters=64,
      kernel_size=1,
      training=training)
  dropout = tf.layers.dropout(
      inputs=dense,
      rate=0.4,
      training=training)
  # The final layer is just linear activation; we use the same trick we did
  # with the previous conv layer to produce a single classification.
  dense_final = tf.layers.conv2d(
      inputs=dropout,
      filters=1,
      kernel_size=1,
      data_format='channels_last')
  # Squash all predictions into the range (0, 1): (tanh(x) + 1) / 2
  probs = tf.multiply(tf.add(tf.tanh(dense_final), 1.0), 0.5)
  # We won't bother adding the resize op to the graph unless we're running
  # predictions.
  #
  # In a more mature model, we might use a "deconvolution" here by 4x followed
  # by a slight resize to recover a finer amount of detail. Training this way
  # would require larger (in area) training samples so we could give the
  # transposed convolution op something to learn from.
  if mode == tf.estimator.ModeKeys.PREDICT:
    probs = tf.image.resize_images(
        images=probs,
        size=original_dims)
    # Remove the un-needed channel dimension of 1
    probs = tf.squeeze(probs)
  # When training/evaluating, 1D tensor of shape [N]. When predicting, 3D tensor
  # of shape [N, H, W]
  return probs
# + [markdown] id="jfiC57br4igz" colab_type="text"
# To facilitate easier training/evaluation/prediction, we'll use TensorFlow's estimator API. We're required to
# define a function that the estimator can configure with a mode that will return [estimator specs](https://www.tensorflow.org/api_docs/python/tf/estimator/EstimatorSpec) describing how our model
# should behave depending on the mode.
# + id="ILm_M9ED4sY-" colab_type="code" colab={}
def model_fn(features, labels, mode):
  """tf.estimator model_fn wrapping fcnn for train/eval/predict.

  Returns an EstimatorSpec: predictions (PREDICT), a train op with Adam and
  batch-norm update dependencies (TRAIN), or an accuracy metric (EVAL).
  """
  # Whatever mode we're in, we'll always want to generate predictions from the
  # incoming features.
  probs = fcnn(features, mode)
  # Hard 0/1 class labels from thresholding the probabilities at 0.5.
  predicted_class = tf.cast(tf.greater(probs, 0.5), tf.float32)
  if mode == tf.estimator.ModeKeys.PREDICT:
    # We reshape the predictions into 1D arrays to make writing prediction data
    # into TFRecord files easier
    #
    # We'll need these prediction labels later when we build TFRecord files
    return tf.estimator.EstimatorSpec(mode=mode, predictions = {
        'class_id': tf.reshape(predicted_class, [-1]),
        'probability': tf.reshape(probs, [-1])
    })
  labels = tf.squeeze(labels)
  # Since we're performing a binary classification, we can use a simple loss
  # function.
  loss = tf.losses.mean_squared_error(labels, probs)
  if mode == tf.estimator.ModeKeys.TRAIN:
    # Adaptive moment estimation has been shown to converge faster than plain
    # old gradient descent in CNNs.
    optimizer = tf.train.AdamOptimizer(learning_rate=0.0001)
    # We need the weight updates to perform the minimization step as batch
    # normalization depends on it
    with tf.control_dependencies(tf.get_collection(tf.GraphKeys.UPDATE_OPS)):
      train_op = optimizer.minimize(
          loss=loss,
          global_step=tf.train.get_global_step())
    # Periodically log a batch of predictions next to its labels.
    logging_hook = tf.train.LoggingTensorHook(
        {"batch_predictions" : predicted_class,
         "batch_labels": labels},
        every_n_iter=1000)
    return tf.estimator.EstimatorSpec(
        mode=mode,
        loss=loss,
        train_op=train_op,
        training_hooks=[logging_hook])
  # EVAL mode: report accuracy of the hard classifications.
  eval_metric_ops = {"accuracy": tf.metrics.accuracy(
      labels=labels,
      predictions=predicted_class)
  }
  return tf.estimator.EstimatorSpec(
      mode=mode,
      loss=loss,
      eval_metric_ops=eval_metric_ops)
# + [markdown] id="W1Vv7kyuA46L" colab_type="text"
# Now lets create the model object. Don't forget to replace the paths below with the paths to your own GCS bucket / training / evaluation inputs!
# + id="EsbuDn8FNEnc" colab_type="code" colab={}
tf.logging.set_verbosity(tf.logging.INFO)
# Estimator wraps model_fn and handles checkpointing / session management.
# model_dir is a GCS path; replace it with your own bucket.
auto_classifier = tf.estimator.Estimator(
    model_fn=model_fn,
    model_dir="gs://cfb-batch-export/eeus18/autoclassifier")
# + [markdown] id="i6ZnjiTZBC5s" colab_type="text"
# **And train it!**
# + id="O_Dp5Z0n82c-" colab_type="code" colab={}
# If we want to clear the checkpointed model, we can delete the mode directory
# to start fresh
# # !rm -rf "/autoclassifier"
train_file = 'gs://cfb-batch-export/cars_training.tfrecord.gz'
auto_classifier.train(
    input_fn=lambda: input_fn(fileNames=[train_file]),
    steps=50000)
# + [markdown] id="StKXr5YkBRO5" colab_type="text"
# And evaluate it! Estimator is awesome!
# + id="o7w0JMdVXsgw" colab_type="code" colab={}
# NOTE(review): eval_file is the same file as train_file, so this measures
# accuracy on the training data, not held-out data — confirm this is intended.
eval_file = 'gs://cfb-batch-export/cars_training.tfrecord.gz'
acc = auto_classifier.evaluate(input_fn=lambda: input_fn(
    fileNames=[eval_file],
    numEpochs=1,
    batchSize=100,
    shuffle=False))['accuracy']
# + [markdown] id="h-E9Fz38B6Hk" colab_type="text"
# ## Part 2: Creating / Visualizing Predictions
#
# We'll now need to [export an area on which to perform inference](https://code.earthengine.google.com/3ece5d0b4b2e0f0d4371ba3f5eb5940d). Note we get a "-mixer.json" with our export which we'll leave alone for now. Be sure to export this image at 15cm/px.
#
# We'll define a similar dataset input function as our training / evaluation input function, except we don't carry
# any class labels in, we'll instead predict these.
# + id="CUH2j2NNGgU6" colab_type="code" colab={}
# The default value of side is now 316, as our intent is to create predictions
# for 256x256 image patches with a 30 pixel wide border.
def infer_input_fn(fileNames, side=316, batchSize=100):
  """Input pipeline for inference (no class labels).

  The default side is 316: predictions are made for 256x256 image patches
  carrying a 30 pixel context border on each edge.
  """
  spec = {band: tf.FixedLenFeature([side, side], dtype=tf.float32)
          for band in ('R', 'G', 'B', 'L')}

  def parse_record(serialized):
    parsed = tf.parse_single_example(serialized, spec)
    # Rescale [0, 255] channel values into [0, 1], matching training.
    return {band: tf.divide(values, 255.0) for band, values in parsed.items()}

  dataset = tf.data.TFRecordDataset(fileNames, compression_type='GZIP')
  dataset = dataset.map(parse_record, num_parallel_calls=5).batch(batchSize)
  return dataset.make_one_shot_iterator().get_next()
# + [markdown] id="IgyQYL7tGujG" colab_type="text"
# Lets define a function to take a dictionary of a single patch's predictions and write them to an example. By
# writing examples this way, we'll wind up with an image with 2 bands: 'class_id' and 'probability'
# + id="zIPbYpIsZh-O" colab_type="code" colab={}
def make_example(pred_dict):
  """Pack one patch's predictions into a tf.train.Example.

  Writing examples this way yields an ingestible image with two bands:
  'class_id' and 'probability'.
  """
  def _float_feature(values):
    # Wrap a flat sequence of floats as a TF Feature.
    return tf.train.Feature(float_list=tf.train.FloatList(value=values))

  feature = {
      'class_id': _float_feature(pred_dict['class_id']),
      'probability': _float_feature(pred_dict['probability']),
  }
  return tf.train.Example(features=tf.train.Features(feature=feature))
# + [markdown] id="qay8n5suqktj" colab_type="text"
# Don't forget to replace the paths below with the paths to your prediction inputs!
# + id="9tp26R2HZa2l" colab_type="code" colab={}
# GCS paths of the exported inference patches (replace with your own bucket).
predict_files = ['gs://cfb-batch-export/cars_inference2-00000.tfrecord.gz',
                 'gs://cfb-batch-export/cars_inference2-00001.tfrecord.gz',
                 'gs://cfb-batch-export/cars_inference2-00002.tfrecord.gz',
                 'gs://cfb-batch-export/cars_inference2-00003.tfrecord.gz',
                 'gs://cfb-batch-export/cars_inference2-00004.tfrecord.gz']
# + [markdown] id="O1lE2y3klWEo" colab_type="text"
# We're ready to make our predictions. We'll move our predictions into TFRecord files while following a few constraints
# so that we can re-ingest these files into Earth Engine. Firstly, we must provide as many predictions as there
# were examples in each patch. As each incoming patch has (256+60) x (256+60) examples (pixels), we'll
# need our model to produce 256 x 256 labels. Note we ignore the 30 pixel border for ingesting our predictions as this is only context for classifications of the pixels *(we specified 256, 256 as our patch dimensions in Earth Engine, and a kernel of 61, 61)*.
#
# To avoid too many large files, we'll keep each file to a maximum of 50 patches of inference labels.
# + id="_70T41gGAsGV" colab_type="code" colab={}
# Stream model predictions and shard them into TFRecord files that Earth
# Engine can re-ingest (at most MAX_RECORDS_PER_FILE patches per file).
predictions = auto_classifier.predict(input_fn=lambda: infer_input_fn(
    fileNames=predict_files,
    batchSize=1,
    side=316),
    yield_single_examples=False)
MAX_RECORDS_PER_FILE = 50
output_path = 'gs://cfb-batch-export/labels/cars_labels-{:05}.tfrecord'
# Create the records we'll ingest into EE
file_number = 0
still_writing = True
total_patches = 0
while still_writing:
  file_path = output_path.format(file_number)
  writer = tf.python_io.TFRecordWriter(file_path)
  # BUG FIX: the original used Python 2 print statements and
  # `predictions.next()`, both of which fail under Python 3.
  print("Writing file: {}".format(file_path))
  try:
    written_records = 0
    while True:
      pred_dict = next(predictions)
      writer.write(make_example(pred_dict).SerializeToString())
      written_records += 1
      total_patches += 1
      if written_records % 5 == 0:
        print(" Writing patch: {}".format(written_records))
      if written_records == MAX_RECORDS_PER_FILE:
        break
  except Exception:
    # Stop writing for any exception; reaching the end of the prediction
    # dataset raises (StopIteration / tf.errors.OutOfRangeError). A bare
    # `except:` would also swallow KeyboardInterrupt, so catch Exception.
    still_writing = False
  finally:
    file_number += 1
    writer.close()
# BUG FIX: the original wrote print('...').format(total_patches), calling
# .format() on print()'s return value (None) — an AttributeError in Python 3.
print('Wrote: {} patches.'.format(total_patches))
# + id="qz7Yf4XJS87g" colab_type="code" colab={}
# + [markdown] id="jK_65kp4myEU" colab_type="text"
# With our TFRecords in hand, we're ready to ingest them into Earth Engine. Lets get authorized!
# + id="aMR54CsFeREh" colab_type="code" colab={}
# !pip install earthengine-api
# !earthengine authenticate --quiet
# + [markdown] id="5jTo5l77m_Yv" colab_type="text"
# Be sure to replace *YOUR AUTH HERE* with your auth code!
# + id="lrqRqZfKeiNu" colab_type="code" colab={}
# !earthengine authenticate --frontend=http://localhost:8080 --authorization-code=<KEY>
# + [markdown] id="p3jAR9mYnLv3" colab_type="text"
# We'll now start the ingestion. If you intend on running this yourself, you'll have to replace `cfb-batch-export` with your cloud bucket and provide your own asset id. We'll also need to pass the mixer file we ignored earlier so Earth Engine knows where our labeled patches came from.
# + id="FieIYhG7eob9" colab_type="code" colab={}
# !earthengine upload image --asset_id=users/cfb/badge gs://cfb-batch-export/test_help/tile3_23-00000.tfrecord gs://cfb-batch-export/test_help/tile3_23-00001.tfrecord gs://cfb-batch-export/test_help/tile3_23-00002.tfrecord gs://cfb-batch-export/test_help/tile3_23-00003.tfrecord gs://cfb-batch-export/test_help/tile3_23-mixer.json
# + [markdown] id="jwR7hG8RY5nT" colab_type="text"
# Now that we have some predictions, let's use Earth Engine's powerful image processing to extract a bounding rectangle for each car. Our strategy will be to compute the connected components of the `class_id` band, then reduce the components to vectors from which we can produce a bounding box. [See it done here!](https://code.earthengine.google.com/6da5d95ff658f69a3e2bb645ad9ab11b)
| eeus18_tf_part2.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# language: python
# name: python3
# ---
# ## Simple agent that randomly chooses allowable actions within a silhouette ('viable' actions)
#
# Programming this agent as a baseline.
# If we can bias the agent towards e.g. bigger blocks, that's a plus.
#
# ### input
# Bitmap representation of each stimulus
#
# ### output
# Output block placements as dataframe of same type as action dataframe used in other analyses (dfi) i.e.
# targetName, blockNum, x,y,w,h
#
# ### stability
# Blocks have to be placed on a 'floor', which includes two separated floors (to make a henge)
#
# In the experiment, unstable placements end the trial. We could:
# a) allow the agent to make unstable placements, but end the trial when they do
# b) not allow the agent to consider unstable placements
# Here I go for option b, where possible actions don't include those that would fall
#
# ### action selection
# There are various ways to make a random agent:
# a) enumerate all possible actions (all blocks in all locations), then uniformly select from these.
# b) uniformly select a block, then uniformly select a location.
# c) uniformly select a location, then uniformly select a block that fits there.
# d) uniformly select a block **and** uniformly select a location, reject if not possible.
#
#
# +
from __future__ import division
import numpy as np
import os, sys
from PIL import Image
import os
from os import listdir
from os.path import isfile, join
import urllib, io
os.getcwd()
sys.path.append("..")
sys.path.append("../utils")
proj_dir = os.path.abspath('../..')
from matplotlib import pylab, mlab, pyplot
import matplotlib.pyplot as plt
import matplotlib.image as mpimg
from matplotlib.path import Path
import matplotlib.patches as patches
# %matplotlib inline
import sklearn.metrics as metrics
from IPython.core.pylabtools import figsize, getfigs
import seaborn as sns
import random
from scipy.stats import norm
from IPython.display import clear_output
import numpy as np
import pandas as pd
import os
import json
import copy
import importlib
### Add Paths
## root paths
curr_dir = os.getcwd()
proj_dir = os.path.abspath(os.path.join(curr_dir,'..','..')) ## use relative paths
## add helpers to python path
import sys
if os.path.join(proj_dir, 'stimuli') not in sys.path:
sys.path.append(os.path.join(proj_dir, 'stimuli'))
## custom helper modules
import separation_axis_theorem as sat
#import blockworld_helpers as utils
#import display_world as stability #may want to make a separate module for stability
def cls():
    """Clear the terminal screen (Windows uses 'cls', POSIX uses 'clear')."""
    if os.name == 'nt':
        command = 'cls'
    else:
        command = 'clear'
    os.system(command)
import scoring
# +
## directory & file hierarchy
proj_dir = os.path.abspath('..')
datavol_dir = os.path.join(proj_dir,'data')
analysis_dir = os.path.abspath(os.path.join(os.getcwd(),'..'))
results_dir = os.path.join(proj_dir,'results')
plot_dir = os.path.join(results_dir,'plots')
csv_dir = os.path.join(results_dir,'csv')
json_dir = os.path.join(results_dir,'json')
exp_dir = os.path.abspath(os.path.join(proj_dir,'experiments'))
png_dir = os.path.abspath(os.path.join(datavol_dir,'png'))
jefan_dir = os.path.join(analysis_dir,'jefan')
will_dir = os.path.join(analysis_dir,'will')
## add helpers to python path
if os.path.join(proj_dir,'stimuli') not in sys.path:
sys.path.append(os.path.join(proj_dir,'stimuli'))
if not os.path.exists(results_dir):
os.makedirs(results_dir)
if not os.path.exists(plot_dir):
os.makedirs(plot_dir)
if not os.path.exists(csv_dir):
os.makedirs(csv_dir)
# +
#### Target maps: grab the bitmap representation of each stim
# The eight hand-selected silhouettes used in the experiment.
targets = ['hand_selected_004', 'hand_selected_005', 'hand_selected_006',
           'hand_selected_008', 'hand_selected_009', 'hand_selected_011',
           'hand_selected_012', 'hand_selected_016']
# Map stimulus name -> bitmap (nested lists) exported by the stimulus pipeline.
target_maps = {}
with open(os.path.abspath('../results/csv/targetMaps.txt')) as json_file:
    target_maps = json.load(json_file)
def check_overlap(x, y, w, h, world, mode='inside'):
    """Test a w x h block footprint at (x, y) against a boolean bitmap.

    mode='inside': True iff every cell of the footprint is filled in `world`.
    mode='outside': True iff no cell of the footprint is filled in `world`.
    Any other mode returns None.
    """
    footprint = world[x:(x + w), y:(y + h)]
    if mode == 'inside':
        return np.all(footprint)
    if mode == 'outside':
        return ~np.any(footprint)
    return None
def check_stability(x, y, w, h, world):
    '''
    checks to see if block would be supported without falling using heuristics.
    Does not allow side-supported blocks, which are sometimes possible in the real experiments
    '''
    # Anything resting on the floor is stable.
    if y == 0:
        return True
    # Cells of the row directly beneath the block's footprint.
    below = world[x:(x + w), y - 1].astype(int)
    # More than half of the footprint supported -> stable.
    if below.sum() > w / 2:
        return True
    # A long (width-4) block is also stable when each half has at least one
    # supporting cell (a bridge across two columns).
    if w == 4:
        left_support = below[:2].sum()
        right_support = below[2:].sum()
        if left_support >= 1 and right_support >= 1:
            return True
        return False
    return False
def find_positions(world, block, x_offset=5):
    """Enumerate every (x, y) where `block` could rest inside `world`.

    Floor placements (y=0) require an empty footprint on row 0; elevated
    placements require an empty footprint row with at least one occupied cell
    directly beneath it. x coordinates are shifted by `x_offset` back into
    full-world coordinates.
    """
    width = block['width']
    height = block['height']
    n_cols = world.shape[0] - width + 1
    spots = []
    # Floor placements: the block's footprint on row 0 must be empty.
    for col in range(n_cols):
        if not np.any(world[col:col + width, 0]):
            spots.append({'x': col + x_offset, 'y': 0})
    # Elevated placements: footprint row empty, row directly below occupied.
    for row in range(1, world.shape[1] - height + 1):
        for col in range(n_cols):
            footprint = world[col:col + width, row]
            below = world[col:col + width, row - 1]
            if (not np.any(footprint)) and np.any(below):
                spots.append({'x': col + x_offset, 'y': row})
    return spots
def simulate(targets, niter, verbose=False, provide_actual_target=False):
    """Run the random viable-action agent over each target silhouette.

    For each target and each of `niter` runs, the agent repeatedly shuffles
    the block inventory, shuffles the viable positions for a block, and
    commits the first placement that is (a) inside the silhouette, (b) in
    free space, and (c) stable. A run ends when the silhouette is perfectly
    filled or no block can be placed anywhere.

    Parameters
    ----------
    targets : list of stimulus names (keys into the global `target_maps`) or,
        when provide_actual_target=True, boolean 18x13 bitmaps.
    niter : int, independent runs per target.
    verbose : bool, print step-by-step decisions.
    provide_actual_target : bool, treat `targets` entries as bitmaps directly.

    Returns
    -------
    pandas.DataFrame with one row per committed block placement.
    """
    block_dims = [{'width': 1, 'height': 2},
                  {'width': 2, 'height': 1},
                  {'width': 2, 'height': 2},
                  {'width': 2, 'height': 4},
                  {'width': 4, 'height': 2}]
    block_dims.reverse()
    world_bounds = {'left': 5, 'right': 13}
    columns = ['targetName', 'run', 'blockNum', 'discreteWorld', 'perfect',
               'x', 'y', 'w', 'h']
    # Collect rows in a list and build the DataFrame once at the end:
    # per-row DataFrame.append is quadratic and was removed in pandas 2.0.
    rows = []
    for target in targets:
        if provide_actual_target:
            target_map = target
        else:
            target_map = np.logical_not(np.array(target_maps[target]))
        for run in range(0, niter):
            discrete_world = np.zeros([18, 13]).astype(bool)
            block_num = 0
            completed = False
            tested_all_blocks = False
            # BUG FIX: the original looped on `~completed & ~tested_all_blocks`.
            # On plain Python bools `~True` is -2 (truthy), so `-1 & -2 == -2`
            # stayed truthy after tested_all_blocks was set and an unfillable
            # target hung forever. Use boolean `not`/`and` instead.
            while not completed and not tested_all_blocks:
                placed = False
                random.shuffle(block_dims)
                b = 0
                # keep trying blocks until one is placed or none are left
                while b < len(block_dims) and not placed:
                    # select next block from the shuffled list
                    block = block_dims[b]
                    if verbose: print(" " * 0, 'block:', block)
                    # position-centric: enumerate all positions for that block
                    positions = find_positions(
                        discrete_world[world_bounds['left']:world_bounds['right'], 0:8],
                        block, x_offset=5)
                    if verbose: print(positions)
                    random.shuffle(positions)
                    p = 0
                    # keep trying positions until placed or none left
                    while not placed and p < len(positions):
                        position = positions[p]
                        if verbose: print(" " * 4, 'position:', position)
                        x_loc = position['x']
                        y_loc = position['y']
                        # the block must lie fully inside the silhouette
                        within_silhouette = check_overlap(
                            x_loc, y_loc, block['width'], block['height'],
                            target_map, mode='inside')
                        if verbose: print(" " * 4, 'within silhouette:', within_silhouette)
                        if within_silhouette:
                            # the footprint must be free in the current world
                            free_space = check_overlap(
                                x_loc, y_loc, block['width'], block['height'],
                                discrete_world, mode='outside')
                            if verbose: print(" " * 5, 'free space:', free_space)
                            if free_space:
                                # and the placement must not fall
                                stable = check_stability(
                                    x_loc, y_loc, block['width'], block['height'],
                                    discrete_world)
                                if verbose: print(" " * 6, 'stable:', stable)
                                if stable:
                                    # commit the placement to the world
                                    discrete_world[x_loc:x_loc + block['width'],
                                                   y_loc:y_loc + block['height']] = 1
                                    completed = np.all(np.equal(discrete_world, target_map))
                                    rows.append({'targetName': str(target),
                                                 'run': run,
                                                 'blockNum': block_num,
                                                 'discreteWorld': discrete_world.copy(),
                                                 'perfect': completed,
                                                 'x': x_loc,
                                                 'y': y_loc,
                                                 'w': block['width'],
                                                 'h': block['height']})
                                    if verbose: print(np.rot90(discrete_world.astype(int)))
                                    placed = True
                                    if completed and verbose:
                                        print('completed structure!')
                                    block_num += 1
                                else:
                                    p += 1  # unstable: check next position
                            else:
                                p += 1  # occupied: check next position
                        else:
                            p += 1  # outside silhouette: check next position
                    if p == len(positions):  # if no positions work
                        b += 1  # check next block
                    if b == len(block_dims):
                        if verbose: print('no viable blocks- giving up')
                        tested_all_blocks = True
    return pd.DataFrame(rows, columns=columns)
# -
# Sanity check: build a full 8x8 rectangle target by hand and run one
# verbose simulation on it, then one on a real stimulus.
test_target = np.zeros([18,13]).astype(bool)
#test_target[5:9,0:2] = 1
test_target[5:13,0:8] = 1
#test_target[5:9,2:4] = 1
print(np.rot90(test_target).astype(int))
print(np.rot90(target_maps[targets[0]]).astype(int))
# hand-built bitmap is passed directly (provide_actual_target=True)
df_try = simulate([test_target],1,verbose=True, provide_actual_target=True)
df_try = simulate([targets[0]],1,verbose=True)
# ## Run simulations
# 1000 runs of the random agent per target; persist placements to csv.
df_random_agent = simulate(targets,1000,verbose=False)
df_random_agent
out_path = os.path.join(csv_dir,'block_silhouette_initial_random_agent.csv')
df_random_agent.to_csv(out_path)
# +
import ast
import seaborn as sns
sns.set_context('talk')
sns.set_style('darkgrid')
from IPython.display import clear_output
import warnings
warnings.filterwarnings("ignore", category=DeprecationWarning)
warnings.filterwarnings("ignore", message="numpy.dtype size changed")
warnings.filterwarnings("ignore", message="numpy.ufunc size changed")
import plotly
import plotly.graph_objects as go
import plotly.io as pio
pio.orca.config.use_xvfb = True
plotly.io.orca.config.save()
import importlib
import trajectory as g
# -
#df_random_agent['discreteWorld']
# Score each placement against its target (F1 over cells) and convert the
# boolean world bitmaps to 0/1 ints for downstream serialization/plotting.
df_random_agent['rawF1DiscreteScore'] = df_random_agent.apply(scoring.get_f1_score_lambda, axis = 1)
df_random_agent['discreteWorld'] = df_random_agent['discreteWorld'].apply(lambda a: a*1)
df_random_agent
# +
# Re-declare targets and add the columns trajectory plotting expects
# (gameID / phase_extended mirror the human-experiment dataframes).
targets = ['hand_selected_004', 'hand_selected_005', 'hand_selected_006',
           'hand_selected_008', 'hand_selected_009', 'hand_selected_011',
           'hand_selected_012', 'hand_selected_016']
df_random_agent['gameID'] = df_random_agent['run']
df_random_agent['phase_extended'] = 'random agent'
# Flatten each world to a 0/1 vector with filled cells mapped to 0 (1 - cell).
df_random_agent['flatDiscreteWorld'] = df_random_agent['discreteWorld'].apply(lambda a: (1+(-1)*np.array(a)).flatten())
importlib.reload(g) ## reimport graph utils
make_plot = True
if make_plot:
    phases = ['simulation']
    for this_target in targets:
        for this_phase in phases:
            # One trajectory graph per (target, phase); save=False keeps
            # output inline rather than writing to out_dir.
            g.plot_trajectory_graph(data = df_random_agent,
                                    target_name = this_target,
                                    phase = this_phase,
                                    save = False,
                                    out_dir = plot_dir,
                                    extension = 'test',
                                    x_lower_bound = 4,
                                    x_upper_bound = 13,
                                    edge_width_scale_factor = 0.4,
                                    node_size_scale_factor = 0.4)
# -
# ## Load csvs from multithreaded agent
# +
import glob

# Gather the per-thread result csvs written by the multithreaded agent and
# concatenate them into one frame.
in_path = os.path.join(csv_dir,'agent_results/')
all_files = glob.glob(in_path + "/*100_thread_*.csv")
# IDIOM FIX: the original used a list comprehension purely for its print()
# side effect, allocating a throwaway list of Nones; use a plain loop.
for file in all_files:
    print(file)
li = []
for filename in all_files:
    batch = pd.read_csv(filename, index_col=None, header=0)
    li.append(batch)
df = pd.concat(li, axis=0, ignore_index=True)
# -
# Runs recovered per target (runID is unique within a target).
df.groupby('targetName')['runID'].nunique()
df
# ! git push
| analysis/will/blockworld_random_agent.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# language: python
# name: python3
# ---
# # Data Understanding
# ### Import Libraries
# +
# %matplotlib inline
import pandas as pd
import numpy as np
from datetime import datetime
# -
pd.set_option("display.max_rows", 500)
# Johns Hopkins CSSE global confirmed-cases time series: one row per region,
# date columns from column index 4 onward.
dataPath_Raw = ("../data/raw/COVID-19/csse_covid_19_data/csse_covid_19_time_series/time_series_covid19_confirmed_global.csv")
dataPath_Processed = ("../data/processed/")
pd_raw = pd.read_csv(dataPath_Raw)
pd_raw.head();
# ### Extract the columns
pd_raw.columns[:];
# Date columns start at index 4 (after Province/State, Country/Region, Lat, Long).
time_idX = pd_raw.columns[4:]
# +
df_plot = pd.DataFrame({
    'date':time_idX})
df_plot.head();
# -
len(pd_raw['Country/Region'].unique())
pd_raw['Country/Region'].unique()
# ### Define a DF with Country as a Column Name
pd_raw[pd_raw['Country/Region']=="Afghanistan"];
# Sum over provinces to get one national time series per country.
pd_raw[pd_raw['Country/Region']=="India"].iloc[:,4::].sum(axis=0);
country_list = [
    'Italy',
    'US',
    'Spain',
    'Germany',
    'India',
    'Korea, South',
    'China',
    'Brazil'
]
# One column per country: cumulative confirmed cases, provinces summed.
for i in country_list:
    df_plot[i] = np.array(pd_raw[pd_raw['Country/Region'] == i].iloc[:,4::].sum(axis=0))
df_plot;
df_plot.set_index('date').plot()
# ### Data Type Date
df_plot.head()
#
#
#
df_plot.date[0]
datetime.strptime(df_plot.date[0], "%m/%d/%y")
# +
#Convert the String to DateTime object
time_idX = [datetime.strptime(i, "%m/%d/%y") for i in df_plot.date]
# Converting back to ISO (str)
# NOTE(review): time_str is computed but never used below — confirm whether
# the ISO strings were meant to go into the saved CSV.
time_str = [i.strftime("%y-%m-%d") for i in time_idX]
#time_idX
# +
#time_idX[0:5]
#type(time_idX)
# -
# Replace the string dates with datetime objects in the plotting frame.
df_plot['date']=time_idX
type(df_plot["date"][0])
df_plot.head()
# Persist the tidy table for the downstream notebooks.
df_plot.to_csv(dataPath_Processed + "COVID_Flat_Table.csv", sep=",", index=False)
| notebooks/.ipynb_checkpoints/1_Data_Preparation-checkpoint.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# language: python
# name: python3
# ---
# # Dano's braindecode notebook
#
# In this notebook, I try and use the braindecode Experiment quickstart to learn some neurodoro data
import pandas as pd
import numpy as np
import matplotlib.pyplot as plt
import seaborn as sns
import sklearn as sk
from os import walk
from os import listdir
from os.path import isfile, join
from sklearn.model_selection import train_test_split
from sklearn.pipeline import make_pipeline
from sklearn.neighbors import KNeighborsRegressor
from tpot import TPOTRegressor
from math import sqrt
import pyriemann
from mne import create_info, concatenate_raws
from mne.io import RawArray
from mne.channels import read_montage
from glob import glob
from pyriemann.utils.viz import plt, plot_confusion_matrix, plot_embedding
from pyriemann.estimation import Covariances, HankelCovariances
from pyriemann.tangentspace import TangentSpace
from pyriemann.clustering import Potato
# +
# Data has been collected, let's import it
data = pd.read_csv("../muse-data/josh_sep_21RawEEG2.csv", header=0, index_col=False)
# +
# Remove all the early epochs before the experiment started
# (Performance == 0 marks samples recorded before the task began)
data = data[data.Performance != 0]
# +
# State is handcoded in right now to make this a classification problem. We should work an algorithm to compute this state from performance and difficulty
# State 1 = default; 2 = high difficulty (> 40); 3 = everything from row
# 52000 on. NOTE(review): the 52000 cutoff is specific to this recording —
# confirm before reusing with other data files.
data['State'] = 1
data.loc[data.Difficulty > 40, 'State'] = 2
data.iloc[52000:, data.columns.get_loc('State')] = 3
# rearrange columns, remove timestamp
data = data[['Difficulty', 'Performance', 'State', 'Channel 1', 'Channel 2', 'Channel 3', 'Channel 4']]
# +
# %matplotlib inline
# Quick visual check of the three label signals over time.
data.Difficulty.plot()
data.Performance.plot()
data.State.plot()
# +
sfreq = 256  # Muse EEG sample rate (Hz)
# name of each channels
ch_names = ['Diff', 'Perf', 'State', 'TP9', 'FP1', 'FP2', 'TP10']
# type of each channels
# the first three columns are labels, carried as stim channels so they stay
# aligned with the EEG through epoching
ch_types = ['stim'] * 3 + ['eeg'] * 4
montage = read_montage('standard_1005')
# get data
# MNE expects channels x samples, so transpose the (samples x channels) frame
data = data.T
data
# convert in Volts (from uVolts)
#data[:-1] *= 1e-6
# create mne objects
info = create_info(ch_names=ch_names, ch_types=ch_types, sfreq=sfreq, montage=montage)
raw = (RawArray(data=data, info=info))
# +
## Plot the PSD of the EEG data just to make sure it looks alright
raw.plot_psd(picks=[3]);
# -
# Band-pass 2-50 Hz (IIR) on the four EEG channels only
raw.filter(2, 50, method='iir',picks=([3,4,5,6]))
# ## Epochs
# +
from mne import make_fixed_length_events, Epochs
# Make an events array with epoch times every .5 seconds
event = make_fixed_length_events(raw, 1, duration=0.5)
# Make an epochs array object from the raw dataset with events array event, length of 2 seconds
# (overlapping 2 s windows every 0.5 s; epochs whose EEG amplitude exceeds
# the rejection threshold are dropped)
epochs = Epochs(raw, event, tmin=0, tmax=2, preload=True, reject={'eeg': 200})
epochs
# +
# X: EEG channels only; y: mean of each stim (label) channel over the epoch
X = epochs.copy().pick_types(eeg=True).get_data()
y = epochs.copy().pick_types(eeg=False, stim=True).get_data().mean(axis=2)
print(X.shape, y.shape)
# +
# The labels matrix has three columns, Difficulty (0) and Performance (1) and State (2)
# We'll need to use our State variable for decode (it only does classification not regression rn)
y = y[:,2]
# -
# Round each epoch's mean state to the nearest integer class label
y = np.round(y)
# +
# Convert to braindecode format
from braindecode.datautil.signal_target import SignalAndTarget
from braindecode.datautil.splitters import split_into_two_sets
# Scale back to microvolts and use the dtypes braindecode expects.
X = (X* 1e6).astype(np.float32)
# BUG FIX: np.int was deprecated in NumPy 1.20 and removed in 1.24; use the
# concrete integer dtype instead (platform int was int64 here anyway).
y = y.astype(np.int64)
# Hold out a third of the epochs for testing, then carve a validation split
# off the training portion (80/20).
X_train, X_test, y_train, y_test = train_test_split(X, y, test_size=0.33, random_state=42)
train_set = SignalAndTarget(X_train, y=y_train)
test_set = SignalAndTarget(X_test, y=y_test)
train_set, valid_set = split_into_two_sets(train_set, first_set_fraction=0.8)
# -
# Display the training labels for a sanity check
y_train
# +
# Create the model
from braindecode.models.shallow_fbcsp import ShallowFBCSPNet
from torch import nn
from braindecode.torch_ext.util import set_random_seeds
from braindecode.models.util import to_dense_prediction_model
import torch as th
cuda = th.cuda.is_available()
# NOTE(review): seeds are set with cuda=False even when a GPU is available —
# confirm whether this should be `cuda=cuda` for reproducible GPU runs.
set_random_seeds(seed=20170629, cuda=False)
# This will determine how many crops are processed in parallel
input_time_length = 450
n_classes = 4
in_chans = train_set.X.shape[1]
# final_conv_length determines the size of the receptive field of the ConvNet
model = ShallowFBCSPNet(in_chans=in_chans, n_classes=n_classes, input_time_length=input_time_length,
                        final_conv_length=12).create_network()
# Convert to a dense-prediction (cropped decoding) model
to_dense_prediction_model(model)
if cuda:
    model.cuda()
# +
from torch import optim
optimizer = optim.Adam(model.parameters())
# -
from braindecode.torch_ext.util import np_to_var
# determine output size
# Push a dummy batch through the net to find how many predictions it emits
# per input window of length input_time_length.
test_input = np_to_var(np.ones((2, in_chans, input_time_length, 1), dtype=np.float32))
if cuda:
    test_input = test_input.cuda()
out = model(test_input)
n_preds_per_input = out.cpu().data.numpy().shape[2]
print("{:d} predictions per input/trial".format(n_preds_per_input))
# +
from braindecode.experiments.experiment import Experiment
from braindecode.datautil.iterators import CropsFromTrialsIterator
from braindecode.experiments.monitors import RuntimeMonitor, LossMonitor, CroppedTrialMisclassMonitor, MisclassMonitor
from braindecode.experiments.stopcriteria import MaxEpochs
import torch.nn.functional as F
from braindecode.torch_ext.modules import Expression
# Iterator is used to iterate over datasets both for training
# and evaluation
iterator = CropsFromTrialsIterator(batch_size=32,input_time_length=input_time_length,
                                   n_preds_per_input=n_preds_per_input)
# Loss function takes predictions as they come out of the network and the targets
# and returns a loss
# (predictions are averaged over the crop/time axis before the NLL loss)
loss_function = lambda preds, targets: F.nll_loss(th.mean(preds, dim=2, keepdim=False), targets)
# Could be used to apply some constraint on the models, then should be object
# with apply method that accepts a module
model_constraint = None
# Monitors log the training progress
monitors = [LossMonitor(), MisclassMonitor(col_suffix='sample_misclass'),
            CroppedTrialMisclassMonitor(input_time_length), RuntimeMonitor(),]
# Stop criterion determines when the first stop happens
stop_criterion = MaxEpochs(20)
# Early stopping keys off validation misclassification; training resumes on
# train+valid after the first stop (run_after_early_stop=True).
exp = Experiment(model, train_set, valid_set, test_set, iterator, loss_function, optimizer, model_constraint,
                 monitors, stop_criterion, remember_best_column='valid_misclass',
                 run_after_early_stop=True, batch_modifier=None, cuda=cuda)
# -
# need to setup python logging before to be able to see anything
import logging
import sys
logging.basicConfig(format='%(asctime)s %(levelname)s : %(message)s',
level=logging.DEBUG, stream=sys.stdout)
exp.run()
# Best Epoch:
# ```
# 2017-09-22 11:15:15,655 INFO : Epoch 6
# 2017-09-22 11:15:15,656 INFO : train_loss 0.55220
# 2017-09-22 11:15:15,661 INFO : valid_loss 0.17202
# 2017-09-22 11:15:15,664 INFO : test_loss 0.55220
# 2017-09-22 11:15:15,665 INFO : train_sample_misclass 0.19732
# 2017-09-22 11:15:15,675 INFO : valid_sample_misclass 0.05496
# 2017-09-22 11:15:15,683 INFO : test_sample_misclass 0.19732
# 2017-09-22 11:15:15,686 INFO : train_misclass 0.13667
# 2017-09-22 11:15:15,692 INFO : valid_misclass 0.00833
# 2017-09-22 11:15:15,695 INFO : test_misclass 0.13667
# 2017-09-22 11:15:15,698 INFO : runtime 11.22553
#
# ```
| classifier/.ipynb_checkpoints/Dano's decode notebook-checkpoint.ipynb |
try:
import openmdao.api as om
import dymos as dm
except ImportError:
# !python -m pip install openmdao[notebooks]
# !python -m pip install dymos[docs]
import openmdao.api as om
import dymos as dm
# # SSTO Lunar Ascent with Polynomial Controls
#
# This example demonstrates the use of polynomial controls in Dymos.
# Polynomial controls define the control profile as a *single* polynomial
# across the entire phase where the control values are specified at the
# Legendre Gauss Lobatto (LGL) nodes in *phase dimensionless time*. These
# controls can be of any arbitrary order greater than 1 (linear).
#
# We've already demonstrated that the optimal single stage ascent in the
# absence of an atmosphere follows the linear tangent guidance law. In
# this example, we'll change the control parameterization such that
# $\tan \theta$ is provided by a polynomial control of order 1. The LGL
# nodes of a first order polynomial are the endpoints of the phase, thus
# the optimizer will be governing the value of $\tan \theta$ at the
# initial and final times of the phase, and the Dymos will interpolate the
# values of $\tan \theta$ to all other nodes in the Phase.
#
# This example is equivalent to the previous linear tangent example in
# that we've reduced the problem from finding the appropriate control
# value at all nodes to that of finding the optimal value of just two
# quantities. But instead of optimizing the slope and intercept given by
# the parameters $a$ and $b$, we're parameterizing the control using the
# endpoint values of the linear polynomial.
#
# Now the guidance comp needs to convert the inverse tangent of the
# current value of the polynomial controls.
#
# $$\theta = \arctan{p}$$
#
# ## Solving the problem
# +
import numpy as np
import matplotlib.pyplot as plt
import openmdao.api as om
import dymos as dm
g = 1.61544 # lunar gravity, m/s**2
class LaunchVehicle2DEOM(om.ExplicitComponent):
    """
    Simple 2D Cartesian Equations of Motion for a launch vehicle subject to thrust and drag.

    The drag terms are commented out in compute/compute_partials, so in this
    form the dynamics are thrust + constant gravity only (appropriate for an
    airless lunar ascent).  All inputs/outputs are vectorized over num_nodes.
    """
    def initialize(self):
        # Number of nodes at which the EOM are evaluated (set by the transcription).
        self.options.declare('num_nodes', types=int)

    def setup(self):
        """Declare the vectorized inputs/outputs and the sparse partials."""
        nn = self.options['num_nodes']
        # Inputs
        self.add_input('vx',
                       val=np.zeros(nn),
                       desc='x velocity',
                       units='m/s')
        self.add_input('vy',
                       val=np.zeros(nn),
                       desc='y velocity',
                       units='m/s')
        self.add_input('m',
                       val=np.zeros(nn),
                       desc='mass',
                       units='kg')
        self.add_input('theta',
                       val=np.zeros(nn),
                       desc='pitch angle',
                       units='rad')
        self.add_input('thrust',
                       val=2100000 * np.ones(nn),
                       desc='thrust',
                       units='N')
        self.add_input('Isp',
                       val=265.2 * np.ones(nn),
                       desc='specific impulse',
                       units='s')
        # Outputs
        self.add_output('xdot',
                        val=np.zeros(nn),
                        desc='velocity component in x',
                        units='m/s')
        self.add_output('ydot',
                        val=np.zeros(nn),
                        desc='velocity component in y',
                        units='m/s')
        self.add_output('vxdot',
                        val=np.zeros(nn),
                        desc='x acceleration magnitude',
                        units='m/s**2')
        self.add_output('vydot',
                        val=np.zeros(nn),
                        desc='y acceleration magnitude',
                        units='m/s**2')
        self.add_output('mdot',
                        val=np.zeros(nn),
                        desc='mass rate of change',
                        units='kg/s')
        # Setup partials
        # Every partial is diagonal: node i's rates depend only on node i's inputs.
        ar = np.arange(self.options['num_nodes'])
        self.declare_partials(of='xdot', wrt='vx', rows=ar, cols=ar, val=1.0)
        self.declare_partials(of='ydot', wrt='vy', rows=ar, cols=ar, val=1.0)
        self.declare_partials(of='vxdot', wrt='vx', rows=ar, cols=ar)
        self.declare_partials(of='vxdot', wrt='m', rows=ar, cols=ar)
        self.declare_partials(of='vxdot', wrt='theta', rows=ar, cols=ar)
        self.declare_partials(of='vxdot', wrt='thrust', rows=ar, cols=ar)
        self.declare_partials(of='vydot', wrt='m', rows=ar, cols=ar)
        self.declare_partials(of='vydot', wrt='theta', rows=ar, cols=ar)
        self.declare_partials(of='vydot', wrt='vy', rows=ar, cols=ar)
        self.declare_partials(of='vydot', wrt='thrust', rows=ar, cols=ar)
        self.declare_partials(of='mdot', wrt='thrust', rows=ar, cols=ar)
        self.declare_partials(of='mdot', wrt='Isp', rows=ar, cols=ar)

    def compute(self, inputs, outputs):
        """Compute the state rates from the current state, pitch angle, thrust and Isp."""
        theta = inputs['theta']
        cos_theta = np.cos(theta)
        sin_theta = np.sin(theta)
        vx = inputs['vx']
        vy = inputs['vy']
        m = inputs['m']
        F_T = inputs['thrust']
        Isp = inputs['Isp']
        outputs['xdot'] = vx
        outputs['ydot'] = vy
        outputs['vxdot'] = F_T * cos_theta / m
        # Gravity acts only in -y; g is the module-level lunar constant.
        outputs['vydot'] = F_T * sin_theta / m - g
        # NOTE(review): mass flow uses the *lunar* g with Isp; the conventional
        # definition uses Earth standard gravity g0.  Appears deliberate for
        # this example (thrust is scaled by the same constant) — confirm.
        outputs['mdot'] = -F_T / (g * Isp)

    def compute_partials(self, inputs, jacobian):
        """Analytic derivatives matching the rate equations in compute()."""
        theta = inputs['theta']
        cos_theta = np.cos(theta)
        sin_theta = np.sin(theta)
        m = inputs['m']
        F_T = inputs['thrust']
        Isp = inputs['Isp']
        # jacobian['vxdot', 'vx'] = -CDA * rho * vx / m
        jacobian['vxdot', 'm'] = -(F_T * cos_theta) / m ** 2
        jacobian['vxdot', 'theta'] = -(F_T / m) * sin_theta
        jacobian['vxdot', 'thrust'] = cos_theta / m
        # jacobian['vydot', 'vy'] = -CDA * rho * vy / m
        jacobian['vydot', 'm'] = -(F_T * sin_theta) / m ** 2
        jacobian['vydot', 'theta'] = (F_T / m) * cos_theta
        jacobian['vydot', 'thrust'] = sin_theta / m
        jacobian['mdot', 'thrust'] = -1.0 / (g * Isp)
        jacobian['mdot', 'Isp'] = F_T / (g * Isp ** 2)
class LaunchVehicleLinearTangentODE(om.Group):
    """
    The LaunchVehicleLinearTangentODE for this case consists of a guidance component and
    the EOM. Guidance is simply an OpenMDAO ExecComp which computes the arctangent of the
    tan_theta variable.
    """
    def initialize(self):
        # Number of nodes at which the ODE is evaluated (set by the transcription).
        self.options.declare('num_nodes', types=int,
                             desc='Number of nodes to be evaluated in the RHS')

    def setup(self):
        """Build the guidance (theta = arctan(tan_theta)) and EOM subsystems and wire them."""
        nn = self.options['num_nodes']
        self.add_subsystem('guidance', om.ExecComp('theta=arctan(tan_theta)',
                                                   theta={'value': np.ones(nn),
                                                          'units': 'rad'},
                                                   tan_theta={'value': np.ones(nn)}))
        self.add_subsystem('eom', LaunchVehicle2DEOM(num_nodes=nn))
        # Feed the guidance pitch angle into the equations of motion.
        self.connect('guidance.theta', 'eom.theta')
#
# Setup and solve the optimal control problem
#
p = om.Problem(model=om.Group())
traj = p.model.add_subsystem('traj', dm.Trajectory())
# Radau pseudospectral transcription: 20 uncompressed segments of order 3.
phase = dm.Phase(ode_class=LaunchVehicleLinearTangentODE,
                 transcription=dm.Radau(num_segments=20, order=3, compressed=False))
traj.add_phase('phase0', phase)
phase.set_time_options(fix_initial=True, duration_bounds=(10, 1000), units='s')
#
# Set the state options. We include rate_source, units, and targets here since the ODE
# is not decorated with their default values.
#
phase.add_state('x', fix_initial=True, lower=0, rate_source='eom.xdot', units='m')
phase.add_state('y', fix_initial=True, lower=0, rate_source='eom.ydot', units='m')
phase.add_state('vx', fix_initial=True, lower=0, rate_source='eom.vxdot',
                units='m/s', targets=['eom.vx'])
phase.add_state('vy', fix_initial=True, rate_source='eom.vydot',
                units='m/s', targets=['eom.vy'])
phase.add_state('m', fix_initial=True, rate_source='eom.mdot',
                units='kg', targets=['eom.m'])
#
# The tangent of theta is modeled as a linear polynomial over the duration of the phase.
#
# order=1 means the optimizer controls only the two endpoint values of tan(theta).
phase.add_polynomial_control('tan_theta', order=1, units=None, opt=True,
                             targets=['guidance.tan_theta'])
#
# Parameters values for thrust and specific impulse are design parameters. They are
# provided by an IndepVarComp in the phase, but with opt=False their values are not
# design variables in the optimization problem.
#
phase.add_parameter('thrust', units='N', opt=False, val=3.0 * 50000.0 * 1.61544,
                    targets=['eom.thrust'])
# NOTE(review): an Isp of 1.0E6 s is unphysically large — it effectively makes
# the vehicle mass constant over the ascent; confirm that is intentional.
phase.add_parameter('Isp', units='s', opt=False, val=1.0E6, targets=['eom.Isp'])
#
# Set the boundary constraints. These are all states which could also be handled
# by setting fix_final=True and including the correct final value in the initial guess.
#
phase.add_boundary_constraint('y', loc='final', equals=1.85E5, linear=True)
phase.add_boundary_constraint('vx', loc='final', equals=1627.0)
phase.add_boundary_constraint('vy', loc='final', equals=0)
# Minimize the final time.
phase.add_objective('time', index=-1, scaler=0.01)
#
# Add theta as a timeseries output since it's not included by default.
#
phase.add_timeseries_output('guidance.theta', units='deg')
#
# Set the optimizer
#
p.driver = om.pyOptSparseDriver()
p.driver.options['optimizer'] = 'SLSQP'
p.driver.declare_coloring()
#
# We don't strictly need to define a linear solver here since our problem is entirely
# feed-forward with no iterative loops. It's good practice to add one, however, since
# failing to do so can cause incorrect derivatives if iterative processes are ever
# introduced to the system.
#
p.model.linear_solver = om.DirectSolver()
p.setup(check=True)
#
# Assign initial guesses for the independent variables in the problem.
#
p['traj.phase0.t_initial'] = 0.0
p['traj.phase0.t_duration'] = 500.0
# States are interpolated linearly between the endpoint guesses below.
p['traj.phase0.states:x'] = phase.interp('x', [0, 350000.0])
p['traj.phase0.states:y'] = phase.interp('y', [0, 185000.0])
p['traj.phase0.states:vx'] = phase.interp('vx', [0, 1627.0])
p['traj.phase0.states:vy'] = phase.interp('vy', [1.0E-6, 0])
p['traj.phase0.states:m'] = phase.interp('m', [50000, 50000])
# Endpoint values of the order-1 polynomial control: vertical at liftoff,
# horizontal at burnout.
p['traj.phase0.polynomial_controls:tan_theta'] = [[0.5 * np.pi], [0.0]]
#
# Solve the problem.
#
dm.run_problem(p)
#
# Get the explicitly simulated results
# (time-marching integration of the optimized control profile as a sanity check)
#
exp_out = traj.simulate()
#
# Plot the results
#
fig, axes = plt.subplots(nrows=2, ncols=1, figsize=(10, 8))
# Top panel: trajectory (range vs altitude), collocation solution vs simulation.
axes[0].plot(p.get_val('traj.phase0.timeseries.states:x'),
             p.get_val('traj.phase0.timeseries.states:y'),
             marker='o',
             ms=4,
             linestyle='None',
             label='solution')
axes[0].plot(exp_out.get_val('traj.phase0.timeseries.states:x'),
             exp_out.get_val('traj.phase0.timeseries.states:y'),
             marker=None,
             linestyle='-',
             label='simulation')
axes[0].set_xlabel('range (m)')
axes[0].set_ylabel('altitude (m)')
axes[0].set_aspect('equal')
# Bottom panel: pitch-angle history.
axes[1].plot(p.get_val('traj.phase0.timeseries.time'),
             p.get_val('traj.phase0.timeseries.theta'),
             marker='o',
             ms=4,
             linestyle='None')
axes[1].plot(exp_out.get_val('traj.phase0.timeseries.time'),
             exp_out.get_val('traj.phase0.timeseries.theta'),
             linestyle='-',
             marker=None)
axes[1].set_xlabel('time (s)')
axes[1].set_ylabel('theta (deg)')
plt.suptitle('Single Stage to Orbit Solution Using Polynomial Controls')
fig.legend(loc='lower center', ncol=2)
plt.show()
# +
from openmdao.utils.assert_utils import assert_near_equal
#
# Check the results.
#
# Final (burnout) time should be ~481 s, within 1%.
assert_near_equal(p.get_val('traj.phase0.timeseries.time')[-1], 481, tolerance=0.01)
| docs/examples/ssto_moon_polynomial_controls/ssto_moon_polynomial_controls.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# language: python
# name: python3
# ---
# +
import warnings
warnings.filterwarnings( 'ignore' )
import gc
import os
import time
import numpy as np
import pandas as pd
import pickle
from sklearn.metrics import accuracy_score, precision_score, recall_score
from sklearn.metrics import f1_score, confusion_matrix
from sklearn.model_selection import StratifiedKFold, train_test_split
from sklearn.feature_extraction.text import TfidfVectorizer
import tensorflow as tf
import keras.backend as K
from keras.models import load_model
from keras.preprocessing import text, sequence
from tqdm import tqdm_notebook
import re
from nltk.corpus import stopwords
from nltk.tokenize import word_tokenize
from nltk.stem import WordNetLemmatizer
from nltk.util import ngrams
from tensorflow.keras.preprocessing.text import Tokenizer
from tensorflow.keras.preprocessing.sequence import pad_sequences
import warnings
warnings.filterwarnings( 'ignore' )
from sklearn.naive_bayes import BernoulliNB
from sklearn.ensemble import RandomForestClassifier
from sklearn.linear_model import LogisticRegression
from sklearn.svm import SVC
#from thundersvm import SVC
from sklearn.metrics import confusion_matrix, roc_auc_score, accuracy_score
from keras.models import Model
from keras.layers import Input, Embedding
from keras.layers import CuDNNGRU, CuDNNLSTM, Conv1D, Conv2D, Dense, Bidirectional, GRU, LSTM, MaxPool1D
from keras.layers import SpatialDropout1D, Dropout, Concatenate, concatenate, Softmax, Flatten, Reshape
from keras.layers import GlobalMaxPooling1D, GlobalAveragePooling1D, GlobalMaxPooling2D, GlobalAveragePooling2D
from keras.utils import multi_gpu_model
from keras.optimizers import *
# -
# from tensorflow.random import set_seed
# from numpy.random import seed
# import random
#
#
# seed_value = 0
# set_seed(seed_value)
# seed(seed_value)
# os.environ['PYTHONHASHSEED'] = str(seed_value)
# random.seed(seed_value)
import All_RUT_Models
import RUT_Utils
# +
# hyper parameters for this model
penalty = 'l2'
C = 18
solver = 'newton-cg'
class_weight='balanced'
# +
# Output locations for the pickled model and the result files.
modelname = 'LR'
modelpath = './Models/' + modelname + '/'
# exist_ok=True is equivalent to the original "if not exists: makedirs" guard
# but without the check-then-create race condition.
os.makedirs( modelpath, exist_ok=True )
os.makedirs( './Results/', exist_ok=True )
# -
def hms_string(sec_elapsed):
    """Format a non-negative elapsed time in seconds as ``H:MM:SS.ss``."""
    hours, remainder = divmod(sec_elapsed, 60 * 60)
    minutes = remainder // 60
    seconds = sec_elapsed % 60
    return "{}:{:>02}:{:>05.2f}".format(int(hours), int(minutes), seconds)
# +
def convert_lower_case(data):
    """Coerce *data* to ``str`` and lower-case it (vectorised via numpy.char)."""
    text = str(data)
    return np.char.lower(text)
def remove_stop_words(data):
    """Tokenize *data*, drop English stopwords and short tokens, lemmatize the rest.

    Only tokens longer than 3 characters that are not stopwords survive.
    Returns a string whose kept tokens are each prefixed by a single space
    (so the result starts with a space when non-empty), matching the
    original concatenation behaviour.
    """
    # set() makes each membership test O(1) instead of scanning a list.
    stop_words = set(stopwords.words('english'))
    lemmatizer = WordNetLemmatizer()
    words = word_tokenize(str(data))
    # Collect pieces and join once — avoids quadratic string concatenation.
    pieces = []
    for w in words:
        if w not in stop_words and len(w) > 3:
            pieces.append(" " + lemmatizer.lemmatize(w))
    return "".join(pieces)
def remove_punctuation(data):
    """Replace punctuation in the input with spaces; commas are removed outright."""
    symbols = "!\"#$%&()*+-./:;<=>?@[\]^_`{|}~\n"
    for ch in symbols:
        data = np.char.replace(data, ch, ' ')
    # NOTE(review): the replacement below is a no-op as written; it looks like
    # it was intended to collapse double spaces — confirm.
    data = np.char.replace(data, " ", " ")
    data = np.char.replace(data, ',', '')
    return data
def remove_apostrophe(data):
    """Strip apostrophes from the input text (e.g. "don't" -> "dont")."""
    cleaned = np.char.replace(data, "'", "")
    return cleaned
def preprocess(data):
    """Run the full text-cleaning pipeline on *data*.

    Steps: lower-case, replace punctuation with spaces (commas removed
    inside remove_punctuation), strip apostrophes, then drop stopwords
    and lemmatize the remaining tokens.
    """
    text = convert_lower_case(data)
    text = remove_punctuation(text)
    text = remove_apostrophe(text)
    text = remove_stop_words(text)
    return text
def get_tokens(dataframe, column):
    """word-tokenize every entry of *column* in *dataframe*.

    Returns a list of token lists, one per row, with a tqdm progress bar.
    """
    return [word_tokenize(str(entry))
            for entry in tqdm_notebook(dataframe[column][:])]
# +
# Load the Wikipedia debiasing training data; drop rows with missing values
# and round fractional toxicity scores to hard 0/1 labels.
train_data = pd.read_csv('data\\wiki_debias_train.csv')  # NOTE: Windows-style path separator
train_data = train_data.dropna(axis = 0)
#train_data = train_data.sample(n=100000, random_state=0)
train_data['toxicity'] = train_data['toxicity'].round()
# Test data uses string labels; map BAD/NOT_BAD to 1/0.
df_test = pd.read_csv('test_data.csv')
df_test = df_test.dropna(axis = 0)
df_test.loc[df_test['Label'] == 'BAD', 'Label'] = 1
df_test.loc[df_test['Label'] == 'NOT_BAD', 'Label'] = 0
train_feature = get_tokens(train_data, 'comment')
train_label = train_data['toxicity']
test_feature = get_tokens(df_test, 'Text')
test_label = df_test['Label']
# Harvest identity terms from very short test comments: any comment with fewer
# than 3 whitespace tokens is assumed to have the identity term second.
# NOTE(review): _comment[1] raises IndexError on single-token comments —
# presumably the data guarantees two tokens; confirm.
identity_terms = []
for i in tqdm_notebook(range(len(df_test['Text']))):
    _comment = df_test.loc[i, 'Text'].split(" ")
    if len(_comment) < 3:
        _term = _comment[1]
        identity_terms.append(_term)
identity_terms = list(set(identity_terms))
# Tag every test row with one identity term it contains, or NaN when none.
terms = []
for i in range(len(df_test['Text'])):
    _text = df_test.loc[i, 'Text'].split(' ')
    _term = list(set(_text).intersection(set(identity_terms)))
    if len(_term) > 0:
        terms.append(_term[0])
    else:
        terms.append(np.nan)
df_test['Identity_Terms'] = terms
# +
# TF-IDF features, fitted on the raw training comments only.
vectorizer = TfidfVectorizer()
vectorizer.fit( train_data['comment'] )
xtrain = vectorizer.transform( train_data['comment'].values )
xtest = vectorizer.transform( df_test[ 'Text' ].values )
ytrain = train_data[ 'toxicity' ].values
ytest = df_test[ 'Label' ].values
# -
# Train the logistic-regression classifier with the hyperparameters defined above.
model = LogisticRegression( penalty=penalty, C=C, solver=solver, class_weight=class_weight )
model.fit( xtrain, ytrain )
# Persist the fitted model, then reload it (pickle round-trip).
with open( modelpath + modelname + '.pkl', 'wb' ) as f:
    pickle.dump( model, f )
with open( modelpath + modelname + '.pkl', 'rb' ) as f:
    model = pickle.load( f )
# Training-set accuracy.
pred = model.predict(xtrain)
accuracy_score(ytrain, pred)
# +
# Ground-truth discrimination on the training set:
#   xd_* counts comments containing an identity term (the "deprived" group),
#   xf_* counts the rest (the "favoured" group).
# NOTE(review): set(terms) is rebuilt on every iteration; hoisting it once
# before the loop would make this O(n) instead of O(n*m).
xf_positive = 0
xd_positive = 0
xf_total = 0
xd_total = 0
for i in tqdm_notebook(range(len(train_feature))):
    if(train_data['toxicity'].values[i] == 1 and len(list(set(train_feature[i]).intersection(set(terms)))) > 0):
        xd_positive += 1
        xd_total += 1
    elif(len(list(set(train_feature[i]).intersection(set(terms)))) > 0):
        xd_total += 1
    elif(train_data['toxicity'].values[i] == 1 and len(list(set(train_feature[i]).intersection(set(terms))))==0):
        xf_positive += 1
        xf_total += 1
    elif(len(list(set(train_feature[i]).intersection(set(terms))))== 0):
        xf_total += 1
# -
# Discrimination = P(toxic | favoured) - P(toxic | deprived).
# NOTE(review): `pd` below shadows the pandas alias; pandas is re-imported
# later in the notebook before next use — consider renaming this variable.
pf = xf_positive / xf_total
pd = xd_positive / xd_total
discrimination = pf - pd
discrimination
# +
# Same group-rate computation, but driven by the model's *predictions* on the
# training set instead of the ground-truth labels.
pred = model.predict(xtrain)
xf_positive = 0
xd_positive = 0
xf_total = 0
xd_total = 0
for i in tqdm_notebook(range(len(train_feature))):
    if(pred[i].round() == 1 and len(list(set(train_feature[i]).intersection(set(terms)))) > 0):
        xd_positive += 1
        xd_total += 1
    elif(len(list(set(train_feature[i]).intersection(set(terms)))) > 0):
        xd_total += 1
    elif(pred[i].round() == 1 and len(list(set(train_feature[i]).intersection(set(terms))))==0):
        xf_positive += 1
        xf_total += 1
    elif(len(list(set(train_feature[i]).intersection(set(terms))))== 0):
        xf_total += 1
# -
# Model discrimination on the training set.
pf = xf_positive / xf_total
pd = xd_positive / xd_total
discrimination = pf - pd
discrimination
pred
# Apply the model to the held-out test set and store the hard predictions.
pred = model.predict(xtest)
df_test['prediction_scores'] = pred
accuracy_score(df_test['Label'].astype(float), pred)
# +
# Discrimination of the raw model on the test set (same counting scheme as above).
xf_positive = 0
xd_positive = 0
xf_total = 0
xd_total = 0
for i in tqdm_notebook(range(len(test_feature))):
    if(pred[i].round() == 1 and len(list(set(test_feature[i]).intersection(set(terms)))) > 0):
        xd_positive += 1
        xd_total += 1
    elif(len(list(set(test_feature[i]).intersection(set(terms)))) > 0):
        xd_total += 1
    elif(pred[i].round() == 1 and len(list(set(test_feature[i]).intersection(set(terms))))==0):
        xf_positive += 1
        xf_total += 1
    elif(len(list(set(test_feature[i]).intersection(set(terms))))== 0):
        xf_total += 1
# -
# Test-set discrimination of the unmodified classifier.
pf = xf_positive / xf_total
pd = xd_positive / xd_total
discrimination = pf - pd
discrimination
# +
import pandas as pd
def perf_measure(y_actual, y_hat):
    """Return confusion-matrix counts as the tuple (TN, FP, FN, TP).

    Assumes 0/1 labels; items where prediction is neither 0 nor 1 are
    not counted.
    """
    tn = fp = fn = tp = 0
    for i in range(len(y_hat)):
        actual, predicted = y_actual[i], y_hat[i]
        if actual == predicted == 1:
            tp += 1
        elif predicted == 1:
            fp += 1
        elif actual == predicted == 0:
            tn += 1
        elif predicted == 0:
            fn += 1
    return (tn, fp, fn, tp)
# Overall false-positive / false-negative rates on the whole test set.
total_tn, total_fp, total_fn, total_tp = confusion_matrix(df_test['Label'].astype(float), pred.round()).ravel()
total_fpr = total_fp / (total_fp + total_tn )
total_fnr = total_fn / (total_fn + total_tp)
false_positive = []
false_negative = []
identity_terms = []
# Per-identity-term FPR/FNR, to be compared against the overall rates.
for identity_term in set(terms):
    data = df_test[df_test['Identity_Terms'] == identity_term].reset_index()
    y_true, y_pred = data['Label'].astype(int), data['prediction_scores']
    tn, fp, fn, tp = perf_measure(y_true, y_pred.round())
    try:
        fpr = fp / (fp + tn)
        fnr = fn / (fn + tp)
        false_positive.append(fpr)
        false_negative.append(fnr)
        identity_terms.append(identity_term)
    except:
        # NOTE(review): bare except silently hides everything besides the
        # expected ZeroDivisionError — narrow it to ZeroDivisionError.
        print("Error in ", identity_term)
eval_scores = pd.DataFrame(identity_terms, columns = ['Identity_Titles'])
eval_scores['Identity_Term_False_Positive'] = false_positive
eval_scores['Total_False_Positive'] = total_fpr
eval_scores['Identity_Term_False_Negatives'] = false_negative
eval_scores['Total_False_Negative'] = total_fnr
# Absolute gaps between per-term and overall rates (the bias metric).
eval_scores['FPR - FPRt'] = abs(total_fpr - eval_scores['Identity_Term_False_Positive'])
eval_scores['FNR - FNRt'] = abs(total_fnr - eval_scores['Identity_Term_False_Negatives'])
eval_scores
# -
eval_scores['FPR - FPRt'].sum(), eval_scores['FNR - FNRt'].sum()
# +
# Per-identity-term AUC vs overall AUC.  Each term's subset is balanced by
# appending an equally sized random sample of the full test set.
total_auc = roc_auc_score(df_test['Label'].astype(float), pred.round())
terms_auc = []
identity_terms = []
for identity_term in set(terms):
    term_data = df_test[df_test['Identity_Terms'] == identity_term].reset_index()
    data = df_test.sample(n=len(term_data['Text']), random_state=0)
    # DataFrame.append was deprecated in pandas 1.4 and removed in 2.0;
    # pd.concat is the supported drop-in equivalent for this row-wise append.
    data = pd.concat([term_data, data], ignore_index=True)
    y_true, y_pred = data['Label'].astype(int), data['prediction_scores']
    try:
        term_auc = roc_auc_score(y_true, y_pred.round())
        terms_auc.append(term_auc)
        identity_terms.append(identity_term)
    except:
        # roc_auc_score raises when only one class is present in y_true.
        print("Error in ",identity_term)
eval_scores = pd.DataFrame(identity_terms, columns = ['Identity_Titles'])
eval_scores['AUCt'] = terms_auc
eval_scores['AUC'] = total_auc
eval_scores['AUC - AUCt'] = abs(eval_scores['AUC'] - eval_scores['AUCt'])
eval_scores
# -
print(eval_scores['AUC - AUCt'].sum())
# Reject Option Classification (ROC): predictions whose confidence falls
# below theta are overridden in favour of the deprived group — comments
# containing an identity term get label 0, all others get label 1.
pred_prob = model.predict_proba(xtest)
theta = 0.8
ROC_pred = []
for i in tqdm_notebook(range(len(test_feature))):
    p_positive = pred_prob[i][1]
    p_negative = 1 - p_positive
    deprived_term = list(set(test_feature[i]).intersection(set(terms)))
    if max(p_positive, p_negative) < theta and len(deprived_term) > 0:
        #print(i, deprived_term, favoured_term)
        ROC_pred.append(0)
    elif max(p_positive, p_negative) < theta and len(deprived_term) == 0 :
        ROC_pred.append(1)
    else:
        # Confident prediction: keep the model's own label.
        ROC_pred.append(int(pred[i].round()))
# +
# Discrimination on the test set after the reject-option post-processing.
xf_positive = 0
xd_positive = 0
xf_total = 0
xd_total = 0
for i in tqdm_notebook(range(len(test_feature))):
    if(ROC_pred[i] == 1 and len(list(set(test_feature[i]).intersection(set(terms)))) > 0):
        xd_positive += 1
        xd_total += 1
    elif(len(list(set(test_feature[i]).intersection(set(terms)))) > 0):
        xd_total += 1
    elif(ROC_pred[i] == 1 and len(list(set(test_feature[i]).intersection(set(terms))))== 0):
        xf_positive += 1
        xf_total += 1
    elif(len(list(set(test_feature[i]).intersection(set(terms))))== 0):
        xf_total += 1
# -
pf = xf_positive / xf_total
pd = xd_positive / xd_total
discrimination = pf - pd
discrimination
accuracy_score(df_test['Label'].astype(float), ROC_pred)
# +
#test_data['prediction_scores'] = ROC_pred
# NOTE(review): the commented-out line above suggests 'prediction_scores'
# should be refreshed with ROC_pred before the per-term loop below.  As
# written, the per-term rates still use the raw model predictions while the
# totals use ROC_pred — confirm this mismatch is intended.
import pandas as pd
total_tn, total_fp, total_fn, total_tp = confusion_matrix(df_test['Label'].astype(float), ROC_pred).ravel()
total_fpr = total_fp / (total_fp + total_tn )
total_fnr = total_fn / (total_fn + total_tp)
false_positive = []
false_negative = []
identity_terms = []
for identity_term in set(terms):
    data = df_test[df_test['Identity_Terms'] == identity_term].reset_index()
    y_true, y_pred = data['Label'].astype(int), data['prediction_scores']
    tn, fp, fn, tp = perf_measure(y_true, y_pred.round())
    try:
        fpr = fp / (fp + tn)
        fnr = fn / (fn + tp)
        false_positive.append(fpr)
        false_negative.append(fnr)
        identity_terms.append(identity_term)
    except:
        print("Error in ", identity_term)
eval_scores = pd.DataFrame(identity_terms, columns = ['Identity_Titles'])
eval_scores['Identity_Term_False_Positive'] = false_positive
eval_scores['Total_False_Positive'] = total_fpr
eval_scores['Identity_Term_False_Negatives'] = false_negative
eval_scores['Total_False_Negative'] = total_fnr
eval_scores['FPR - FPRt'] = abs(total_fpr - eval_scores['Identity_Term_False_Positive'])
eval_scores['FNR - FNRt'] = abs(total_fnr - eval_scores['Identity_Term_False_Negatives'])
eval_scores
# -
eval_scores['FPR - FPRt'].sum(), eval_scores['FNR - FNRt'].sum()
# +
# Per-identity-term AUC after the reject-option post-processing.
# NOTE(review): the per-term y_pred still reads the 'prediction_scores'
# column (raw model output) rather than ROC_pred — confirm intended.
total_auc = roc_auc_score(df_test['Label'].astype(float), ROC_pred)
terms_auc = []
identity_terms = []
for identity_term in set(terms):
    term_data = df_test[df_test['Identity_Terms'] == identity_term].reset_index()
    data = df_test.sample(n=len(term_data['Text']), random_state=0)
    # DataFrame.append was removed in pandas 2.0; pd.concat is the
    # drop-in replacement for this row-wise concatenation.
    data = pd.concat([term_data, data], ignore_index=True)
    y_true, y_pred = data['Label'].astype(int), data['prediction_scores']
    try:
        term_auc = roc_auc_score(y_true, y_pred.round())
        terms_auc.append(term_auc)
        identity_terms.append(identity_term)
    except:
        # roc_auc_score raises when only one class is present in y_true.
        print("Error in ",identity_term)
eval_scores = pd.DataFrame(identity_terms, columns = ['Identity_Titles'])
eval_scores['AUCt'] = terms_auc
eval_scores['AUC'] = total_auc
eval_scores['AUC - AUCt'] = abs(eval_scores['AUC'] - eval_scores['AUCt'])
eval_scores
# -
print(eval_scores['AUC - AUCt'].sum())
| ROC-LR.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# name: python3
# ---
# + id="Xe3nDlAumlP4"
### Value Function Approximation
### Often the state spaces are huge and if we use table based methods, they take up enormous space to store the action-state values
### that is infeasable, and insufficient.
### In such cases, we need something for generalization, i.e, even our agent has never seen the state before, still it can make good decisions
### In this cases, we will want to represent the value function, or the state-action value function as a parameterized function
### instead of a table.
## S --> f(S:W) ---> V(S)
## S,A ---> g(S,A:W)---> Q(S,A)
### W is the weight parameter of the function. So, we give (State, Action) as input to the parameterized function.
### It gives us Q[s,a]
### We just try to create a general compact representation, to reduce the memory and computation required to find a good policy.
### For VFA, most used algorithms like:
### 1. Linear Regression
### 2. Neural Networks
## The parameterized Neural Networks predict the state-action value function Q'(s,a: w) in case of policy control and V'(s: w) in case of policy evaluation
## To predict the parameterized function tries to decrease the MSE between Q(s,a) and Q'(s,a: w) in case of policy control, where Q(s,a) is the actual
## target value of the Q function, and Q'(s,a: w) is the Q value predicted by the approximation model. We replace Q[s,a] by V[s] and every thing remains same for
## Policy Evaluation
### The loss function: J(w) = E[(Q(s,a) - Q'(s,a: w))^2] i.e, the MSE between actual and the predicted value or Q-function, for policy contol.
### The loss function: J(w) = E[(V(s) - V'(s: w))^2] i.e, the MSE between actual and the predicted value or V-function, for policy contol.
### Gradient descent is used on parameters w to minimize J(w): W = W - alpha * dJ(w)/dw.
### alpha is the learning parameter. Now, this gradient descent is handled and learnt as parameterized loss function in a neural network architecture.
### Now, as we have seen, to train the Neural Networks, we need a source to obtain the actual Q or V function values-> Q[s,a] or V[s]
### This source is called an ORACLE.
### Initially, the model was trained on a single sample, or a single state-value tuple at a time, to avoid the expectation, and so SGD was used.
#### VALUE FUNCTION: FOR POLICY EVALUATION
### To create this oracle, we use the estimation methods, we used previously,
### 1. Monte-carlo based methods
### 2. TD Learning Based methods
#### STATE-ACTION VALUE FUNCTION: FOR POLICY CONTROL
### To create this oracle, we use the estimation methods, we used previously,
### 1. Monte-carlo based methods
### 2. SARSA Based methods
### 3. Q-leaning Based methods
### The only difference is, in the update step after finding the updated V, or Q function, we have to fit it to the approximator for it to update its weight update.
### Formulation:
## For the formulation, states are represented as vectors. A state is represented as a set of n features. These n features form a vector.
## So, V'(s: w)= B0+ W1*X1(s)+..........WnXn(s).
### => V'(s:w) = transpose(X(s)).W
### => dV'(s:w)/dw = X(s)
### W is the weight parameter matrix and X is the state vector.
### Similarly for Q'(s,a: w)= transpose(X(s,a)).W
# + id="j1ThA6kz6OLA"
#### WE UPDATE THE WEIGHTS OF THE APPROXIMATORS IN CASE OF VFA
### FOR VALUE FUNCTION:
### Monte-Carlo Update:
### We sample ((s1,G1), (s2,G2).................(sn,Gn))... for batch training
## W = W + alpha * (Gt - V'(st : w))dV'(st:w)/dw.................(1)
## W = W + alpha * (Gt - transpose(X(st)).W)).X(st)................(2)
### True for episodic setting, Noisy unbaised estimate.
### TD-Learning Update:
### Uses Bootstrap.
### We sample ((s1, r1, s'1),(s2, r2, s'2),.......(sn, rn, s'n))... for batch training
## W = W + alpha * ((rt + gamma*V(s't :w)) - V(st :w)).dV'(st:w)/dw...............(3)
### W = W + alpha * ((rt + gamma* transpose(X(s't)).W) - transpose(X(st)).W).dV'(st:w)/dw
### W = W + alpha * ((rt + gamma* transpose(X(s't)).W) - transpose(X(st)).W).X(st)......(4)
### True for non-episodic setting, baised estimate, no variance.
#### FOR STATE-ACTION VALUE FUNCTION:
### Monte-Carlo Update:
### We sample ((s1, a1, G1), (s2, a2, G2).................(sn, an, Gn))... for batch training
## W = W + alpha * (Gt - Q'(st, at : w))dQ'(st, at :w)/dw.................(1)
### SARSA Update:
### Uses Bootstrap.
### We sample ((s1, a1, r1, s'1, a'1),(s2, a2, r2, s'2, a_2),.......(sn, an, rn, s'n, a'n))... for batch training
## W = W + alpha * ((rt + gamma*Q'(s't,a't :w)) - Q'(st,at :w)).dQ'(st,at :w)/dw...............(2)
### Q-learning Update:
### Uses Bootstrap, Optimistic policy
### We sample ((s1, a1, r1, s'1),(s2, a2, r2, s'2),.......(sn, an, rn, s'n))... for batch training
## W = W + alpha * ((rt + gamma* max(Q'(s't, a :w))) - Q'(st,at :w)).dQ'(st,at :w)/dw...............(3)
### a = all actions in action space. So, the maximum value of all actions for that state.
# + id="YXS7wgKyaf8w"
### We use the Q learning based oracle in the DQN.
### The maximization bias exists as Q-learning uses a optimistic policy.
# + id="z40U4pI1cwJm"
### The above equation is for Linear VFA: V(s)=transponse(X(s)).W
### The linear VFA may have a some constraint, if the value function is non-linear in nature.
### So, we use a Deep Learning, which can replicate non-linear as well as linear functions.
### V(s)=g1(B1+ W1. transpose(g0(B0 + W0.transpose(X(s)))))
### where V(s) is the value function for state s. X(s) is the feature set, sent to a 2 layered Neural Network.
### g0, g1 are activation functions, W0, W1 and B0, B1 are the weights and biases of the layers of the Neural Networks.
# + id="vSGVh6xHgcoX"
### There are some problems using Neural Networks based VFA.
### 1. In case of Linear VFA has less correlations among data so it gives an unbaised estimate, but for Non-linear VFA using Neural Networks
### has a high degree correlations, so it may cause a biased learning process, so it will start performing better for the actions and states
### it has seen in maximum cases. So, to prevent this from happening, DQN uses an Experience Replay Buffer to reduce bias.
### The Prior experiences are stored in a buffer. (s,a,r,s') is stored in the buffer.
### The buffer is of a fixed size of 10k or 1M samples, which are either the best or the most recent samples; mostly the latter is used.
### The idea is that we update using different states drawn from experience, to prevent repeated occurrences of the same states.
### 2. As we have seen the in case of the Q learning VFA based update:
### W= W - alpha * (r + gamma* max(Q'(s',a :W))- (Q(s,a :W))).dQ'(s,a :w)/dw
### we can see that, the oracle and the training network has the same parameters. So, when the training network is updated
### the oracle also shifts, so the model tries to achieve a moving target, which creates instability,
### for the oracle we use a different parameterized network, and for the trainee we use a different.
### After a few training epochs we update the oracle with the trainee networks weights
## This makes the target fixed and increases stability.
### W= W - alpha * (r + gamma* max(Q'(s',a :W'))- (Q(s,a :W))).dQ'(s,a :W)/dW
## W is the training parameter, W' is the oracle parameter.
# + [markdown] id="-x694u8b33gM"
# ### DQN on Cart-Pole environment
# + id="Kg3nVD6A1DiR"
import gym
import tensorflow as tf
import numpy as np
from memory_module import replayBuffer
# + id="0DEyLiKU5AV6"
#### instantiating environment
#### instantiating replay-buffer to 100k samples size
env=gym.make('CartPole-v0')
# Raise the per-episode step cap above CartPole-v0's default of 200.
env._max_episode_steps=400
memory=replayBuffer(100000)  # FIFO experience-replay buffer (project-local module)
# + id="3CJcGoXu9VMd"
class DQN:
    """Deep Q-Network agent for discrete-action gym environments.

    Two Keras networks of identical architecture are kept: `train_DQN`
    (updated every few steps) and `fixed_DQN` (the "oracle"/target
    network, synchronised periodically) so that the TD target does not
    move with every gradient step.
    """
    def __init__(self,env,buffer):
        # env: gym environment with a discrete action space.
        # buffer: replay buffer exposing push(...) and sample(n).
        self.env=env
        self.buffer=buffer ### Replay buffer
        self.state_dimension=env.observation_space.shape ### Input state dimension
        self.no_of_action=env.action_space.n ### No of actions
        # NOTE(review): learning_rate appears unused (the optimizer below
        # hard-codes lr=0.00025) -- confirm before relying on it.
        self.learning_rate=0.01
        self.gamma=0.99
        # NOTE(review): the `lr` kwarg is deprecated in newer TF/Keras in
        # favour of `learning_rate` -- confirm against the TF version in use.
        self.optimizer=tf.keras.optimizers.RMSprop(lr=0.00025, rho=0.95, epsilon=0.01)
        self.train_DQN=None #### Training network
        self.fixed_DQN=None #### Oracle (target) network
    def get_model(self):
        """Build a Q-network: state -> Dense(256)x2 -> one Q value per
        action, with a one-hot action input acting as an output mask."""
        ### Q = f(s,a: w)
        state_input=tf.keras.layers.Input(self.state_dimension,name="State_input") ### state input
        action_input=tf.keras.layers.Input((self.no_of_action,),name="Action_input") ### Action input
        net=tf.keras.layers.Dense(256,activation='relu')(state_input)
        net=tf.keras.layers.Dense(256,activation='relu')(net)
        output=tf.keras.layers.Dense(self.no_of_action,name="function_out")(net)
        ### So, the model takes in the state representation as input and produces the Q values for all the actions.
        ### Then for each action, given by e.g. action 1: [0 1], the [0 1] is multiplied with the output of the model in form [a1,a2]
        ### to get the output corresponding to the action required. [a1, a2].[0, 1] = [0, a2]
        Q_values=tf.multiply(output,action_input, name="masking_output")
        model=tf.keras.Model(inputs=[state_input,action_input],outputs=[Q_values],name="DQN")
        ### array of the Q values is the final output of the model.
        model.compile(loss="mse",optimizer=self.optimizer)
        ### as we want to minimize (Q[s,a]-Q'[s,a : w])^2 we use MSE.
        return model
    def update_fixed(self):
        """Copy the trainee's weights into the fixed (target) network."""
        self.fixed_DQN.set_weights(self.train_DQN.get_weights())
        ### We update the target/fixed network with the trainee network's
        ### weights after a few epochs.
    def get_epsilon(self,episode,steady_epsilon=0.01,steady_episode=100000):
        """Linearly anneal epsilon from 1 down to `steady_epsilon` over
        `steady_episode` calls, then hold it constant."""
        #### Getting the epsilon for the epsilon-greedy policy:
        ### epsilon linearly decays till the steady step and then becomes constant.
        if episode>steady_episode: ##If we are above the steady episode, we return a steady epsilon
            return steady_epsilon
        else:
            slope=(steady_epsilon - 1)/(steady_episode - 0)
            ### Line from (0, 1) to (steady_episode, steady_epsilon).
            ### slope*episode gives the decrease in the value of epsilon;
            ### adding 1 yields (1 - decrease), as epsilon starts from 1.
            return slope*episode + 1
    def get_action(self,state,epsilon):
        """Epsilon-greedy action: random with probability epsilon,
        otherwise argmax over Q values from the training network."""
        if np.random.random()<epsilon:
            return np.random.randint(self.no_of_action)
            ### choosing random action with probability epsilon/|actions| for each.
        else:
            ### State is given in the shape: array([-0.0213599 , -0.03238987, -0.0356761 , -0.0347844 ])
            ### as a 1D array; the model expects a 2D batch-of-one, so reshape to:
            ### array([[-0.0213599 , -0.03238987, -0.0356761 , -0.0347844 ]])
            reshaped_state=state.reshape(1,-1)
            ### To get Q values for ALL actions, send 1 for every action so the
            ### masking output passes everything through: here [1,1].
            action_input=np.ones((1,self.no_of_action))
            action_probs=self.train_DQN.predict([reshaped_state,action_input])
            ### action_probs has shape (1, no_of_action), e.g. array([[-0.00160907, -0.00242554]]).
            ### np.argmax over axis=1 picks the best action per ROW, which also
            ### works for mini-batches:
            ### array([[-0.00160907, -0.00242554],
            ###        [-0.00242554, -0.00160907]])
            ### np.argmax(..., axis=1) => [0, 1]; the trailing [0] unwraps the
            ### single-row batch used here.
            optimal_action=np.argmax(action_probs,axis=1)[0]
            return optimal_action
    def on_batch(self,s,a,r,s_,not_done,gamma=0.99):
        """One gradient step on a batch of transitions (s, a, r, s').

        Targets are r + gamma * max_a' Q_fixed(s', a'); `not_done`
        zeroes the bootstrap term for terminal transitions.
        """
        ### batch inputs
        batch_size=s.shape[0]
        ## e.g. if s has shape (50, 4), 50 is the batch size.
        ### As in get_action, query Q values for every action of the next
        ### state -- but from the fixed (target) network, already batched.
        action_probs=self.fixed_DQN.predict([s_,np.ones((batch_size,self.no_of_action))])
        ## Now the Q target
        q_targets= r + gamma*np.multiply(not_done,np.max(action_probs,axis=1))
        ### Updated Q targets for all the states; if done, not_done=0 for that
        ### state and only the immediate reward is considered.
        #### q_targets has shape [v1, v2, ... vn] (one value per state), but the
        ### network output is per-action, so scatter each target into the slot
        ### of the action actually taken -- [[0,v1],[v2,0],...] -- via the
        ### one-hot action mask.
        q_target_formatted=np.multiply(q_targets.reshape(-1,1),tf.keras.utils.to_categorical(a,self.no_of_action))
        self.train_DQN.train_on_batch([s,tf.keras.utils.to_categorical(a,self.no_of_action)],q_target_formatted)
        ### Training only the output for the action on which it was taken.
    def get_experience(self):
        """Seed the replay buffer with 50k uniformly random steps."""
        curr_state=self.env.reset()
        for _ in range(50000):
            ### Creating 50k steps of experience to start the initial training
            act=self.env.action_space.sample() ### initially we randomly sample from the action space.
            next_state,reward,done,_=self.env.step(act) ### Taking actions
            self.buffer.push(curr_state,act,reward,next_state,not done) ### Recording the details in buffer.
            if done:
                curr_state=self.env.reset() ### If the episode ended, the environment is reset.
            else:
                curr_state=next_state ### state is updated.
    def train(self):
        """Main loop: 1M environment steps with epsilon-greedy acting,
        a training batch every 4 steps and a target sync every 5000."""
        self.train_DQN=self.get_model()
        self.fixed_DQN=self.get_model()
        self.get_experience()
        ### All initialization steps done
        episode_reward=0
        no_of_comp=0
        curr_state=self.env.reset()
        for step in range(1000000):
            ### training on 1M steps
            # NOTE(review): get_epsilon is called with the STEP count although
            # its parameter is named `episode`, so epsilon anneals over 100k
            # steps -- confirm this is intended.
            act=self.get_action(curr_state,self.get_epsilon(step)) #### getting action according to current epsilon, and state
            next_state,reward,done,_=self.env.step(act) ### Taking the action
            episode_reward+=reward ## updating the reward for the step
            self.buffer.push(curr_state,act,reward,next_state,not done) ### Pushing the details in the buffer.
            ### Size of the buffer is fixed, so it works on a first-in-first-out policy.
            if done:
                curr_state=self.env.reset()
                if no_of_comp%50==0:
                    print('On step {}, no. of complete episodes {} episode reward {}'.format(step,no_of_comp,episode_reward))
                episode_reward=0 ### Resetting the episode reward
                no_of_comp+=1
            else:
                curr_state=next_state
            if step%5000==0: ### after 5000 steps the fixed or target DQN is updated.
                self.update_fixed()
            if step%4==0: ### every 4 steps, sample a fresh batch and train on it.
                s,a,r,s_,nd=self.buffer.sample(32)
                self.on_batch(s,a,r,s_,nd)
# + colab={"base_uri": "https://localhost:8080/"} id="xaOQ5wVW2DnJ" outputId="ea285bdd-95e7-4cfa-d1ac-3a81ff0e3cc9"
dqn=DQN(env,memory)  # build the agent around the CartPole env and the shared replay buffer
# + colab={"base_uri": "https://localhost:8080/"} id="cM6jYNHC2NZS" outputId="268b439e-a356-4f45-8587-98ede1be6e42"
dqn.train()  # fills the buffer with 50k random steps, then trains for 1M steps
# + id="AA10YfcgaSTC"
### in this we have used action replay buffer to gather experience and train. The buffer uses a LRU
### or the most recent tuple stays, and the least recent is overwritten.
### A more stable version is created using a prioritized experience replay buffer, i.e., instead of randomly stacking
### experience, it is a priority-based experience replay buffer.
### in this case, less the bootstrapping error like TD error, more is the priority of the sample, and more the priority,
### more is the chance, the tuple stays in the buffer.
### It is priority scheduling instead of LRU.
| DQNs/Deep_Q_Network (1).ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# language: python
# name: python3
# ---
# +
from tinyec import registry
import secrets
def compress(pubKey):
    """Compressed-point string: hex of x followed by the parity bit of y."""
    x_part = hex(pubKey.x)
    parity_bit = hex(pubKey.y % 2)[2:]
    return x_part + parity_bit
curve = registry.get_curve('brainpoolP256r1')
# Alice's key pair: private scalar below the curve order, public point = priv * G.
alicePrivKey = secrets.randbelow(curve.field.n)
alicePubKey = alicePrivKey * curve.g
print("Alice public key:", compress(alicePubKey))
bobPrivKey = secrets.randbelow(curve.field.n)
bobPubKey = bobPrivKey * curve.g
print("Bob public key:", compress(bobPubKey))
print("Now exchange the public keys (e.g. through Internet)")
# ECDH: a * (b*G) == b * (a*G), so both sides derive the same shared point.
aliceSharedKey = alicePrivKey * bobPubKey
print("Alice shared key:", compress(aliceSharedKey))
bobSharedKey = bobPrivKey * alicePubKey
print("Bob shared key:", compress(bobSharedKey))
print("Equal shared keys:", aliceSharedKey == bobSharedKey)
# +
# Repeat of the ECDH exchange above with freshly generated key pairs.
curve = registry.get_curve('brainpoolP256r1')
alicePrivKey = secrets.randbelow(curve.field.n)
alicePubKey = alicePrivKey * curve.g
print("Alice public key:", compress(alicePubKey))
bobPrivKey = secrets.randbelow(curve.field.n)
bobPubKey = bobPrivKey * curve.g
print("Bob public key:", compress(bobPubKey))
print("Now exchange the public keys (e.g. through Internet)")
aliceSharedKey = alicePrivKey * bobPubKey
print("Alice shared key:", compress(aliceSharedKey))
bobSharedKey = bobPrivKey * alicePubKey
print("Bob shared key:", compress(bobSharedKey))
print("Equal shared keys:", aliceSharedKey == bobSharedKey)
# +
from ecies.utils import generate_eth_key
from ecies import encrypt, decrypt
import binascii
# Generate a secp256k1 (Ethereum-style) key pair for the ECIES demo.
privKey = generate_eth_key()
privKeyHex = privKey.to_hex()
pubKeyHex = privKey.public_key.to_hex()
print("Encryption public key:", pubKeyHex)
print("Decryption private key:", privKeyHex)
plaintext = b'Some plaintext for encryption'
print("Plaintext:", plaintext)
# ECIES: anyone can encrypt with the public key; only the private key decrypts.
encrypted = encrypt(pubKeyHex, plaintext)
print("Encrypted:", binascii.hexlify(encrypted))
decrypted = decrypt(privKeyHex, encrypted)
print("Decrypted:", decrypted)
# -
| python-playground/hkdf.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# language: python
# name: python3
# ---
# # Prepare example data
#
# In order to illustrate the application of the tools contained here, some example datasets are provided. This Notebook outlines the pre-processing steps involved in preparing these datasets.
#
# The data will be drawn from the [Crime Open Database (CODE)](https://osf.io/zyaqn/), maintained by <NAME>. This collates crime data from a number of open sources in a harmonised format. Snapshots of this data for several years were downloaded in CSV format.
#
# The spatial data is provided in lat/lon format; here the PyProj library will be used to re-project the coordinates to metric units for distance calculations.
import pandas as pd
from pyproj import CRS, Transformer
# For the test data, data from the city of **Chicago** will be used, for the offence category '**residential burglary/breaking & entering**'. Data is concatenated for 2014-2017, inclusive.
# Read one CODE snapshot per year (2014-2017) and stack them vertically.
yearly_frames = [
    pd.read_csv("../data/crime_open_database_core_%d.csv" % year,
                parse_dates=['date_single'])
    for year in (2014, 2015, 2016, 2017)
]
data = pd.concat(yearly_frames, axis=0)
# Keep only Chicago residential burglaries.
data = data[data['city_name'] == "Chicago"]
data = data[data['offense_type'] == "residential burglary/breaking & entering"]
data.shape
# The total number of incidents across the 4 years is 45,319.
#
# The re-projection will use the [Illinois State Plane](http://www.spatialreference.org/ref/epsg/26971/) as the target reference system.
# +
# Source CRS: WGS84 lat/lon; target: Illinois State Plane (EPSG:26971, metres).
wgs84 = CRS.from_epsg(4326)
isp = CRS.from_epsg(26971)
transformer = Transformer.from_crs(wgs84, isp)
# NOTE(review): arguments are passed as (lat, lon), which matches pyproj 2+
# honouring the EPSG:4326 axis order -- confirm against the pyproj version in use.
x, y = transformer.transform(data["latitude"].values, data["longitude"].values)
data = data.assign(x=x, y=y)
# -
# Finally, save the derived data in minimal form.
# Persist only the columns the examples need, with day-first date formatting.
data.to_csv("../data/chicago_burglary_2014_2017.csv",
            columns=['x','y','date_single'],
            date_format='%d/%m/%Y', index=False)
| notebooks/Prepare example data.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3 (ipykernel)
# language: python
# name: python3
# ---
import pandas as pd
import numpy as np
import yfinance as yf
from scipy.stats import gmean
import sklearn
import matplotlib.pyplot as plt
import seaborn as sns
# Local CSV load; NOTE(review): this frame is overwritten by the yfinance
# download in the next cell, so this read appears redundant.
gstock_data = pd.read_csv('goog.csv')
gstock_data .head()
# This will download data from yahoo finance - more than enough for prototyping
gstock_data = yf.download(
# tickers list or string as well
tickers = "goog",
# use "period" instead of start/end
# valid periods: 1d,5d,1mo,3mo,6mo,1y,2y,5y,10y,ytd,max
# (optional, default is '1mo')
period = "5y",
# fetch data by interval (including intraday if period < 60 days)
# valid intervals: 1m,2m,5m,15m,30m,60m,90m,1h,1d,5d,1wk,1mo,3mo
# (optional, default is '1d')
interval = "1d",
# group by ticker (to access via data['SPY'])
# (optional, default is 'column')
group_by = 'ticker',
# adjust all OHLC automatically
# (optional, default is False)
auto_adjust = True,
# download pre/post regular market hours data
# (optional, default is False)
prepost = True,
# use threads for mass downloading? (True/False/Integer)
# (optional, default is True)
threads = True,
# proxy URL scheme use use when downloading?
# (optional, default is None)
proxy = None
)
gstock_data .tail()
# +
gstock_data.info()
#gstock_data = gstock_data [['Date','Open','Close']]
#gstock_data ['Date'] = pd.to_datetime(gstock_data ['Date'])
#gstock_data .set_index('Date',drop=True,inplace=True)
gstock_data .head()
# Daily close-to-close change; the first row is NaN by construction of diff().
gstock_data['Close_Diff']=gstock_data['Close'].diff()
# +
# Side-by-side plots of the raw Open and Close price series.
fg, ax =plt.subplots(1,2,figsize=(20,7))
ax[0].plot(gstock_data['Open'],label='Open',color='green')
ax[0].set_xlabel('Date',size=15)
ax[0].set_ylabel('Price',size=15)
ax[0].legend()
ax[1].plot(gstock_data['Close'],label='Close',color='red')
ax[1].set_xlabel('Date',size=15)
ax[1].set_ylabel('Price',size=15)
ax[1].legend()
fg.show()
# -
from sklearn.preprocessing import MinMaxScaler
MMS = MinMaxScaler()
# Scale every column (including Close_Diff) to [0, 1] in place.
# NOTE(review): Close_Diff's first value is NaN from diff(); confirm the
# scaler / downstream model handle that row as intended.
gstock_data [gstock_data.columns] = MMS.fit_transform(gstock_data )
training_size = round(len(gstock_data ) * 0.80)
# +
# Chronological 80/20 split -- no shuffling, since this is a time series.
train_data = gstock_data [:training_size]
test_data = gstock_data [training_size:]
train_data.shape, test_data.shape
# +
# Function to create sequence of data for training and testing
def create_sequence(dataset, window_size=50):
    """Slice *dataset* into overlapping windows for sequence learning.

    Parameters
    ----------
    dataset : pandas.DataFrame
        Feature frame with rows in chronological order.
    window_size : int, optional
        Number of consecutive rows per input sequence. Defaults to 50,
        the value previously hard-coded, so existing calls are unchanged.

    Returns
    -------
    tuple of np.ndarray
        sequences of shape (len(dataset) - window_size, window_size,
        n_features) and, as labels, the row immediately following each
        window, shape (len(dataset) - window_size, n_features).
    """
    sequences = []
    labels = []
    # Window [start_idx, stop_idx) is the model input; row stop_idx is its target.
    for stop_idx in range(window_size, len(dataset)):
        start_idx = stop_idx - window_size
        sequences.append(dataset.iloc[start_idx:stop_idx])
        labels.append(dataset.iloc[stop_idx])
    return (np.array(sequences), np.array(labels))
# -
# Build windowed tensors; each label is the row right after its window.
train_seq, train_label = create_sequence(train_data)
test_seq, test_label = create_sequence(test_data)
train_seq.shape, train_label.shape, test_seq.shape, test_label.shape
from keras.models import Sequential
from keras.layers import Dense, Dropout, LSTM, Bidirectional
#pip install tensorflow
# +
model = Sequential()
# Stacked LSTM: the first layer returns the full sequence for the second to consume.
model.add(LSTM(units=50, return_sequences=True, input_shape = (train_seq.shape[1], train_seq.shape[2])))
model.add(Dropout(0.1))
model.add(LSTM(units=50))
# One output per feature column (6 with Close_Diff included).
model.add(Dense(6))
model.compile(loss='mean_squared_error', optimizer='adam', metrics=['mean_absolute_error'])
model.summary()
# -
# Train for 80 epochs, validating on the held-out (chronologically later) windows.
model.fit(train_seq, train_label, epochs=80,validation_data=(test_seq, test_label), verbose=1)
# +
test_predicted = model.predict(test_seq)
test_predicted[:5]
# -
test_inverse_predicted = MMS.inverse_transform(test_predicted) # Inversing scaling on predicted data
test_inverse_predicted[:5]
gstock_data.info()
# +
# Merging actual and predicted data for better visualization
# NOTE(review): the model emits 6 columns (Dense(6)) but only 5 predicted
# column names are supplied below -- verify this does not raise at runtime.
gs_slic_data = pd.concat([gstock_data.iloc[-202:].copy(),pd.DataFrame(test_inverse_predicted,columns=['open_predicted','high_predicted','low_predicted','close_predicted','volume_predicted'],index=gstock_data.iloc[-202:].index)], axis=1)
# +
# NOTE(review): the scaler was fitted on 6 columns (incl. Close_Diff) but is
# inverted here on 5 -- confirm the column count matches the fitted scaler.
gs_slic_data[['Open','High',"Low",'Close','Volume']] = MMS.inverse_transform(gs_slic_data[['Open','High',"Low",'Close','Volume']]) # Inverse scaling
# +
gs_slic_data.tail()
# +
# Actual vs predicted open price over the evaluation window.
gs_slic_data[['Open','open_predicted']].plot(figsize=(10,6))
plt.xticks(rotation=45)
plt.xlabel('Date',size=15)
plt.ylabel('Stock Price',size=15)
plt.title('Actual vs Predicted for open price',size=15)
plt.show()
# +
# Same comparison for the close price.
gs_slic_data[['Close','close_predicted']].plot(figsize=(10,6))
plt.xticks(rotation=45)
plt.xlabel('Date',size=15)
plt.ylabel('Stock Price',size=15)
plt.title('Actual vs Predicted for close price',size=15)
plt.show()
# +
# Creating a dataframe and adding 10 days to existing index
# NOTE(review): DataFrame.append was removed in pandas 2.0 and date_range's
# `closed` kwarg was replaced by `inclusive` in pandas 1.4 -- this cell
# requires an older pandas to run as written.
gs_slic_data = gs_slic_data.append(pd.DataFrame(columns=gs_slic_data.columns,index=pd.date_range(start=gs_slic_data.index[-1], periods=11, freq='D', closed='right')))
# -
gs_slic_data.tail(20)
# NOTE(review): this slice runs from a later date to an earlier one, which
# yields an empty frame on an ascending index -- bounds look swapped; confirm.
gs_slic_data['2022-02-01':'2021-02-22']
# Placeholder frame for the 10 forecast rows, aligned to the extended index.
upcoming_prediction = pd.DataFrame(columns=['Open','High','Low','Close','Volume'],index=gs_slic_data.index)
upcoming_prediction.index=pd.to_datetime(upcoming_prediction.index)
# +
# Roll the model forward 10 days: each prediction is fed back in as the
# newest row of the input window (autoregressive forecasting).
curr_seq = test_seq[-1:]
for i in range(-10,0):
    up_pred = model.predict(curr_seq)
    upcoming_prediction.iloc[i] = up_pred
    # Drop the oldest row, append the prediction, restore the (1, window, features) shape.
    curr_seq = np.append(curr_seq[0][1:],up_pred,axis=0)
    curr_seq = curr_seq.reshape(test_seq[-1:].shape)
# +
# NOTE(review): as above, the scaler was fitted on 6 columns but is inverted
# on 5 here -- confirm the shapes line up with the fitted scaler.
upcoming_prediction[['Open','High','Low','Close','Volume']] = MMS.inverse_transform(upcoming_prediction[['Open','High','Low','Close','Volume']])
# -
upcoming_prediction.tail(20)
# +
# Recent actual open prices next to the 10-day open-price forecast.
fg,ax=plt.subplots(figsize=(10,5))
ax.plot(gs_slic_data.loc['2022-01-12':,'Open'],label='Current Open Price')
ax.plot(upcoming_prediction.loc['2022-02-28':,'Open'],label='Upcoming Open Price')
plt.setp(ax.xaxis.get_majorticklabels(), rotation=45)
ax.set_xlabel('Date',size=15)
ax.set_ylabel('Stock Price',size=15)
ax.set_title('Upcoming Open price prediction',size=15)
ax.legend()
fg.show()
# +
# Same comparison for the close price, over a wider date range.
fg,ax=plt.subplots(figsize=(10,5))
ax.plot(gs_slic_data.loc['2021-04-01':,'Close'],label='Current close Price')
ax.plot(upcoming_prediction.loc['2021-04-01':,'Close'],label='Upcoming close Price')
plt.setp(ax.xaxis.get_majorticklabels(), rotation=45)
ax.set_xlabel('Date',size=15)
ax.set_ylabel('Stock Price',size=15)
ax.set_title('Upcoming close price prediction',size=15)
ax.legend()
fg.show()
# -
| Test_LSTM_goog.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# language: python
# name: python3
# ---
# ## Goals:
# - Talk about likelihood as a model fit metric in comparison with sum of squared error
# - Talk about different choice rules (Softmax, epsilon-greedy) and how they yield likelihood
# - Compute likelihood of a dataset given a model
# - Fit model to dataset using maximum likelihood
# ## Discussion: model fit metrics
# ##### Why do we need different model fit metrics than the summed squared error (SSE)?
# 1. Precision. We see in the example at the end of notebook 2 that many different values of alpha all give the same (minimum) model error. We might want more precision in the model prediction to arbitrate between these different optimal alphas. We can achieve this by predicting _probability distributions_ instead of _point predictions_ from the model and from those probability distributions computing the likelihood of an observed data point.
# 2. Convenience. Many choice models simply don't provide a point prediction, but only a probability distribution. This is the case in for instance drift-diffusion models, as well as common choice rules in RL such as the soft-max (more on that later). In these cases, a 'point prediction error' is in that case _harder_ to compute than the likelihood of an observation.
# 3. Optimality. Likelihood will allow us to express model fit as a probability. This is useful for Bayesian inference, where we want to be able to say e.g. what the likelihood of the data given the model is (i.e. the probability of observing exactly this data if your model were true). We can then integrate this with a prior probability that our model is true and make more informed inferences.
# 4. Realism. Models that produce probability distributions can account for noise/exploration/non-deterministic choice in datasets. This is adaptive in artifical agents!
#
# Note: You can in principle _convert_ SSE into likelihood by making some assumptions about the distribution of error around the point predictions of your model. But often it makes more sense to directly compute a probability distribution from your model.
# ## Load packages
import os, sys, glob, scipy
import numpy as np
import pandas as pd
import matplotlib.pyplot as plt
import seaborn as sns
# Resolve the notebook's working directory; data paths below are built from it.
base_dir = os.path.realpath('.')
print(base_dir)
# ## Writing choice rules
# These are from the previous notebook(s).
def greedy(utilities, params=()):
    """Return the index of the highest-utility action (ties -> lowest index).

    `params` is accepted only for signature compatibility with the other
    choice rules and is unused; the previous mutable default `[]` is
    replaced by an immutable tuple to avoid the shared-default pitfall.
    """
    # np.argmax is the single-pass, idiomatic form of
    # np.where(utilities == np.max(utilities))[0][0].
    return int(np.argmax(utilities))
def e_greedy(utilities, epsilon):
    """Epsilon-greedy action selection.

    With probability `epsilon` a uniformly random action index is
    returned; otherwise the greedy (highest-utility) action is chosen.
    """
    random_number = np.random.rand()
    if random_number < epsilon:
        # Explore: sample uniformly over ALL actions. (The original
        # np.random.randint(0, 2) silently assumed exactly two actions.)
        choice = np.random.randint(len(utilities))
    else:
        # Exploit: delegate to the greedy rule.
        choice = greedy(utilities)
    return choice
def softmax_with_choice(utilities, beta):
    """Sample a choice (0 or 1) from a softmax over two utilities.

    Beta is sometimes called 'inverse temperature' or 'explore parameter':
    beta -> 0 gives uniform random choice; large beta approaches greedy.
    """
    # Shift by the max scaled utility before exponentiating. Softmax is
    # invariant to a constant shift, and this prevents np.exp overflow,
    # which previously produced inf/inf = nan (and hence always choice 1)
    # for large beta*utility values. The general n-action form would be
    # np.exp(scaled) / np.sum(np.exp(scaled)); here only prob_A is needed
    # since prob_B = 1 - prob_A.
    scaled = np.multiply(utilities, beta)
    scaled = scaled - np.max(scaled)
    prob_A = np.exp(scaled[0]) / (np.exp(scaled[0]) + np.exp(scaled[1]))
    random_number = np.random.rand()
    if random_number < prob_A:
        choice = 0
    else:
        choice = 1
    return choice
# ## Simulate some data for each choice rule
# ##### E-greedy
model = e_greedy
utilities = [6, 5.9] # just randomly chosen numbers
choices = []
epsilon = .9
# With epsilon = .9 the agent picks at random 90% of the time, greedily otherwise.
for trial in range(10000):
    choice = model(utilities, epsilon)
    choices.append(choice)
plt.hist(choices);
plt.xlim([0,1])
# Try adjusting the value of epsilon. What happens to the choice distribution? How might this help us explain the behavior of an agent (an animal, a robot etc)?
# ##### Softmax
# +
# Pairs [i, 10]: option B is fixed at 10 while option A's utility varies.
utilities_list = [[i,10] for i in range(20)]
utilities_list
probabilities = []
beta = .7
# Estimate P(choose A) empirically from 100 softmax draws per utility pair
# (choices are 0 for A, so 1 - mean(choices) is the fraction of A choices).
for ut in range(len(utilities_list)):
    choices = []
    for trial in range(100):
        choice = softmax_with_choice(utilities_list[ut], beta)
        choices.append(choice)
    probability = 1 - np.mean(choices)
    probabilities.append(probability)
sns.scatterplot(np.diff(utilities_list).flatten(), probabilities)
# -
# ## Writing choice rules that produce probability distributions
# The key is that instead of simulating a given choice (e.g. softmax) rule a number of times and measuring how far the prediction tends to be from some observed data set, we can just return the probabilities of each possible choice that could be produced by the choice rule.
def softmax(utilities, beta):
    """Return softmax choice probabilities over `utilities` at inverse
    temperature `beta` (one probability per option, summing to 1)."""
    scaled = np.multiply(utilities, beta)
    # Subtracting the max leaves the probabilities unchanged (softmax is
    # shift-invariant) but avoids np.exp overflow -- inf/inf -> nan --
    # for large beta*utility values.
    scaled = scaled - np.max(scaled)
    probs = np.exp(scaled) / np.sum(np.exp(scaled))
    return probs
probs_A = []
beta = 0.7
# Exact P(choose A) from the analytic softmax -- no sampling needed this time.
for utilities in utilities_list:
    probs = softmax(utilities, beta)
    probs_A.append(probs[0])
plt.plot(np.diff(utilities_list).flatten(), probs_A)
plt.xlabel('Utility difference B - A')
plt.ylabel('Probability choosing A')
# We can now see that under a given choice rule with a given parametrization, a given choice (e.g. A or B) gets assigned a different probability. We can use this to compute the probability of observing a number of sequential data points (i.e. a data set) under a particular model, by multiplying the probabilities of each data point under the model.
# ## Zooming out: using choice rules to compute the probability of an entire choice dataset
# 1. Load Joey's data again
# 2. Combine the inequity aversion model with the softmax choice rule
# 3. Compute the likelihood of a subject's data _set_ given different values for alpha (inequity aversion) and beta (softmax inverse temperature)
# 4. Finding the 'most likely' values for alpha and beta for a participant (= model fitting!)
# ##### Load some data
data_dir = base_dir + '/Data'
data = pd.read_csv(data_dir + '/Study1_UG.csv')
data = data[['sub','trial','unfairness','choice']]
# The responder's offer is the complement of unfairness out of a 100-unit pie.
data['offer'] = 100 - data['unfairness']
data.head()
# Single subject:
sub = 2
# @sub inside DataFrame.query refers to the local variable above.
sub_data = data.query('sub == @sub').reset_index(drop=True)
sub_data.head()
plt.scatter(sub_data['unfairness'], sub_data['choice'])
# ##### Define our inequity aversion model again:
def inequity_aversion(offer, alpha, total=100):
    """Utilities [accept, reject] for an ultimatum-game offer under a
    symmetric inequity-aversion value function.

    Parameters:
        offer: amount offered to the responder (out of `total`).
        alpha: weight on the payoff inequity term.
        total: size of the pie being split. Defaults to 100, the value
            previously hard-coded, so existing calls are unchanged.

    Accepting yields `offer` minus alpha times the payoff gap between
    proposer and responder; rejecting yields 0 for both, hence utility 0.
    """
    payoffs = np.array([offer, 0])
    # If accepted, the proposer keeps (total - offer) and the responder gets offer.
    inequities = np.array([(total-offer)-offer, 0])
    utilities = payoffs - np.multiply(alpha,inequities)
    return utilities
# ##### Simulate data for inequity aversion + softmax with varying parameter values
def simulate_data(sub_data, alpha, beta):
    """Simulate acceptance probabilities for each trial of one subject.

    For every trial's offer, computes utilities with the inequity-aversion
    value function and maps them to an acceptance probability with the
    softmax choice rule.

    Returns a DataFrame with columns ['trial', 'offer',
    'probability_accept'], one row per trial, RangeIndex.
    """
    rows = []
    for trial in sub_data['trial'].unique():
        trial_data = sub_data.query('trial == @trial')
        offer = trial_data['offer'].values[0]
        # Value function: inequity aversion ==> utilities
        utilities = inequity_aversion(offer, alpha)
        # Choice rule: softmax ==> probabilities
        choice_probabilities = softmax(utilities, beta)
        rows.append([trial, offer, choice_probabilities[0]])
    # Build the frame once at the end: the previous per-trial
    # DataFrame.append was removed in pandas 2.0 and was O(n^2) anyway.
    simulated_data = pd.DataFrame(rows, columns=['trial', 'offer', 'probability_accept'])
    return simulated_data
# Vary alpha and beta:
# Sweep alpha (columns) and beta (rows) and plot the predicted acceptance
# curve for every (alpha, beta) combination.
alphas = [i for i in np.arange(0,2,.4)]
betas = [i for i in np.arange(0,.51,.1)]
fig, axes = plt.subplots(nrows = len(betas), ncols = len(alphas), figsize = [13,13])
for ai, alpha in enumerate(alphas):
    for bi, beta in enumerate(betas):
        sim_data = simulate_data(sub_data, alpha, beta)
        plot_data = sim_data.sort_values(by=['offer'])
        axes[bi,ai].plot(plot_data['offer'], plot_data['probability_accept'])
        axes[bi,ai].set(title = 'alpha = %.2f, beta = %.2f'%(alpha, beta), ylim = [0,1],
                        xlabel = 'offer', ylabel = 'Probability accept')
plt.tight_layout()
# ##### What do you see here? What happens as alpha goes up from 0? And what happens when beta goes up from 0?
# Both parameters change different aspects of the dataset. Alpha (the inequity aversion weight parameter) dictates how much unfairness the agent is willing to accept, with lower offers being accepted more when alpha is closer to 0. Beta (the inverse temperature parameter in the softmax) dictates how much noise is expected in the choice, with more noise when beta is closer to 0.
# The crucial thing to realize here is that under different values of alpha and beta, different behavior is differently likely. For example, suppose we knew the _true_ alpha and beta of an agent (for example because the agent is a robot that we programmed ourselves). Now if this true alpha = 1.6 and beta = 0.5, it is _very unlikely_ that the agent would always accept every offer (just look at the bottom-right panel of the figure above). On the other hand, it alpha = 0 and beta = 0.5, it is actually _pretty likely_ that the agent would always accept every offer. Therefore, if we observe an agent who accepts every offer in an experiment, it is _more likely_ that (alpha = 0 and beta = 0.5) than that (alpha = 1.6 and beta = 0.5).
#
# ##### Here are some implications of this intuition:
# 1. If we have a model that spits out choice probabilities, we can compute the likelihood of some data under that model. In other words, we can compute _the probability of observing this particular data set if the model were true_.
# 2. This is NOT (!) the same as _the probability that the model is true_. The difference becomes important (and useful) when you start applying Bayesian inference, in which the likelihood of a model given some observed data P(M|D) is a function of both the likelihood of the observed data given that model P(D|M) and the prior likelihood of the model P(M) - but this is beyond the scope of the current tutorial. For now, we will just work with the notion that the probability of a model being true is proportional to the probability that this model would have produced the data we observed in our experiment.
# 3. Finding the model or model parameters that produce the highest probability of observing the data set is known as _maximum likelihood estimation_. It is a model fitting strategy.
# 4. The more data we have, the better we can distinguish between models and model parameters, since each candidate model is required to explain ALL data points at once.
# ## Fitting a model using maximum likelihood - in practice:
# ##### Create log-likelihood function that computes the log-likelihood of some data given a model + model parameters
# This function will compute (the log of) the probability that the model we choose would have produced _precisely the observed data set_. It does so by computing the probability that the model would have produced one trial's data point, and then multiplying all these probabilities for all trials.
# We take the log of this probability since the probability that a model produces _precisely the observed data set_ is almost always going to be very small since it's a product of many trial probabilities between 0 and 1. Our computers don't like working with these super small numbers. To make matters worse, the number will only shrink with an increasing number of data points, but of course we don't want to discourage collecting more data! Therefore, we just take the log of each probability and sum over these logs, which is equivalent to taking the log over the product of all probabilities.
def compute_loglike(data, alpha, beta):
    """Log-likelihood of one subject's choice data under the
    inequity-aversion value function plus softmax choice rule with the
    given alpha and beta."""
    trial_probs = []
    for trial in data['trial'].unique():
        # One observation per trial: take that trial's first row.
        first_row = data.query('trial == @trial').iloc[0]
        utilities = inequity_aversion(first_row['offer'], alpha)
        choice_probabilities = softmax(utilities, beta)
        # Probability the model assigns to the choice actually made.
        trial_probs.append(choice_probabilities[first_row['choice']])
    # Sum of logs == log of the product of all trial probabilities,
    # without the numerical underflow of multiplying tiny numbers.
    loglike = np.sum(np.log(trial_probs))
    return loglike
# ##### Evaluate the log-likelihood of the subject's data for a variety of alphas and betas
loglike_results = pd.DataFrame()
alphas = np.round(np.arange(0, 2, .05), decimals = 2) # Get weird precision issue if I don't do this
betas = np.round(np.arange(0, 1, .05), decimals = 2)
# Exhaustive grid search: alpha in the OUTER loop, beta in the INNER loop
# (the 3-D plotting cells below rely on this ordering).
# NOTE(review): DataFrame.append was removed in pandas 2.0 -- under newer
# pandas, collect rows in a list and build the frame once.
for alpha in alphas:
    for beta in betas:
        print('(%.2f,%.2f) | '%(alpha,beta), end = '')
        loglike = compute_loglike(sub_data, alpha, beta)
        loglike_results = loglike_results.append(
            pd.DataFrame([[alpha, beta, loglike]],
                columns = ['alpha', 'beta', 'loglike'])
            ).reset_index(drop=True)
# ##### Find best-fitting parameter pair
# Maximum-likelihood estimate: the (alpha, beta) cell with the largest loglike.
maxlike = max(loglike_results['loglike'])
bestmodel = loglike_results.query('loglike == @maxlike')
alpha, beta = bestmodel[['alpha','beta']].iloc[0] # Take the first row, in case there are multiple solutions
print('Best parameters: alpha = %.2f, beta = %.2f'%(alpha,beta))
# ##### Plot
# Pivot the long-format results to a (beta x alpha) grid for the heatmap.
heatmap_data = loglike_results.pivot(
    index = 'beta', columns = 'alpha', values = 'loglike')
hm = sns.heatmap(heatmap_data,
    cmap = sns.color_palette("coolwarm", 200),
    # cmap = sns.color_palette("cubehelix", 200),
    # cmap = sns.cubehelix_palette(100, start=-.5, rot = 2, reverse=True),
    cbar_kws={'label': 'Log likelihood'})
# Annotate the best parameter combination (can be more than 1):
for ri,row in bestmodel.iterrows():
    # The +0.5 offsets centre the marker within its heatmap cell.
    plt.scatter(.5 + np.where(alphas == row['alpha'])[0][0],
                .5 + np.where(betas == row['beta'])[0][0],
                marker = 'o', facecolor = 'w', edgecolor = 'k', s = 50,
                label = 'best-fitting solution')
plt.legend(loc = [1.2, .9]);
# From this plot (the so-called _likelihood surface_ ) it can be seen that under alpha = .35 and beta = .1, the observed data (subject 2) was more likely than under, say, alpha = 1 and beta = .9. This is NOT the same as saying that if alpha = .35 and beta = .1 it is would be most likely to observe subject 2's data! Just that alpha = .35 and beta = .1 are the best combination of parameters for producing data that looks like subject 2's data. This means that between the parameter sets we have compared here, alpha = .35 and beta = .1 is the most likely one. Alpha = .35 and beta = .1 is thus the maximum likelihood estimate for this subject's data.
# ##### 3d version
# Scatter the likelihood surface in 3D: x = alpha, y = beta, z = log likelihood.
from mpl_toolkits.mplot3d import Axes3D
x = loglike_results['alpha'].values
y = loglike_results['beta'].values
z = loglike_results['loglike'].values
# %matplotlib notebook
ax = plt.axes(projection = '3d')
# Color points by their log-likelihood value so the peak stands out.
ax.scatter3D(x, y, z, c=z, cmap='jet')
# ##### Meshgrid solution:
# Build a regular (alpha, beta) grid and plot the likelihood surface as a
# 3D contour. contour3D needs Z shaped (len(beta), len(alpha)) to match the
# meshgrid orientation; pivoting the long-format results gives exactly that.
# (The previous version built Z with the deprecated np.mat plus a reshape in
# the wrong orientation, then overwrote it with a slow nested-query loop.)
alpha = np.unique(loglike_results['alpha'].values)
beta = np.unique(loglike_results['beta'].values)
ALPHA, BETA = np.meshgrid(alpha, beta)
Z = loglike_results.pivot(index='beta', columns='alpha', values='loglike').values
fig = plt.figure()
ax = plt.axes(projection='3d')
ax.contour3D(ALPHA, BETA, Z)
# ##### Simulate the model at the maximum likelihood solution
# Generate model predictions using the best-fitting (alpha, beta) found above.
simulated_data = simulate_data(sub_data, alpha, beta)
simulated_data['probability_reject'] = 1 - simulated_data['probability_accept']
simulated_data.head()
# +
fig, ax = plt.subplots(nrows = 1, ncols = 2, figsize = [10,5])
# Choice over trial: model's reject probability vs. the subject's actual choices.
sns.scatterplot(data = simulated_data, x = 'trial', y = 'probability_reject', ax = ax[0])
sns.scatterplot(data = sub_data, x = 'trial', y = 'choice', ax = ax[0])
# Choice by offer
sns.scatterplot(data = simulated_data, x = 'offer', y = 'probability_reject', ax = ax[1])
sns.scatterplot(data = sub_data, x = 'offer', y = 'choice', ax = ax[1])
# -
# ##### Plot a simulation associated with low log likelihood
# Same plots with a deliberately poor parameter pair, for contrast.
simulated_data = simulate_data(sub_data, 1.7, 0.9)
simulated_data['probability_reject'] = 1 - simulated_data['probability_accept']
# +
fig, ax = plt.subplots(nrows = 1, ncols = 2, figsize = [10,5])
# Choice over trial
sns.scatterplot(data = simulated_data, x = 'trial', y = 'probability_reject', ax = ax[0])
sns.scatterplot(data = sub_data, x = 'trial', y = 'choice', ax = ax[0])
# Choice by offer
sns.lineplot(data = simulated_data, x = 'offer', y = 'probability_reject', ax = ax[1])
sns.scatterplot(data = sub_data, x = 'offer', y = 'choice', ax = ax[1],
                color = sns.color_palette('tab10')[1])
# -
# ## Posterior predictive check
# Steps:
# 1. Simulate data from the winning model 1000 times
# 2. Visualize the distribution of simulated choices per offer amount
# 3. Overlay the true behavior of the subject
# 4. Test whether the subject is in or outside the 95% confidence interval
# ##### Simulate a lot of times
n_iterations = 1000
alpha, beta = [0.35, .1]  # maximum-likelihood estimates found above
print(alpha, beta)
# Collect one summary frame per iteration and concatenate once at the end:
# DataFrame.append was removed in pandas 2.0, and appending inside the loop
# was quadratic in n_iterations anyway.
per_iteration = []
for i in range(n_iterations):
    if np.mod(i,100)==0:
        print(i)  # progress indicator every 100 iterations
    simulated_data = simulate_data(sub_data, alpha, beta)
    # Create predictions by evaluating the probability
    simulated_data['choice'] = simulated_data['probability_accept'] < np.random.rand(len(simulated_data))
    avg_choice = simulated_data.groupby('offer', as_index=False).mean()
    avg_choice['iteration'] = i
    per_iteration.append(avg_choice[['iteration','offer','choice']])
all_simulated_choices = pd.concat(per_iteration)
fig, ax = plt.subplots(nrows = 1, ncols = 2, figsize = [10,5])
# Left: the subject's mean choice per offer. Right: spread of the simulations.
sns.scatterplot(data = sub_data.groupby('offer', as_index=False).mean(),
                x = 'offer', y = 'choice', ax = ax[0])
sns.pointplot(data = all_simulated_choices, x = 'offer', y = 'choice', ci = 'sd', ax = ax[1])
ax[0].set(title = 'Observed behavior', xlabel = 'Offer', ylabel = 'Choice', ylim = [0,1])
ax[1].set(title = 'S.d. of simulations', xlabel = 'Offer', ylabel = 'Choice', ylim = [0,1])
# ## Next session:
# 1. Find maximum-likelihood parameters for a model using gradient descent algorithms (based on SSE or LL)
# 2. Model comparison using AIC and BIC
# 3. Model and parameter recovery
# ## Then:
# 1. Summarize what we've seen so far. Take a step back and talk about why it's useful what we've done and what we're still lacking. What do we still need?
# 2. Other model types - RL, DDM etc
| 3.Choice_rules_and_likelihood_cleaned.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3 (Spyder)
#     language: python
# name: python3
# ---
# # Potential field extrapolation
# This notebook shows how to continue a magnetic field upwards from a certain level. Note that here the field is random below that point, while in a real case the field should already be close to potential, to avoid artificial currents at the interface.
# We need FFTs and random numbers
import numpy as np
from numpy import fft, random
# Cube resolution and a random 3-component field b[component, i0, i1, i2].
n=16
b=random.random((3,n,n,n))
(b**2).mean()
# Check that the norm is conserved
# fft2/ifft2 act on the last two axes; the round trip returns a complex
# array with negligible imaginary parts, so the mean below is complex too.
b2=fft.ifft2(fft.fft2(b))
(b2**2).mean()
# Get the wavenumbers, corresponding to a box of size 1.
k=fft.fftfreq(n)*n*2*np.pi
k
# Set the boundary level to 12
# Layers i2 >= i2_bdry will be replaced by the potential-field extrapolation.
i2_bdry=12
# Views into the three field components (no copies).
bx=b[0,:,:,:]
by=b[1,:,:,:]
bz=b[2,:,:,:]
# Vertical field at the boundary layer; this is the extrapolation's input.
bz0=bz[:,:,i2_bdry]
bz0.shape
# The extrapolation is based on the FFT of Bz at the boundary:
bz0_fft=fft.fft2(bz0)
# Compute the vertical and horizontal components of the potential field.
# Work buffers for the per-layer Fourier coefficients. Use the builtin
# `complex` dtype: the old `np.complex` alias was removed in NumPy 1.24.
bx_fft=np.zeros((n,n),dtype=complex)
by_fft=np.zeros((n,n),dtype=complex)
bz_fft=np.zeros((n,n),dtype=complex)
# kk holds the horizontal wavenumber magnitude |k| per mode (for inspection).
kk=np.zeros((n,n))
# Vertical grid spacing for a box of size 1.
dz=1/n
# For each layer at or above the boundary, damp every horizontal Fourier
# mode of Bz by exp(-|k| * dz * height), derive the scalar potential phi
# per mode, and obtain Bx, By as its horizontal derivatives (i*k*phi).
for i2 in range(i2_bdry,n):
    for i1 in range(n):
        k1=k[i1]
        for i0 in range(n):
            k0=k[i0]
            # Horizontal wavenumber magnitude for this mode.
            k2=np.sqrt(k0**2+k1**2)
            # Potential field decays exponentially with height above the boundary.
            bz_fft[i0,i1]=bz0_fft[i0,i1]*np.exp(-k2*dz*(i2-i2_bdry))
            if k2>0:
                phi=-bz_fft[i0,i1]/k2
            else:
                # k=0 (mean) mode has no horizontal gradient contribution.
                phi=0.0
            bx_fft[i0,i1]=1j*k0*phi
            by_fft[i0,i1]=1j*k1*phi
            kk[i0,i1]=k2
            #print(i2,k0,k1,bx_fft[i0,i1])
    # Transform back to real space and write the layer in place
    # (bx/by/bz are views into b, so b itself is updated).
    bx[:,:,i2]=np.real(fft.ifft2(bx_fft))
    by[:,:,i2]=np.real(fft.ifft2(by_fft))
    bz[:,:,i2]=np.real(fft.ifft2(bz_fft))
    print(i2,bx[:,:,i2].max())
# Bz(z) become smoother with height, as high-k components are damped out
import pylab as pl
# Show three successive layers above the boundary; smoothing should increase with height.
pl.figure(figsize=(12,5))
pl.subplot(1,3,1)
pl.imshow(bz[:,:,13].T,origin='lower'); pl.title('Bz(13)');
pl.subplot(1,3,2)
pl.imshow(bz[:,:,14].T,origin='lower'); pl.title('Bz(14)');
pl.subplot(1,3,3)
pl.imshow(bz[:,:,15].T,origin='lower'); pl.title('Bz(15)');
# Verify that the signs of Bx and By are correct, relative to decreasing Bz
# All three components at the top layer, side by side for visual comparison.
pl.figure(figsize=(12,5))
pl.subplot(1,3,1)
pl.imshow(bx[:,:,15].T,origin='lower'); pl.title('Bx');
pl.subplot(1,3,2)
pl.imshow(by[:,:,15].T,origin='lower'); pl.title('By');
pl.subplot(1,3,3)
pl.imshow(bz[:,:,15].T,origin='lower'); pl.title('Bz');
# **NOTE 1**: If derivatives of B are needed, they can of course be obtained from finite differences, but also analytically
# **NOTE 2**: If the field is only for use in guard zones it should be fine as is, but if it is to be made compatible with a numerical div(B)=0 requirement one can leave the horizontal components as they are, and adjust the Bz field to make it exactly div-free
| docs/overview/IAC/HandsOn/4-Potential-Magnetic-Fields/PotentialBC.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3 (ipykernel)
# language: python
# name: python3
# ---
# ## Passo 0 - O que desejamos obter com as analises
# Quais das variaves numericas influenciam mais no consumo médio das pessoas no ano
#
# Queremos saber qual o melhor meio de negócio
# ## Passo1 - Bibliotecas
# +
import seaborn as sns
import pandas as pd
import numpy as np
import matplotlib.pyplot as plt
# %matplotlib inline
from sklearn.linear_model import LinearRegression
from sklearn.model_selection import train_test_split
from sklearn.metrics import mean_squared_error
from sklearn import metrics
from sklearn.metrics import mean_absolute_percentage_error
from sklearn.metrics import r2_score
# -
# #### Passo 1.1 - Lendo o arquivo
#
# Nesta etapa é importante verificar como o arquivo esta disposto, observar suas colunas e linhas e qual a qualidade dos dados
# se possue lacunas
#
# Trabalharemos com o arquivo csv do Ecommerce Customers da empresa. Possui informações do cliente, como Email, Endereço e sua cor Avatar. Em seguida, ele também possui colunas de valores numéricos:.
#
# * Avg. Session Length: Tempo médio das sessões de consultoria de estilo na loja.
# * Time on App: tempo médio gasto no app em minutos.
# * Time on Website: tempo médio gasto no site em minutos.
# * Lenght of Membership: Há quantos anos o cliente é membro.
#
# ## Os dados deste arquivos não são reais
# Load the (synthetic) e-commerce customer dataset and take a first look.
clientes = pd.read_csv("Ecommerce Customers")
clientes.head()
# The target we want to explain: yearly amount spent per customer.
variavel_mais_importante = clientes['Yearly Amount Spent']
variavel_mais_importante.head()
clientes.describe()
clientes.info()
clientes.columns  # fixed: was `cliente.columns`, a NameError (variable is `clientes`)
clientes.count()
# ## Step 2 - Exploratory data analysis
# #### Website
sns.jointplot(x='Time on Website', y='Yearly Amount Spent', data=clientes)
# sns.jointplot(x=clientes['Time on Website'], y=clientes['Yearly Amount Spent'])
# +
# In general, people's browsing time on the website is between 36 and 37 minutes,
# with spending roughly between 400 and above.
# However, since the data points are very scattered, we cannot see a linear correlation.
# Even long-time members do not spend more time on the website.
# -
# #### App
sns.jointplot(x='Time on App', y='Yearly Amount Spent', data=clientes)
# +
# For the app, customers' time is more spread out, with a slight concentration
# between 11 and 13 minutes and spending between 400 and 600.
# A mild linear correlation is visible: older customers spend more time on the app.
# -
sns.jointplot(x='Time on App', y='Length of Membership', data=clientes, kind='hex')
# #### Overall analysis with pairplot
# **We want to know which variables correlate best with 'Yearly Amount Spent'**
# '
sns.pairplot(clientes)
# #### Plot with a regression line using lmplot
plt.figure(figsize=(18,8))
sns.lmplot(x='Length of Membership', y='Yearly Amount Spent',data=clientes)
# ## Step 3 - Building the model
X= clientes[['Avg. Session Length', 'Time on App','Time on Website', 'Length of Membership']]
# All the numeric features
y = clientes['Yearly Amount Spent']
X_train, X_test, y_train,y_test =train_test_split(X, y, test_size=0.3,random_state=101)
# +
# Splitting the data into train and test sets (70/30)
# -
# ## Step 4 - Training the model
model_lr = LinearRegression() # instantiate
model_lr.fit(X_train,y_train)
# ## Step 5 - Testing the data
# #### 5.1 - Equation parameters
# One coefficient per feature; the DataFrame labels each with its column name.
coeficientes_c = model_lr.coef_
coef = pd.DataFrame(coeficientes_c, X.columns, columns=['Coefs n'])
coef
m = model_lr.intercept_
# #### 5.2 - Predicting the data
hipotese = model_lr.predict(X_test)
hipotese.size
y_test.count()
df1 = {'y_test':y_test,'Hipótese': model_lr.predict(X_test)}
analise_previsao = pd.DataFrame(df1)
analise_previsao # As we can see, the values in the prediction column are very close to y_test
# Predicted vs. observed scatter: points near the diagonal indicate a good fit.
# NOTE(review): axis labels appear swapped relative to the plotted data, and the
# title contains a typo ('Linerar') — left unchanged here as they are runtime strings.
plt.figure(figsize=(12,6))
plt.scatter(hipotese,y_test)
plt.title('Regressão Linerar simples')
plt.xlabel('y_test')
plt.ylabel('Hipótese')
plt.figure(figsize=(12,6))
sns.lmplot(x='Hipótese', y='y_test',data=analise_previsao)
# #### 5.3 - Residuals
#
# You should have obtained a very good model with a good fit.
# +
plt.figure(figsize=(12,6))
sns.histplot((y_test-hipotese), bins=50,kde=True)
# We can see that we obtained approximately a normal distribution of residuals
# -
# #### 5.4 - Error metrics
print('MAE:', metrics.mean_absolute_error(y_test, hipotese))
print('MSE:', metrics.mean_squared_error(y_test, hipotese))
print('RMSE:', np.sqrt(metrics.mean_squared_error(y_test, hipotese)))
print('MAPE', mean_absolute_percentage_error(y_test, hipotese)*100,'%')
print('R²:',r2_score(y_test, hipotese)*100,'%')
# # Step 6 - Conclusion
coef
# Podemos observar que para cada unidade de tempo médio gasto na sessão com o estilista resulta em um aumento anual médio de
# aproximadamente 26um
#
#
# Podemos observar também que a loja deve se concentrar principalmente em meios de fidelizar seus clientes, pois o tempo médio que a pessoa é cliente retorna quase o dobro do website mais o aplicativo, resultando em um aumento de aproximadamente 61.3um
#
# O tempo gasto no App se mostra muito mais rentavem para a loja em comparação ao website
| ModeloLinearComercioEletronicoSiteVSApp/1 Modelo_linear_Ecommerce Customers.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# language: python
# name: python3
# ---
# +
# import libraries
import requests ## library to pull down from the web
from bs4 import BeautifulSoup ## library that parses raw data into what we need
import csv ## library that translates data between python and csv
import re ## regular expressions
import os.path ## library that tracks files on your computer
# -
# url to scrape
# Source page: US Courts judicial compensation table (judges' salaries by year).
url = "https://www.uscourts.gov/judges-judgeships/judicial-compensation"
# ## Special Proxy Settings:
# #### (required if scraping from Bloomberg Corp)
# +
## Uncomment if working on Bloomberg Corp
# proxies = {
# 'http': 'http://proxy.bloomberg.com:80',
# 'https': 'https://proxy.bloomberg.com:80'
# }
# +
## Uncomment if working on Bloomberg Corp
## GET grabs the raw material on the server and stores in an object called page.
# page = requests.get(url, proxies=proxies)
# -
# ## Non Bloomberg Corp
# +
## Uncomment if working on non Bloomberg Corp
## GET grabs the raw material on the server and stores in an object called page.
# page = requests.get(url)
# -
# ## Back to Platform Agnostic Code
# +
## You can see it is something we have never seen before
# -
## We now use BeautifulSoup to translate that raw object into HTML with an HTML.parser
## Prettify to make it easier to read
## Most importantly we see this not datatype we have seen before
## it's a BeautifulSoup object
## we want to target just the table on this page since it holds all the content we are looking for
## but we get a different table than anticipated.
## remove sup tag in 2014 and other years
## We can be more specific in case there were multiple tables on this page.
## we can target the class of table
## What type of object is "table"?
## Now lets find column headers
## When you print our header, it looks like what?
## function to lowercase, strip and underscore header labels
## Don't worry about the re. part. We will cover in detail in a few weeks.
def sanitize_label(label):
    """Normalize a header label for use as a CSV column name.

    Lowercases the text, removes colons, trims surrounding whitespace, and
    collapses every run of non-alphanumeric characters into a single
    underscore (e.g. "Judicial Pay:" -> "judicial_pay").
    """
    cleaned = label.lower().replace(":", "").strip()
    return re.sub(r'[^A-Za-z0-9]+', '_', cleaned)
## function to turn string dollars into integers
def dollars(amount):
    """Convert a dollar string such as "$223,700" to the integer 223700.

    Strips the dollar sign and the thousands separators before parsing.
    The previous version replaced "," with "_", which only parsed by
    accident via PEP 515 underscore-in-literal support and raised
    ValueError on inputs with consecutive or trailing separators.
    """
    return int(amount.replace("$", "").replace(",", ""))
## store labels in list
# find ALL rows and place into specific variables for each data point.
## Place list into a list of dictionaries called judges_salaries
# +
#csv file to be created with data
# NOTE(review): `labels` and `judges_salaries` are expected to come from the
# scraping cells above (currently only outlined in comments) — confirm they
# are defined before running this cell.
csv_file_name = "federal_judges_salaries.csv"
file_exists = os.path.isfile(csv_file_name)
try:
    # newline='' is required when passing a file object to the csv module;
    # without it, csv output gets spurious blank rows on Windows.
    with open(csv_file_name, 'a', newline='') as file:
        writer = csv.DictWriter(file, fieldnames = labels )
        # Only write the header once, when the file is first created.
        if not file_exists:
            writer.writeheader()
        for salary in judges_salaries:
            writer.writerow(salary)
except IOError:
    print("sorry, some error...grrrr")
print(f"CSV file named {csv_file_name} is ready. Find it in your project folder!")
# -
| in-class-exercises/week-5 in-class single page - judicial pay.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# language: python
# name: python3
# ---
# ## Generate a mapping file with UUIDs for use in openLCA
# 1. Run cell 1 to generate the upload button. Select your mapping file by clicking upload. The mapping file must be csv file that conforms to the mapping standard.
# 2. Run cell 2 to add UUIDs. Conversion factors will be applied for any mappings that change units for which the alternate unit exists within the FEDEFL. The outcome will be printed on screen.
# 3. Run cell 3 to generate a link for downloading the json mapping file (.zip) for use in openLCA
#
# _To restart, select Kernel --> "Restart and Clear Output"_
# +
## Step 1
import pandas as pd
import fedelemflowlist
from fedelemflowlist.globals import flowmappingpath, flowmapping_fields
import os
import ipywidgets as widgets
from IPython.display import display, FileLink
# Scratch directory holds the uploaded csv and the generated zip output.
if not os.path.exists('./scratch'):
    os.mkdir('./scratch')
# File-upload widget restricted to a single csv mapping file.
uploader = widgets.FileUpload(
    accept = '.csv'
    ,multiple = False
)
display(uploader)
# -
# ### Select the upload button above to upload your csv mapping file
# Then proceed to the next cell
# +
## Step 2
if not uploader.value:
    raise ValueError('Input Excel file is required to continue. Make sure to select the Upload button above before continuing.')
# First (only) uploaded filename.
# NOTE(review): this dict-style access matches ipywidgets 7; in ipywidgets 8
# FileUpload.value is a tuple of dicts — confirm the installed version.
input_file = next(iter(uploader.value))
input_full = r"./scratch/" + input_file
if os.path.exists(input_full):
    os.remove(input_full)
# Persist the uploaded bytes to the scratch folder so pandas can read them.
with open(input_full,"wb") as fp:
    fp.write(uploader.value[input_file]['content'])
mapping = pd.read_csv(input_full)
mapping_length = len(mapping)
print(str(mapping_length) + ' flows in the mapping file.')
# Look up UUIDs by joining on flow name, context, and unit.
all_flows = fedelemflowlist.get_flows()
all_flows = all_flows[['Flowable', 'Context', 'Flow UUID', 'Unit']]
mapping_w_flowinfo = pd.merge(mapping, all_flows,
                              left_on=['TargetFlowName', 'TargetFlowContext', 'TargetUnit'],
                              right_on=['Flowable', 'Context', 'Unit'])
mapping_w_flowinfo = mapping_w_flowinfo.drop(columns=['TargetFlowUUID', 'Flowable',
                                                      'Context', 'Unit'])
mapping_w_flowinfo = mapping_w_flowinfo.rename(columns={'Flow UUID': 'TargetFlowUUID'})
mapping_merged_len = len(mapping_w_flowinfo)
# An inner merge drops rows with no match, so a shorter result means unmapped flows.
if mapping_length > mapping_merged_len:
    print("Not all flows were mapped to flows in the list")
print('UUIDs added for ' + str(mapping_merged_len) + ' flows.')
conversions = fedelemflowlist.get_alt_conversion()
# merge in conversion factors where source unit = alternate unit
mapping_w_conversion = pd.merge(mapping_w_flowinfo, conversions, how='left',
                                left_on=['TargetFlowName', 'SourceUnit', 'TargetUnit'],
                                right_on=['Flowable', 'AltUnit', 'Unit'])
# update conversion factor where current conversion is 1 and the updated conversion exists
converted1 = mapping_w_conversion['InverseConversionFactor'].notnull()
converted2 = mapping_w_conversion['ConversionFactor']==1
mapping_w_conversion['Convert']=converted1 & converted2
mapping_w_conversion.loc[(mapping_w_conversion['Convert']==True),
                         'ConversionFactor']=mapping_w_conversion['InverseConversionFactor']
converted = mapping_w_conversion['Convert'].sum()
print('Added conversion factors for ' + str(converted) + ' flows.')
# Drop the helper columns and restore the standard flow-mapping column order.
mapping_w_conversion = mapping_w_conversion.drop(columns=['Flowable','Unit',
                                                          'AltUnit','AltUnitConversionFactor',
                                                          'InverseConversionFactor', 'Convert'])
flowmapping_order = list(flowmapping_fields.keys())
mapping_w_conversion = mapping_w_conversion[flowmapping_order]
print(mapping_w_conversion)
# +
## Step 3
# Write the mapped flows as a JSON-LD zip for import into openLCA,
# then offer a download link.
output_file = 'flows_w_mappings.zip'
output_full = r"scratch/" + output_file
if os.path.exists(output_full):
    os.remove(output_full)
# Unique target UUIDs actually used by this mapping.
mapping_flow_uuids = pd.DataFrame(pd.unique(mapping_w_conversion['TargetFlowUUID']),columns=["Flow UUID"])
#Get all flows
all_flows = fedelemflowlist.get_flows()
#Subset all flows to get just those used in selected mapping
flows_used_in_mapping = pd.merge(all_flows,mapping_flow_uuids)
fedelemflowlist.write_jsonld(flows_used_in_mapping,output_full,mapping_w_conversion)
print("mapping file created as json")
download = FileLink(output_full,result_html_prefix="Click here to download: ")
display(download)
| Jupyter/run_mappings.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3 (ipykernel)
# language: python
# name: python3
# ---
# + [markdown] papermill={"duration": 0.021829, "end_time": "2021-12-19T23:23:31.180832", "exception": false, "start_time": "2021-12-19T23:23:31.159003", "status": "completed"} tags=[]
# <h1 style='text-align: center'>Predict House Popularity In New York 🥇</h1>
#
# <p style='text-align: center'>
# This notebook is in <span style='color: green; font-weight: 700'>Active</span> state of development!
# <a style='font-weight:700' href='https://github.com/LilDataScientist'> Code on GitHub! </a></p>
# + [markdown] papermill={"duration": 0.020175, "end_time": "2021-12-19T23:23:31.223489", "exception": false, "start_time": "2021-12-19T23:23:31.203314", "status": "completed"} tags=[]
# <div style='text-align: center'>
# <img src='https://media.istockphoto.com/photos/winter-in-manhattan-picture-id1292824324?b=1&k=20&m=1292824324&s=170667a&w=0&h=GpVutoJrAAYP_h_ddXm_hIR1_22WebYn3ym6jz6hRNQ=' width='1000' />
# </div>
# + [markdown] papermill={"duration": 0.01964, "end_time": "2021-12-19T23:23:31.263469", "exception": false, "start_time": "2021-12-19T23:23:31.243829", "status": "completed"} tags=[]
# <h1 style='background-color: #dae8fc; border: 1px solid #94add0; padding: 10px; font-weight: 400; text-align:center'>Import Dependencies</h1>
# + _cell_guid="b1076dfc-b9ad-4769-8c92-a6c4dae69d19" _uuid="8f2839f25d086af736a60e9eeb907d3b93b6e0e5" papermill={"duration": 1.835098, "end_time": "2021-12-19T23:23:33.118478", "exception": false, "start_time": "2021-12-19T23:23:31.283380", "status": "completed"} tags=[]
import catboost
import optuna
import pandas as pd
import numpy as np
import matplotlib.pyplot as plt
import seaborn as sns
from sklearn.model_selection import train_test_split
from sklearn.preprocessing import LabelEncoder, PolynomialFeatures
from sklearn.preprocessing import StandardScaler, MultiLabelBinarizer
from sklearn.metrics import balanced_accuracy_score, make_scorer
from sklearn.utils.class_weight import compute_class_weight
from sklearn.impute import SimpleImputer
from sklearn.pipeline import Pipeline, make_pipeline
from sklearn.compose import ColumnTransformer
from sklearn.base import BaseEstimator, TransformerMixin
from sklearn.feature_selection import VarianceThreshold
from sklearn.decomposition import PCA
from catboost import CatBoostClassifier, Pool, EShapCalcType, EFeaturesSelectionAlgorithm
# + [markdown] papermill={"duration": 0.022384, "end_time": "2021-12-19T23:23:33.163959", "exception": false, "start_time": "2021-12-19T23:23:33.141575", "status": "completed"} tags=[]
# <h1 style='background-color: #dae8fc; border: 1px solid #94add0; padding: 10px; font-weight: 400; text-align:center'>Turn Off Warnings</h1>
# + [markdown] papermill={"duration": 0.022852, "end_time": "2021-12-19T23:23:33.210506", "exception": false, "start_time": "2021-12-19T23:23:33.187654", "status": "completed"} tags=[]
# I came across such common warnings as setting to the copy of pandas dataframe
# + papermill={"duration": 0.030659, "end_time": "2021-12-19T23:23:33.262887", "exception": false, "start_time": "2021-12-19T23:23:33.232228", "status": "completed"} tags=[]
import warnings
# Silence sklearn UserWarnings and pandas chained-assignment warnings for a cleaner notebook.
warnings.filterwarnings('ignore', category=UserWarning, module='sklearn')
pd.options.mode.chained_assignment = None
# Fix the random seed for reproducibility and set the plotting theme.
np.random.seed(0)
sns.set_style("dark")
# + [markdown] papermill={"duration": 0.02285, "end_time": "2021-12-19T23:23:33.307931", "exception": false, "start_time": "2021-12-19T23:23:33.285081", "status": "completed"} tags=[]
# <h1 style='background-color: #dae8fc; border: 1px solid #94add0; padding: 10px; font-weight: 400; text-align:center'>Load Dataset</h1>
# + papermill={"duration": 1.304922, "end_time": "2021-12-19T23:23:34.634928", "exception": false, "start_time": "2021-12-19T23:23:33.330006", "status": "completed"} tags=[]
# Load the training data; the label column is 'TARGET'.
df = pd.read_csv('../input/housepricingtg2021/train.csv')
df
# + papermill={"duration": 0.993275, "end_time": "2021-12-19T23:23:35.654097", "exception": false, "start_time": "2021-12-19T23:23:34.660822", "status": "completed"} tags=[]
fig, ax = plt.subplots(1, 2, figsize=(15, 5))
# NOTE(review): both panels plot 'bedrooms' — the second was likely meant to
# be a different column (e.g. 'bathrooms'); confirm and fix if so.
sns.barplot(x="TARGET", y="bedrooms", data=df, ax=ax[0]);
sns.barplot(x="TARGET", y="bedrooms", data=df, ax=ax[1]);
# + papermill={"duration": 0.506959, "end_time": "2021-12-19T23:23:36.183618", "exception": false, "start_time": "2021-12-19T23:23:35.676659", "status": "completed"} tags=[]
# Mean price per target class.
fig, ax = plt.subplots(figsize=(15, 5))
sns.barplot(x="TARGET", y="price", data=df, ax=ax);
# + papermill={"duration": 0.40483, "end_time": "2021-12-19T23:23:36.613400", "exception": false, "start_time": "2021-12-19T23:23:36.208570", "status": "completed"} tags=[]
# Scatter the first N listings on a lat/lon map, cropped to the Manhattan area.
fig, ax = plt.subplots(1, 1, figsize=(15, 10))
N = 10000
ax.scatter(df['longitude'].values[:N], df['latitude'].values[:N],
           color='blue', s=1, label='train', alpha=0.1)
fig.suptitle('Houses locations')
ax.legend(loc=0)
ax.set_ylabel('latitude')
ax.set_xlabel('longitude')
plt.ylim((40.65, 40.9))
plt.xlim((-74.05, -73.8))
plt.show()
# + [markdown] papermill={"duration": 0.02436, "end_time": "2021-12-19T23:23:36.663156", "exception": false, "start_time": "2021-12-19T23:23:36.638796", "status": "completed"} tags=[]
# <h1 style='background-color: #dae8fc; border: 1px solid #94add0; padding: 10px; font-weight: 400; text-align:center'>Extra classes for feature engineering</h1>
# + [markdown] papermill={"duration": 0.025609, "end_time": "2021-12-19T23:23:36.714018", "exception": false, "start_time": "2021-12-19T23:23:36.688409", "status": "completed"} tags=[]
# Since such columns as **description, display_address, features and street_address** looks like a bunch of strings we would like to gain some information form them, probably one hot them, and for doing it I will introduce **BaseExtracter** class that will extract words from string of strings and **BaseMultiLabelBinarizer** that will one hot all these features!
# + papermill={"duration": 0.03846, "end_time": "2021-12-19T23:23:36.778636", "exception": false, "start_time": "2021-12-19T23:23:36.740176", "status": "completed"} tags=[]
class BaseExtracter(BaseEstimator, TransformerMixin):
    """Stateless transformer that applies `extraction_function` to a single
    DataFrame column, returning a copy with that column replaced by the
    function's output (e.g. a string split into a list of tokens).
    """
    def __init__(self, column, extraction_function):
        self.column = column
        self.extraction_function = extraction_function

    def fit(self, X, y=None):
        # Nothing to learn; present only for sklearn pipeline compatibility.
        return self

    def transform(self, X, y=None):
        result = X.copy()
        result[self.column] = result[self.column].apply(self.extraction_function)
        return result
class BaseMultiLabelBinarizer(BaseEstimator, TransformerMixin):
    """One-hot encode a column of label lists via MultiLabelBinarizer.

    The encoded indicator columns (one per distinct label) replace the
    original list-valued column in the returned DataFrame.
    """
    def __init__(self, column):
        self.mlb = MultiLabelBinarizer()
        self.column = column
    def fit(self, X, y=None):
        # Learn the label vocabulary from the training data.
        self.mlb.fit(X[self.column])
        return self
    def transform(self, X, y=None):
        X_ = X.copy()
        # Indicator matrix, one column per label seen during fit.
        encoded = pd.DataFrame(self.mlb.transform(X_[self.column]), columns=self.mlb.classes_)
        # reset_index aligns row positions before the column-wise concat,
        # since `encoded` has a fresh 0..n-1 index.
        X_ = pd.concat([X_.reset_index(), encoded], axis=1)
        # Drop the helper 'index' column and the original list-valued column.
        X_.drop(columns=['index', self.column], inplace=True)
        return X_
# + [markdown] papermill={"duration": 0.025549, "end_time": "2021-12-19T23:23:36.830221", "exception": false, "start_time": "2021-12-19T23:23:36.804672", "status": "completed"} tags=[]
# Since it is much easier to deal with pandas, we will implement our **PandasSimpleImputer**. The point of doing this was that **sklearn.impute.SimpleImputer** returns numpy array while for this particular task we want our data to be in pandas format
# + papermill={"duration": 0.034205, "end_time": "2021-12-19T23:23:36.890241", "exception": false, "start_time": "2021-12-19T23:23:36.856036", "status": "completed"} tags=[]
class PandasSimpleImputer(SimpleImputer):
    """SimpleImputer variant that returns a pandas DataFrame.

    sklearn's SimpleImputer.transform returns a bare numpy array; this
    subclass remembers the column names seen at fit time and restores
    them on transform so downstream steps keep working with DataFrames.
    """
    def fit(self, X, y=None):
        # Record the column names so transform can rebuild the DataFrame.
        self.columns = X.columns
        return super().fit(X, y)
    def transform(self, X):
        return pd.DataFrame(super().transform(X), columns=self.columns)
# + [markdown] papermill={"duration": 0.024487, "end_time": "2021-12-19T23:23:36.940618", "exception": false, "start_time": "2021-12-19T23:23:36.916131", "status": "completed"} tags=[]
# <h1 style='background-color: #dae8fc; border: 1px solid #94add0; padding: 10px; font-weight: 400; text-align:center'>Feature Engineering</h1>
# + [markdown] papermill={"duration": 0.024198, "end_time": "2021-12-19T23:23:36.989834", "exception": false, "start_time": "2021-12-19T23:23:36.965636", "status": "completed"} tags=[]
# Let's add some Nonlinearity to our model by adding more features.
# + papermill={"duration": 0.042786, "end_time": "2021-12-19T23:23:37.057880", "exception": false, "start_time": "2021-12-19T23:23:37.015094", "status": "completed"} tags=[]
class GenerateMoreFeatures(BaseEstimator, TransformerMixin):
    """Feature engineering for the listings DataFrame.

    Adds: the first PCA component of (latitude, longitude); price per
    bedroom/bathroom; seller and building popularity counts; and a set of
    calendar features derived from the 'created' timestamp. Drops the raw
    columns it consumes (bedrooms, bathrooms, manager_id, building_id,
    created).
    """
    def __init__(self):
        pass

    def fit(self, X, y=None):
        # Learn a PCA rotation of the coordinates; transform() adds the
        # first principal component as a feature.
        coords = np.vstack((X[['latitude', 'longitude']].values))
        self.pca = PCA().fit(coords)
        return self

    def transform(self, X, y=None):
        X_ = X.copy()
        X_['pca0'] = self.pca.transform(X_[['latitude', 'longitude']])[:, 0]

        # --- Price per bedroom and bathroom ---
        X_['price_per_bedroom'] = X_["price"] / X_["bedrooms"]
        X_["price_per_bathroom"] = X_["price"] / X_["bathrooms"]
        # price/0 yields +inf; zero those out. Use .replace instead of the
        # previous chained indexed assignment (unreliable under pandas
        # copy-on-write) and np.inf instead of np.Inf (removed in NumPy 2.0).
        X_['price_per_bedroom'] = X_['price_per_bedroom'].replace(np.inf, 0)
        X_['price_per_bathroom'] = X_['price_per_bathroom'].replace(np.inf, 0)
        X_.drop(columns=['bedrooms','bathrooms'], inplace=True)

        # --- Seller and building popularity ---
        # Counts are computed on the data being transformed itself.
        building_ids = X_['building_id'].value_counts()
        manager_ids = X_['manager_id'].value_counts()
        X_['manager_ids_count'] = X_['manager_id'].apply(lambda x: manager_ids[x] if x in manager_ids else 0)
        X_['building_ids_count'] = X_['building_id'].apply(lambda x: building_ids[x] if x in building_ids else 0)
        X_.drop(columns=['manager_id','building_id'], inplace=True)

        # --- Date and time ---
        # pd.to_datetime replaces astype("datetime64"), which raises on
        # pandas 2.x because a unit must be specified.
        X_["created"] = pd.to_datetime(X_["created"])
        X_['Weekday'] = X_.created.dt.weekday
        X_['day_of_month'] = X_.created.dt.day
        X_['hour'] = X_.created.dt.hour
        X_['is_weekend'] = X_.created.apply(lambda x: 1 if x.date().weekday() in (5, 6) else 0)
        X_['month'] = X_.created.dt.month
        X_['week'] = X_.created.dt.isocalendar().week
        X_['minute'] = X_['created'].dt.minute
        # Combined weekday/hour index (0..167), one per hour of the week.
        X_['pickup_week_hour'] = X_['Weekday'] * 24 + X_['hour']
        # Days elapsed since a fixed reference date (end of the dataset window).
        timestamp = pd.Timestamp('2016-06-29 18:30:41')
        X_['days_since_last'] = X_.created.apply(lambda x: (timestamp - x).days)
        X_.drop(columns=['created'], inplace=True)
        return X_
# + [markdown] papermill={"duration": 0.026262, "end_time": "2021-12-19T23:23:37.110720", "exception": false, "start_time": "2021-12-19T23:23:37.084458", "status": "completed"} tags=[]
# <h1 style='background-color: #dae8fc; border: 1px solid #94add0; padding: 10px; font-weight: 400; text-align:center'>Pipeline</h1>
# + [markdown] papermill={"duration": 0.024169, "end_time": "2021-12-19T23:23:37.159637", "exception": false, "start_time": "2021-12-19T23:23:37.135468", "status": "completed"} tags=[]
# Now we are ready to build a pipeline for our particular problem. For such encoded features as **display_address, features and street_address** we will use **VarianceThreshold** which basically removes elements by variance threshold> If you would skip this turn you would end up having million of features!
# + papermill={"duration": 0.037823, "end_time": "2021-12-19T23:23:37.221506", "exception": false, "start_time": "2021-12-19T23:23:37.183683", "status": "completed"} tags=[]
# Text-like columns are imputed with a placeholder, split into tokens,
# one-hot encoded, then pruned by VarianceThreshold to keep only tokens
# that appear often enough to be informative.
features = make_pipeline(PandasSimpleImputer(strategy='constant', fill_value='xxx'),
                         BaseExtracter('features', lambda x: x[1:-1].replace("'", "").replace(" ", "").split(',')),
                         BaseMultiLabelBinarizer('features'),
                         VarianceThreshold(threshold=0.01))
display_address = make_pipeline(PandasSimpleImputer(strategy='constant', fill_value='xxx'),
                                BaseExtracter('display_address', lambda x: x.split(' ')),
                                BaseMultiLabelBinarizer('display_address'),
                                VarianceThreshold(threshold=0.01))
street_address= make_pipeline(PandasSimpleImputer(strategy='constant', fill_value='xxx'),
                              BaseExtracter('street_address', lambda x: x.split(' ')),
                              BaseMultiLabelBinarizer('street_address'),
                              VarianceThreshold(threshold=0.01))
# Engineered numeric features and degree-2 polynomial interactions, both standardized.
special_features = make_pipeline(GenerateMoreFeatures(), StandardScaler())
polynomial_features = make_pipeline(PolynomialFeatures(2), StandardScaler())
# Route each group of raw columns to its dedicated sub-pipeline.
columns = ColumnTransformer(transformers=[
    ('Special Features', special_features, ['price', 'bedrooms', 'bathrooms',
                                            'building_id', 'manager_id', 'created',
                                            'latitude', 'longitude', 'listing_id']),
    ('Polynomial Features', polynomial_features, ['price', 'bedrooms', 'bathrooms',
                                                  'latitude', 'longitude', 'listing_id']),
    ('Features', features, ['features']),
    ('Display Address', display_address, ['display_address']),
    ('Street Address', street_address, ['street_address']),
])
# + [markdown] papermill={"duration": 0.024282, "end_time": "2021-12-19T23:23:37.270109", "exception": false, "start_time": "2021-12-19T23:23:37.245827", "status": "completed"} tags=[]
# Now we are ready to split our data and start training
# + papermill={"duration": 0.059021, "end_time": "2021-12-19T23:23:37.353907", "exception": false, "start_time": "2021-12-19T23:23:37.294886", "status": "completed"} tags=[]
# Separate features from the label and encode the string labels as integers.
X, y = df.loc[:, df.columns != 'TARGET'], df.loc[:, 'TARGET']
le = LabelEncoder()
y = le.fit_transform(y)
# Hold out 15% of the data for validation.
X_train, X_test, y_train, y_test = train_test_split(X, y, test_size=0.15, random_state=0)
# + papermill={"duration": 11.194626, "end_time": "2021-12-19T23:23:48.575864", "exception": false, "start_time": "2021-12-19T23:23:37.381238", "status": "completed"} tags=[]
# Fit the feature pipeline on train only, then apply it to both splits
# (prevents leakage of test statistics into the transformers).
X_train = columns.fit_transform(X_train);
X_test = columns.transform(X_test)
features_num = X_train.shape[1]
print(f'Generated {features_num} features')
# + [markdown] papermill={"duration": 0.02424, "end_time": "2021-12-19T23:23:48.625149", "exception": false, "start_time": "2021-12-19T23:23:48.600909", "status": "completed"} tags=[]
# Dealing with class imbalance problem
# + papermill={"duration": 0.042213, "end_time": "2021-12-19T23:23:48.691748", "exception": false, "start_time": "2021-12-19T23:23:48.649535", "status": "completed"} tags=[]
# Inverse-frequency class weights to counter class imbalance in training.
classes = np.unique(y_train)
weights = compute_class_weight(class_weight='balanced', classes=classes, y=y_train)
class_weights = dict(zip(classes, weights))
class_weights
# + [markdown] papermill={"duration": 0.024452, "end_time": "2021-12-19T23:23:48.740988", "exception": false, "start_time": "2021-12-19T23:23:48.716536", "status": "completed"} tags=[]
# <h1 style='background-color: #dae8fc; border: 1px solid #94add0; padding: 10px; font-weight: 400; text-align:center'>Feature Selection</h1>
# + [markdown] papermill={"duration": 0.02498, "end_time": "2021-12-19T23:23:48.791304", "exception": false, "start_time": "2021-12-19T23:23:48.766324", "status": "completed"} tags=[]
# Since while some features are good other may not be like that and that's why we need to select the ones which perfoms the best for our model
# + papermill={"duration": 156.551447, "end_time": "2021-12-19T23:26:25.367954", "exception": false, "start_time": "2021-12-19T23:23:48.816507", "status": "completed"} tags=[]
# Recursively select the 90 most useful features by SHAP values using
# CatBoost's built-in feature selection (model itself is not retrained here).
features_select = 90
clf = CatBoostClassifier(class_weights=class_weights, verbose=0,
                         task_type="GPU", devices='0', random_seed=0,
                         iterations=2000, depth=5, l2_leaf_reg=4,
                         leaf_estimation_method='Newton')
train_pool = catboost.Pool(data=X_train, label=y_train)
test_pool = catboost.Pool(data=X_test, label=y_test)
summary = clf.select_features(train_pool, eval_set=test_pool,
                              features_for_select=f'0-{features_num - 1}',
                              num_features_to_select=features_select,steps=5,
                              logging_level='Silent',
                              algorithm=EFeaturesSelectionAlgorithm.RecursiveByShapValues,
                              shap_calc_type=EShapCalcType.Regular, train_final_model=False);
# Keep only the selected feature columns for the remaining steps.
selected_features = summary['selected_features']
X_train_subset = X_train[:, selected_features]
X_test_subset = X_test[:, selected_features]
# + [markdown] papermill={"duration": 0.024938, "end_time": "2021-12-19T23:26:25.418498", "exception": false, "start_time": "2021-12-19T23:26:25.393560", "status": "completed"} tags=[]
# <h1 style='background-color: #dae8fc; border: 1px solid #94add0; padding: 10px; font-weight: 400; text-align:center'>Hyperparameters Tuning</h1>
# + [markdown] papermill={"duration": 0.024512, "end_time": "2021-12-19T23:26:25.467663", "exception": false, "start_time": "2021-12-19T23:26:25.443151", "status": "completed"} tags=[]
# Since we are using **Gradient Boosting** we have to choose between different depth for trees, number of iterations and other important hyperparameters
# + papermill={"duration": 354.575357, "end_time": "2021-12-19T23:32:20.067542", "exception": false, "start_time": "2021-12-19T23:26:25.492185", "status": "completed"} tags=[]
def objective(trial):
    """Optuna objective: mean balanced accuracy of CatBoost over 5 random seeds.

    Averaging over several seeds reduces the variance of a single fit, so the
    sampler optimises the hyperparameters rather than one lucky seed.
    Relies on notebook globals: class_weights, X_train_subset, y_train,
    X_test_subset, y_test.
    """
    depth = trial.suggest_int("rf_max_depth", 3, 6, log=True)
    l2_leaf_reg = trial.suggest_int("l2_leaf_reg", 2, 30, log=True)
    # suggest_discrete_uniform returns a float (e.g. 2500.0) while CatBoost
    # expects an integer iteration count — cast explicitly.
    iterations = int(trial.suggest_discrete_uniform("iterations", 2000, 3000, q=500))
    all_scores = []
    for seed in np.random.randint(0, 100, size=5):
        clf = CatBoostClassifier(class_weights=class_weights, verbose=0,
                                 task_type="GPU", devices='0', random_seed=seed,
                                 iterations=iterations, depth=depth,
                                 l2_leaf_reg=l2_leaf_reg)
        clf.fit(X_train_subset, y_train)
        score = balanced_accuracy_score(y_test, clf.predict(X_test_subset))
        all_scores.append(score)
    # The loop always runs 5 times, so all_scores is never empty.
    return sum(all_scores) / len(all_scores)
# TPE sampler with a fixed seed so the hyperparameter search is reproducible.
sampler = optuna.samplers.TPESampler(seed=0)
study = optuna.create_study(sampler=sampler, direction='maximize')
study.optimize(objective, n_trials=5)
# Best hyperparameter combination found (displayed as the cell output).
study.best_params
# + [markdown] papermill={"duration": 0.039261, "end_time": "2021-12-19T23:32:20.146470", "exception": false, "start_time": "2021-12-19T23:32:20.107209", "status": "completed"} tags=[]
# <h1 style='background-color: #dae8fc; border: 1px solid #94add0; padding: 10px; font-weight: 400; text-align:center'>Train final model and submit to competition</h1>
# + papermill={"duration": 13.859271, "end_time": "2021-12-19T23:32:34.045376", "exception": false, "start_time": "2021-12-19T23:32:20.186105", "status": "completed"} tags=[]
# Train the final classifier on the selected feature subset with tuned settings.
clf = CatBoostClassifier(class_weights=class_weights, verbose=0,
                         task_type="GPU", devices='0', random_seed=42,
                         iterations=2500, depth=4,
                         l2_leaf_reg=5)
clf.fit(X_train_subset, y_train);
# + papermill={"duration": 4.319006, "end_time": "2021-12-19T23:32:38.405299", "exception": false, "start_time": "2021-12-19T23:32:34.086293", "status": "completed"} tags=[]
df_test = pd.read_csv('../input/housepricingtg2021/test.csv')
ID = df_test['Id']
# Apply the same fitted preprocessing transformer used on the training data.
df_test = columns.transform(df_test)
new_df_test = df_test[:, selected_features]
preds = clf.predict(new_df_test, prediction_type='Class')
# Map encoded class labels back to their original names before submitting.
preds = le.inverse_transform(preds)
output = pd.DataFrame({'Id': ID,'TARGET': preds})
output.to_csv('submission.csv', index=False)
| New York House Popularity Prediction/notebook.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# language: python
# name: python3
# ---
# + [markdown] colab_type="text" id="h2q27gKz1H20"
# ##### Copyright 2020 The TensorFlow Authors.
# + cellView="form" colab={} colab_type="code" id="TUfAcER1oUS6"
#@title Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# https://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# + [markdown] colab_type="text" id="Gb7qyhNL1yWt"
# # Flower classification with TensorFlow Lite Model Maker with TensorFlow 2.0
# + [markdown] colab_type="text" id="nDABAblytltI"
# <table class="tfo-notebook-buttons" align="left">
# <td>
# <a target="_blank" href="https://colab.research.google.com/github/tensorflow/examples/blob/master/lite/codelabs/flower_classification/ml/Flower_Classification_with_TFLite_Model_Maker.ipynb">
# <img src="https://www.tensorflow.org/images/colab_logo_32px.png" />
# Run in Google Colab</a>
# </td>
# <td>
# <a target="_blank" href="https://github.com/tensorflow/examples/blob/master/lite/codelabs/flower_classification/ml/Flower_Classification_with_TFLite_Model_Maker.ipynb">
# <img src="https://www.tensorflow.org/images/GitHub-Mark-32px.png" />
# View source on GitHub</a>
# </td>
# </table>
# + [markdown] colab_type="text" id="m86-Nh4pMHqY"
# Model Maker library simplifies the process of adapting and converting a TensorFlow neural-network model to particular input data when deploying this model for on-device ML applications.
#
# This notebook shows an end-to-end example that utilizes this Model Maker library to illustrate the adaption and conversion of a commonly-used image classification model to classify flowers on a mobile device.
# + [markdown] colab_type="text" id="bcLF2PKkSbV3"
# ## Prerequisites
#
# To run this example, we first need to install several required packages, including the Model Maker package from its GitHub [repo](https://github.com/tensorflow/examples/tree/master/tensorflow_examples/lite/model_maker).
# + colab={} colab_type="code" id="6cv3K3oaksJv"
# !pip install git+git://github.com/tensorflow/examples.git#egg=tensorflow-examples[model_maker]
# + [markdown] colab_type="text" id="Gx1HGRoFQ54j"
# Import the required packages.
# + colab={} colab_type="code" id="XtxiUeZEiXpt"
import numpy as np
import tensorflow as tf
assert tf.__version__.startswith('2')
from tensorflow_examples.lite.model_maker.core.data_util.image_dataloader import ImageClassifierDataLoader
from tensorflow_examples.lite.model_maker.core.task import image_classifier
from tensorflow_examples.lite.model_maker.core.task.model_spec import mobilenet_v2_spec
from tensorflow_examples.lite.model_maker.core.task.model_spec import ImageModelSpec
import matplotlib.pyplot as plt
# + [markdown] colab_type="text" id="KKRaYHABpob5"
# ## Simple End-to-End Example
# + [markdown] colab_type="text" id="SiZZ5DHXotaW"
# ### Get the data path
#
# Let's get some images to play with this simple end-to-end example. Hundreds of images is a good start for Model Maker while more data could achieve better accuracy.
# + cellView="form" colab={} colab_type="code" id="3jz5x0JoskPv"
# Download and extract the flower photos archive; get_file returns the local
# path of the extracted dataset directory.
image_path = tf.keras.utils.get_file(
      'flower_photos',
      'https://storage.googleapis.com/download.tensorflow.org/example_images/flower_photos.tgz',
      untar=True)
# + [markdown] colab_type="text" id="a55MR6i6nuDm"
# You could replace `image_path` with your own image folders. As for uploading data to colab, you could find the upload button in the left sidebar shown in the image below with the red rectangle. Just have a try to upload a zip file and unzip it. The root file path is the current path.
#
# <img src="https://storage.googleapis.com/download.tensorflow.org/models/tflite/screenshots/model_maker_image_classification.png" alt="Upload File" width="800" hspace="100">
# + [markdown] colab_type="text" id="NNRNv_mloS89"
# If you prefer not to upload your images to the cloud, you could try to run the library locally following the [guide](https://github.com/tensorflow/examples/tree/master/tensorflow_examples/lite/model_maker) in github.
# + [markdown] colab_type="text" id="w-VDriAdsowu"
# ### Run the example
# The example just consists of 4 lines of code as shown below, each of which representing one step of the overall process.
#
# + [markdown] colab_type="text" id="6ahtcO86tZBL"
# 1. Load input data specific to an on-device ML app. Split it to training data and testing data.
# + colab={} colab_type="code" id="lANoNS_gtdH1"
# Load images from the folder and split 90/10 into train and test sets.
data = ImageClassifierDataLoader.from_folder(image_path)
train_data, test_data = data.split(0.9)
# + [markdown] colab_type="text" id="Y_9IWyIztuRF"
# 2. Customize the TensorFlow model.
# + colab={} colab_type="code" id="yRXMZbrwtyRD"
model = image_classifier.create(train_data)
# + [markdown] colab_type="text" id="oxU2fDr-t2Ya"
# 3. Evaluate the model.
# + colab={} colab_type="code" id="wQr02VxJt6Cs"
loss, accuracy = model.evaluate(test_data)
# + [markdown] colab_type="text" id="eVZw9zU8t84y"
# 4. Export to TensorFlow Lite model.
# You could download it in the left sidebar same as the uploading part for your own use.
# + colab={} colab_type="code" id="Zb-eIzfluCoa"
# with_metadata=True: export with attached model metadata (see Model Maker docs).
model.export(export_dir='.', with_metadata=True)
# + [markdown] colab_type="text" id="pyju1qc_v-wy"
# After this simple 4 steps, we can now download the model and label files, and continue to the next step in the [codelab](https://codelabs.developers.google.com/codelabs/recognize-flowers-with-tensorflow-on-android/#4).
#
# For a more comprehensive guide to TFLite Model Maker, please refer to this [notebook](https://colab.sandbox.google.com/github/tensorflow/examples/blob/master/tensorflow_examples/lite/model_maker/demo/image_classification.ipynb) and its [documentation](https://github.com/tensorflow/examples/tree/master/tensorflow_examples/lite/model_maker).
#
| lite/codelabs/flower_classification/ml/Flower_Classification_with_TFLite_Model_Maker.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# language: python
# name: python3
# ---
# # TF-IDF (Term Frequency - Inverse Document Frequency)
# A problem with scoring word frequency is that highly frequent words start to dominate in the document, but may not contain as much ***informational content*** to the model as rarer but perhaps domain specific words.
#
# One approach is to rescale the frequency of words by how often they appear in all documents, so that the scores for frequent words like “the” that are also frequent across all documents are penalized.
#
# This approach to scoring is called ***Term Frequency – Inverse Document Frequency***, or TF-IDF for short, where:
#
# * Term Frequency: is a scoring of the frequency of the word in the current document.
# * Inverse Document Frequency: is a scoring of how rare the word is across documents.
# * The scores are a weighting where not all words are equally as important or interesting.
#
#
# Without going into the math, TF-IDF are word frequency scores that try to highlight words that are more interesting, e.g. frequent in a document but not across documents.
# ## Implementing TF-IDF Model
# ### Load Libraries
# +
import nltk
from nltk.corpus import stopwords
from nltk.stem import WordNetLemmatizer
import re
from sklearn.feature_extraction.text import TfidfVectorizer
# -
# Sample English passage used to build the TF-IDF corpus (kept verbatim, typos included).
english_text = """Perhaps one of the most significant advances in made by Arabic mathematics began at this time with the work of al-Khwarizmi, namely the beginnings of algebra. It is important to understand just how significant this new idea was. It was a revolutionary move away from the Greek concept of mathematics which was essentially geometry. Algebra was a unifying theory which allowedrational numbers,irrational numbers, geometrical magnitudes, etc., to all be treated as \"algebraic objects\". It gave mathematics a whole new development path so much broader in concept to that which had existed before, and provided a vehicle for future development of the subject. Another important aspect of the introduction of algebraic ideas was that it allowed mathematics to be applied to itselfin a way which had not happened before."""
english_text
# Arabic translation of the same passage, used for the Arabic TF-IDF corpus.
arabic_text =u"""ربما كانت أحد أهم التطورات التي قامت بها الرياضيات العربية التي بدأت في هذا الوقت بعمل الخوارزمي وهي بدايات الجبر, ومن المهم فهم كيف كانت هذه الفكرة الجديدة مهمة, فقد كانت خطوة نورية بعيدا عن المفهوم اليوناني للرياضيات التي هي في جوهرها هندسة, الجبر کان نظرية موحدة تتيح الأعداد الكسرية والأعداد اللا كسرية, والمقادير الهندسية وغيرها, أن تتعامل على أنها أجسام جبرية, وأعطت الرياضيات ككل مسارا جديدا للتطور بمفهوم أوسع بكثير من الذي كان موجودا من قبل, وقم وسيلة للتنمية في هذا الموضوع مستقبلا. وجانب آخر مهم لإدخال أفكار الجبر وهو أنه سمح بتطبيق الرياضيات على نفسها بطريقة لم تحدث من قبل"""
arabic_text
# ### Data Cleaning
# #### Text to sentences
english_sentences = nltk.sent_tokenize(english_text)
arabic_sentences = nltk.sent_tokenize(arabic_text)
print(len(english_sentences), 'English paragraphs')
print(len(arabic_sentences), 'Arabic paragraphs')
english_sentences
# #### Clean English Text
# +
WordNet = WordNetLemmatizer()
# Build the stop-word set once: the original rebuilt set(stopwords.words("english"))
# for every single word, making the cleaning loop needlessly quadratic.
english_stopwords = set(stopwords.words("english"))
english_corpus = []
for i in range(len(english_sentences)):
    # keep letters only
    cleaning_text = re.sub('[^a-zA-Z]', ' ', english_sentences[i])
    # text to lower case
    cleaning_text = cleaning_text.lower()
    # tokenize each sentence
    cleaning_text = cleaning_text.split()
    # lemmatize each word and drop stop words
    sentence_lem = [WordNet.lemmatize(word) for word in cleaning_text
                    if word not in english_stopwords]
    english_corpus.append(' '.join(sentence_lem))
# -
english_corpus
# #### Clean Arabic Text
# +
WordNet = WordNetLemmatizer()
# Hoisted out of the loop: the original rebuilt the stop-word set for every word.
arabic_stopwords = set(stopwords.words("arabic"))
arabic_corpus = []
for i in range(len(arabic_sentences)):
    # tokenize each sentence
    cleaning_text = arabic_sentences[i].split()
    # NOTE(review): WordNetLemmatizer targets English; for Arabic tokens it mostly
    # returns them unchanged — consider an Arabic-aware stemmer if needed.
    sentence_lem = [WordNet.lemmatize(word) for word in cleaning_text
                    if word not in arabic_stopwords]
    arabic_corpus.append(' '.join(sentence_lem))
# -
# # TF - IDF
# ### Create the transform
# Learn the vocabulary and IDF weights from the cleaned English corpus.
vectorizer = TfidfVectorizer()
vectorizer.fit(english_corpus)
# ### Summarize
print(vectorizer.vocabulary_)  # token -> column index mapping
print(vectorizer.idf_)  # inverse-document-frequency weight per token
# ### Encode document
vectors= vectorizer.transform(english_corpus)
# summarize encoded vector
print(vectors.shape)
print(vectors.toarray())
# NOTE(review): get_feature_names() was removed in scikit-learn 1.2 in favour of
# get_feature_names_out() — update both calls below if running a recent sklearn.
vectorizer.get_feature_names()
import pandas as pd
pd.DataFrame(vectors.toarray(), columns=vectorizer.get_feature_names())
# ### Advantages:
#
# - Easy to compute
# - You have some basic metric to extract the most descriptive terms in a document
# - You can easily compute the similarity between 2 documents using it
# ### Disadvantages:
# - TF-IDF is based on the bag-of-words (BoW) model, therefore it does not capture position in text, semantics, co-occurrences in different documents, etc.
# - For this reason, TF-IDF is only useful as a lexical level feature
# - Cannot capture semantics (e.g. as compared to topic models, word embeddings)
# ### Printing Dependencies
# %load_ext watermark
# %watermark --iversion
| TF - IDF.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python [conda env:anaconda3]
# language: python
# name: conda-env-anaconda3-py
# ---
# # Data and Figures for Centrality Study
# +
# %pylab inline
import pandas as pd
import scipy.sparse as sp
from scipy.sparse import linalg
from collections import defaultdict
from plotting import PLOT_STYLE
# -
from eventgraphs import EventGraph
# ## Communicability
# +
# Generate a synthetic dataset with a preferred node
np.random.seed(1)  # fixed seed: downstream figures depend on this exact dataset
events = []
N = 20  # number of nodes
M = 1000  # number of base events
t = 0
nodes = np.arange(N)
for ix in range(M):
    t = t+1
    u,v = np.random.choice(nodes, size=2, replace=False)
    events.append((u,v,t))
    # If u=0 we force a second immediate path.
    if u == 0:
        t = t+1
        w = v
        # Resample until w differs from v. choice(..., size=1) returns a 1-element
        # array, so w is an array here; astype(int) below normalises the dtypes.
        while w == v:
            w = np.random.choice(nodes, size=1, replace=False)
        events.append((v,w,t))
events = pd.DataFrame(events, columns=['source', 'target', 'time']).astype(int)
# +
# Construct the event graph from the synthetic event list; the linking rule
# is customised below before building.
EG = EventGraph.from_pandas_eventlist(events, graph_rules='pfg')
def path_finder(e1, e2):
    """Link rule: two events are connected when e1's target node is e2's source.

    Returns a (connected, dt) pair, where dt = e2.time - e1.time.
    """
    dt = e2.time - e1.time
    if e1.target != e2.source:
        return False, dt
    return True, dt
# Install the custom linking rule and allow links between non-consecutive
# events, then assemble the event graph.
EG.event_graph_rules['event_processor'] = path_finder
EG.event_graph_rules['subsequential'] = False
EG.build(verbose=True)
# +
def generate_node_event_matrix(eventgraph, kind='source'): # General method to be included in eventgraphs.
    """Creates a node and event incidence matrix, whether a node is a source or target in an event.

    Parameters
    ----------
    eventgraph : EventGraph-like
        Must expose ``events`` (DataFrame with a ``kind`` column), ``N``
        (number of nodes) and ``M`` (number of events).
    kind : str
        Node role column to use: 'source' or 'target'.

    Returns
    -------
    scipy.sparse.csc_matrix
        Shape (N, M), with a 1 where the node plays the given role in an event.
    """
    # Vectorised replacement for the original iterrows() loop: the DataFrame
    # index supplies the event columns and the node ids supply the rows
    # (identical output, including for non-default indices).
    cols = eventgraph.events.index.to_numpy()
    rows = eventgraph.events[kind].to_numpy()
    data = np.ones_like(rows)
    A = sp.coo_matrix((data, (rows, cols)), shape=(eventgraph.N, eventgraph.M), dtype=int)
    return A.tocsc()
def generate_rankings(EG, alpha, beta):
    """Calculate the event communicability scores and each node's broadcast score.

    Returns a pair of pandas Series: (node broadcast scores b, event scores e).
    """
    T = EG.D  # kept from the original; not used below
    ones_vec = np.ones(EG.M)
    identity = sp.eye(EG.M, format='csc')
    node_event = generate_node_event_matrix(EG)
    eg_matrix = EG.generate_eg_matrix()
    # Exponentially damp each link by its inter-event time.
    eg_matrix.data = np.array([np.exp(-beta * x) for x in eg_matrix.data])
    resolvent = alpha * linalg.inv(identity - alpha * eg_matrix)
    event_scores = resolvent.dot(ones_vec)
    node_scores = node_event.dot(event_scores)
    return pd.Series(node_scores), pd.Series(event_scores)
def percentage_difference(b, focus=0):
    """Calculates the percentage gain that the focus node has over or under the second best (or first best if under)."""
    ordered = b.sort_values(ascending=False)
    top = ordered.iloc[0]
    if ordered.index[0] == focus:
        # Focus node is ranked first: measure its lead over the runner-up.
        runner_up = ordered.iloc[1]
        return (top - runner_up) / runner_up
    # Otherwise measure how far the focus node trails the leader (negative).
    return (ordered.loc[focus] - top) / top
# +
# Sweep the (alpha, beta) grid and record the focus node's relative advantage.
x = np.linspace(0.1,0.9,9)
y = np.linspace(0,1,11)
P = defaultdict(dict)  # alpha -> beta -> percentage difference of the focus node
store = defaultdict(dict)  # alpha -> beta -> (b, e) raw score Series
for alpha in x:
    for beta in y:
        print(alpha,beta, end='\r')
        b, e = generate_rankings(EG, alpha, beta)
        P[alpha][beta] = percentage_difference(b)
        store[alpha][beta] = (b,e)
# +
# Heat map of the focus node's advantage across the (alpha, beta) grid.
X = pd.DataFrame(P)
with plt.style.context(['seaborn-paper', PLOT_STYLE]):
    fig = plt.figure(figsize=(6,6))
    ax = fig.add_subplot(111, aspect='equal')
    # NOTE(review): origin=0 is deprecated in newer matplotlib; use origin='lower'.
    im = ax.imshow(X.values, vmin=-0.7, vmax=0.7, cmap=cm.RdBu, origin=0)
    fig.colorbar(im)
    ax.set_xticks(np.linspace(0,8,9))
    ax.set_xticklabels(np.linspace(0.1,0.9,9));
    ax.set_xlabel(r'$\alpha$')
    ax.set_yticks(np.linspace(1,11,11))
    ax.set_yticklabels(np.linspace(0.1,1,10));
    ax.set_ylabel(r'$\beta$')
    ax.set_ylim(0.5,10.5)
    fig.savefig('./figures/communicability_parameters.eps', bbox_inches='tight')
# +
X = generate_node_event_matrix(EG)
examples = defaultdict(dict)
# Two illustrative (alpha, beta) points taken from the sweep above.
for alpha, beta in [(0.8, 0.30000000000000004),(0.2, 0.8)]:
    b, e = store[alpha][beta]
    # Cumulative counts of event centralities for events where node 0
    # (the preferred node in the synthetic data) is the source.
    cent = e[X[0].nonzero()[1]]
    special_node = cent.value_counts(normalize=False).sort_index().cumsum()
    # Pick the best competing node for comparison (second best if node 0 leads).
    if P[alpha][beta] > 0:
        node = b.nlargest().index[1]
    else:
        node = b.nlargest().index[0]
    cent = e[X[node].nonzero()[1]]
    other_node = cent.value_counts(normalize=False).sort_index().cumsum()
    examples[(alpha,beta)]['special'] = special_node
    examples[(alpha,beta)]['other'] = other_node
# -
# Plot cumulative event-centrality distributions for the two example settings.
with plt.style.context(['seaborn-paper', PLOT_STYLE]):
    fig, (ax1,ax2) = plt.subplots(1,2,sharey=True, figsize=(10,4))
    for ((alpha,beta), centralities), axis in zip(examples.items(), [ax1,ax2]):
        centralities['special'].plot(logx=False, logy=False, ax=axis, label='$u^*$')
        centralities['other'].plot(logx=False, logy=False, ax=axis, label='$u^c$')
        axis.set_ylabel("Number of Events")
        axis.set_xlabel("Event Centrality")
        axis.set_ylim(0,70)
        # Tick labels are expressed in multiples of alpha for each panel.
        if axis==ax1:
            axis.set_xticks([alpha, 1.5*alpha, 2*alpha, 2.5*alpha])
            axis.set_xticklabels([r"$\alpha$", r"$1.5 \alpha$", r"$2.0 \alpha$", r"$2.5 \alpha$"])
            axis.text(0.9,0.03, s='(a)', transform=axis.transAxes)
        else:
            axis.set_xticks([alpha, 1.05*alpha, 1.10*alpha, 1.15*alpha])
            axis.set_xticklabels([r"$\alpha$", r"$1.05 \alpha$", r"$1.10\alpha$",r"$1.15\alpha$"])
            axis.text(0.9,0.03, s='(b)', transform=axis.transAxes)
        if axis==ax2:
            axis.legend(loc='best', fontsize=14)
    fig.savefig('./figures/communicability_event_distributions.eps', bbox_inches='tight')
| examples/advances_and_applications_paper/centrality.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# name: python3
# ---
# + [markdown] id="WutUwTFo2e4j"
# #Lab.10 / IBM3202 – Conformational changes using Structure-Based Models
# + [markdown] id="FQMdCpigVK-X"
# ###Theoretical aspects
#
# The **energy landscape theory** and the **principle of minimum frustration** in protein folding have provided the theoretical basis for the generation of simplified models to simulate the pathways of protein folding of different proteins. Noteworthy, recent work has demonstrated their utility for simulating other functionally relevant phenomena, such as **protein misfolding and conformational changes** associated to biological function. Most of these applications have been generated through savvy and careful combinations of the native bonded and non-bonded terms from **two or more structures deposited in the PDB** in two or more conformational states (i.e. open and closed conformations, alpha and beta states, etc).
# + [markdown] id="mD5dj8UMSqEC"
# <figure>
# <center>
# <img src='https://raw.githubusercontent.com/pb3lab/ibm3202/master/images/smogdual_01.png'/>
# <figcaption>FIGURE 1. Modeling the conformational transitions of adenylate kinase. This enzyme undergoes a >25 Å motion between open (red, PDB 4AKE) and closed (green, PDB 1AKE) states due to ligand binding. The relative occupation of the closed and open states can be tuned to experimental data by varying the strength of the subset of contacts only existing in the closed state between 0.6 (red) to 1.2 (black) relative to the open contacts <br> Noel JK & Onuchic JN (2012) <i> Computational Modeling of Biological Systems, 31-54</i></figcaption></center>
# </figure>
# + [markdown] id="eiopP7EdVjqt"
# ##Experimental Overview
# + [markdown] id="xkpCp93eQsUS"
# In this tutorial we will exemplify how we can **combine the native contacts of two different structures** to simulate the conformational change of adenylate kinase, an enzyme that has three domains (LID, NMP and core) and catalyzes the phosphorylation reaction of AMP, using ATP, generating 2 molecules of ADP as product:
#
# <figure>
# <center>
# <img src='https://raw.githubusercontent.com/pb3lab/ibm3202/master/images/smogdual_02.png'/>
# </center>
# </figure>
#
# This reaction requires a severe conformational change, which can be seen from the structures of the protein in the presence (1AKE) and in the absence (4AKE) of substrates.
# + [markdown] id="buTRd47h4QfK"
# #Part 0. Downloading and Installing the required software
# + [markdown] id="KSAoowL_4XIV"
# Before we start, **remember to start the hosted runtime** in Google Colab.
#
# Then, we must install several pieces of software to perform this tutorial. Namely:
# - **biopython** for manipulation of the PDB files
# - **py3Dmol** for visualization of the protein structure.
# - **cpanm** for installation of several Perl utilities required to run SMOG2
# - **SMOG2** for generating our structure-based models
# - **SBM-enhanced GROMACS** for preparing our MD system and performing our MD simulations.
#
# **⚠️WARNING:** Remember that you MUST have compiled and installed the SBM-enhanced version of GROMACS before this tutorial! This installation takes **~40 min** on Google Colab.
#
# For visualizing our MD trajectories, we will employ a web version of **NGLview**. This is due to the inability of Google Colab to handle a required python package for loading NGLview directly onto Google Colab. Hopefully this will change in the near future.
#
# 1. First, we will start by downloading and setting up SMOG2 on Google Colab, which requires the installation of several Perl utilities using cpanminus
#
# **NOTE**: This installation takes ~10 min. If possible, perform this installation before the tutorial session starts.
# + id="Xe4I0mjh8X_h"
#Downloading and extracting SMOG2 from the SMOG-server
# !wget http://smog-server.org/smog2/smog2.dailybuild.tgz
# !tar zxf smog2.dailybuild.tgz
# + id="8zf7oP-wC2Um"
#Automatic configuration of cpan for Perl
# !echo y | cpan
#Installing cpanm for easy installation of Perl utilities
# !cpan App::cpanminus
#Installing all required Perl utilities for SMOG2
# !cpanm String::Util #--local-lib $nb_path
# !cpanm XML::Simple #--local-lib $nb_path
# !cpanm Exporter #--local-lib $nb_path
# !cpanm PDL #--local-lib $nb_path
# !cpanm XML::Validator::Schema #--local-lib $nb_path
# + id="EJuuoeQP4nsB"
#Download a preconfigured SMOG2 file and test the installation
# %%bash
# rm /content/smog2.dailybuild/configure.smog2
wget -P /content/smog2.dailybuild https://github.com/pb3lab/ibm3202/raw/master/files/configure.smog2
source /content/smog2.dailybuild/configure.smog2
smog2 -h
# + [markdown] id="qcf5VLtv3PKY"
# 2. Then, we will set up our SBM-enhanced GROMACS on Google Colab, based on your previously compiled and installed SBM-enhanced GROMACS readily available in your Google Drive.
# + id="hLoWxW4grDM4"
#First, we will mount your Google Drive to be able to transfer files
#You will be requested to authorize this Google Drive mounting
#By entering an authorization code into an input box and then pressing Enter
from google.colab import drive
drive.mount('/content/gdrive')
# + id="TDx5pCg_rKzK"
#Copying your compiled SBM-enhanced GROMACS back to Google Colab
#Here, we are assuming that the SBM-enhanced GROMACS was copied into the IBM3202 folder
#Specifically, in the IBM3202/gromacs-5.0.4-sbm folder
import os
import shutil
from pathlib import Path
IBM3202 = Path("/content/gdrive/My Drive/IBM3202/")
#Then, we will check if GROMACS exists in this folder, and if it does, copy it
if os.path.exists(IBM3202/'gromacs-5.0.4-sbm'):
print("A compiled version of GROMACS for Google Colab exists!")
print("Copying GROMACS onto Google Colab")
# #!cp -d -r "$IBM3202"/gromacs-5.0.4-sbm /content/gromacs-5.0.4-sbm
shutil.copytree(str(IBM3202/'gromacs-5.0.4-sbm'), str('/content/gromacs-5.0.4-sbm'))
if not os.path.exists(IBM3202/'gromacs-5.0.4-sbm'):
print("A compiled version of GROMACS for Google Colab does not exists!")
print("Please compile and install GROMACS")
# + id="VLypHY40f2Ga"
#Sometimes, files are copied from Drive onto Colab with restricted permissions.
#This command fixes it!
import sys
import stat
# Grant execute permission to the GROMACS binaries copied from Drive.
os.chmod("/content/gromacs-5.0.4-sbm/bin/gmx", stat.S_IEXEC)
os.chmod("/content/gromacs-5.0.4-sbm/bin/g_kuh", stat.S_IEXEC)
# + [markdown] id="iwqaTlTD8oIT"
# 3. Lastly, we will install biopython and py3Dmol
# + id="FhxhK-3GXzL0"
# !pip install biopython py3dmol
# + [markdown] id="5j24YCyD7Zlf"
# Once these software installation processes are completed, we are ready to perform our experiments
# + [markdown] id="0W5ieq38OB_M"
# # Part I – Generate coarse-grained SBM models using SMOG2
# + [markdown] id="1zUmEAtiQRi-"
# As we did in the previous tutorial, we will first download the coordinates for the solved structures of human adenylate kinase in the open (4AKE) and closed (1AKE) states to generate coarse-grained SBMs for both structures using SMOG2. Then, we will employ a combination of numerical analysis and SMOG2 to generate custom-built SBM models that contain information from both structures in a single file (**dual-basin models**).
#
# 1. We start by creating and accessing a folder for preparing our systems
# + id="26tuPEcHXGOx"
#Let's make a folder first. We need to import the os and path library
import os
from pathlib import Path
#Then, we define the path of the folder we want to create.
#Notice that the HOME folder for a hosted runtime in colab is /content/
smogpath = Path("/content/prepare_dualAKE/")
#Now, we create the folder using the os.mkdir() command
#The if conditional is just to check whether the folder already exists
#In which case, python returns an error
#NOTE(review): os.makedirs(smogpath, exist_ok=True) would fold both checks into one call.
if os.path.exists(smogpath):
  print("path already exists")
if not os.path.exists(smogpath):
  os.mkdir(smogpath)
  print("path was succesfully created")
# + id="6BFqksfnX5fq"
#Changing directory using python
os.chdir(smogpath)
# + [markdown] id="he_1Mdvj_lTX"
# 2. Then, we will download the solved structures of human adenylate kinase in the open (PDB 4AKE) and closed (PDB 1AKE) conformations, and remove alternative side chain conformations, water molecules and ligands using biopython as we have done in our previous tutorials.
#
# **NOTE:** You might get a _chain discontinuity_ warning on biopython. In this particular case, this is due to the non-contiguous annotation of non-protein atoms from chain A and B in the PDB file.
# + id="YZVScwI8X--p"
#Importing your PDB file using biopython
import os
from Bio.PDB import *
pdbid = ['1ake', '4ake']
pdbl = PDBList()
# Fetch each structure and rename biopython's pdbXXXX.ent output to XXXX.pdb.
for s in pdbid:
  pdbl.retrieve_pdb_file(s, pdir='.', file_format ="pdb", overwrite=True)
  os.rename("pdb"+s+".ent", s+".pdb")
# + id="bJk8YcY1te0L"
#Here we set up a parser for our PDB
parser = PDBParser()
io=PDBIO()
#And here we set the residue conformation we want to keep
# Alternate-location identifier retained when a residue has several conformations.
keepAltID = "A"
class KeepOneConfOnly(Select): # Inherit methods from Select class
    """Biopython Select subclass that keeps a single conformation per atom.

    Atoms that are not disordered, or whose altloc matches the module-level
    ``keepAltID``, are written out (with the altloc cleared); every other
    alternate conformation is dropped.
    """
    def accept_atom(self, atom):
        # Reject only disordered atoms whose altloc differs from the chosen one.
        if atom.is_disordered() and atom.get_altloc() != keepAltID:
            return False
        atom.set_altloc(" ")  # clear the altloc ID before output
        return True
#And now we loop for all structures
for s in pdbid:
structure = parser.get_structure('X', s+".pdb")
#This will keep only conformation for each residue
io.set_structure(structure)
io.save(s+"_ready.pdb", select=KeepOneConfOnly())
print("Your PDBs were processed. Alternative side chain conformations removed!")
# + id="xu99dIgvYczH"
#Here we set up a parser for our PDB
parser = PDBParser()
io=PDBIO()
# Keep only chain A, residues 1-214 of each structure; output goes to .txt files
# so they can be edited on Colab before feeding them to SMOG2.
for s in pdbid:
    structure = parser.get_structure('X', s+"_ready.pdb")
    #And here we remove hydrogens, waters and ligands using Dice
    io.set_structure(structure)
    sel = Dice.ChainSelector('A', 1, 214)
    io.save(s+"_clean.txt", sel)
print("Your PDBs were processed. Only the protein heavy atoms have been kept!")
print("Both PDBs have been saved as text files for editing on Google Colab")
print("Remember to edit them before using SMOG2")
# + [markdown] id="7yjW7dXbBDse"
# 3. Let's examine our structures in py3Dmol
# + id="WSW-8f3QBTEJ"
import py3Dmol
# Side-by-side visualisation: open state (4AKE, red) vs closed state (1AKE, green).
#First we assign the py3Dmol.view as a two-panel viewer
view=py3Dmol.view(800,400,viewergrid=(1,2))
#Here we set the background color as white
view.setBackgroundColor('white')
#The following lines are used to add the addModel class
#to read the open state structure
view.addModel(open('4ake_clean.txt', 'r').read(),'pdb',viewer=(0,0))
#Here we set the visualization style and color
view.setStyle({'chain':'A'},{'cartoon': {'color':'red'}},viewer=(0,0))
#Now we do the same for the closed state structure
view.addModel(open('1ake_clean.txt', 'r').read(),'pdb',viewer=(0,1))
#Here we set the visualization style and color
view.setStyle({'chain':'A'},{'cartoon': {'color':'green'}}, viewer=(0,1))
#Here we center the molecule for its visualization
view.zoomTo()
#And we finally visualize the structures using the command below
view.show()
# + [markdown] id="uoePZR6DBuOr"
# 4. As we saw in our previous tutorial, these PDB files are saved as .txt files to edit the, based on the format requirements of SMOG2. **Fix both files accordingly!**
# + [markdown] id="o5yDhPZGEKan"
# 5. Once this is done, we can process our files in SMOG2 as indicated below:
# + id="ZEqE5jNwJq2U" language="bash"
# source /content/smog2.dailybuild/configure.smog2
# smog2 -i 4ake_clean.txt -CA -dname 4ake_smog
# smog2 -i 1ake_clean.txt -CA -dname 1ake_smog
# + [markdown] id="k_cZJKNkCnAs"
# #Part II – Generate a custom dual-basin SBM model using SMOG2
# + [markdown] id="dvVEpbIXE0da"
# We should now have our SBM models for the open and the closed state of human adenylate kinase. However, if you remember from our lectures and our previous tutorial, these coarse-grained models do not contain water molecules, ligands, etc. **How are we going to simulate a conformational change in the absence of ligands.** Moreover, since we do not have water in our system, the inclusion of a ligand will lead to its drift outside the active site towards infinity (as you can see from the .mdp simulation file, we are not using periodic boundary conditions).
#
# Instead of thinking of including the ligand, we must think about the **consequences of ligand binding**. In this case, ligand binding leads to the three domains of the protein getting closer to each other, which translates into **several native contacts being formed upon ligand binding** (or unique to the closed conformation).
#
# Briefly, to simulate the conformational change of this enzyme, it is most appropriate to consider that:
# - The **open state** is the **initial condition**, since it can exist both in the absence of and at low concentrations of ligand;
# - The native interactions that are **unique to the closed state** correspond to **ligand-induced interactions**;
# - The **native ligand-induced interactions** that exhibit significant changes in distance (eg. dist [4AKE] / dist [1AKE] > 50%) are **the only ones to be included in a combined native contact map** (or dual-basin potential).
#
# This is, in fact, shown in the following figure from an article that inspired this tutorial
# + [markdown] id="UeQ5nmYk_UNV"
# <figure>
# <center>
# <img src='https://raw.githubusercontent.com/pb3lab/ibm3202/master/images/smogdual_03.jpg'/>
# <figcaption>FIGURE 2. Contacts Unique to Closed Form. Each point represents a contact between residue _i_ and residue _j_ that is unique to the closed form. The Y-axis is the distance between the C${\alpha}$ atoms of residues _i_ and _j_ in the open form and the X-axis is the distance in the closed form. Contacts above the line of slope 1.5 (solid line) constitute the set of contacts selected for the dual-basin SBM models<br> Whitford PC et al (2007) <i> J Mol Biol 366(5), 1661-1671</i></figcaption></center>
# </figure>
# + [markdown] id="-hxGp7khD0S2"
# 1. **How can we first filter the contacts that are unique to the closed conformation?** We will use `grep` over the **.contact.CG files** you just obtained for the open and closed states:
# + id="_DjZ60UyFx3u"
# Create a working folder for preparing the dual-basin SBM model, then
# switch into it. os.mkdir() raises if the target already exists, so we
# test first; an if/else avoids checking the same path twice, and the
# typo in the success message ("succesfully") is fixed.
dualpath = Path("dualSBM")
if os.path.exists(dualpath):
    print("path already exists")
else:
    os.mkdir(dualpath)
    print("path was successfully created")
# Switching to this new folder
os.chdir(dualpath)
# + id="_bZXz87JEFLY"
# !grep -Fvf ../4ake_smog.contacts.CG ../1ake_smog.contacts.CG > uniquecontacts.txt
# + [markdown] id="ve0jGQkdEVqT"
# If all goes well, you should obtain a list of 113 contacts in the format `chain_i res_i chain_j res_j`. Now, we have to evaluate if the distance of these contacts is significantly different between the open and closed states.
#
# 2. To determine the distance difference between these contacts in the open and closed states, we will cheat a little bit. We will first use the `trjconv` module from GROMACS to generate a **coarse-grained PDB file** of both states based in our coarse-grained SBM .gro file. Then, we will use SMOG2, along with these coarse-grained PDB files and our list of unique contacts for the closed structure. With this small trick, we will obtain the **LJ parameters (and therefore the distances!)** of the ligand-induced contacts both in the open (4AKE) and closed (1AKE) states.
# + id="zhsvmVHDOfoe"
#Generating coarse-grained PDB for 4AKE and 1AKE using GROMACS
# %%bash
source /content/gromacs-5.0.4-sbm/bin/GMXRC
gmx editconf -f ../4ake_smog.gro -o 4ake_CA.pdb
gmx editconf -f ../1ake_smog.gro -o 1ake_CA.pdb
#Edit your file to comply with SMOG2 parameters
grep ATOM 4ake_CA.pdb > 4ake_CA_clean.pdb
# echo "END" >> 4ake_CA_clean.pdb
grep ATOM 1ake_CA.pdb > 1ake_CA_clean.pdb
# echo "END" >> 1ake_CA_clean.pdb
# + id="05UGj_ziFSCE" language="bash"
# source /content/smog2.dailybuild/configure.smog2
# smog2 -i 4ake_CA_clean.pdb -c uniquecontacts.txt -t /content/smog2.dailybuild/share/templates/SBM_calpha -dname 4ake_unique
# smog2 -i 1ake_CA_clean.pdb -c uniquecontacts.txt -t /content/smog2.dailybuild/share/templates/SBM_calpha -dname 1ake_unique
# + [markdown] id="62iWdwLpMqEa"
# If we remember from our previous tutorial, the `[ pairs ]` section of the .top file contains the native contacts and their parameters.
#
# <figure>
# <center>
# <img src='https://raw.githubusercontent.com/pb3lab/ibm3202/master/images/smogdual_05.png'/>
# </center>
# </figure>
#
# Since we requested the same user-defined contact map for both files in the previous step, we can get the difference in distance between each contact by **dividing column 4 (or 5) from one file by the column 4 (or 5) from another file**
# + [markdown] id="mwm8XpnXS2hE"
# 3. Create two text files on Google Colab in which you will store only the `[ pairs ]` section of these newly generated .top files. Then, we will use `awk` over these files to determine which interactions significantly change between these states:
# + id="gAvCcLyKS7aQ"
# !paste open.pairs closed.pairs | awk '{if(($4/$9)>1.5)print $1, $2, $3, $9, $10}' > Qligand.pairs
# + [markdown] id="GopgLjHeU3XP"
# After this, we will obtain a text file with **83 contacts** that are unique to the closed state for which a significant change in distance (>50%) occurs upon reaching the unbound, open conformation.
#
# 4. We will also use `awk` to generate the `[ exclusion ]` lists for these ligand-induced native contacts.
#
# + id="noEWkqIjVN-t"
# !paste open.pairs closed.pairs | awk '{if(($4/$9)>1.5)print $1, $2}' > Qligand.exclusions
# + [markdown] id="9c2QU1E9VeOR"
# Now we have everything we need to generate a dual-basin model for simulating the conformational change of human adenylate kinase: coordinate and parameter files and ligand-induced native contacts.
#
# 5. We will take one of the .top files for one of the states of human adenylate kinase and manually add the ligand-induced pairs and exclusions that we just obtained. Given our previous assumption on the conformational change of this protein, **the most reasonable strategy is to use the .top file from the open (4AKE) structure for its modification.**
# + id="-CBAyVNXWbZj"
#Copy the .top file for 4AKE into this folder and modify it!
# !cp ../4ake_smog.top dualAKE.top
# + [markdown] id="k4yl7Nj7LlYl"
# #Part III – Run and analyze our dual-basin SBM simulations
#
# Now, we are ready to perform our simulations of the ligand-induced conformational change of human adenylate kinase using these dual-basin SBM models.
#
# 1. We will start by creating a new folder for preparing and running our MD simulations, in which we will copy our SBM coordinate and topology file.
# + id="XxvgMU5AKMLx"
# Define and create the folder that will hold the MD simulation inputs
# and outputs. os.mkdir() raises if the folder already exists, so we
# test first; an if/else avoids checking the same path twice, and the
# typo in the success message ("succesfully") is fixed.
mdpath = Path("/content/md_dualAKE/")
if os.path.exists(mdpath):
    print("path already exists")
else:
    os.mkdir(mdpath)
    print("path was successfully created")
# + id="XJWHpPM8gjF5"
#Changing to our newly created directory and copying the .gro and .top files
# NOTE(review): smogpath is defined in an earlier cell (not shown here);
# it is presumably the folder holding the single-basin SMOG2 outputs.
os.chdir(mdpath)
from shutil import copyfile
# Coordinates come from the open state (4AKE) — the initial condition.
copyfile(smogpath/'4ake_smog.gro', mdpath/'dualAKE.gro')
# Topology is the manually edited dual-basin .top built in Part II.
copyfile(smogpath/dualpath/'dualAKE.top', mdpath/'dualAKE.top')
# + [markdown] id="9gDIqH5cMe8V"
# 2. Then, we will download the same **MD instruction file** that we used in our previous tutorial (**mdrun_CA_v5.mdp**), changing the simulation temperature to 108 and the number of steps to 5000000. We will also download the Perl script to generate our LJ 12-10 tabulated potentials.
# + id="57089jXZnNzt" language="bash"
# wget https://github.com/pb3lab/ibm3202/raw/master/files/mdrun_CA_v5.mdp
# wget https://github.com/pb3lab/ibm3202/raw/master/files/maketable4.pl
# perl maketable4.pl > table.xvg
# + [markdown] id="8bBxLKxgPHVl"
# 3. Lastly, we will prepare our **.tpr portable binary run input file for GROMACS** in this folder and run our simulation! Please note how we instruct GROMACS to use our custom table of LJ 12-10 tabulated potentials.
#
# This simulation takes ~12 min.
# + id="hGI_2ccYtRge"
#Preparing our binary run input file
# %%bash
source /content/gromacs-5.0.4-sbm/bin/GMXRC
gmx grompp -f mdrun_CA_v5.mdp -c dualAKE.gro -p dualAKE.top -o run.tpr
# + id="vT7S73eBAIdD"
#Running our simulation
# %%time
# %%bash
source /content/gromacs-5.0.4-sbm/bin/GMXRC
gmx mdrun -s run.tpr -table table.xvg -tablep table.xvg -nt 2 -noddcheck
# + [markdown] id="7peG7qsWQZQE"
# 4. Once our simulation is done, we can analyze if the conformational change is observed in our trajectory file. For simplification, we will first use the `rmsd` module along with the initial, open structure, as evidence of this change.
# + id="1H13-hMAe2qx" language="bash"
# source /content/gromacs-5.0.4-sbm/bin/GMXRC
# #Commands for RMSD
# echo "0" > options
# echo " " >> options
# echo "0" >> options
# echo " " >> options
# #RMSD calculation
# gmx rms -s dualAKE.gro -f traj_comp.xtc -xvg none < options
# + id="owbmJez4bVtr"
import matplotlib.pyplot as plt
import numpy as np
# Plot the RMSD time series produced by `gmx rms` (column 0 = time in
# reduced units tau, column 1 = RMSD in nm).
data = np.loadtxt('rmsd.xvg')
plt.title('Structural fluctuations of the system')
plt.xlabel('Time (tau)')
plt.ylabel('rmsd (nm)')
# linewidth takes a number; the original passed the string '2'.
plt.plot(data[:,0], data[:,1], linestyle='solid', linewidth=2, color='red')
plt.show()
# + [markdown] id="Qcr9xS1bQ0tX"
# 2. A better metric for looking at the conformational change would be to directly determine if the ligand-induced native contacts are formed. For this, we will use the `g_kuh` module again, along with the list of contacts that are unique to the closed structure and the closed structure as reference for the native distances. Please check how we are generating the contact map file required for this analysis:
# + id="3QO99Fv7YZwC"
# Generate the index (.ndx) contact-map file required by g_kuh.
# Each non-empty line of Qligand.exclusions holds an "atom_i atom_j"
# pair; the first two columns are copied under a [ Qligand ] group
# header. A with-block guarantees the output file is closed even if an
# error occurs mid-write (the original used open()/close()).
# NOTE: mode "a" appends, so re-running this cell duplicates the group.
with open(mdpath/"Qligand.ndx", "a") as f:
    f.write("[ Qligand ]\n")
    with open(smogpath/dualpath/"Qligand.exclusions") as infile:
        for line in infile:
            if line.strip():
                cols = line.split()
                f.write(cols[0] + "\t" + cols[1] + "\n")
#Copying the reference structure of the closed state for determining native distances
copyfile(smogpath/'1ake_smog.gro', mdpath/'1ake_smog.gro')
# + id="FXgsMkTjU1Ed"
#Analyzing the formation of ligand-induced contacts in our trajectory
# %%bash
source /content/gromacs-5.0.4-sbm/bin/GMXRC
g_kuh -s 1ake_smog.gro -f traj_comp.xtc -n Qligand.ndx -noabscut -noshortcut -cut 0.2
# + [markdown] id="1lT2bjDHT45o"
# 4. Let's plot our results and see what happened during our simulation! We will plot first the change in native contacts (Q), and then the change in potential energy. You can check the change in RMSD yourself
# + id="jKEv1o1tdAU2"
# !paste rmsd.xvg qvals.out > data.txt
import matplotlib.pyplot as plt
import numpy as np
# data.txt pastes rmsd.xvg and qvals.out side by side: column 0 is time,
# column 2 is the fraction of ligand-induced native contacts (Q).
data = np.loadtxt('data.txt')
# NOTE(review): the title is carried over from the RMSD plot although
# this cell plots Q — confirm the intended caption.
plt.title('Structural fluctuations of the system')
plt.xlabel('Time (tau)')
plt.ylabel('Q')
# linewidth takes a number; the original passed the string '2'.
plt.plot(data[:,0], data[:,2], linestyle='solid', linewidth=2, color='red')
plt.show()
# + [markdown] id="vBeCaXtJcnmC"
# 5. To finalize, we will visualize our simulation. For this, we will use the `trjconv` module to extract only the protein from our system and convert our trajectory into a PDB file and then download this new PDB file and load it onto [**NGLviewer**](http://nglviewer.org/ngl/) as a **trajectory** PDB file.
# + id="rRWX7XguczmS" language="bash"
# source /content/gromacs-5.0.4-sbm/bin/GMXRC
# #This is a trick to provide interactive options to gmx
# echo "Protein" > options
# echo "Protein" >> options
# echo " "
# gmx trjconv -s run.tpr -f traj_comp.xtc -o traj.pdb < options
# + [markdown] id="ACJSETwdymVx"
# **And this is the end of the tenth tutorial!** Good science!
# + id="paFhLNx1e-gh"
# Download the PDB trajectory to the local machine (Google Colab only);
# it can then be loaded into NGLviewer as a trajectory PDB.
from google.colab import files
files.download("/content/md_dualAKE/traj.pdb")
| tutorials/lab10_SMOGdual.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# language: python
# name: python3
# ---
import numpy as np
import cv2
from matplotlib import pyplot as plt
from copy import deepcopy
# Load the test image as a single-channel grayscale array.
# NOTE: cv2.imread returns None (no exception) if the file is missing.
img = cv2.imread("lion.jpg",cv2.IMREAD_GRAYSCALE)
def display_image(images):
    """Show an image in a window named "image" and block until any key
    is pressed, then close all OpenCV windows."""
    window_name = "image"
    cv2.imshow(window_name, images)
    cv2.waitKey(0)
    cv2.destroyAllWindows()
# +
# display_image(img)
# -
# cv2.imwrite('lion.png',img)
# Drawing primitives demo: work on a deep copy so the original image
# stays untouched.
imgN = deepcopy(img)
# Rectangle and circle around a region of interest (thickness 20 px).
# NOTE(review): img was loaded grayscale, so only the first value of
# each BGR color tuple takes effect — confirm if color was intended.
cv2.rectangle(imgN,(656,361),(761,444),(17,140,15),20)
cv2.circle(imgN,(704,403),100,(1,140,0),20)
# Closed polygon: polylines expects an int32 array of shape (N, 1, 2).
pts = np.array([[10,4],[30,2],[100,25],[30,40]],np.int32)
pts = pts.reshape((-1,1,2))
cv2.polylines(imgN,[pts],True,(1,140,0),15)
# Anti-aliased text overlay.
font = cv2.FONT_HERSHEY_SIMPLEX
cv2.putText(imgN,'I love You',(0,150),font,5,(255,0,0),2,cv2.LINE_AA)
# Resizable window; blocks until a key is pressed.
cv2.namedWindow('image',cv2.WINDOW_NORMAL)
cv2.imshow("image",imgN)
cv2.waitKey(0)
cv2.destroyAllWindows()
# display_image(img)
# Pixel access and region copy demo (this one mutates img in place).
imgN = deepcopy(img)
px = img[55,55]
print(px)
# Copy a 60x100 patch into the top-left corner of the same image.
face = img[640:700,650:750]
img[0:60,0:100] = face
cv2.namedWindow('image',cv2.WINDOW_NORMAL)
cv2.imshow("image",img)
cv2.waitKey(0)
cv2.destroyAllWindows()
# Overlay demo: paste the non-background pixels of img2 (leaf) onto the
# top-left region of img1 (lion).
img1 = cv2.imread("lion.jpg")
img2 = cv2.imread("leaf.jpg")
rows,cols,channels = img2.shape
roi = img1[0:rows,0:cols]
# +
# THRESH_BINARY_INV with threshold 240: pixels darker than 240 (the
# leaf) become 250 in the mask, the near-white background becomes 0.
# NOTE: ret (the threshold value used) is unused.
img2gray = cv2.cvtColor(img2,cv2.COLOR_BGR2GRAY)
ret,mask = cv2.threshold(img2gray,240,250,cv2.THRESH_BINARY_INV)
mask_inv = cv2.bitwise_not(mask)
# Background of the ROI where the leaf is NOT, foreground where it is.
img1_bg = cv2.bitwise_and(roi,roi,mask=mask_inv)
img2_fg = cv2.bitwise_and(img2,img2,mask=mask)
dst = cv2.add(img1_bg,img2_fg)
# NOTE(review): this only rebinds the local name `roi`; it does NOT
# write the blended result back into img1 — use img1[0:rows,0:cols] = dst
# if the composite is meant to appear in img1.
roi = dst
cv2.namedWindow('dst',cv2.WINDOW_NORMAL)
cv2.imshow("dst",dst)
cv2.waitKey(0)
cv2.destroyAllWindows()
# -
import cv2
# Adaptive (Gaussian-weighted, 15x15 neighbourhood, constant C=1)
# thresholding of a document photo — handles uneven lighting better
# than a single global threshold.
webcm = cv2.imread("page.jpg")
grayscaled = cv2.cvtColor(webcm,cv2.COLOR_BGR2GRAY)
gaus = cv2.adaptiveThreshold(grayscaled,255,cv2.ADAPTIVE_THRESH_GAUSSIAN_C,cv2.THRESH_BINARY,15,1)
cv2.namedWindow('gaus',cv2.WINDOW_NORMAL)
cv2.imshow('gaus',gaus)
cv2.waitKey(0)
cv2.destroyAllWindows()
| Image Analysis.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# language: python
# name: python3
# ---
# +
import nltk
import re
import difflib
from nltk import agreement
from nltk.tokenize import TweetTokenizer
import numpy as np
import pandas as pd
import matplotlib.pyplot as plt
import seaborn as sns
import random #only used for generating 100 random tweets for manual labelling
from collections import Counter
from sklearn.pipeline import Pipeline
from sklearn.feature_extraction.text import CountVectorizer, TfidfTransformer
from sklearn.feature_extraction.text import TfidfVectorizer
from sklearn.model_selection import train_test_split, cross_val_score, cross_val_predict, GridSearchCV
from sklearn.decomposition import PCA
from sklearn.neighbors import LocalOutlierFactor, KNeighborsClassifier
from sklearn.tree import DecisionTreeClassifier
from sklearn.svm import SVR, SVC
from sklearn.neural_network import MLPClassifier
from sklearn import linear_model
from sklearn.ensemble import RandomForestClassifier, GradientBoostingClassifier
from sklearn.naive_bayes import GaussianNB, MultinomialNB
from sklearn.discriminant_analysis import LinearDiscriminantAnalysis
from sklearn import metrics
from sklearn.metrics import confusion_matrix, make_scorer, accuracy_score, classification_report, roc_auc_score, roc_curve, recall_score, precision_score, f1_score
from sklearn.linear_model import SGDClassifier
from sklearn.naive_bayes import ComplementNB
from sklearn.svm import SVC
from sklearn.metrics import cohen_kappa_score
from nltk.corpus import stopwords
# -
# ## All functions for the notebook
# During tokenization, punctuation, emojis, pointless strings and characters are removed
# +
# Required for stopwords
# nltk.download("stopwords")
# -
# Imported stop_words from nltk library (stopwords includes conjunctions, articles and so on)
# Build the stopword list from NLTK's English set, plus two ordinal
# suffixes ("th", "st") that survive tokenization as noise tokens.
stop_words = stopwords.words('english')
stop_words.extend(['th', 'st'])
print("|".join(stop_words))
# +
def tokenizer(tweets):
    """Tokenize a list of tweet strings.

    Each tweet is lower-cased and consumed left to right:
    whitespace/digit runs, "@user" mentions, words containing digits and
    stopwords are skipped; runs of word characters become tokens;
    anything else (punctuation, emoji, symbols) is collected as
    unmatchable residue and excluded from the output.

    Returns a list with one list of tokens per input tweet.
    """
    # Compile patterns once, up front (the original compiled token_pat
    # twice and compiled a non_white_space pattern it never used).
    token_pat = re.compile(r'\w+')
    skippable_pat = re.compile(r'[\s\d]+|@user|(\w+\d\w+)|\b(?:%s)\b' % '|'.join(stop_words))

    tokenlist = []
    tokens = []
    unmatchable = []  # punctuation/emoji residue; collected but not returned
    for tweet in tweets:
        line = tweet.lower()
        while line:
            skippable_match = re.search(skippable_pat, line)
            if skippable_match and skippable_match.start() == 0:
                # Drop skippable material sitting at the head of the line.
                line = line[skippable_match.end():]
            else:
                token_match = re.search(token_pat, line)
                if token_match and token_match.start() == 0:
                    tokens.append(line[:token_match.end()])
                    line = line[token_match.end():]
                else:
                    # Neither pattern matches at position 0: everything up
                    # to the nearest upcoming match is unmatchable residue.
                    unmatchable_end = len(line)
                    if skippable_match:
                        unmatchable_end = skippable_match.start()
                    if token_match:
                        unmatchable_end = min(unmatchable_end, token_match.start())
                    unmatchable.append(line[:unmatchable_end])
                    line = line[unmatchable_end:]
        tokenlist.append(tokens)
        tokens = []
    return tokenlist
def compare_tokenizers(bool):
    """Print, for every tweet in the module-level ``training_data``, the
    NLTK TweetTokenizer output next to our own ``token_tweets`` output.

    The parameter name shadows the builtin ``bool``; it is kept unchanged
    for backward compatibility with existing callers.
    """
    # Truthiness test instead of "== True"; the unused ``temp`` and
    # ``diff`` locals of the original (their print was commented out)
    # are dropped, which also avoids tokenizing each tweet twice.
    if bool:
        tknzr = TweetTokenizer(strip_handles=True, reduce_len=True)
        for j, tweet in enumerate(training_data):
            print(tweet,"tknzr:",tknzr.tokenize(tweet),"\ntokenlist:",token_tweets[j],"\n")
def import_(classification_task, file_name):
    """Load one integer label per line from a raw data file under
    ../data/raw/<classification_task>/<file_name>."""
    path = "../data/raw/" + classification_task + "/" + file_name
    with open(path, "r", encoding="utf-8") as f:
        labels = [int(line.strip("\n")) for line in f]
    return labels
def import_and_tokenize(classification_task, file_name):
    """Read raw tweet lines from ../data/raw/<task>/<file> and return
    them tokenized by the module-level tokenizer()."""
    path = "../data/raw/" + classification_task + "/" + file_name
    with open(path, "r", encoding="utf-8") as f:
        lines = list(f)
    return tokenizer(lines)
def report_clf_stats(predicted, test, classification_task):
    """Print accuracy, a per-class report and the confusion matrix.

    Parameters
    ----------
    predicted : predicted labels from a classifier.
    test : ground-truth labels.
    classification_task : "offensive" or "sentiment"; selects the class
        names shown in the report.
    """
    name_dict = {"offensive": ["Not offensive","Offensive"], "sentiment": ["Negative", "Neutral", "Positive"]}
    print(metrics.accuracy_score(test, predicted))
    # BUG FIX: classification_report expects (y_true, y_pred); the
    # original passed (predicted, test), silently swapping precision and
    # recall per class (the other two calls already used (test, predicted)).
    print(metrics.classification_report(test, predicted, target_names=name_dict[classification_task]),"\n")
    print(metrics.confusion_matrix(test, predicted))
# -
# ## Reading data
# ### The Offensive Training Data
# +
# Read the raw offensive-task tweets and split them 50/50, in order,
# into a training half and a validation half.
with open("../data/raw/offensive/train_text.txt", 'r', encoding="utf-8") as f:
    inputlist = list(f)
half = len(inputlist) // 2
training_data, validation_data = inputlist[:half], inputlist[half:]
# -
# Tokenize the training half and dump the result for inspection.
token_tweets = tokenizer(training_data)
print(token_tweets)
#print(token_tweets[1])
#[print(*i) for i in token_tweets]
# ### Comparing our own tokenizer with TweetTokenizer from nltk library
# <b>Set below value 'see_output' = True for comparison <i>(It'll run for a while)</i></b>
#
# Comparing our own tokenizer with TweetTokenizer from nltk library
# Set below value 'see_output' = True for comparison; it prints every
# tweet twice, so expect a long, slow output.
see_output = True
compare_tokenizers(see_output)
# ### Corpus size of Offensive and sentiment training sets respectively:
# + language="bash"
# wc ../data/raw/offensive/train_text.txt
# wc ../data/raw/sentiment/train_text.txt
# -
# <b>Offensive:</b> 11916 lines/tweets, 262370 words <br>
# <b>Sentiment:</b> 45615 lines/tweets, 877516 words
# ### Running tokenizer function on offensive and sentiment training data to get token count right
# +
# Re-read both full training corpora so token counts can be computed
# over all tweets (not just the 50% training split above).
with open("../data/raw/offensive/train_text.txt", "r", encoding = "utf-8",) as f:
    offensive_raw = [line for line in f]
with open("../data/raw/sentiment/train_text.txt", "r", encoding = "utf-8",) as f:
    sentiment_raw = [line for line in f]
# -
# <i>Below cell takes some time to run</i>
offensive_tokens = tokenizer(offensive_raw)
sentiment_tokens = tokenizer(sentiment_raw)
# ## The top 10 most frequent words of each dataset
# +
#from https://stackoverflow.com/questions/45019607/count-occurrence-of-a-list-in-a-list-of-lists
# explode() flattens the list-of-token-lists into one long Series, so
# value_counts() yields per-token frequencies sorted descending.
off_uniq = pd.Series(offensive_tokens).explode().value_counts()
sent_uniq = pd.Series(sentiment_tokens).explode().value_counts()
print("Offensive dataset, top 10 tokens:","\n",off_uniq[:10],"\n")
print("Sentiment dataset, top 10 tokens:","\n",sent_uniq[:10])
#Turning above pd.series into dataframes, for ease of use later
#Transformation found at:https://stackoverflow.com/questions/40224319/pandas-series-to-dataframe-using-series-indexes-as-columns
off_uniq = off_uniq.to_frame().reset_index()
sent_uniq = sent_uniq.to_frame().reset_index()
#Renaming columns in dataframes: one row per type, with its frequency.
off_uniq.columns = ["token","count"]
sent_uniq.columns = ["token","count"]
# -
# ### type/token ratio
# +
# Vocabulary richness: types (distinct tokens), total tokens, and the
# type/token ratio for both corpora.
#Types == Amount of different Tokens in dataset
off_types = len(off_uniq["token"])
sent_types = len(sent_uniq["token"])
print("Offensive Types: {}\nSentiment types: {}\n".format(off_types,sent_types))
#Tokens == Amount of all "Words" in dataset
off_token_amount = off_uniq["count"].sum()
sent_token_amount = sent_uniq["count"].sum()
print("Offensive tokens, amount: {}\nSentiment tokens, amount: {}\n".format(off_token_amount, sent_token_amount))
#Type/token ratio (=ttratio): higher means a more varied vocabulary.
off_ttratio = off_types/off_token_amount
sent_ttratio = sent_types/sent_token_amount
print("Offensive type/token ratio: {:.4f}\nSentiment type/token ratio: {:.4f}".format(off_ttratio, sent_ttratio))
# -
# #### Types that only occur 1, 2 or 3 times
# <ul>
# <li>Things like Hashtags and misspelled nouns are prevalent, but they, more importantly, contain most of the Types in the vocabulary</li>
# <li>Tokens that occur only once make up ~ 50% of the types in both datasets!</li>
# </ul>
# +
# Share of the vocabulary made up of very rare types.
# .isin([1, 2, 3]) replaces the original chain of three "== k" masks
# OR-ed together; counts from value_counts() are always >= 1, so the
# selection is identical.
print("Offensive types w. freq 1, 2, or 3 divided by total types: {:.2f}%".format(
    len(off_uniq.loc[off_uniq["count"].isin([1, 2, 3])])/off_types*100))
print("Sentiment types w. freq 1, 2, or 3 divided by total types: {:.2f}%".format(
    len(sent_uniq.loc[sent_uniq["count"].isin([1, 2, 3])])/sent_types*100))
print()
print("Offensive types w. freq. just 1 divided by total types: {:.2f}%".format(len(off_uniq.loc[off_uniq["count"]==1])/off_types*100))
print("Sentiment types w. freq. just 1 divided by total types: {:.2f}%".format(len(sent_uniq.loc[sent_uniq["count"]==1])/sent_types*100))
# -
# ## Machine learning part
# ### Offensive dataset
# +
random.seed(42)
# Loading the offensive-task splits.
# NOTE(review): the variable names are misleading — per the file names,
# ox_test holds the TRAINING labels (y_train) and oy_train/oy_test hold
# the VALIDATION texts/labels; later cells rely on these names as-is.
# training texts (tokenized)
ox_train = import_and_tokenize("offensive", "train_text.txt")
# training labels
ox_test = import_("offensive", "train_labels.txt")
# validation texts (tokenized)
oy_train = import_and_tokenize("offensive", "val_text.txt")
# validation labels
oy_test = import_("offensive", "val_labels.txt")
# +
#Pipeline for sgdclassifier
# tokenizer=lambda x: x / lowercase=False because the inputs are already
# tokenized lists; counts -> tf (idf disabled) -> linear SVM via SGD.
sgd_clf = Pipeline([
    ('vec', CountVectorizer(tokenizer = lambda x: x, lowercase = False,
                            ngram_range=(1,3), max_df = 0.7, min_df = 5, max_features = 5000)),
    ('tfidf', TfidfTransformer(use_idf=False)),
    ('clf', SGDClassifier(loss="hinge")),
])
# Fit on training texts/labels, evaluate on the validation split
# (see the naming caveat where these variables are loaded).
sgd_clf.fit(ox_train, ox_test)
sgd_predicted2 = sgd_clf.predict(oy_train)
#sgd_predicted2
report_clf_stats(sgd_predicted2, oy_test, "offensive")
# -
# <i> Highest Achieved accuracy score for SGDClassifier: 78.6% </i>
# +
# MultinomialNB baseline on the same pre-tokenized features
# (counts -> tf without idf -> multinomial naive Bayes).
multinb_clf = Pipeline([
    ('vec', CountVectorizer(tokenizer = lambda x: x, lowercase = False)),
    ('tfidf', TfidfTransformer(use_idf=False)),
    ('clf', MultinomialNB()),
])
multinb_clf.fit(ox_train, ox_test)
multinb_predict = multinb_clf.predict(oy_train)
report_clf_stats(multinb_predict, oy_test, "offensive")
# +
#ComplementNB: naive Bayes variant designed for imbalanced classes,
# same feature pipeline as the other baselines.
complement_clf = Pipeline([
    ('vec', CountVectorizer(tokenizer = lambda x: x, lowercase = False)),
    ('tfidf', TfidfTransformer(use_idf=False)),
    ('clf', ComplementNB()),
])
complement_clf.fit(ox_train, ox_test)
complement_predict = complement_clf.predict(oy_train)
report_clf_stats(complement_predict, oy_test, "offensive")
# -
# It takes time to run SVC classifier
# +
# SVC with a degree-3 polynomial kernel; kernel SVMs scale poorly with
# sample count, hence the long runtime noted above.
SVC_clf = Pipeline([
    ('vec', CountVectorizer(tokenizer = lambda x: x, lowercase = False)),
    ('tfidf', TfidfTransformer(use_idf=False)),
    ('clf', SVC(kernel='poly', degree = 3)),
])
SVC_clf.fit(ox_train, ox_test)
SVC_predict = SVC_clf.predict(oy_train)
report_clf_stats(SVC_predict, oy_test, "offensive")
# -
| notebooks/Exam_Code.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: pysparkkernel
# language: python
# name: pysparkkernel
# ---
# # SageMaker PySpark XGBoost MNIST Example
#
# 1. [Introduction](#Introduction)
# 2. [Setup](#Setup)
# 3. [Loading the Data](#Loading-the-Data)
# 4. [Training and Hosting a Model](#Training-and-Hosting-a-Model)
# 5. [Inference](#Inference)
# 6. [More on SageMaker Spark](#More-on-SageMaker-Spark)
#
# ## Introduction
# This notebook will show how to classify handwritten digits using the XGBoost algorithm on Amazon SageMaker through the SageMaker PySpark library. We will train on Amazon SageMaker using XGBoost on the MNIST dataset, host the trained model on Amazon SageMaker, and then make predictions against that hosted model.
#
# Unlike the other notebooks that demonstrate XGBoost on Amazon SageMaker, this notebook uses a SparkSession to manipulate data, and uses the SageMaker Spark library to interact with SageMaker with Spark Estimators and Transformers.
#
# You can visit SageMaker Spark's GitHub repository at https://github.com/aws/sagemaker-spark to learn more about SageMaker Spark.
#
# You can visit XGBoost's GitHub repository at https://github.com/dmlc/xgboost to learn more about XGBoost
#
# This notebook was created and tested on an ml.m4.xlarge notebook instance.
# ## Setup
#
# First, we import the necessary modules and create the SparkSession with the SageMaker Spark dependencies.
# +
import os
from pyspark import SparkContext, SparkConf
from pyspark.sql import SparkSession
import sagemaker
from sagemaker import get_execution_role
import sagemaker_pyspark
# IAM role of the notebook instance; reused for SageMaker training below.
role = get_execution_role()
# Configure Spark to use the SageMaker Spark dependency jars
# NOTE(review): `jars` is unused and classpath_jars() is called twice.
jars = sagemaker_pyspark.classpath_jars()
classpath = ":".join(sagemaker_pyspark.classpath_jars())
# See the SageMaker Spark Github repo under sagemaker-pyspark-sdk
# to learn how to connect to a remote EMR cluster running Spark from a Notebook Instance.
# local[*] = run Spark locally with one worker thread per core.
spark = (
    SparkSession.builder.config("spark.driver.extraClassPath", classpath)
    .master("local[*]")
    .getOrCreate()
)
# -
# ## Loading the Data
#
# Now, we load the MNIST dataset into a Spark DataFrame. The dataset is available in LibSVM format at
#
# `s3://sagemaker-sample-data-[region]/spark/mnist/train/`
#
# where `[region]` is replaced with a supported AWS region, such as us-east-1.
#
# In order to train and make inferences our input DataFrame must have a column of Doubles (named "label" by default) and a column of Vectors of Doubles (named "features" by default).
#
# Spark's LibSVM DataFrameReader loads a DataFrame already suitable for training and inference.
#
# Here, we load into a DataFrame in the SparkSession running on the local Notebook Instance, but you can connect your Notebook Instance to a remote Spark cluster for heavier workloads. Starting from EMR 5.11.0, SageMaker Spark is pre-installed on EMR Spark clusters. For more on connecting your SageMaker Notebook Instance to a remote EMR cluster, please see [this blog post](https://aws.amazon.com/blogs/machine-learning/build-amazon-sagemaker-notebooks-backed-by-spark-in-amazon-emr/).
# +
import boto3
# China regions use the amazonaws.com.cn S3 endpoint domain.
cn_regions = ["cn-north-1", "cn-northwest-1"]
region = boto3.Session().region_name
endpoint_domain = "com.cn" if region in cn_regions else "com"
spark._jsc.hadoopConfiguration().set(
    "fs.s3a.endpoint", "s3.{}.amazonaws.{}".format(region, endpoint_domain)
)
# MNIST in LibSVM format: 784 dense features per row plus a label,
# loaded straight into the (label, features) schema training expects.
trainingData = (
    spark.read.format("libsvm")
    .option("numFeatures", "784")
    .option("vectorType", "dense")
    .load("s3a://sagemaker-sample-data-{}/spark/mnist/train/".format(region))
)
testData = (
    spark.read.format("libsvm")
    .option("numFeatures", "784")
    .option("vectorType", "dense")
    .load("s3a://sagemaker-sample-data-{}/spark/mnist/test/".format(region))
)
trainingData.show()
# -
# ## Training and Hosting a Model
# Now we create an XGBoostSageMakerEstimator, which uses the XGBoost Amazon SageMaker Algorithm to train on our input data, and uses the XGBoost Amazon SageMaker model image to host our model.
#
# Calling fit() on this estimator will train our model on Amazon SageMaker, and then create an Amazon SageMaker Endpoint to host our model.
#
# We can then use the SageMakerModel returned by this call to fit() to transform Dataframes using our hosted model.
#
# The following cell runs a training job and creates an endpoint to host the resulting model, so this cell can take up to twenty minutes to complete.
# +
# NOTE(review): `random` and `S3DataPath` are imported but unused here.
import random
from sagemaker_pyspark import IAMRole, S3DataPath
from sagemaker_pyspark.algorithms import XGBoostSageMakerEstimator
# One ml.m4.xlarge instance each for training and for the hosted endpoint.
xgboost_estimator = XGBoostSageMakerEstimator(
    sagemakerRole=IAMRole(role),
    trainingInstanceType="ml.m4.xlarge",
    trainingInstanceCount=1,
    endpointInstanceType="ml.m4.xlarge",
    endpointInitialInstanceCount=1,
)
# XGBoost hyperparameters: 10 boosting rounds of 10-class softmax.
xgboost_estimator.setEta(0.2)
xgboost_estimator.setGamma(4)
xgboost_estimator.setMinChildWeight(6)
xgboost_estimator.setSilent(0)
xgboost_estimator.setObjective("multi:softmax")
xgboost_estimator.setNumClasses(10)
xgboost_estimator.setNumRound(10)
# train — also deploys the model to an endpoint (can take ~20 minutes).
model = xgboost_estimator.fit(trainingData)
# -
# ## Inference
# Now we transform our DataFrame.
# To do this, we serialize each row's "features" Vector of Doubles into LibSVM format for inference against the Amazon SageMaker Endpoint. We deserialize the CSV responses from the XGBoost model back into our DataFrame. This serialization and deserialization is handled automatically by the `transform()` method:
# +
# Score the held-out set against the hosted endpoint; adds a
# "prediction" column alongside "label" and "features".
transformedData = model.transform(testData)
transformedData.show()
# -
# How well did the algorithm perform? Let us display the digits corresponding to each of the labels and manually inspect the results:
# +
from pyspark.sql.types import DoubleType
import matplotlib.pyplot as plt
import numpy as np
# helper function to display a digit
def show_digit(img, caption="", xlabel="", subplot=None):
    """Render a flattened 28x28 MNIST digit.

    Parameters
    ----------
    img : array-like of 784 values, reshaped to 28x28 for display.
    caption : title placed above the image.
    xlabel : label placed below the image.
    subplot : existing Axes to draw on; a new 1x1 figure is created
        when None.
    """
    if subplot is None:  # identity check, not "== None"
        _, (subplot) = plt.subplots(1, 1)
    imgr = img.reshape((28, 28))
    # Hide tick marks — only the pixels matter.
    subplot.axes.get_xaxis().set_ticks([])
    subplot.axes.get_yaxis().set_ticks([])
    plt.title(caption)
    plt.xlabel(xlabel)
    subplot.imshow(imgr, cmap="gray")
# Pull the first 250 test digits and their predicted classes, then show
# the images grouped by predicted label, 5 per row.
images = np.array(transformedData.select("features").cache().take(250))
clusters = transformedData.select("prediction").cache().take(250)
for cluster in range(10):
    print("\n\n\nCluster {}:".format(int(cluster)))
    digits = [img for l, img in zip(clusters, images) if int(l.prediction) == cluster]
    # Ceiling-divide by 5 to size the subplot grid.
    height = ((len(digits) - 1) // 5) + 1
    width = 5
    plt.rcParams["figure.figsize"] = (width, height)
    _, subplots = plt.subplots(height, width)
    subplots = np.ndarray.flatten(subplots)
    for subplot, image in zip(subplots, digits):
        show_digit(image, subplot=subplot)
    # Blank out any unused cells in the last row.
    for subplot in subplots[len(digits) :]:
        subplot.axis("off")
    plt.show()
# -
# Since we don't need to make any more inferences, now we delete the endpoint:
# +
# Delete the endpoint (and any other SageMaker resources this model
# created) so the hosted instance stops accruing charges.
from sagemaker_pyspark import SageMakerResourceCleanup
resource_cleanup = SageMakerResourceCleanup(model.sagemakerClient)
resource_cleanup.deleteResources(model.getCreatedResources())
# -
# ## More on SageMaker Spark
#
# The SageMaker Spark Github repository has more about SageMaker Spark, including how to use SageMaker Spark with your own algorithms on Amazon SageMaker: https://github.com/aws/sagemaker-spark
#
| sagemaker-spark/pyspark_mnist/pyspark_mnist_xgboost.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python [conda env:miniconda3-cesm2-marbl]
# language: python
# name: conda-env-miniconda3-cesm2-marbl-py
# ---
# # CO<sub>2</sub> Flux
#
# Compare monthly climatologies of the CO<sub>2</sub> Flux with observations
# ## Imports
# %load_ext autoreload
# %autoreload 2
# +
# %matplotlib inline
import os
from itertools import product
import pandas as pd
import numpy as np
import xarray as xr
import matplotlib.pyplot as plt
import matplotlib.gridspec as gridspec
import matplotlib.colors as colors
import cmocean
import cartopy
import cartopy.crs as ccrs
import xpersist as xp
# Point xpersist at the shared on-disk cache of computed fields.
cache_dir = '/glade/p/cgd/oce/projects/cesm2-marbl/xpersist_cache/3d_fields'
# NOTE(review): the setting is only applied when the directory already
# exists, yet makedirs() then creates it — on a first run the directory
# is created but xp.settings is left unset; confirm the intended order.
if (os.path.isdir(cache_dir)):
    xp.settings['cache_dir'] = cache_dir
os.makedirs(cache_dir, exist_ok=True)
import pop_tools
import climo_utils as cu
import utils
import discrete_obs
import plot
# -
# ## Spin up a Cluster
# Spin up a Dask cluster with 12 workers for the climatology computation.
cluster, client = utils.get_ClusterClient()
cluster.scale(12) #adapt(minimum_jobs=0, maximum_jobs=24)
client
# ## Read in the Grid
# POP gx1v7 grid; keep cell area (TAREA) only where REGION_MASK > 0
# (ocean points), zero elsewhere, for area-weighted global sums.
ds_grid = pop_tools.get_grid('POP_gx1v7')
masked_area = ds_grid.TAREA.where(ds_grid.REGION_MASK > 0).fillna(0.).expand_dims('region')
masked_area.plot()
# ## Compute Monthly Climatologies
def monthly_clim(ds):
    """Collapse a time series dataset to a 12-entry monthly climatology.

    Time-dependent variables are averaged within 'time.month' groups and
    the resulting 'month' coordinate is renamed back to 'time', with each
    variable's original encoding restored; time-independent variables
    are merged back in untouched.
    """
    time_vars = [name for name in ds.variables if 'time' in ds[name].dims]
    static_vars = list(set(ds.variables) - set(time_vars))
    saved_encoding = {name: ds[name].encoding for name in time_vars}
    clim = ds[time_vars].groupby('time.month').mean('time').rename({'month': 'time'})
    for name in time_vars:
        clim[name].encoding = saved_encoding[name]
    return xr.merge([clim, ds[static_vars]])
# +
# Unit conversion: nmol/cm^2/s -> mol/m^2/yr.
nmolcm2s_to_molm2yr = 1e-9 * 1e4 * 86400. * 365.
time_slice = slice("1990-01-15", "2015-01-15")
varlist = [
    'FG_CO2',
]
# Read each CESM variable (cached via xpersist), reduce to a monthly
# climatology and average over ensemble members.
ds_list = []
for variable in varlist:
    xp_func = xp.persist_ds(cu.read_CESM_var, name=f'co2-flux-{variable}', trust_cache=True,)
    ds_list.append(xp_func(
        time_slice,
        variable,
        postprocess=monthly_clim,
        mean_dims=['member_id'],
    ))
ds = xr.merge(ds_list)
#ds['TAREA'] = grid.TAREA
# Global integral conversion: nmol/cm^2/s -> Pg C/yr, sign flipped so
# positive means outgassing-to-atmosphere convention is reversed.
convert_glb = dict(
    FG_CO2=(-1.0) * 1e-9 * 86400. * 365. * 12e-15,
)
ds_glb = xr.Dataset()
for v in convert_glb.keys():
    ds_glb[v] = (masked_area * ds[v].mean('time')).sum(['nlat', 'nlon']) * convert_glb[v]
    ds_glb[v].attrs['units'] = 'Pg C yr$^{-1}$'
from netCDF4 import default_fillvals
# Convert the map field in place to mol/m^2/yr with the same sign flip,
# and set encoding so the field writes cleanly to netCDF.
ds.FG_CO2.data = ds.FG_CO2 * nmolcm2s_to_molm2yr * (-1.0) # reverse sign
ds.FG_CO2.attrs['units'] = 'mol m$^{-2}$ yr$^{-1}$'
ds.FG_CO2.encoding['coordinates'] = 'TLONG TLAT time'
ds.FG_CO2.encoding['_FillValue'] = default_fillvals['f4']
ds['time'] = ds.time.astype(np.int32)
ds
# -
ds_glb
# ### After the computation is done, spin down the cluster
del client
del cluster
# ## Open the Gridded Observational Data
# +
# SOM-FFN pCO2-flux product regridded to the POP gx1v7 grid; same 1990-2014
# window and monthly-climatology treatment as the model above.
with xr.open_dataset('/glade/p/cgd/oce/projects/cesm2-marbl/fgco2-MPI-SOM-FFN_POP_gx1v7.nc') as ds_obs:
    with xr.set_options(keep_attrs=True):
        ds_obs = monthly_clim(ds_obs.sel(time=slice('1990', '2014'))).load()
# Global integral in Pg C/yr (12e-15 Pg C/mol; 1e-4 m^2/cm^2 area factor)
ds_obs_glb = (masked_area * ds_obs['fgco2_smoothed'].mean('time')).sum(['nlat', 'nlon']) * 12e-15 * 1e-4
import intake
# Native (lat/lon) version of the same product, used for the zonal mean
cat = intake.open_catalog('catalogs/fgco2_MPI-SOM-FFN.yml')
with xr.set_options(keep_attrs=True):
    ds_tmp = monthly_clim(cat.fgco2_MPI_SOM_FFN().to_dask()[['fgco2_smoothed']].sel(time=slice('1990', '2014'))).compute()
ds_obs_za = ds_tmp.mean('lon')
#for v in ['fgco2_smoothed', 'fgco2_raw']:
#    ds_obs[v].encoding['_FillValue'] = -1e36
#ds_obs['time'] = ds_obs.time.astype(np.int32)
# -
ds_obs_glb
ds_obs.fgco2_raw.isel(time=-1).plot() #('time').plot()
ds.FG_CO2.mean('time').plot()
# 2D basin mask used by the Fortran zonal-mean utility
mask2d = utils.get_pop_region_mask_za(mask_type='2D')
mask2d.plot()
# ## Compute the Zonal Mean
#
ds_za = utils.zonal_mean_via_fortran(ds, grid='POP_gx1v7', region_mask=mask2d,)
ds_za
# ## Plot the Results
ds_za.FG_CO2.isel(basins=0).plot()
ds_obs_za.fgco2_smoothed.plot()
# Annual means for the map panels; add a cyclic point so contours close at the dateline
dsa = ds.mean('time').rename({'FG_CO2': 'fgco2'})
dsa['fgco2_obs'] = ds_obs.fgco2_smoothed.mean('time')
dsa = utils.pop_add_cyclic(dsa)
dsa
# +
# Figure layout: two map panels (left), two month-vs-latitude Hovmöller-style
# panels (middle), one annual-mean zonal profile (right), shared colorbar.
fig = plt.figure(figsize=(12, 6))
prj = ccrs.Robinson(central_longitude=305.0)
gs = gridspec.GridSpec(
    nrows=2, ncols=6,
    width_ratios=(1, 0.01, 0.3, 0.3, 0.25, 0.02),
    wspace=0.1,
    hspace=0.15,
)
axs = np.empty((2, 6)).astype(object)
axs_map = [
    plt.subplot(gs[0, 0], projection=prj),
    plt.subplot(gs[1, 0], projection=prj),
]
cax = plt.subplot(gs[:, -1])
axs_za = [
    plt.subplot(gs[:, 2]),
    plt.subplot(gs[:, 3]),
]
ax_za_mean = plt.subplot(gs[:, 4])
cmap = cmocean.cm.balance
levels = np.arange(-3.6, 3.8, 0.2)
# Maps: model (top) vs observations (bottom) on a shared discrete color scale
for field, ax in zip(['fgco2', 'fgco2_obs'], axs_map):
    cf = ax.contourf(
        dsa.TLONG, dsa.TLAT, dsa[field],
        levels=levels,
        extend='both',
        cmap=cmap,
        norm=colors.BoundaryNorm(levels, ncolors=cmap.N),
        transform=ccrs.PlateCarree(),
    )
    land = ax.add_feature(
        cartopy.feature.NaturalEarthFeature(
            'physical','land','110m',
            edgecolor='face',
            facecolor='gray'
        )
    )
axs_map[0].set_title(f'CESM: {ds_glb.FG_CO2.values[0]:0.2f} Pg C yr$^{{-1}}$')
axs_map[1].set_title(f'SOM-FFN: {ds_obs_glb.values[0]:0.2f} Pg C yr$^{{-1}}$')
# Month-by-latitude seasonal cycle panels
axs_za[0].pcolormesh(
    np.arange(0, 13, 1),
    ds_za.lat_t_edges,
    ds_za.FG_CO2.isel(basins=0).T,
    cmap=cmap,
    norm=colors.BoundaryNorm(levels, ncolors=cmap.N),
)
axs_za[1].pcolormesh(
    np.arange(0, 13, 1),
    ds_obs_za.lat,
    ds_obs_za.fgco2_smoothed.T,
    cmap=cmap,
    norm=colors.BoundaryNorm(levels, ncolors=cmap.N),
)
# Annual-mean zonal profiles, model vs obs
ax_za_mean.plot(ds_za.FG_CO2.isel(basins=0).mean('time'), ds_za.lat_t, '-', color='tab:blue', label='CESM2')
ax_za_mean.plot(ds_obs_za.fgco2_smoothed.mean('time'), ds_obs_za.lat, '-', color='tab:red', label='SOM-FFN')
monlabs = np.array(["J", "F", "M", "A", "M", "J", "J", "A", "S", "O", "N", "D"])
for ax in axs_za:
    ax.set_ylim([-80, 90.])
    ax.set_yticks(np.arange(-75, 90, 15))
    ax.set_xticks(np.arange(0, 13))
    ax.set_xticklabels([f' {m}' for m in monlabs]+['']);
axs_za[0].set_ylabel('Latitude [°N]')
axs_za[1].set_yticklabels([])
axs_za[0].set_title('CESM')
axs_za[1].set_title('SOM-FFN')
ax_za_mean.set_ylim([-80, 90.])
ax_za_mean.set_yticks(np.arange(-75, 90, 15))
ax_za_mean.set_yticklabels([])
ax_za_mean.set_xticks(np.arange(-2.5, 1.5, 0.5))
ax_za_mean.set_xticklabels(['', '-2', '', -1, '', '0', '', '1'])
ax_za_mean.axvline(0., linewidth=0.5, color='k',)
ax_za_mean.set_xlabel('mol m$^{-2}$ yr$^{-1}$')
ax_za_mean.legend(loc=(0.03, 0.45), frameon=False, handlelength=1.0, fontsize=8, handletextpad=0.5)
utils.label_plots(fig, [ax for ax in axs_map], xoff=0.02, yoff=0)
utils.label_plots(fig, [ax for ax in axs_za + [ax_za_mean]], xoff=-0.01, start=2)
plt.colorbar(cf, cax=cax)
cax.set_title('mol m$^{-2}$ yr$^{-1}$')
utils.savefig('fgco2.pdf')
# -
| notebooks/figures/co2-flux.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# language: python
# name: python3
# ---
# (1) Import the required Python dependencies
import findspark
findspark.init()
from pyspark import SparkContext, SparkConf
from pyspark.sql import SQLContext
from pyspark.ml.feature import VectorAssembler
from pyspark.ml.classification import MultilayerPerceptronClassifier
from pyspark.ml.evaluation import MulticlassClassificationEvaluator
# (2) Instantiate a Spark Context
# NOTE(review): requires a live Spark master at this hard-coded address.
# SQLContext is the legacy (pre-2.0) entry point; SparkSession is the modern one.
conf = SparkConf().setMaster("spark://192.168.56.10:7077").setAppName("Multilayer Perceptron - OCR")
sc = SparkContext(conf=conf)
sqlContext = SQLContext(sc)
# +
# (3) Load the Letter Recognition Dataset (in CSV format with pre-defined label and features columns)
# (3.1) Create Feature Vectors from the 16 features
# (3.2) Rename the 'lettr' column to 'label' which is a number representing one of the 26 characters in the English alphabet
# NOTE(review): MultilayerPerceptronClassifier requires numeric labels in [0, 26);
# presumably the CSV already encodes 'lettr' numerically — verify against the data file.
letter_recognition_df = sqlContext.read.format('com.databricks.spark.csv').options(header = 'true', inferschema = 'true').load('/data/workspaces/jillur.quddus/jupyter/notebooks/Machine-Learning-with-Apache-Spark-QuickStart-Guide/chapter07/data/ocr-data/letter-recognition.csv')
feature_columns = ['x-box','y-box','width','high','onpix','x-bar','y-bar','x2bar','y2bar','xybar','x2ybr','xy2br','x-ege','xegvy','y-ege','yegvx']
vector_assembler = VectorAssembler(inputCols = feature_columns, outputCol = 'features')
vectorised_df = vector_assembler.transform(letter_recognition_df).withColumnRenamed('lettr', 'label').select('label', 'features')
vectorised_df.show(10, False)
# -
# (4) Split the Featurised DataFrame into a Training DataFrame and a Test DataFrame
train_df, test_df = vectorised_df.randomSplit([0.75, 0.25], seed=12345)
train_df.count(), test_df.count()
# (5) Specify the layers for our Neural Network
# (5.1) The 1st element in this list represents the size of the Input Layer. In our case, we have 16 features
# (5.2) The next elements in the list represent the sizes of the intermediate Hidden Layers, in our case 8 and 4
# (5.3) The final element in this list represents the size of the Output. In our case, we have 26 classes
layers = [16, 8, 4, 26]
# (6) Train a Multilayer Perceptron Classifier using our list representing our layers from input to output layers
multilayer_perceptron_classifier = MultilayerPerceptronClassifier(maxIter=100, layers=layers, blockSize=128, seed=1234)
multilayer_perceptron_classifier_model = multilayer_perceptron_classifier.fit(train_df)
# (7) Apply the Trained Multilayer Perceptron Classifier Model to the Test DataFrame to make predictions
test_predictions_df = multilayer_perceptron_classifier_model.transform(test_df)
print("TEST DATASET PREDICTIONS AGAINST ACTUAL LABEL: ")
test_predictions_df.select("label", "features", "probability", "prediction").show()
# (8) Compute the accuracy of our Trained Multilayer Perceptron Classifier Model on the Test DataFrame
prediction_and_labels = test_predictions_df.select("prediction", "label")
accuracy_evaluator = MulticlassClassificationEvaluator(metricName="accuracy")
precision_evaluator = MulticlassClassificationEvaluator(metricName="weightedPrecision")
recall_evaluator = MulticlassClassificationEvaluator(metricName="weightedRecall")
print("Accuracy on Test Dataset = %g" % accuracy_evaluator.evaluate(prediction_and_labels))
print("Precision on Test Dataset = %g" % precision_evaluator.evaluate(prediction_and_labels))
print("Recall on Test Dataset = %g" % recall_evaluator.evaluate(prediction_and_labels))
# (9) To improve the accuracy of our model, let us increase the size of the Hidden Layers
new_layers = [16, 16, 12, 26]
new_multilayer_perceptron_classifier = MultilayerPerceptronClassifier(maxIter=400, layers=new_layers, blockSize=128, seed=1234)
new_multilayer_perceptron_classifier_model = new_multilayer_perceptron_classifier.fit(train_df)
new_test_predictions_df = new_multilayer_perceptron_classifier_model.transform(test_df)
print("New Accuracy on Test Dataset = %g" % accuracy_evaluator.evaluate(new_test_predictions_df.select("prediction", "label")))
# (10) Stop the Spark Context
sc.stop()
| _src/Chapter07/chp07-01-multilayer-perceptron-classifier.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# language: python
# name: python3
# ---
import numpy as np
import matplotlib.pyplot as plt
# %matplotlib inline
# ### Homework part I: Prohibited Comment Classification (3 points)
#
# 
#
# __In this notebook__ you will build an algorithm that classifies social media comments into normal or toxic.
# Like in many real-world cases, you only have a small (10^3) dataset of hand-labeled examples to work with. We'll tackle this problem using both classical nlp methods and embedding-based approach.
# +
import pandas as pd
# Hand-labeled comments: text plus a binary should_ban target
data = pd.read_csv("comments.tsv", sep='\t')
texts = data['comment_text'].values
target = data['should_ban'].values
data[50::200]
# -
from sklearn.model_selection import train_test_split
# 50/50 split, fixed seed so the asserts below are reproducible
texts_train, texts_test, y_train, y_test = train_test_split(texts, target, test_size=0.5, random_state=42)
# __Note:__ it is generally a good idea to split data into train/test before anything is done to them.
#
# It guards you against possible data leakage in the preprocessing stage. For example, should you decide to select words present in obscene tweets as features, you should only count those words over the training set. Otherwise your algoritm can cheat evaluation.
# ### Preprocessing and tokenization
#
# Comments contain raw text with punctuation, upper/lowercase letters and even newline symbols.
#
# To simplify all further steps, we'll split text into space-separated tokens using one of nltk tokenizers.
# +
from nltk.tokenize import TweetTokenizer
tokenizer = TweetTokenizer()
# Lowercase, tokenize, and re-join with single spaces so later code can split on ' '
preprocess = lambda text: ' '.join(tokenizer.tokenize(text.lower()))
text = 'How to be a grown-up at work: replace "fuck you" with "Ok, great!".'
print("before:", text,)
print("after:", preprocess(text),)
# -
# Keep the raw texts around before they are overwritten below
raw_train = texts_train.copy()
raw_test = texts_test.copy()
# +
# task: preprocess each comment in train and test
texts_train = [preprocess(x) for x in texts_train]
texts_test = [preprocess(x) for x in texts_test]
# -
assert texts_train[5] == 'who cares anymore . they attack with impunity .'
assert texts_test[89] == 'hey todds ! quick q ? why are you so gay'
assert len(texts_test) == len(y_test)
# ### Solving it: bag of words
#
# 
#
# One traditional approach to such problem is to use bag of words features:
# 1. build a vocabulary of frequent words (use train data only)
# 2. for each training sample, count the number of times a word occurs in it (for each word in vocabulary).
# 3. consider this count a feature for some classifier
#
# __Note:__ in practice, you can compute such features using sklearn. Please don't do that in the current assignment, though.
# * `from sklearn.feature_extraction.text import CountVectorizer, TfidfVectorizer`
from sklearn.feature_extraction.text import CountVectorizer, TfidfVectorizer
k = 10000  # vocabulary size cap
from collections import Counter
# Texts are already space-tokenized, so the analyzer is a plain split
cv = CountVectorizer(max_features =k, analyzer = lambda x: x.split())
#token_pattern = " \w+ ")
cv.fit(texts_train)
# +
# task: find up to k most frequent tokens in texts_train,
# sort them by number of occurrences (highest first)
counter = Counter((y for x in texts_train for y in x.split(" ")))
# Vocabulary ordered by CountVectorizer column index, so bow_vocabulary[i]
# names column i of cv.transform(...) output
bow_vocabulary = sorted(cv.vocabulary_.keys(), key = lambda x: cv.vocabulary_[x])
# -
print('example features:', sorted(bow_vocabulary)[::100])
import gutil
def text_to_bow(text):
    """Convert one text string to a flat float32 array of token counts,
    using the fitted CountVectorizer `cv` (columns follow bow_vocabulary)."""
    return cv.transform([text]).toarray().ravel().astype("float32")
# Dense bag-of-words matrices: [n_texts, vocab_size]
X_train_bow = np.stack(list(map(text_to_bow, texts_train)))
X_test_bow = np.stack(list(map(text_to_bow, texts_test)))
np.argwhere(text_to_bow("."))
X_train_bow[6, bow_vocabulary.index('.')]
texts_train[6].split().count('.')
# Total number of distinct tokens in the training corpus
k_max = len(set(' '.join(texts_train).split()))
assert X_train_bow.shape == (len(texts_train), min(k, k_max))
assert X_test_bow.shape == (len(texts_test), min(k, k_max))
assert np.all(X_train_bow[5:10].sum(-1) == np.array([len(s.split()) for s in texts_train[5:10]]))
assert len(bow_vocabulary) <= min(k, k_max)
assert X_train_bow[6, bow_vocabulary.index('.')] == texts_train[6].split().count('.')
# __Naive bayes:__ perhaps the simplest model that can solve your problem is the so called Naive Bayes Classifier.
# Its a trivial linear model that assumes the independence of input features and computes the coefficients by, well, counting probabilities.
#
# If you don't remember the math behind Naive Bayes, read [this chunk](https://lena-voita.github.io/nlp_course/text_classification.html#naive_bayes) to help refresh your memory. Done? Good! Now let's implement that :)
# NOTE(review): leftover scratch/debug cell. `sym`, `X`, and `y` are undefined
# at this point, and BinaryNaiveBayes is used before its definition below —
# running the notebook top-to-bottom raises NameError here. Consider deleting.
X[y==1],sym
# +
self = lambda x: x
self.delta =1
X = X_train_bow
y = y_train
my_bayes = BinaryNaiveBayes ()
my_bayes.fit(X,y)
self = my_bayes
# -
class BinaryNaiveBayes:
    '''
    Two-class Naive Bayes over bag-of-words counts:
    P(y=k|x=c) = P(x=c|y=k) * P(y=k)
    '''
    delta = 1.0  # Laplace smoothing added to every word count before normalizing

    def fit(self, X, y):
        """
        Fit a NaiveBayes classifier for two classes
        :param X: [batch_size, vocab_size] of bag-of-words features
        :param y: [batch_size] of binary targets {0, 1}
        """
        # class priors p(y=0), p(y=1)
        negative_fraction = (y == 0).sum() / len(y)
        self.p_y = np.array([negative_fraction, 1 - negative_fraction])

        # per-word counts within each class, each of shape [vocab_size]
        counts_pos = X[y == 1].sum(axis=0)
        counts_neg = X[y == 0].sum(axis=0)

        # smoothed conditional word distributions p(word | y=k)
        smoothed_pos = counts_pos + self.delta
        smoothed_neg = counts_neg + self.delta
        self.p_x_given_positive = smoothed_pos / smoothed_pos.sum()
        self.p_x_given_negative = smoothed_neg / smoothed_neg.sum()
        return self

    def predict_scores(self, X):
        """
        :param X: [batch_size, vocab_size] of bag-of-words features
        :returns: a matrix of scores [batch_size, k] of scores for k-th class
        """
        # log p(y=k) + sum_i count_i * log p(word_i | y=k), via a dot product
        log_pos = X @ np.log(self.p_x_given_positive) + np.log(self.p_y[1])
        log_neg = X @ np.log(self.p_x_given_negative) + np.log(self.p_y[0])
        return np.stack([log_neg, log_pos], axis=-1)

    def predict(self, X):
        # class with the larger log-score wins
        return self.predict_scores(X).argmax(axis=-1)
naive_model = BinaryNaiveBayes().fit(X_train_bow, y_train)
# +
# Sanity checks on the fitted distributions
assert naive_model.p_y.shape == (2,) and naive_model.p_y.sum() == 1 and naive_model.p_y[0] > naive_model.p_y[1]
assert naive_model.p_x_given_positive.shape == naive_model.p_x_given_negative.shape == X_train_bow.shape[1:]
assert np.allclose(naive_model.p_x_given_positive.sum(), 1.0)
assert np.allclose(naive_model.p_x_given_negative.sum(), 1.0)
assert naive_model.p_x_given_negative.min() > 0, "did you forget to add delta?"
f_index = bow_vocabulary.index('fuck') # offensive tweets should contain more of this
assert naive_model.p_x_given_positive[f_index] > naive_model.p_x_given_negative[f_index]
g_index = bow_vocabulary.index('good') # offensive tweets should contain less of this
assert naive_model.p_x_given_positive[g_index] < naive_model.p_x_given_negative[g_index]
# +
# Reference implementation for comparison
from sklearn.naive_bayes import MultinomialNB
sk_bayes = MultinomialNB()
sk_bayes.fit(X_train_bow, y_train)
sk_bayes.score(X_test_bow, y_test)
# +
from sklearn.metrics import roc_auc_score, roc_curve
# ROC curves on train and test; the score is the log-odds (difference of class log-scores)
for name, X, y, model in [
    ('train', X_train_bow, y_train, naive_model),
    ('test ', X_test_bow, y_test, naive_model)
]:
    proba = model.predict_scores(X)[:, 1] - model.predict_scores(X)[:, 0]
    auc = roc_auc_score(y, proba)
    plt.plot(*roc_curve(y, proba)[:2], label='%s AUC=%.4f' % (name, auc))
plt.plot([0, 1], [0, 1], '--', color='black',)
plt.legend(fontsize='large')
plt.grid()
test_accuracy = np.mean(naive_model.predict(X_test_bow) == y_test)
print(f"Model accuracy: {test_accuracy:.3f}")
assert test_accuracy > 0.75, "Accuracy too low. There's likely a mistake in the code."
print("Well done!")
# -
# Okay, it definitely learned *something*. Now let's figure out what exactly it learned. The simplest way to do that is by highlighting which words have a greatest ratio of positive to negative probability or vice versa. We'll go with the positive one [because reasons](https://www.urbandictionary.com/define.php?term=because%20reasons).
#
# __Your task__ is to compute top-25 words that have the __highest__ ratio of ${p(x_i | y=1)} \over {p(x_i | y=0)}$. Enjoy!
voc_array = np.array(bow_vocabulary)
voc_array
voc_array[np.argsort(sk_bayes.feature_log_prob_[1,:]/sk_bayes.feature_log_prob_[0,:])][:20]
# +
# hint: use naive_model.p_*
probability_ratio = naive_model.p_x_given_positive/naive_model.p_x_given_negative
# take the 25 largest ratios, highest first
top_negative_words = voc_array[np.argsort(probability_ratio)][-25:][::-1]
assert len(top_negative_words) == 25 and [isinstance(w, str) for w in top_negative_words]
assert 'j.delanoy' in top_negative_words and 'college' in top_negative_words
for i, word in enumerate(top_negative_words):
    print(f"#{i}\t{word.rjust(10, ' ')}\t(ratio={probability_ratio[bow_vocabulary.index(word)]})")
# -
# Now lets try something less prehistoric: __Logistic Regression__. Turns out, if you're using silicon instead of an abacus, you can find model weights by optimizing the log-probability of the answer. Though, of course, you don't even need to write it by hand anymore. Let's sklearn it!
from sklearn.linear_model import LogisticRegression
# C=0.2 adds regularization (tuned to pass the accuracy assert below)
bow_model = LogisticRegression(C=0.2).fit(X_train_bow, y_train)
# +
from sklearn.metrics import roc_auc_score, roc_curve
for name, X, y, model in [
    ('train', X_train_bow, y_train, bow_model),
    ('test ', X_test_bow, y_test, bow_model)
]:
    proba = model.predict_proba(X)[:, 1]
    auc = roc_auc_score(y, proba)
    plt.plot(*roc_curve(y, proba)[:2], label='%s AUC=%.4f' % (name, auc))
plt.plot([0, 1], [0, 1], '--', color='black',)
plt.legend(fontsize='large')
plt.grid()
test_accuracy = np.mean(bow_model.predict(X_test_bow) == y_test)
print(f"Model accuracy: {test_accuracy:.3f}")
assert test_accuracy > 0.77, "Hint: tune the parameter C to improve performance"
print("Well done!")
# -
# ### Task: implement TF-IDF features
#
# Not all words are equally useful. One can prioritize rare words and downscale words like "and"/"or" by using __tf-idf features__. This abbreviation stands for __text frequency/inverse document frequence__ and means exactly that:
#
# $$ feature_i = { Count(word_i \in x) \times { log {N \over Count(word_i \in D) + \alpha} }} $$
#
#
# , where x is a single text, D is your dataset (a collection of texts), N is a total number of documents and $\alpha$ is a smoothing hyperparameter (typically 1).
# And $Count(word_i \in D)$ is the number of documents where $word_i$ appears.
#
# It may also be a good idea to normalize each data sample after computing tf-idf features.
#
# __Your task:__ implement tf-idf features, train a model and evaluate ROC curve. Compare it with basic BagOfWords model from above.
#
# Please don't use sklearn/nltk builtin tf-idf vectorizers in your solution :) You can still use 'em for debugging though.
# idf_i = log(N / (df_i + 1)) per the formula above with alpha = 1
idf = np.log(X_train_bow.shape[0]/((X_train_bow>0).sum(axis=0)+1))
# +
class MyIdf:
    '''
    Inverse-document-frequency weighting on top of a bag-of-words matrix.
    fit() learns idf_i = log(N / (df_i + 1)), where df_i counts the documents
    containing word i; transform() rescales raw counts by those weights.
    '''
    def fit(self, X, y):
        n_docs = X.shape[0]
        doc_freq = (X > 0).sum(axis=0)
        self.idf = np.log(n_docs / (doc_freq + 1))
        return self

    def transform(self, X):
        # elementwise scaling of each column by its idf weight
        return X * self.idf
# -
tf = TfidfVectorizer(analyzer = lambda x: x.split())
tf.fit(texts_train, )
from sklearn.pipeline import Pipeline
# NOTE(review): `pipe` is not defined until a few lines below — this line
# raises NameError if the notebook is run top-to-bottom.
np.linalg.norm(pipe['transform'].transform(texts_train).toarray()[0, :])
# Custom idf weighting vs sklearn's TfidfVectorizer, both feeding the same classifier
my_pipe = Pipeline([('tf-idf', MyIdf()), ("logistic", LogisticRegression(C=0.2))])
my_pipe.fit(X_train_bow, y_train)
# NOTE(review): Pipeline has no attribute `dd` — this raises AttributeError;
# likely leftover scratch.
my_pipe.dd
my_pipe['tf-idf'].idf
pipe = Pipeline([("transform", TfidfVectorizer(analyzer = lambda x: x.split())),
                 ("logistic", LogisticRegression(C=0.2))])
pipe.fit(texts_train, y_train)
# Compare accuracy and ROC AUC of the two pipelines
print(pipe.score(texts_train, y_train), my_pipe.score(X_train_bow, y_train))
print(pipe.score(texts_test, y_test), my_pipe.score(X_test_bow, y_test))
print(roc_auc_score(y_test, pipe.predict_proba(texts_test)[:, 1]),
      roc_auc_score(y_test, my_pipe.predict_proba(X_test_bow)[:, 1]))
# ```
#
# ```
#
# ```
#
# ```
#
# ```
#
# ```
#
# ```
#
# ```
#
# ```
#
# ```
#
# ```
#
# ```
#
# ```
#
# ```
#
# ```
#
# ```
#
#
# ### Solving it better: word vectors
#
# Let's try another approach: instead of counting per-word frequencies, we shall map all words to pre-trained word vectors and average over them to get text features.
#
# This should give us two key advantages: (1) we now have 10^2 features instead of 10^4 and (2) our model can generalize to word that are not in training dataset.
#
# We begin with a standard approach with pre-trained word vectors. However, you may also try
# * training embeddings from scratch on relevant (unlabeled) data
# * multiplying word vectors by inverse word frequency in dataset (like tf-idf).
# * concatenating several embeddings
# * call `gensim.downloader.info()['models'].keys()` to get a list of available models
# * clusterizing words by their word-vectors and try bag of cluster_ids
#
# __Note:__ loading pre-trained model may take a while. It's a perfect opportunity to refill your cup of tea/coffee and grab some extra cookies. Or binge-watch some tv series if you're slow on internet connection
# +
import gensim.downloader
# Pre-trained 300-dim fastText vectors (large download)
embeddings = gensim.downloader.load("fasttext-wiki-news-subwords-300")
# If you're low on RAM or download speed, use "glove-wiki-gigaword-100" instead. Ignore all further asserts.
# -
comment = texts_train[0]
embeddings.vectors.shape[1]
# NOTE(review): vectorize_sum is defined in the next cell — this line fails
# with NameError on a top-to-bottom run.
vectorize_sum("who cares anymore . they attack with impunity .")[::70],
# +
def vectorize_sum(comment):
    """
    Convert a preprocessed (space-tokenized) comment to the sum of its
    token embedding vectors. Out-of-vocabulary tokens are skipped.
    """
    dim = embeddings.vectors.shape[1]
    total = np.zeros([dim], dtype='float32')
    for token in comment.split(" "):
        try:
            total += embeddings.get_vector(token)
        except KeyError:
            # token not in the pre-trained vocabulary: contributes nothing
            pass
    return total
# Regression check against known fastText values (fails with the GloVe fallback)
assert np.allclose(
    vectorize_sum("who cares anymore . they attack with impunity .")[::70],
    np.array([ 0.0108616 ,  0.0261663 ,  0.13855131, -0.18510573, -0.46380025])
)
# -
class Embedding_Sum:
    """Sklearn-style transformer mapping each text to its summed word vectors."""
    def fit(self, X, y):
        # stateless: nothing to learn
        return self

    def transform(self, X):
        rows = [vectorize_sum(text) for text in X]
        return np.stack(rows)
from sklearn.preprocessing import StandardScaler
# Embedding-sum features -> standardize -> logistic regression
pipe = Pipeline([("embedding", Embedding_Sum()),
                 ('scaler', StandardScaler()),
                 ("LogReg", LogisticRegression())
                ])
pipe.fit(texts_train, y_train)
roc_auc_score(y_test, pipe.predict_proba(texts_test)[:, 1])
pipe.score(texts_test, y_test)
X_train_wv = np.stack([vectorize_sum(text) for text in texts_train])
X_test_wv = np.stack([vectorize_sum(text) for text in texts_test])
# NOTE(review): hard-codes the train-set size 500; breaks if the split changes.
X_train_wv2 = X_train_wv/X_train_wv.sum(axis=1).reshape(500,-1)
# +
wv_model = LogisticRegression().fit(X_train_wv, y_train)
# Compare bag-of-words vs word-vector features via ROC curves
for name, X, y, model in [
    ('bow train', X_train_bow, y_train, bow_model),
    ('bow test ', X_test_bow, y_test, bow_model),
    ('vec train', X_train_wv, y_train, wv_model),
    ('vec test ', X_test_wv, y_test, wv_model)
]:
    proba = model.predict_proba(X)[:, 1]
    auc = roc_auc_score(y, proba)
    plt.plot(*roc_curve(y, proba)[:2], label='%s AUC=%.4f' % (name, auc))
plt.plot([0, 1], [0, 1], '--', color='black',)
plt.legend(fontsize='large')
plt.grid()
assert roc_auc_score(y_test, wv_model.predict_proba(X_test_wv)[:, 1]) > 0.92, "something's wrong with your features"
# -
# If everything went right, you've just managed to reduce misclassification rate by a factor of two.
# This trick is very useful when you're dealing with small datasets. However, if you have hundreds of thousands of samples, there's a whole different range of methods for that. We'll get there in the second part.
| week02_classification/homework_part1.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# name: python3
# ---
# + [markdown] id="view-in-github" colab_type="text"
# <a href="https://colab.research.google.com/github/Jsevillamol/quantumcomputingtimelines/blob/main/QuantumComputingProgress.ipynb" target="_parent"><img src="https://colab.research.google.com/assets/colab-badge.svg" alt="Open In Colab"/></a>
# + id="uHbrTVgll8Su" cellView="both" outputId="7a0ba4be-399e-4e0e-9bbe-38eae3a94cb9" colab={"base_uri": "https://localhost:8080/", "height": 725}
#@markdown **Double click here to see the code to import required libraries and the dataset**
#@markdown To play around with the graphs, execute the cells in order (ctrl + enter), starting with this one
#@markdown You will be prompted to give your credentials to access the data spreadsheet
#@markdown This notebook includes the code to
#@markdown * Visualize the **contour plot of the generalized logical qubit metric**
#@markdown * **visualize the data**
#@markdown * perform a **multivariate loglinear regression with bootstrapping**
#@markdown * perform a **multivariate extrapolation with bootstrapping**
#@markdown * extrapolate via a **geometric drift model**
#@markdown Double click any cell to see the associated code.
import numpy as np
from scipy import special
from scipy import stats
from scipy import odr
from scipy.stats import multivariate_normal
import statsmodels.api as sm
from sklearn.linear_model import LinearRegression
import matplotlib.pyplot as plt
import math
import pandas as pd
import altair as alt
import collections
from datetime import datetime
from google.colab import auth
auth.authenticate_user()
import gspread
from oauth2client.client import GoogleCredentials
gc = gspread.authorize(GoogleCredentials.get_application_default())
# worksheet = gc.open('Quantum computing progress').sheet1
# Pull the progress data from the shared Google Sheet (requires the auth above)
worksheet = gc.open_by_url('https://docs.google.com/spreadsheets/d/1pwb4gf0FxlxgfVhtXTaqEGS9b7FwsstsJ0v7Zb1naQ0').sheet1
# worksheet = gc.open_by_url('https://docs.google.com/spreadsheets/d/1utGUc1Okg4XfF06Ilhyy0jFLqL9RnbFaVXDynwFjfb8/edit#gid=0').sheet1
# get_all_records gives a dict with the contents of the spreadsheet
rows = worksheet.get_all_records()
# Convert to a DataFrame
df = pd.DataFrame.from_records(rows)
# Fix metric column types (non-numeric cells become NaN)
metrics = ["Physical qubits", "Average two-qubit gate error rate"]
for metric in metrics:
    df[metric] = pd.to_numeric(df[metric], errors="coerce")
df["Year"] = pd.to_numeric(df["Year"], errors="coerce")
df["Date"] = pd.to_datetime(df["Date"], errors="coerce")
# Impute missing dates with year when available: keep Date if present (or if
# Year is also missing), otherwise assume mid-year (June 1)
df["Date"] = df.apply(lambda row : row["Date"] \
                        if row["Date"] is not pd.NaT or pd.isna(row["Year"]) \
                        else pd.Timestamp(year=np.int64(row["Year"]), month=6, day=1),
                      axis=1)
# (discretization) Rounds dates down to the year
#df["Date"] = df.apply(lambda row : pd.Timestamp(year=np.int64(row["Date"].year), month=1, day=1) if not pd.isna(row["Date"]) else np.nan, axis=1)
# Substitute year with fractional year, rounded to the month
df["Year"] = df.apply(lambda row : row["Date"].year + row["Date"].month/12, axis=1)
#####################################################################
# define generalized logical qubits metric
def GLQ(physical_qubits, error_rate, target_fidelity=1/5.2e17, threshold_fidelity=1e-2):
    """Generalized logical qubits achievable with surface-code error correction.

    Below the target error rate no correction is needed, so every physical
    qubit counts; above the threshold error rate the code cannot help at all
    and the result is 0. In between, physical qubits are discounted by the
    surface-code overhead (physical qubits per logical qubit).
    """
    if error_rate < target_fidelity:
        return physical_qubits
    if error_rate > threshold_fidelity:
        return 0.
    # Overhead grows as the error rate approaches the threshold
    code_term = 4 * np.log(np.sqrt(10) * error_rate / target_fidelity) / np.log(threshold_fidelity / error_rate)
    surface_QEC_factor = (code_term + 1) ** 2
    #surface_code_distance = np.log(10*(target_fidelity)**2/error_rate**2)/np.log(100*error_rate)
    #surface_QEC_factor = (2*surface_code_distance+1)**2
    return physical_qubits / surface_QEC_factor
# Vectorized GLQ for use on numpy arrays / meshgrids in the plotting cells.
vect_GLQ = np.vectorize(GLQ)
# Add GLQs to the df
df["Generalized logical qubits"] = df.apply (lambda row: GLQ(row["Physical qubits"], row["Average two-qubit gate error rate"]), axis=1)
#######################################################################
# show all data
df
# + id="-tQERE3NsgPc" outputId="cb5e0a0f-cf19-43e0-8dc3-ad3e2905509b" colab={"base_uri": "https://localhost:8080/", "height": 35}
#@title GLQ calculator {display-mode: "form", run : "auto"}
# Interactive calculator: sliders pick powers of ten for the physical qubit
# count and the error rate, and the resulting GLQ estimate is printed.
log_ph_qubits = 2 #@param {type:"slider", min:0, max:10}
ph_qubits = 10**log_ph_qubits
log_error_rate = -12 #@param {type:"slider", min:-20, max:0}
error_rate = 10**log_error_rate #np.logspace(-20, 0)
glqs = vect_GLQ(ph_qubits, error_rate)
print(f"glqs = {glqs}")
# + id="ZiOuYBoS0pLo" cellView="both" outputId="8d92a738-2550-4a0b-ccfd-60bea537d44c" colab={"base_uri": "https://localhost:8080/", "height": 550}
#@title Generalized logical qubits contour plot
# Draw iso-GLQ contour lines over the (error rate, physical qubits) plane on
# log-log axes, with the thresholds of interest (1 and 4100 GLQs) dashed.
# Contour labels are placed manually along a reference line y = b*x^a so they
# do not overlap.
neg_levels = [10**n for n in range(-4,0)]   # sub-unit GLQ contour levels
pos_levels = [10**n for n in range(1,5)]    # super-unit GLQ contour levels
thresholds = np.array([1,4100])             # 1 GLQ and the factoring-scale 4100-GLQ mark
label_fontsize=15
epsilon = 1e-04
resolution = 1000
# Grid over error rates (x, flipped so the axis decreases) and qubit counts (y).
error_rates, ph_qubits = np.meshgrid(np.flip(np.logspace(-10,0, resolution)), np.logspace(0., 6.5, resolution))
glqs = vect_GLQ(ph_qubits, error_rates)
fig2 = plt.figure(figsize=(20,10))
ax2 = fig2.add_subplot(111)
# Fixed typo: title previously read "Generalize logical qubits contour plot".
ax2.set_title("Generalized logical qubits contour plot", fontsize=30)
ax2.set_xlabel("Average error rate", fontsize=25)
ax2.set_ylabel("Physical qubits", fontsize=25)
ax2.set_xscale("log")
ax2.set_xlim(10, 10**-10)   # reversed x-axis: improving error rate goes rightwards
ax2.set_yscale("log")
# Construct a line where to place the contour levels
a=-1 ; b =1e-2*1.5
line_x = np.logspace(-10,0, resolution)
line_y = b*(line_x**a)
line = np.c_[line_x,line_y]
line_glqs = vect_GLQ(line_y,line_x)
print(line_glqs.shape)
# Find the intersection of the line and the contours
neg_manual_label_locations = []
for level in neg_levels:
    idx = np.argmin(np.abs(line_glqs-level))
    neg_manual_label_locations.append((line_x[idx], line_y[idx]))
pos_manual_label_locations = []
for level in pos_levels:
    idx = np.argmin(np.abs(line_glqs-level))
    pos_manual_label_locations.append((line_x[idx], line_y[idx]))
thresholds_manual_label_locations = []
for threshold in thresholds:
    idx = np.argmin(np.abs(line_glqs-threshold))
    thresholds_manual_label_locations.append((line_x[idx], line_y[idx]))
# Draw the GLQ levels < 1
CS = ax2.contour(error_rates, ph_qubits, glqs, levels=neg_levels,colors='black');
# adjusting sig figs: https://stackoverflow.com/questions/55846749/how-to-reduce-decimal-places-in-a-matplotlib-contour-plot
ax2.clabel(CS, inline=1, fontsize=label_fontsize, fmt="%.0E", manual = neg_manual_label_locations);
# Draw the GLQ levels > 1
CS = ax2.contour(error_rates, ph_qubits, glqs, levels=pos_levels,colors='black');
# adjusting sig figs: https://stackoverflow.com/questions/55846749/how-to-reduce-decimal-places-in-a-matplotlib-contour-plot
ax2.clabel(CS, inline=1, fontsize=label_fontsize, fmt="%1i", manual = pos_manual_label_locations);
# Draw the thresholds
CS2 = ax2.contour(error_rates, ph_qubits, glqs, levels=thresholds, linestyles='dashed', colors='black');
ax2.clabel(CS2, inline=1, fontsize=label_fontsize, fmt="%1i", manual=thresholds_manual_label_locations);
# + [markdown] id="Pw-UR0kyn6cb"
# # Data visualization
# + id="MPFxXsJNn8pf" outputId="e1ca1c6a-3339-4b2b-aec8-ee46746245f1" colab={"base_uri": "https://localhost:8080/", "height": 929}
#@title Plot key metrics against years {display-mode: "form"}
# Scatter-plot the chosen metric over time (log y-axis), restricted to a date
# window; non-positive values are dropped because of the log scale.
target = "Physical qubits" #@param ["Physical qubits", "Average two-qubit gate error rate", "Generalized logical qubits"]
data_horizon = [2002, 2022] #@param
data_horizon = [pd.Timestamp(year=x, month=1, day=1) for x in data_horizon]
# Filter the data
df_hat = df.dropna(subset=["Date", target])
df_hat = df_hat[df_hat[target]> 0.]
df_hat = df_hat[data_horizon[0] <= df_hat["Date"]]
df_hat = df_hat[df_hat["Date"] <= data_horizon[1]]
print(f"n = {len(df_hat)}")
# Plot the filtered data (interactive Altair chart; shape encodes technology)
alt.Chart(df_hat,width=1000, height=800
).mark_point(size=200).encode(
    x=alt.X('Date', scale=alt.Scale(), axis = alt.Axis(format = ("%Y"))),
    y=alt.Y(target, scale=alt.Scale(type="log")),
    shape = 'Technology',
    tooltip='Source'
).configure_axis(grid=False,
    labelFontSize=20,
    titleFontSize=40).interactive().configure_legend(
    titleFontSize=20,
    labelFontSize = 18,
    gradientLength=400,
    gradientThickness=30
)
# + id="3okXU4qW4cI6" outputId="080aec56-0770-4fc9-e0ad-9a7a798137b5" colab={"base_uri": "https://localhost:8080/", "height": 929}
#@title Plot physical qubits against error rate {display-mode: "form"}
# Scatter-plot physical qubits vs. error rate (both log scale), colored by
# year; rows missing either metric are excluded from the count `n`.
mask = df.notna()["Physical qubits"]
mask &= df.notna()["Average two-qubit gate error rate"]
n = np.sum(mask)
print(f"n={ n }")
alt.Chart(df, width=1000, height=800).mark_point(size=200).encode(
    x=alt.X("Physical qubits", scale=alt.Scale(type="log")),
    y=alt.Y("Average two-qubit gate error rate", scale=alt.Scale(type="log")),
    color = alt.Color("Year:O",
        scale=alt.Scale(scheme='viridis')
        ),
    shape = 'Technology'
).configure_axis(grid=False,
    labelFontSize=20,
    titleFontSize=40).interactive().configure_legend(
    titleFontSize=20,
    labelFontSize = 18,
    gradientLength=400,
    gradientThickness=30
)
# + [markdown] id="suO-bKfaYcos"
# # Extrapolating
# + id="S6L7PdJ_YhFY" outputId="4ac96c22-6f34-40a1-ae54-4eda85beb251" colab={"base_uri": "https://localhost:8080/", "height": 595}
#@title Multivariate loglinear regression with bootstrapping {display-mode: "form"}
# Configuration cell: reads the Colab form parameters, filters the dataset to
# the chosen technology and time window, and defines module-level constants
# (METRICS, t0, combined_mode, ...) consumed by PredictiveModel and the
# bootstrapping cells below.
# Specify target
print(f"Date of analysis = {datetime.now()}")
data_horizon = (2000, 2016) #@param
data_horizon = [pd.Timestamp(year=x, month=1, day=1) for x in data_horizon]
predict_limit = 2100 #@param {type:"number"}
plot_range = [2000, 2060] #@param
technology_filter = "Superconductor" #@param ["Superconductor", "all"]
print(f"Technology filter = {technology_filter}")
#@markdown Select `take_max` to aggregate the yearly data by a maximum
take_max = True #@param {"type":"boolean"}
print(f"Take max = {take_max}")
#@markdown Select `combined_mode` to make predictions of GLQs based on the extrapolation
#@markdown of physical qubits and gate fidelity. If False, the GLQs are extrapolated on their own.
combined_mode = True #@param {"type" : "boolean"}
print(f"Combined mode = {combined_mode}")
#@markdown The baseline model is the one generated by the whole dataset, no bootstrapping
plot_baseline = False #@param {"type":"boolean"}
plot_bootstrapping = True #@param {"type":"boolean"}
#@markdown Indicate the bootstraped quantile trajectories that will be plotted.
bootstrapping_quantiles = [0.05, 0.5, 0.95] #@param
#@markdown B is the number of resamples taken for bootstrapping
B = 1000 #@param {type:"number"} number of resamples
print(f"Number of bootstraps = {B}")
#@markdown The critical threshold defines the estimator used for bootstrapping,
#@markdown ie the year on which the specified threshold is crossed
critical_threshold = 4100 #@param
print(f"Critical threshold = {critical_threshold}")
#@markdown Indicate the quantiles of log gaussian noise plotted, approximated via sampling.
#@markdown If `None` is included, a naive analytical extrapolation is plotted instead.
multivariate_quantiles = [None]#@param
n_samples = 10000 #@param {"type" : "number"}
if multivariate_quantiles != [None]:
    print(f"n multivariate samples = {n_samples}")
glq_thresholds = [1, 4100] #@param
seed = 1111 #@param {type:"number"}
print(f"Seed = {seed}")
np.random.seed(seed) # set seed
##################################################################
# PREPROCESSING
# # copy dataframe
df_data = df
# filter by technology
if technology_filter != "all":
    df_data = df_data[df_data["Technology"] == technology_filter]
# drop rows with invalid years
df_data = df_data.dropna(subset=["Date"])
# select time horizon
df_data = df_data[(data_horizon[0] <= df_data["Date"]) & \
    (df_data["Date"] <= data_horizon[1])]
# Note some characteristics about the data (n and the observed time span)
n = len(df_data)
t0 = df_data["Date"].min()
t1 = df_data["Date"].max()
t = t1 - t0
# The three key metrics modeled throughout the rest of the notebook.
METRICS = ["Physical qubits",
    "Average two-qubit gate error rate",
    "Generalized logical qubits"]
print(f"n = {n}")
print(f"t = {t} years ; [{t0}, {t1}]")
#################################################################################
# PREAMBLE
class SamplingError(Exception):
    """Signals that a (re)sampled dataframe lacks enough valid rows to fit a model."""
def max_aggregation(df_hat, metric):
    """Keep only the record-setting rows for *metric*.

    A row is a record when its metric value is at least as good as every row
    dated on or before it.  "Good" means larger for most metrics; for the
    error-rate metric smaller is better, which is handled by temporarily
    negating the column so a single max-based rule covers both cases.

    Args:
        df_hat: DataFrame with a "Date" column and a *metric* column.
        metric: name of the metric column to aggregate on.

    Returns:
        A filtered copy of *df_hat* containing only the record rows.
    """
    frame = df_hat.copy()
    flip = metric == "Average two-qubit gate error rate"
    if flip:
        frame[metric] = -frame[metric].values

    def _sets_record(row):
        # Compare against all entries dated on or before this row (inclusive).
        earlier_values = frame[frame["Date"] <= row["Date"]][metric]
        return np.all(row[metric] >= earlier_values)

    frame = frame[frame.apply(_sets_record, axis=1)]
    if flip:
        frame[metric] = -frame[metric].values
    return frame
class PredictiveModel:
    """
    Class for automatic inference. On initialization takes a dataset, from which
    a model parameterization is inferred.

    Fits an independent log-linear trend (ordinary least squares on log values
    vs. fractional year) for each metric in the module-level METRICS list, or
    only for the first two metrics in combined mode (GLQs are then derived
    from the other two fits).

    Relies on notebook-level globals set by earlier cells: METRICS,
    combined_mode, take_max, t0, predict_limit, critical_threshold, n_samples,
    vect_GLQ, max_aggregation, special (scipy.special).
    """
    def __init__(self, df_data):
        """Fit per-metric log-linear models and the critical-threshold year.

        Raises SamplingError when, after cleaning/aggregation, a metric has
        two or fewer data points.
        """
        # Store data
        self.df_data_original = df_data.dropna(subset=["Date"])
        self.df_data = {}
        # Parameterize: W holds the fitted [intercept, slope] per metric,
        # s the residual variance, n the number of points used.
        self.W = {}
        self.s = {}
        self.n = {}
        for metric in METRICS if not combined_mode else METRICS[:2]:
            # Clean data
            df_hat = self.df_data_original.dropna(subset=[metric])
            df_hat = df_hat[df_hat[metric] > 0.]
            # Take max
            if take_max:
                df_hat = max_aggregation(df_hat, metric)
            if len(df_hat) <= 2: raise SamplingError()
            else:
                self.df_data[metric] = df_hat#.copy()
                self.n[metric] = len(df_hat)
            # Convert to numpy arrays (fractional year, rounded to month)
            x_hat = df_hat["Date"].apply(lambda x: x.year + x.month/12).values
            y_hat = df_hat[metric].values
            # elevate x onto a projective space
            x_hat = np.c_[ np.ones_like(x_hat), x_hat ]
            # take logarithms of data
            y_hat = np.log(y_hat)
            # solve for least squares (normal equations)
            a = np.matmul(x_hat.T, x_hat)
            b = np.matmul(x_hat.T, y_hat)
            W = np.linalg.solve(a,b)
            error = y_hat-np.matmul(x_hat, W)
            # Unbiased residual variance (degrees of freedom = n - 2)
            s = np.matmul(error.T, error) / (x_hat.shape[0] - x_hat.shape[1])
            # Store parameterization
            self.W[metric] = W
            self.s[metric] = s
        # Predict when the critical threshold will be crossed
        # (nearest point on a 1000-point grid from t0 to predict_limit)
        x_hat = np.linspace(t0.year, predict_limit, 1000)
        _, _, glqs = self.predict(x_hat)
        t_idx = np.argmin(np.abs(glqs - critical_threshold))
        self.critical_t = x_hat[t_idx]
    def predict(self, x_hat, q_sampling=None):
        """
        Make predictions on the values of the three key metrics for each time point in x_hat

        Args:
            x_hat: 1-D array of fractional years.
            q_sampling: optional quantile in (0, 1); when given, the
                prediction is the q-th quantile of the fitted log-gaussian
                noise instead of the median trajectory.

        Returns:
            (ph_qubits, fidelities, glqs) arrays, one value per entry of x_hat.
        """
        if not combined_mode and q_sampling is None:
            # Median trajectory per metric, each fitted independently.
            W1 = self.W["Physical qubits"]
            s1 = self.s["Physical qubits"]
            ph_qubits = np.exp(x_hat*W1[1]+W1[0])
            W2 = self.W["Average two-qubit gate error rate"]
            s2 = self.s["Average two-qubit gate error rate"]
            fidelities = np.exp(x_hat*W2[1]+W2[0])
            W3 = self.W["Generalized logical qubits"]
            s3 = self.s["Generalized logical qubits"]
            glqs = np.exp(x_hat*W3[1]+W3[0])
        elif not combined_mode and q_sampling is not None: # broken
            # Analytical gaussian quantile per metric (marked broken upstream).
            W1 = self.W["Physical qubits"]
            s1 = self.s["Physical qubits"]
            ph_qubits = np.exp(x_hat*W1[1]+W1[0] + np.sqrt(2*s1)*special.erfinv(2*q_sampling-1))
            W2 = self.W["Average two-qubit gate error rate"]
            s2 = self.s["Average two-qubit gate error rate"]
            fidelities = np.exp(x_hat*W2[1]+W2[0]+ np.sqrt(2*s2)*special.erfinv(2*q_sampling-1))
            W3 = self.W["Generalized logical qubits"]
            s3 = self.s["Generalized logical qubits"]
            glqs = np.exp(x_hat*W3[1]+W3[0]+ np.sqrt(2*s3)*special.erfinv(2*q_sampling-1))
        elif combined_mode and q_sampling is None:
            # GLQs derived from the qubit-count and error-rate medians.
            W1 = self.W["Physical qubits"]
            ph_qubits = np.exp(x_hat*W1[1]+W1[0])
            W2 = self.W["Average two-qubit gate error rate"]
            fidelities = np.exp(x_hat*W2[1]+W2[0])
            glqs = vect_GLQ(ph_qubits, fidelities)
        else: # combined_mode and q_sampling is not None
            # Monte-Carlo: sample both underlying metrics, compute GLQs per
            # sample, then take the requested quantile of the GLQ samples.
            # NOTE(review): np.random.normal's third use of s1/s2 passes the
            # variance where a standard deviation is expected — verify intent.
            # Sample ph qubits and fidelities
            W1 = self.W["Physical qubits"]
            mu1 = x_hat*W1[1]+W1[0]
            s1 = self.s["Physical qubits"]
            samples1 = np.exp(np.random.normal(mu1, s1, (n_samples, len(x_hat)))).T
            W2 = self.W["Average two-qubit gate error rate"]
            mu2 = x_hat*W2[1]+W2[0]
            s2 = self.s["Average two-qubit gate error rate"]
            samples2 = np.exp(np.random.normal(mu2, s2, (n_samples, len(x_hat)))).T
            # Compute glqs
            samples3 = vect_GLQ(samples1, samples2)
            assert samples3.shape == samples1.shape, f"samples3.shape = {samples3.shape}, expected {samples1.shape}"
            # Select desired quantile
            glqs = np.quantile(samples3, q_sampling, axis=1)
            assert glqs.shape == x_hat.shape, f"glqs.shape = {glqs.shape}, expected {x_hat.shape}"
            # Report the qubit/fidelity samples closest to the chosen quantile.
            quantile_idx = np.argmin(np.abs(np.reshape(glqs, (-1,1)) - samples3), axis=1)
            assert quantile_idx.shape == x_hat.shape, f"quantile_idx.shape = {quantile_idx.shape}, expected shape {x_hat.shape}"
            ph_qubits = samples1[np.arange(len(samples1)), quantile_idx]
            assert ph_qubits.shape == x_hat.shape, f"ph_qubits.shape = {ph_qubits.shape}, expected {x_hat.shape}"
            fidelities = samples2[np.arange(len(samples2)), quantile_idx]
            assert fidelities.shape == x_hat.shape
        return ph_qubits, fidelities, glqs
    def log_residuals(self, x, y, metric):
        """Return log(observed) - log(predicted) for *metric* at times *x*."""
        ph_qubits, fidelities, glqs = self.predict(x)
        if metric == "Physical qubits":
            y_hat = ph_qubits
        elif metric == "Average two-qubit gate error rate":
            y_hat = fidelities
        elif metric == "Generalized logical qubits":
            y_hat = glqs
        log_residuals = np.log(y)-np.log(y_hat)
        return log_residuals
# Fit the baseline model and/or B bootstrap resampled models, pick the models
# matching the requested bootstrap quantiles of the critical-threshold year,
# then tabulate their predicted trajectories for plotting.
models = {}
#################################################################################
# BASELINE MODEL
if plot_baseline:
    models["Baseline"] = PredictiveModel(df_data)
##########################################################################
# BOOTSTRAPPED MODELS
if plot_bootstrapping:
    model_samples = []
    t_threshold_sample = np.zeros(B)
    retries = 0
    n_sample = {}
    for metric in METRICS if not combined_mode else METRICS[:2]:
        n_sample[metric] = np.zeros(B)
    for idx in range(B):
        # Resample until the draw supports a fit (SamplingError / singular
        # normal equations trigger a retry).
        while True:
            try:
                df_hat = df_data.sample(n, replace=True)
                model = PredictiveModel(df_hat)
                model_samples.append(model)
                t_threshold_sample[idx] = model.critical_t
                for metric in METRICS if not combined_mode else METRICS[:2]:
                    n_sample[metric][idx] = model.n[metric]
            except (np.linalg.LinAlgError, SamplingError):
                retries += 1
                continue
            else: break
    print(f"We needed to repeat {retries} resamples")
    # the bootstrapped critical times should not be always equal
    assert not (t_threshold_sample == t_threshold_sample[0]).all()
    n_trajectories = len(np.unique(t_threshold_sample))
    print(f"There are {n_trajectories} distinct trajectories")
    for metric in METRICS if not combined_mode else METRICS[:2]:
        print(f"The median of {metric.lower()} data points after aggregation was {np.median(n_sample[metric])}")
    # Find the models that correspond to the desired quantiles
    for bootstrap_q in bootstrapping_quantiles:
        threshold = np.quantile(t_threshold_sample,bootstrap_q)
        idx = np.argmin(np.abs(t_threshold_sample - threshold))
        models[f"Bootstrap quantile {bootstrap_q}"] = model_samples[idx]
##########################################################################
# PREDICT TRAJECTORIES FOR EACH MODEL
assert len(models) > 0, "At least one trajectory must be plotted"
x_hat = np.linspace(plot_range[0], plot_range[1], 1000)
predictions = []
data_labels = []
for label, model in models.items():
    for multivariate_q in multivariate_quantiles:
        ph_qubits, fidelities, glqs = model.predict(x_hat, multivariate_q)
        label2 = label + (f" multivariate quantile {multivariate_q}" if multivariate_q else '')
        # Compute when we cross each of the thresholds
        for threshold in glq_thresholds:
            if np.any(glqs > threshold):
                idx = np.argmin(np.abs(glqs - threshold))
                t = x_hat[idx]
                print(f"The {threshold} threshold won't be reached until year {t} for {label2}")
            else:
                print(f"The {threshold} threshold won't be reached by year {x_hat.max()} for {label2}")
        # One long-format record per predicted time point.
        model_predictions = [{
            "Model" : label2,
            "Year" : t,
            "Physical qubits" : x1,
            "Average two-qubit gate error rate" : x2,
            "Generalized logical qubits" : x3
        } for t,x1,x2,x3 in zip(x_hat, ph_qubits, fidelities, glqs)]
        predictions += model_predictions
        # Tag each model's underlying data points for the dataset selector.
        for metric in METRICS if not combined_mode else METRICS[:2]:
            data = model.df_data[metric]
            data["Model"] = label2
            data["Metric"] = metric
            data_labels.append(data)
predictions_df = pd.DataFrame(predictions)
predictions_df = pd.melt(predictions_df, id_vars=['Year', 'Model'], value_vars=METRICS)
data_labels_df = pd.concat(data_labels)
print("Simulation done!")
# + id="pljTr6NXc0jZ" cellView="form" outputId="034f5738-cba6-4ec3-de6c-58a5271465df" colab={"base_uri": "https://localhost:8080/", "height": 601}
#@markdown Visualize extrapolation
# Layered Altair chart: model trajectories, the data-horizon rule, the GLQ
# thresholds, plus the raw and model-selected data points with a dataset
# dropdown. NOTE(review): this cell rebinds `df_data` to a melted frame,
# shadowing the regression dataset defined earlier — verify downstream cells.
target = "Generalized logical qubits" #@param ["Physical qubits", "Average two-qubit gate error rate", "Generalized logical qubits"]
domain_map = {
    "Generalized logical qubits" : [1E-07, 1e6],
    "Average two-qubit gate error rate" : [1e-6, 1],
    "Physical qubits" : [1,1e8],
}
###########################################################
# PREPARE THE DATA WE NEED
predictions_df_hat = predictions_df[predictions_df['variable'] == target]
data_labels_df_hat = data_labels_df[data_labels_df['Metric'] == target]
df_data = pd.melt(data_labels_df_hat,
    id_vars=['Year', 'Model', 'Source', 'Technology', 'Metric'],
    value_vars=[target])
##########################################################################
# DATA VISUALIZATION
alt.data_transformers.disable_max_rows()
# plot trajectories
predictions_alt = alt.Chart(predictions_df_hat).transform_filter(
    alt.FieldGTPredicate(field='value', gt=0.)
).mark_line().encode(
    x="Year",
    y = alt.Y('value',
        scale=alt.Scale(type='log'),
        title=target),
    color=alt.Color("Model", legend=None),
)
# Create a selection that chooses the nearest point & selects based on x-value
nearest = alt.selection(type='single', nearest=True, on='mouseover',
    fields=['Year'], empty='none')
# Transparent selectors across the chart. This is what tells us
# the x-value of the cursor
selectors = alt.Chart(predictions_df_hat).mark_point().encode(
    x='Year:Q',
    opacity=alt.value(0),
).add_selection(
    nearest
)
# Draw points on the line, and highlight based on selection
points = predictions_alt.mark_point().encode(
    opacity=alt.condition(nearest, alt.value(1), alt.value(0))
)
# Draw text labels near the points, and highlight based on selection
text = predictions_alt.mark_text(align='left', dx=5, dy=-5).encode(
    text=alt.condition(nearest, 'value:Q', alt.value(' '))
)
# Draw a rule at the location of the selection
rules = alt.Chart(predictions_df_hat).mark_rule(color='gray').encode(
    x='Year:Q',
).transform_filter(
    nearest
)
# Plot data horizon (vertical dotted rule at the end of the fitting window)
data_horizon_df = pd.DataFrame([{"Year": th.year + th.month/12} for th in data_horizon[1:]])
data_horizon_alt = alt.Chart(data_horizon_df).mark_rule(color='black', strokeDash=[1,1]).encode(
    x='Year:Q'
)
# Plot thresholds (only meaningful for the GLQ target)
if target == "Generalized logical qubits":
    threshold_df = pd.DataFrame([{"threshold": th, "variable": "Generalized logical qubits"} for th in glq_thresholds])
else: threshold_df = pd.DataFrame()
threshold_alt = alt.Chart(threshold_df).mark_rule(color='black', strokeDash=[2,2]).encode(
    y='threshold:Q'
)
# Plot dataset (dropdown highlights one model's data points in purple)
input_dropdown = alt.binding_select(options=list(df_data['Model'].unique()))
selection2 = alt.selection_single(fields=['Model'],
    bind=input_dropdown,
    name='Dataset ')
color_cond = alt.condition(selection2,
    alt.value('purple'),
    alt.value('lightgray'))
raw_data_alt = alt.Chart(df).transform_filter(
    alt.FieldGTPredicate(field=target, gt=0.)
).mark_point().encode(
    x="Year",
    y = alt.Y(target, scale=alt.Scale(type='log')),#, domain=domain_map[target])),
    tooltip = 'Source',
    shape = alt.Shape('Technology', legend=None),
    color = color_cond
)
model_data_alt = alt.Chart(df_data).transform_filter(
    alt.FieldGTPredicate(field='value', gt=0.)
).mark_point().encode(
    x="Year",
    y = alt.Y('value', scale=alt.Scale(type='log')),#, domain=domain_map[target])),
    tooltip = 'Source',
    shape = alt.Shape('Technology', legend=None),
).add_selection(selection2)
# Put the five layers into a chart and bind the data
chart = alt.layer(
    predictions_alt, data_horizon_alt, threshold_alt,
    model_data_alt, raw_data_alt,
    #selectors, points, rules, text, # comment this line to disable the vertical tooltip
).properties(
    width=800, height=500
).interactive(
).configure_axis(
    grid=True,
    labelFontSize=20,
    titleFontSize=20
).configure_legend(
    titleFontSize=20,
    labelFontSize = 18,
    gradientLength=400,
    gradientThickness=30
).configure_mark(
    opacity=0.4,
    color='red'
)
chart
# + id="ST_fWgsV3XYJ" cellView="form" outputId="c45d31a9-63d1-4283-f4e8-675e52a3ce0c" colab={"base_uri": "https://localhost:8080/", "height": 386}
#@markdown Visualize residuals
# Plot each fitted model's log-residuals against the max-aggregated data for
# the chosen metric, selectable through a dropdown.
# NOTE(review): `df_data` was reassigned to a melted frame by the previous
# cell; this cell appears to expect the pre-melt regression dataset — verify
# the intended cell execution order.
metric = "Average two-qubit gate error rate" #@param ["Physical qubits", "Average two-qubit gate error rate", "Generalized logical qubits"]
# Compute aggregated datapoints
df_hat = df_data.dropna(subset=["Year", metric])
df_hat = df_hat[df_hat[metric] > 0.]
df_max = max_aggregation(df_hat, metric)
x = df_max["Year"].values
y = df_max[metric].values
# Compute residuals (one long-format record per model and data point)
residuals_data = []
for label, model in models.items():
    residuals = model.log_residuals(x,y,metric)
    model_residuals = [{
        "Model" : label,
        "Year" : t,
        "Residuals" : r
    } for t,r in zip(x, residuals)]
    residuals_data += model_residuals
residuals_df = pd.DataFrame(residuals_data)
residuals_df = pd.melt(residuals_df, id_vars=['Year', 'Model'], value_vars=["Residuals"])
residuals_df = residuals_df.rename(columns={"value":f"Log {metric.lower()} residuals"})
# Plot residuals
input_dropdown = alt.binding_select(options=residuals_df["Model"].unique())
selection = alt.selection_single(fields=['Model'], bind=input_dropdown, name='Model ')
alt.Chart(residuals_df).mark_point().encode(
    x=alt.X("Year", scale=alt.Scale(domain=[2000, 2020])),
    y=f"Log {metric.lower()} residuals"
).add_selection(selection).transform_filter(
    selection
)
# + [markdown] id="jV29mJwOiz9W"
# # Multivariate linear regression
# $$Y=XB+\Xi$$
#
# $$\Xi\sim \mathcal{N}(\mu, \Sigma)$$
#
# $$\hat B = (X^T X)^{-1}X^T Y$$
#
# $$\hat \Sigma = \frac{1}{n-q-1}(Y-X\hat B)^T(Y-X\hat B)$$
#
# [Reference](https://brilliant.org/wiki/multivariate-regression/)
# + id="iGezPwRii207" outputId="c09215b6-ff1e-49c5-c8b7-ec101494a56c" colab={"base_uri": "https://localhost:8080/", "height": 922}
#@title Multivariate linear regression {display-mode: "form"}
# Jointly regress log(physical qubits) and log(error rate) on time via OLS,
# report drift/doubling-time estimates, and draw confidence-region contours
# of the joint predictive distribution for a chosen year.
# Parameters
technology_filter = "Superconductor" #@param ["Superconductor", "all"]
threshold_year = 2000 #@param {type: "slider", min: 2000, max: 2020}
threshold_year = pd.Timestamp(year=threshold_year, month=1, day=1)
# Mask invalid values
mask = df["Date"] >= threshold_year
mask &= ~np.isnan(df["Date"])
mask &= ~np.isnan(df["Physical qubits"])
mask &= ~np.isnan(df["Average two-qubit gate error rate"])
if technology_filter != "all":
    mask &= df["Technology"] == technology_filter
df_temp = df[mask]
# Constructor predictor and response variables
x = df_temp["Date"].apply(lambda x: x.year + x.month/12).values.reshape((-1,1))
X = np.c_[np.ones((x.shape[0], 1)), x]   # design matrix with intercept column
y1 = np.log(df_temp["Physical qubits"].values)
y2 = np.log(df_temp["Average two-qubit gate error rate"].values)
Y = np.c_[y1,y2]
print(f"The number of data points we are using is {X.shape[0]}")
print(f"")
# Compute estimators: B_hat = (X^T X)^{-1} X^T Y, Sigma_hat = residual covariance
B_hat = np.linalg.solve(np.matmul(X.T, X),np.matmul(X.T, Y))
print(f"B_hat.shape = {B_hat.shape}")
error = Y-np.matmul(X, B_hat)
Sigma_hat = np.matmul(error.T, error) / (X.shape[0] - X.shape[1])
print(f"Sigma_hat.shape = {Sigma_hat.shape}")
print("")
print("B_hat")
print(B_hat)
print("")
print("Sigma_hat")
print(Sigma_hat)
print("")
# Display some intuitive characteristics
sample_year = 2020
print(f"The estimated physical qubit drift is {B_hat[1,0]}")
print(f"The estimated doubling time is {np.log(2)/B_hat[1,0]} years")
print(f"The median value by {sample_year} is {np.exp(B_hat[0,0] + sample_year*B_hat[1,0])} physical qubits")
print("")
print(f"The estimated error rate drift is {B_hat[1,1]}")
print(f"The estimated halving time is {-np.log(2)/B_hat[1,1]} years")
print(f"The median value by {sample_year} is {np.exp(B_hat[0,1] + sample_year*B_hat[1,1])} error rate")
print("")
# Build predictor: maps a column of years to predicted (log qubits, log error)
f = lambda t : np.matmul(np.c_[np.ones((t.shape[0],1)),t], B_hat)
# Sample some predictions
x_extrapolate_max = 2050 #previously x.max()
x_hat = np.linspace(x.min(), x_extrapolate_max, 1000)
y_hat = np.exp(f(x_hat))
# plot the results (note: `x` and `y` are rebound here as a log-space grid)
prediction_year = 2023 #@param {type: "slider", min: 2000, max: 2100}
x, y = np.mgrid[0:8:0.1, -8:0:0.1]
pos = np.dstack((x, y))
mean = f(np.array([prediction_year])).flatten()
rv = multivariate_normal(mean, cov= Sigma_hat)
fig2 = plt.figure(figsize=(12,8))
ax2 = fig2.add_subplot(111)
ax2.set_title(f"Predicted progress in year {prediction_year}", fontsize=20)
ax2.set_xlabel("Physical qubits", fontsize=20)
ax2.set_ylabel("Average error rate", fontsize=20)
ax2.set_xscale("log")
ax2.set_yscale("log")
heatmap = rv.pdf(pos)
def quantiles_func(pos):
    # Probability mass inside the density contour passing through `pos`
    # (approximated by summing grid cells with higher density).
    mask = heatmap > rv.pdf(pos)
    integral = np.sum(heatmap[mask]) * (0.1*0.1) # approximate integral
    return integral
quantiles = np.array([[quantiles_func(x_y) for x_y in row] for row in pos])
assert quantiles.shape == heatmap.shape
assert (quantiles <= 1.).all(), f"np.max(quantiles) = {np.max(quantiles)}"
CS2 = ax2.contour(np.exp(x), np.exp(y), quantiles*100, colors='green');
ax2.clabel(CS2, inline=1, fontsize=10, fmt="%1i");
median = np.exp(mean)
print(f"Predicted median of physical qubits = {median[0]}")
print(f"Predicted median of average error rate = {median[1]}")
# + id="cMZSqSFLw9k6" outputId="70e73d38-65e6-4bb2-8d10-a4bc30a7b4ad" colab={"base_uri": "https://localhost:8080/", "height": 84}
#@title Multivariate covariance estimation with bootstrapping {run: "auto", display-mode: "form"}
# Bootstrap the OLS residual covariance between log qubits and log error rate
# (B resamples with replacement) and report the confidence that it is
# positive, together with a 90% confidence interval.
# Parameters
technology_filter = "Superconductor" #@param ["Superconductor", "all"]
threshold_year = 2003 #@param {type: "slider", min: 2000, max: 2020}
B = 1000 #@param
seed = 111 #@param {type:"number"}
np.random.seed(seed)
# Mask invalid values
mask = df["Date"] >= pd.Timestamp(year=threshold_year, month=1, day=1)
mask &= ~np.isnan(df["Date"])
mask &= ~np.isnan(df["Physical qubits"])
mask &= ~np.isnan(df["Average two-qubit gate error rate"])
if technology_filter != "all":
    mask &= df["Technology"] == technology_filter
df_temp = df[mask]
n = len(df_temp)
print(f"The number of data points we are using is {n}")
print(f"")
covariance_samples = np.zeros(B)
for subsample_idx in range(B):
    subsample_df = df_temp.sample(n, replace=True)
    # Constructor predictor and response variables
    x = subsample_df["Date"].apply(lambda x: x.year + x.month/12).values.reshape((-1,1))
    X = np.c_[np.ones((x.shape[0], 1)), x]
    y1 = np.log(subsample_df["Physical qubits"].values)
    y2 = np.log(subsample_df["Average two-qubit gate error rate"].values)
    assert X.shape[0] == n
    Y = np.c_[y1,y2]
    # Compute estimators (same normal-equation OLS as the previous cell)
    B_hat = np.linalg.solve(np.matmul(X.T, X),np.matmul(X.T, Y))
    error = Y-np.matmul(X, B_hat)
    Sigma_hat = np.matmul(error.T, error) / (X.shape[0] - X.shape[1])
    covariance_samples[subsample_idx] = Sigma_hat[0,1]
sorted_covariance_samples = np.sort(covariance_samples)
# Index of the first positive sample = number of non-positive samples.
# NOTE(review): if no sample is positive, argmax returns 0 and the confidence
# is reported as 1 — verify this edge case cannot occur with the data used.
critical_idx = np.argmax(sorted_covariance_samples > 0.)
critical_quantile = critical_idx / len(sorted_covariance_samples)
print(f"The covariance is positive with confidence {1-critical_quantile}")
lower_bound = np.quantile(sorted_covariance_samples, 0.05)
upper_bound = np.quantile(sorted_covariance_samples, 0.95)
print(f"The 90% confidence interval is {lower_bound, upper_bound}")
# + [markdown] id="fYo3YiTIoBS8"
# # Geometric drift model
# + [markdown] id="-LNjWxOfNhGq"
# We fit the data we collected to the geometric drift model from [(Farmer & Lafond, 2016)](https://www.sciencedirect.com/science/article/pii/S0048733315001699)
#
# We model progress as a random walk in log-space with some drift and noise autocorrelation ie $y_t = y_{t-1} + \mu + \nu_t + \theta \nu_{t-1}$, where $y_t$ are the log values of the metric of interest and $\nu_t \sim \mathcal{N}(0, \sigma^2)$.
#
# We can estimate the parameters of this model as
#
# $$\hat{\mu}_{t_0}=\frac1m\sum_{i=t_0-m}^{t_0-1}(y_{i+1}-y_i)=\frac{y_{t_0}-y_{t_0-m}}m,$$
#
# $$\hat{K}_{t_0}^2=\frac1{m-1}\sum_{i=t_0-m}^{t_0-1}{\lbrack(y_{i+1}-y_i)-\hat{\mu}_{t_0}\rbrack}^2,$$
#
# $$A^\ast=-2\theta+\left(1+\frac{2(m-1)\theta}m+\theta^2\right)\left(\tau+\frac{\tau^2}m\right),$$
#
# The forecast for $\tau$ years after our final data point $y_t$ is distributed as
#
# $$y_S(t+\tau)\sim\mathcal N(y_S(t)+{\widetilde\mu}_S\tau,\widetilde K_S^2A^\ast/(1+\theta_m^2)).$$
# + id="7CgKp7kaFtOd" outputId="57885622-108f-425b-e19d-18b5c1d31e9c" colab={"base_uri": "https://localhost:8080/", "height": 946}
#@title Geometric drift model {display-mode: "form", run: "auto"}
# Fit the geometric drift model (random walk with drift and MA(1) noise in log
# space) to the yearly record values of the chosen metric, then print and plot
# quantile forecasts and threshold-crossing years.
print(f"Date of analysis : {datetime.now()}")
# specify target
target = "Generalized logical qubits" #@param ["Physical qubits", "Average two-qubit gate error rate", "Generalized logical qubits"]
time_horizon = (2015, 2021) #@param
#time_horizon = [pd.Timestamp(year=x, month=1, day=1) for x in time_horizon]
prediction_horizon = 2100 #@param {type:"number"}
thresholds = [1,4100] #@param
# Filter data to the fitting window; drop rows unusable on a log scale.
df_hat = df.dropna(subset=["Year", target])
df_hat = df_hat[df_hat[target] > 0.]
df_hat = df_hat[df_hat["Year"] > time_horizon[0]]
df_hat = df_hat[df_hat["Year"] < time_horizon[1]]
print(f"n = {len(df_hat)}")
## aggregate maximum data
# Temporarily flip the fidelity so the code is equal to the other metrics (max instead of min)
if target == "Average two-qubit gate error rate":
    df_hat[target] = -df_hat[target].values
# create new dataframe aggregating max metrics so far.
# BUGFIX: aggregate from the filtered (and possibly flipped) df_hat. The
# previous version read from the unfiltered df, which silently discarded both
# the time-horizon filter and the sign flip applied above.
l = []
for year in range(math.floor(df_hat["Year"].min()), math.ceil(df_hat["Year"].max())):
    row = {"Year": year}
    mask = df_hat["Year"] <= year
    df_temp = df_hat[mask]
    row[target] = df_temp[target].max()
    l.append(row)
df_hat = pd.DataFrame(l)
# Undo flipping BEFORE dropping non-positive values, so that valid error rates
# (positive only after unflipping) are not discarded.
if target == "Average two-qubit gate error rate":
    df_hat[target] = -df_hat[target].values
df_hat = df_hat.dropna(subset=[target])
df_hat = df_hat[df_hat[target] > 0.]
# prepare data: log of the record series and its yearly increments
logseries = np.log(df_hat[target])
logdeltas = logseries.diff().iloc[1:]
# model parameter fitting
m = len(logdeltas) # number of deltas
θ = 0.63 # autocorrelation parameter (taken as given, not estimated here)
μ = logdeltas.mean() # estimated mean
K2 = logdeltas.var() # estimated variance
print(f"μ={μ}")
print(f"K2={K2}")
print(f"θ={θ}")
print(f"m={m}")
print(f"estimated doubling time = {np.log(2)/μ} years")
print(f"")
# Shapiro-Wilk normality test on the log increments
W,p = stats.shapiro(logdeltas)
# Fixed typo: previously printed "SHAPHIRO-WILK TEST".
print(f"SHAPIRO-WILK TEST")
print(f"W={W}")
print(f"p-value={p}")
print("")
# prediction functions: forecast distribution τ=t-t_0 years past the window
y_0 = logseries.iloc[-1]
t_0 = time_horizon[1]
A_star_t = lambda t : -2*θ+(1+(2*(m-1)*θ)/m+θ**2)*((t-t_0)+(t-t_0)**2/m)
μ_t = lambda t : y_0 + μ*(t-t_0)
σ2_t = lambda t : K2*A_star_t(t)/(1+θ**2)
# Gaussian quantile of the log forecast, mapped back through exp.
quantile = lambda F : np.vectorize(lambda t : np.exp(μ_t(t)+np.sqrt(2*σ2_t(t))*special.erfinv(2*F-1)))
# predictions
time_range = np.linspace(t_0+1, prediction_horizon, 1000)
percentile_15_pred = quantile(0.15)(time_range)
median_pred = quantile(0.5)(time_range)
percentile_85_pred = quantile(0.85)(time_range)
# Find threshold crossings: if the q-th quantile trajectory crosses a
# threshold at year t, the threshold is exceeded by t with probability 1-q.
for threshold in thresholds:
    if np.any(percentile_85_pred > threshold):
        percentile_15_threshold = time_range[np.min(np.argwhere(percentile_85_pred > threshold))]
        print(f"The quantity of GLQs will exceed {threshold} by the year {percentile_15_threshold} with probability 15%")
    if np.any(median_pred > threshold):
        median_threshold = time_range[np.min(np.argwhere(median_pred > threshold))]
        print(f"The quantity of GLQs will exceed {threshold} by the year {median_threshold} with probability 50%")
    if np.any(percentile_15_pred > threshold):
        percentile_85_threshold = time_range[np.min(np.argwhere(percentile_15_pred > threshold))]
        print(f"The quantity of GLQs will exceed {threshold} by the year {percentile_85_threshold} with probability 85%")
    print("")
# prepare plot
fig = plt.figure(figsize=(15, 10));
ax = plt.axes();
ax.set_xlabel("Year");
ax.set_ylabel(f"Predicted {target.lower()}");
ax.set_yscale("log");
# plot predictions
line, = ax.plot(time_range,percentile_15_pred);
line.set_label("15% percentile");
line, = ax.plot(time_range, median_pred);
line.set_label("median");
line, = ax.plot(time_range,percentile_85_pred);
line.set_label("85% percentile");
# plot thresholds of interest
for threshold in thresholds:
    line = ax.hlines(threshold, t_0, prediction_horizon, linestyles='dashed')
    line.set_label(f"{threshold} {target.lower()}")
ax.legend();
# + id="7mwkPYzmyjID" outputId="ee1bf520-61ac-4933-abeb-99b5e868b24c" colab={"base_uri": "https://localhost:8080/", "height": 235}
# Display the aggregated record series used by the fit.
df_hat
# + id="v_WPu5tTu8Ip"
| QuantumComputingProgress.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# name: python3
# ---
# + [markdown] id="R2hDRoLniXjz"
# <img src="https://github.com/lunduniversity/schoolprog-satellite/blob/master/exercises/kolcykeln/images/carboncycle_nb_header_swe2.png?raw=1" width="890" align="center"/>
#
# <a id='intro'></a>
# <br>
# <br>
#
# # Kolcykeln - Övning med koldioxid-data
#
# I denna interaktiva övning undersöker vi problem relaterade till *kolcykeln*, dvs kretsloppet för hur kol tas upp och släpps ut inom ett ekosystem. Vi fokuserar på hur koncentrationen av koldioxid i luften förändras över dygnet och över året.
#
#
# <br>
# <br>
# Övningen består av följande delar:
#
# 1. Vad är koldioxid
#
# 2. Kolcykeln
#
# 3. Ett år i Koldioxidens liv på Jorden (NASA video)
#
# 4. Quiz - Hur bra koll har du på kol
#
# 5. Övningar med koldioxid-data från mätstationen Hyltemossa
#
# <br>
# <br>
#
#
# ## Gör så här
#
# Detta är en interaktiv övning som använder sig av programkod. För att köra övningen, börja med att:
# - klicka på **Runtime** (alt. Körning) i menyn och sedan **Restart and run all...** (alt. Starta om och kör alla).
#
# Då kommer all bakgrundskod du behöver för övningen att köras.
#
# Observera:
# - Om meny-alternativet är grått kan det betyda att du inte är "Connect"-ad (uppkopplad). I så fall finns en knapp `Connect` uppe till höger i fönstret som du kan trycka på. När du är uppkopplad skall du i stället för knappen se hur mycket "RAM" och "Disk" du använder.
# - Det kan komma upp en varning som du måste godkänna för att komma vidare.
#
# <br>
#
# **Att köra kod**
#
# För att köra kod som finns i en enstaka kodcell, klickar du först på cellen. Då blir den "aktiv" och du ser en liten pil i vänstra övre hörnet. Tryck på pilen så körs koden.
# <br>
# <img src="https://github.com/lunduniversity/schoolprog-satellite/blob/master/exercises/kolcykeln/images/run.PNG?raw=1" width="30" align="center">
#
# När du klickar utanför kodcellen visas ett tal inom hakparenteser i stället för pilen, t.ex. `[9]`. Siffrorna visar i vilken ordning olika kodceller har körts.
#
#
# <br>
# <br>
# + id="WsdJoTc_gphU" cellView="form"
#@title Bakgrundskod för att importera filer från GitHub:
# Import modules:
import os
#### Download the data files to the Colab server ####
# Check whether the measurement-data file has already been downloaded:
if(os.path.isfile('htm_150m_L2_co2')==False):
  # Download the measurement-data file from GitHub:
  # !wget https://github.com/lunduniversity/schoolprog-satellite-data/raw/master/carboncycle/co2_concentration/htm_150m_L2_co2 --quiet
# Check whether the file with Python helper functions has already been downloaded:
if(os.path.isfile('quiz_funcs.py')==False):
  # Download the file with Python helper functions:
  # !wget https://raw.githubusercontent.com/lunduniversity/schoolprog-satellite/master/exercises/kolcykeln_enkel/quiz_funcs.py --quiet
# + [markdown] id="oAA6gKOKix7h"
# <a id='co2_definition'></a>
# <br>
# <br>
#
# ## 1. Vad är Koldioxid (CO$_2$)
#
# Koldioxid är en gas som är lukt- och färglös vid normala temperaturer. Den bildas i förbränningsprocesser när kolföreningar reagerar med syre.
#
# Vid förbrännning av biomassa så förbränns kolföreningar som nyligen har bildats av växterna och koldioxid släpps ut. Så länge biomassan tillåts växa upp igen och absorbera samma mängd koldioxid, så ökar inte halten av koldioxid i atmosfären. När man däremot förbränner fossila bränslen (kol, olja, naturgas), så släpps koldioxid ut till atmosfären som varit utanför kretsloppet väldigt länge. Detta leder till att koldioxidhalten i atmosfären ökar.
#
# <br>
#
# ### Växthuseffekten
# Växthusgaser som t.ex. vattenånga, koldioxid, metan, lustgas eller ozon förekommer naturligt i atmosfären. De släpper igenom solens strålar men minskar värmeutsläppet från jorden. På det viset fångas värme in i atmosfären. Utan växthusgaser skulle temperaturen på jorden vara för låg för att levande varelser skulle kunna existera. Utan växthuseffekten hade vi alltså inte haft liv på jorden.
#
# <br>
#
# ### Global uppvärmning
# Problem uppstår när totala mängden växthusgaser i atmosfären börjar öka. Det leder till att mer värme fångas in i atmosfären, som i sin tur leder till högre temperaturer på jorden. Konsekvenserna blir smältande glaciärer, stigande havsnivåer, oftare förekomst av extrem nederbörd eller torka och förändrade förutsättningar för jordbruket. Ökade halter av koldioxid i luften är även länkade till havsförsurning. Havsförsurning är en process där havets pH-värde långsamt sjunker på grund av att allt större mängder koldioxid i atmosfären tas upp av haven.
#
#
# + [markdown] id="NiE7HWLWkHvD"
# <br>
# <br>
# <a id='carbon_cycle_definition'></a>
#
# ## 2. Kolcykeln
# Koldioxid (CO$_2$), kolmonoxid (CO) och metan (CH$_4$) ingår i kolets livscykel. Bilden nedan visar hur kol transporteras från olika källor till sänkor. De blå pilarna representerar upptaget av kol medan de rosa pilarna representerar utsläppet av kol.
#
#
# Kol i form av koldioxid släpps ut i atmosfären från människors, djurs och växters **respiration** (utandning). Förbränning av fossila bränslen bidrar till en stor del av kolutsläppet i atmosfären. Kol i form av koldioxid och kolmonoxid släpps ut vid skogsbränder. Havets djur och växter släpper också ut koldioxid med sin respiration. Betesdjur och framför allt kor rapar och släpper ut metan. Metan släpps även ut från bakterier som finns i människors och djurs avföring (bajs). Koldioxid och metan släpps ut i luften när nedbrytare (t.ex. svampar och daggmaskar) bryter ner organiskt material (döda djur eller växter) till jord. Alla processer som leder till att kol släpps ut i atmosfären kallas för **källor**.
#
# Kol tas upp från atmosfären när växter **fotosyntetiserar**. Fotosyntes är en process under vilken växter tar in koldioxid, vatten och solenergi, och omvandlar dem till syre och socker. Denna process utförs både av växter på land och i haven. Processen utförs endast när det finns tillräckligt med solljus. Under natten sker ingen fotosyntes, utan växterna andas in syre och andas ut koldioxid. Socker är en kolförening och kan användas av växterna för att växa i storlek eller skapa nya grenar, löv och rötter. På så sätt binds kol i växterna.
#
# Dött material från växter eller djur kallas *organiskt material*, och består av olika kolföreningar. Detta kol kommer ner i jorden på olika sätt. När växter tappar löv eller grenar på marken, så tappar de organiskt material. Urin och avföring från människor och djur är också organiskt material. I marina ekosystem omvandlas organiskt material till sediment. Kol som lagrats i jorden kan omvandlas till naturgas och olja. Detta är dock en mycket långsam process som kan ta 50-500 miljoner år. Alla processer som tar upp kol kallas för **sänkor**.
#
# <br>
# <br>
# <img src="https://github.com/lunduniversity/schoolprog-satellite/blob/master/exercises/kolcykeln/images/carbon_cycle_swe5.png?raw=1" width="900" align="center">
# <br>
# <br>
# <br>
#
# + [markdown] id="0T_cSIxmb9HX"
# ## 3. Ett år i Koldioxidens liv på Jorden (NASA)
# Klicka på videon nedan för att spela upp den.
# + id="PO8EqWpbGl6Z" cellView="form" colab={"base_uri": "https://localhost:8080/", "height": 597} outputId="76867dd1-ab8c-413b-be44-86aa89a39a9c"
#@title Backgrundskod för youtube
# Import the module used to embed a YouTube video in the notebook:
from IPython.display import YouTubeVideo
# Show the NASA video - "A Year in the Life of Earth's CO2":
display(YouTubeVideo('x1SgmFa0r04', width=970, height=576))
# + [markdown] id="43gtcEF3Bbin"
# <br>
# <br>
#
# ## 4. Quiz - Hur bra koll har du på kol
#
# Klicka på länken nedan för att gå vidare till quizet: <br>
# [Quiz: Hur bra koll har du på kol?](https://lunduniversity.github.io/schoolprog-satellite/exercises/quiz/co2_quiz/kollpakol.html)
# + [markdown] id="lZBKjx0cmfVp"
# <br>
# <br>
#
# ## 5. Övningar med CO$_2$-data från mätstationen Hyltemossa
# Här kommer du att få lära dig mer om koldioxidets kretslopp genom att använda ett interaktivt diagram som visar dagliga samt årliga förändringar i koldioxidkoncentrationen i luften. Innan du fortsätter med övningarna är det viktigt att du får lite mer information om mätstationer.
#
#
#
# Mycket forskning görs idag för att förstå hur kolcykeln ändras med tiden och hur t.ex. förändringar i vegetationen och oceanerna kan påverka koncentrationen av CO$_2$ och andra växthusgaser i atmosfären. [ICOS](https://www.icos-cp.eu/) (Integrated Carbon Observation System), är ett europeiskt samarbete som bedriver mätningar över lång tid för att kartlägga Europas kolbalans och de växthusgaser som finns i luften. Just nu har ICOS ca 150 mätstationer i 13 länder inom Europa. Mätstationer finns både på land och ombord på båtar. [ICOS Sweden](https://www.icos-sweden.se/) är nanmnet på ICOS forskningsinfrastruktur i Sverige och bidrar med 10 stationer på 7 olika platser.
#
# <br>
# <img src="https://github.com/lunduniversity/schoolprog-satellite/blob/master/exercises/kolcykeln_enkel/images/icos_map.png?raw=1" width="900" align="center">
# <br>
# <br>
#
# [Hyltemossa](https://www.icos-sweden.se/station_hyltemossa.html) är en mätstation som drivs av ICOS Sweden (se bilder nedan). Mätstationen ligger söder om Perstorp i nordvästra Skåne och är belägen vid en 30-år gammal granskog.
#
# <br>
# <img src="https://github.com/lunduniversity/schoolprog-satellite/blob/master/exercises/kolcykeln_enkel/images/htm_mashup.png?raw=1" width="1000" align="center">
# <br>
#
# Koldioxid-mätningar påverkas av vad som finns i mätstationens omgivning. Mätaren är placerad högt uppe på en mast för att minska denna påverkan. Vindens riktning kan också spela stor roll. T.ex. om det blåser i riktning från en trafikerad motorväg, kan CO$_2$-koncentrationen vara högre än annars.
#
# **Uppdrag:** Zooma in på kartan nedan för att se vad som finns omkring Hyltemossa forskningsstation. Kan du hitta Nedre Store sjö, Perstorps industrimark eller Ljungbyheds flygplats på kartan?
# + id="tzs8gcBmiiXs" cellView="form" colab={"base_uri": "https://localhost:8080/", "height": 676} outputId="da825ab8-848f-42e5-c8a3-057d56a50b56"
#@title Bakgrundskod för karta
# Import modules:
import folium
# Create a map object centered on the Hyltemossa station coordinates:
m = folium.Map(location=[56.097991, 13.420181], zoom_start=7)
# Add a marker for the station:
folium.Marker(location=[56.097991, 13.420181],
              popup='Hyltemossa station (ICOS Sweden)',
              icon=folium.Icon(color='darkred', icon='cloud')).add_to(m)
# Show the map (last expression in the cell is displayed)
m
# + id="L_ekh9WOiiiP" colab={"base_uri": "https://localhost:8080/", "height": 517} cellView="form" outputId="fe8cb946-8604-4d48-df30-8d0dd146e222"
#@title Bakgrundskod för plott
# Import modules:
import pandas as pd
# Read the CSV file with CO2 data into a table ('DateTime' parsed as timestamps):
co2_tabell = pd.read_csv('htm_150m_L2_co2',
                         header=0,
                         sep=';',
                         parse_dates=['DateTime'])
# Function that creates an interactive plot from a pandas dataframe with CO2 data in a given color:
def plott(df_L2, color):
    """Show an interactive bokeh time-series plot of CO2 concentration.

    Parameters
    ----------
    df_L2 : pandas.DataFrame
        Table with (at least) the columns 'DateTime', 'co2' and
        'SamplingHeight'.
    color : str
        Color used for both the circle and line glyphs.
    """
    # Import modules:
    from datetime import datetime
    from bokeh.plotting import figure
    from bokeh.models import ColumnDataSource, HoverTool, Label
    from bokeh.io import show, output_notebook
    # Translation tables mapping digits to their subscript/superscript variants:
    SUB = str.maketrans("0123456789", "₀₁₂₃₄₅₆₇₈₉")
    SUP = str.maketrans("0123456789", "⁰¹²³⁴⁵⁶⁷⁸⁹")
    source = ColumnDataSource(data=df_L2)
    # Create a figure object:
    p = figure(plot_width=900,
               plot_height=500,
               x_axis_label='Tid (UTC)',
               y_axis_label='CO2'.translate(SUB)+' (' +'\u03BC'+ 'mol.mol-1'.translate(SUP) + ')',
               x_axis_type='datetime',
               title = 'Koldioxidkoncentration (Hyltemossa, Sverige, '+str(df_L2.SamplingHeight.iloc[0])+'m)' ,
               tools='pan,box_zoom,wheel_zoom,reset')
    # Create a circle glyph:
    r0 = p.circle(x='DateTime', y='co2', source=source, radius=.12, color=color)
    # Create a line glyph:
    r1 = p.line(x='DateTime', y='co2', source=source,
                line_width=1, color=color)
    # Add a tooltip:
    p.add_tools(HoverTool(tooltips=[
        ('Tid (UTC)','@DateTime{%Y-%m-%d %H:%M:%S}'),
        ('CO2'.translate(SUB),'@co2{0.f}'),
    ],
    formatters={
        '@DateTime' : 'datetime',
    },
    # show a tooltip when the mouse is vertically in line with the corresponding glyph
    mode='vline'
    ))
    # Formatting attributes for the plot title:
    p.title.align = 'center'
    p.title.text_font_size = '13pt'
    p.title.offset = 15
    # Font style for the x-axis and y-axis titles:
    p.xaxis.axis_label_text_font_style = 'normal'
    p.yaxis.axis_label_text_font_style = 'normal'
    p.xaxis.axis_label_standoff = 15 # Sets the distance of the label from the x-axis in screen units
    p.yaxis.axis_label_standoff = 15 # Sets the distance of the label from the y-axis in screen units
    # Define where the copyright text should be placed (screen coordinates):
    label_opts = dict(x=0, y=10,
                      x_units='screen', y_units='screen')
    # Create the copyright text:
    caption1 = Label(text="© ICOS ERIC", **label_opts)
    caption1.text_font_size = '8pt'
    # Deactivate the hover tool, which is otherwise always active:
    p.toolbar.active_inspect = None
    # Add the copyright text to the plot:
    p.add_layout(caption1, 'below')
    # Define where the result should be shown:
    output_notebook()
    # Show the plot:
    show(p)
# Create the plot:
plott(co2_tabell, 'green')
# + [markdown] id="DsNlckN3SlHv"
# __Notering:__ <br>
# I plotten ovan anges koldioxidkoncentrationen i luften som __$\mu$ mol / mol__ som, i sin tur, är lika med __ppm__ (parts per million).
#
# Plotten är interaktiv. Till höger om plotten ser du olika verktyg du kan använda för att undersöka plotten, som *Pan*, *Box Zoom*, *Wheel Zoom*, *Reset* och *Hover*. (Håll musen över så ser du vilken som är vilken.)
#
# Prova att använda *Box Zoom* och *Hover* för att se hur de fungerar. Om du vill komma tillbaka till ursprungsläget klickar du på *Reset*.
#
# __Övning:__
#
# 1. Använd Box-Zoom och Hover för att zooma in och se vilket CO2-värdet är den 6 Mars 2018, kl. 06:00.
# 2. Använd Box-Zoom och Hover för att zooma in och se under vilka månader de högsta värdena observerats. Vilken årstid motsvarar det? Varför är värdena höga under den tiden?
# 3. Använd Box-Zoom och Hover för att se under vilka månader de lägsta värdena observeras? Vilken årstid motsvarar det? Vad tror du att det beror på?
# 4. Använd Box-Zoom och Hover för att zooma in på dagen den 18 Juli 2017. Observera att värdena är höga så länge det är mörkt och lägre så länge det är ljust. Vilken process tror du är ansvarig för detta, med tanke på att det var en vindstilla dag och att Hyltemossa mätstation är omgiven av granskog?
# <br>
# <br>
#
#
# + id="5lF-_JWdkbpX" cellView="form" colab={"base_uri": "https://localhost:8080/", "height": 145, "referenced_widgets": ["772e6aee029543e98033e121e2b04ed2", "a54ad865917c4285ab990e2ffcb53bd7", "c316ff51edc249a2b4bec6b1c3e4da08", "d69301f38ea94ef29c4cc2af4c24e360", "e5e6a673289f422f9519f741ee5025c5", "14c8c015acea455b8f827f3e2f4c8ecd", "3689ebce485844b99b0e7e1a099ab7e4", "2a797f11abf447f09628d97390ffa02a", "3fdbc9a10732410ca3e4f7c21be0edda", "2c2a2532480b476699be5e142461a578", "<KEY>", "<KEY>", "<KEY>", "abada682b97342d4b2d391108faa8736", "f3cdc3a3706642f599248f88f1ccef06", "<KEY>", "<KEY>", "a36fbbfe5a1c4c9da5c7a8a613d3e92e", "<KEY>", "<KEY>", "7c825ead9ae04d91b8aadb161759fb23", "<KEY>", "<KEY>", "57c6c556be9640d396f88e6822236be3", "<KEY>", "066829ef8d644e6ca7ced027c8141d62", "<KEY>", "f2945a4c010a44219f3925df7feb116a", "46d8d549ea31496a8c3c09ea6747f0da", "95919301a1f2435295d10eddeea8b8d8", "25791571d05f41ceb5769f3e17a50946", "<KEY>", "<KEY>", "3d6cd8be8620452e912ac5a1c0469c5b", "<KEY>", "<KEY>", "<KEY>", "6f7965b4daa6449f88f377e59a24134b", "ff6a53e854434effadf5d1b735af7dca", "93fa7d3232d24bbc855a5d22efd99256", "21a7e1ee471b4583ab8e64e1f2245829", "1dd2185f89e7450c94c056e936b45218", "<KEY>", "<KEY>", "2e5fe52186284be9862b5375ecf8b19b", "<KEY>", "<KEY>", "1b2f6b7558ef45369370c8a8d42e8763", "5eae95437da846da8e284e6fac603bc3", "09da0cce20394fe18cba60f3b3b48b03", "<KEY>", "08faae11e1664d7a8af7ac794a199907", "<KEY>", "966605d114ce4a0293afd5a57a0393ed"]} outputId="1c81b99b-1abc-489e-8a37-aecc356766ac"
#@title Kontrollera dina svar:
from ipywidgets import VBox
from quiz_funcs import create_coding_quiz_question, create_coding_quiz_question_dropdown
# Display all four answer-control boxes in the same column.
# Bug fix: the fourth question was mislabeled 'Övning 3'; the markdown cell
# above lists four exercises, so the last box is 'Övning 4'.
display(VBox([create_coding_quiz_question('Övning 1', 442.9),
              create_coding_quiz_question_dropdown('Övning 2', ['höst-vinter', 'vinter-vår', 'vår-sommar', 'sommar-höst'], 'vinter-vår'),
              create_coding_quiz_question_dropdown('Övning 3', ['höst-vinter', 'vinter-vår', 'vår-sommar', 'sommar-höst'], 'sommar-höst'),
              create_coding_quiz_question_dropdown('Övning 4', ['fotosyntes', 'respiration', 'biltrafik', 'skogsbrand'], 'fotosyntes')]))
# + [markdown] id="oKZCiFoEkO_-"
# #Avslutning
# Vi hoppas du har fått lite inblick i hur kolcykeln fungerar och vad som kan påverka värdena i mätningarna.
#
# För forskare är programmering ett centralt verktyg för att kunna analysera data och förstå vad som händer i vår miljö.
#
# Om du vill lära dig mer om programmering och hur man kan använda den för att analysera miljödata, så kolla gärna upp alla våra uppgifter på https://lunduniversity.github.io/schoolprog-satellite/
# <br>
# <br>
# <br>
# <br>
#
# <img src="https://github.com/lunduniversity/schoolprog-satellite/blob/master/exercises/kolcykeln/images/sciencecenters_logo.png?raw=1" width="800"/>
# <br>
# <br>
# <br>
#
# ###### Figure credits
# <font size="0.7">CO$_2$-icon made by Freepik from www.flaticon.com<br>Carbon Cycle figure created by <NAME> (ICOS Carbon Portal), inspired by figure created by <NAME> (ScottishCentre for CarbonStorage) https://www.icos-cp.eu/ <br>Map of ICOS station nertwork 2020 created by <NAME> (ICOS Carbon Portal) https://www.icos-cp.eu/<br>Photos of ICOS Hyltemossa station, courtesy of <NAME> <EMAIL></font>
#
| exercises/kolcykeln_enkel/kolcykeln_htm.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# language: python
# name: python3
# ---
import numpy as np
from sklearn import datasets
# Load the iris dataset (150 samples, 4 features, 3 classes):
iris = datasets.load_iris()
X = iris.data
y = iris.target
# Inspect the shapes of the feature matrix and label vector (notebook display):
X.shape
y.shape
from sklearn.model_selection import train_test_split
# Hold out 20% of the samples as a test set (fixed seed for reproducibility):
X_train, X_test, y_train, y_test = train_test_split(X, y, test_size=0.2, random_state=666)
# ### StandardScaler in scikit-learn
from sklearn.preprocessing import StandardScaler
standardScaler = StandardScaler()
# Fit the scaler on the training split only, so no statistics leak from the test set:
standardScaler.fit(X_train)
# Per-feature mean and standard deviation learned from the training data:
standardScaler.mean_
standardScaler.scale_
# Apply the same (train-derived) transformation to both splits:
X_train = standardScaler.transform(X_train)
X_test_standard = standardScaler.transform(X_test)
from sklearn.neighbors import KNeighborsClassifier
knn_clf = KNeighborsClassifier(n_neighbors=3)
# Train on the standardized training data and score on the standardized test data:
knn_clf.fit(X_train, y_train)
knn_clf.score(X_test_standard, y_test)
| ml/knn/Feature-Scaling-Sklearn.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# language: python
# name: python3
# ---
# + [markdown] slideshow={"slide_type": "-"}
# # Illustration of Dynamic Co-Tiling (DCoT)
#
# ## Introduction
#
# This notebook contains an example of using **dynamic co-tiling** or **DCoT** to perform balanced parallel element-wise multiplication. This is distinguished from ordinary **dynamic tiling**, which does a data-dependent, runtime splitting of a _single_ tensor into non-uniform, coordinate-space tiles. In contrast, DCoT simultaneously does data-dependent, runtime splitting of _two_ tensors into non-uniform, coordinate-space tiles. In addition, DCoT tiling preserves the invariant that corresponding tiles of the two tensors occupy the same coordinate ranges. This allows straightforward co-traversal, e.g., for intersections, of the two tensors. In the examples below, the DCoT splitting strives to keep the sum of the occupancies of each corresponding pair of tiles constant, with the objective of achieving similar execution times in parallel execution units working on different pairs of tiles.
#
# To illustract DCoT tiling, we first include some libraries and provide some dropdown lists to select the display style and type of animation.
# +
# Run boilerplate code to set up environment
# %run ../prelude.py --style=tree --animation=movie
# -
# ## Creating two rank-1 tensors
#
# To start we will create two sparse input tensors ```a``` and ```b```, which we will use in the examples below. The following cells provide the parameterization, creation and display of these tensors as well as their intersection.
#
# Note how the intersection fiber has *coordinates* only for those *coordinates* that exist in both the ```a``` and ```b``` input tensors. Also the *payloads* of the intersection fiber are tuples whose elements are the *payloads* from the matching *coordinates* in both the ```a``` and ```b``` tensors. These tuple *payloads* are displayed as a vertical red rectangle with two numbers in them.
#
# +
#
# Set default shape, density and seed for the input tensors
#
shape0 = 50     # number of coordinates in each rank-1 input tensor
density0 = 0.4  # fraction of non-empty elements (0.0 - 1.0)
seed=10         # random seed (tensor "b" below uses seed+1)
def set_params(input_shape, input_density, input_seed):
    """Store the slider values in the module-level tensor parameters.

    The density slider is given in percent and is stored as a fraction.
    """
    global shape0, density0, seed
    shape0, density0, seed = input_shape, input_density / 100, input_seed
    print("Run the next cell to create the input tensors after changing these sliders")
# Hook the sliders up to set_params; moving a slider updates the globals above.
interactive(set_params,
            input_shape=widgets.IntSlider(min=10, max=100, step=1, value=shape0),
            input_density=widgets.IntSlider(min=1, max=100, step=1, value=(100*density0)),
            input_seed=widgets.IntSlider(min=0, max=100, step=1, value=seed))
# +
#
# Show parameters
#
print(f"Shape: {shape0}")
print(f"Density: {density0}")
print(f"Seed: {seed}")
print("")
#
# Create random rank-1 tensor a (payload values in [0, 9])
#
a = Tensor.fromRandom(rank_ids=['I'],
                      shape=[shape0],
                      density=[density0],
                      interval=9,
                      seed=seed)
a.setColor("blue")
a_i = a.getRoot()
print("Tensor a")
displayTensor(a)
#
# Create random rank-1 tensor b (seed+1 so it differs from a)
#
b = Tensor.fromRandom(rank_ids=['I'],
                      shape=[shape0],
                      density=[density0],
                      interval=9,
                      seed=seed+1)
b.setColor("green")
b_i = b.getRoot()
print("Tensor b")
displayTensor(b)
#
# Create intersection of tensors a and b
#
ab = a_i & b_i
print("Intersection of a and b")
displayTensor(ab)
# Bug fix: message previously read "The are ..."
print(f"There are {len(ab)} elements in the intersection of a and b")
# -
# ## Simple Element-wise Multiplication
#
# The following cell illustrates untiled element-wise multiplication of the ```a``` and ```b``` tensors created above. This corresponds to the Einsum expression:
#
# $$
# Z_i = A_i \times B_i
# $$
#
# Note how the computation skips along the two input tensors while sequentially generating the output tensor
#
# +
#
# Create output tensor z and get root
#
z = Tensor(rank_ids=["I"])
z_i = z.getRoot()
canvas = createCanvas(a, b, z)
#
# Traverse intersection of tensors a and b; z_ref is a writable
# reference into the output fiber at coordinate i
#
for i, (z_ref, (a_val, b_val)) in z_i << (a_i & b_i):
    #
    # Compute output value
    #
    z_ref <<= a_val * b_val
    #
    # Animation bookkeeping...
    #
    canvas.addFrame((i,), (i,), (i,))
#
# Print result
#
print("Tensor z - after")
displayTensor(z)
displayCanvas(canvas)
# -
# ## Uniform Coordinate Space Tiling
#
# In the following cells, we illustrate splitting both the input tensors **uniformly** in **coordinate space**.
#
# But first we set the target tile size measured in coordinates, which is used in multiple cells below
# +
#
# Set default parameter for uniform coordinate-space tiling
#
tile_size = 8  # tile width, measured in coordinates
def set_tile_size(tile_size_input):
    """Store the slider value in the module-level `tile_size`."""
    global tile_size
    tile_size = tile_size_input
    print("Run the cell below to create the split tensors after changing these sliders")
# Hook the slider up to set_tile_size:
interactive(set_tile_size,
            tile_size_input=widgets.IntSlider(min=1, max=shape0, step=1, value=tile_size))
# -
# ## Create uniform coordinate-space tiled tensors
#
# The cell below creates the split ```a``` and ```b``` tensors.
#
# Note how the upper rank (I1) coordinates of both the split tensors increment uniformly by the tile size (measured in coordinates) and how the total number of children under each I1 coordinate varies greatly.
# +
#
# Split tensor "a" uniformly in coordinate space and get root
#
a_split = a.splitUniform(tile_size)
a_i1 = a_split.getRoot()
print("Tensor a_split")
displayTensor(a_split)
#
# Split tensor "b" uniformly in coordinate space and get root
#
b_split = b.splitUniform(tile_size)
b_i1 = b_split.getRoot()
print("Tensor b_split")
displayTensor(b_split)
# -
# ## Uniform coordinate-space tiles element-wise multiplication
#
# Below we show element-wise multiplication for each tile.
#
# In the animations, the currently active tiles in the ```a``` and ```b``` tensors are highlighted for a set of cycles, and the scalar values that are currently being read or written are also highlighted.
# +
#
# Create output tensor z and get root
#
z = Tensor(rank_ids=["I1", "I0"])
z_i1 = z.getRoot()
canvas = createCanvas(a_split, b_split, z)
#
# Traverse the upper rank of the intersection of the tiled a and b tensors
#
for i1, (z_i0, (a_i0, b_i0)) in z_i1 << (a_i1 & b_i1):
    #
    # Traverse the intersection of each lower rank of the tiled a and b tensors
    #
    for i0, (z_ref, (a_val, b_val)) in z_i0 << (a_i0 & b_i0):
        #
        # Compute the output product
        #
        z_ref <<= a_val * b_val
        #
        # Animation bookkeeping...
        #
        canvas.addActivity(
            [(i1,)], [(i1,)], [(i1,)],
            worker="tile")
        canvas.addFrame(
            [(i1, i0)], [(i1, i0)], [(i1,i0)])
#
# Display the results
#
print("Tensor z - after")
displayTensor(z)
displayCanvas(canvas)
# -
# ## Parallel computation of uniform coordinate space tiling
#
# A key issue with tiling is the load balance between the activities in different PEs, which are typically working on different tiles. In the following cells, there is an animation of a system with two PEs running in parallel on separate tiles from the uniform coordinate-space tiled ```a``` and ```b``` tensors.
#
# Since there are two PEs, when both PEs are active there will be two values highlighted in a cycle, but only one will be highlighted when there is load imbalance and only one PE is active.
#
# Note we assume that the activity on distinct tiles in different PEs are synchronized, so work on new tiles always starts in both PEs at the same cycle.
#
# Utility function to display the active tiles in two PEs
#
def addTile(canvas, tiles, max_skew):
    """Highlight the given tiles on the canvas for `max_skew` cycles.

    Emits one `addActivity()` call per cycle (skew 0 .. max_skew-1) so the
    active tiles stay highlighted for the whole duration of the parallel
    work on them.

    canvas   -- animation canvas; must provide addActivity()
    tiles    -- list of top-rank coordinate tuples of the active tiles
    max_skew -- number of cycles the tiles stay highlighted
    """
    for c in range(max_skew):
        # Fix: worker was f"tile" (f-string with no placeholder); plain
        # "tile" matches the usage elsewhere in this notebook.
        canvas.addActivity(
            tiles, tiles, tiles,
            worker="tile",
            skew=c)
# +
#
# Create an empty z tensor and get its root fiber
#
z = Tensor(rank_ids=["I1", "I0"])
z_i1 = z.getRoot()
print("Tensor z - before")
displayTensor(z)
canvas = createCanvas(a_split, b_split, z)
#
# Initialization: alternate tiles between two PEs (pe is the next PE to use)
#
pe=0
#
# Animation bookkeeping...
#
tiles = []
max_skew = 0
skew = 0
#
# Traverse the upper rank of the intersection of the uniformly tiled
# a and b tensors (NOTE: this cell uses a_i1/b_i1, not a DCoT object)
#
for i1, (z_i0, (a_i0, b_i0)) in z_i1 << (a_i1 & b_i1):
    #
    # Animation bookkeeping...
    #
    tiles.append((i1,))
    #
    # Traverse elements in the intersection of the lower rank fibers
    #
    for i0, (z_ref, (a_val, b_val)) in z_i0 << (a_i0 & b_i0):
        #
        # Compute the product
        #
        z_ref <<= a_val * b_val
        #
        # Animation bookkeeping... (skew = cycle offset within this tile pair)
        #
        print(f"Skew = {skew}, i1 = {i1} and i0 = {i0}")
        canvas.addActivity(
            [(i1, i0)], [(i1, i0)], [(i1,i0)],
            worker=f"PE{pe}",
            skew=skew)
        skew += 1
    #
    # Determine next PE
    #
    pe = (pe+1)%2
    #
    # Animation bookkeeping...
    #
    max_skew = max(skew, max_skew)
    skew = 0
    #
    # Once both PEs have been given a tile, flush the frames for this pair
    #
    if pe == 0:
        addTile(canvas, tiles, max_skew)
        for c in range(max_skew):
            canvas.addFrame()
        tiles = []
        max_skew = 0
#
# Animation finalization (odd number of tiles leaves one PE's work pending)
#
if len(tiles) > 0:
    addTile(canvas, tiles, max_skew)
    canvas.addFrame()
#
# Show results
#
print("Tensor z - after")
displayTensor(z)
displayCanvas(canvas)
# -
# ## DCoT Splitting Class
#
# The following cell defines a class to create a **dynamic co-tiled (DCoT)** split of two tensors.
#
# Note, the code assumes the two tensors each have only a single rank and that those ranks share a **rank id**. Thus, for the case handled by this code, the tiles created by the split must contain the same coordinates when the same coordinate exists in each original tensor. Other cases, where the input tensors have more ranks or where all the **rank ids** are not the same in both tensors, are beyond the scope of this notebook.
# +
class DCoT():
    """Dynamic co-tiler (DCoT) for two rank-1 tensors.

    Splits both tensors at runtime into corresponding coordinate-space
    tiles such that each pair of corresponding tiles covers the same
    coordinate range and the sum of the occupancies of each pair stays
    close to the target `size`, giving balanced work for parallel PEs.
    """
    def __init__(self, a, b, size=2):
        """
        __init__

        Accept two rank-1 tensors to be co-tiled and a target size for
        the combined occupancy of each corresponding pair of tiles.
        """
        # Root fibers of the two input tensors
        self.a_i = a.getRoot()
        self.b_i = b.getRoot()
        # Target combined occupancy per pair of corresponding tiles
        self.size = size
        # TBD: Get rank names for a, b
        # Output tensors that will hold the co-tiled copies of a and b
        a_dcot = Tensor(rank_ids=["I1", "I0"]).setMutable(False)
        a_dcot.setColor(a.getColor())
        self.a_dcot = a_dcot
        b_dcot = Tensor(rank_ids=["I1", "I0"]).setMutable(False)
        b_dcot.setColor(b.getColor())
        self.b_dcot = b_dcot
    def getA(self):
        """
        getA

        Return the split (co-tiled) tensor created from the "a" input tensor
        """
        return self.a_dcot
    def getB(self):
        """
        getB

        Return the split (co-tiled) tensor created from the "b" input tensor
        """
        return self.b_dcot
    def __iter__(self):
        """
        __iter__

        A generator that dynamically co-tiles the "a" and "b" inputs,
        yielding (tile_coordinate, (a_tile_fiber, b_tile_fiber)) pairs
        while also appending the tiles to the split output tensors.

        TBD: This iterator can only be called once...
        """
        #
        # Initialization
        #
        # 1) Create the first coordinate in the top ranks of the output tensors
        # 2) Create an empty fiber as the payload of those coordinates
        #
        a_i = self.a_i
        b_i = self.b_i
        i1_coord = 0
        a_dcot = self.a_dcot
        a_dcot_i1 = a_dcot.getRoot()
        a_dcot_i0 = Fiber()
        a_dcot_i1.append(i1_coord, a_dcot_i0)
        b_dcot = self.b_dcot
        b_dcot_i1 = b_dcot.getRoot()
        b_dcot_i0 = Fiber()
        b_dcot_i1.append(i1_coord, b_dcot_i0)
        cur_size = 0
        #
        # Co-iterate through the union of the input fibers; `ab` flags
        # which inputs are non-empty at coordinate i ("A", "B" or "AB")
        #
        for i, (ab, a_val, b_val) in a_i | b_i:
            # TBD: Generalize so cur_size is subtensor size...
            #
            # Check if the current pair of tiles is "full" (an "AB" element
            # adds two to the occupancy, so it must also close the tile one
            # element early)
            #
            if cur_size == self.size or ("AB" in ab and cur_size == self.size-1):
                #
                # Return element of co-tiled tensor
                #
                yield i1_coord, (a_dcot_i0, b_dcot_i0)
                #
                # Create next coordinate in top rank of each output tensor,
                # just past the largest coordinate placed so far
                #
                i1_coord = max(a_dcot_i0[-1].coord, b_dcot_i0[-1].coord)+1
                cur_size = 0
                a_dcot_i0 = Fiber()
                a_dcot_i1.append(i1_coord, a_dcot_i0)
                b_dcot_i0 = Fiber()
                b_dcot_i1.append(i1_coord, b_dcot_i0)
            #
            # If there was a non-empty element in the "a" tensor, add it to the output
            #
            if "A" in ab:
                a_dcot_i0.append(i, a_val)
                cur_size += 1
            #
            # If there was a non-empty element in the "b" tensor, add it to the output
            #
            if "B" in ab:
                b_dcot_i0.append(i, b_val)
                cur_size += 1
        #
        # Return the final element of the split tensors
        #
        yield i1_coord, (a_dcot_i0, b_dcot_i0)
    def getDefault(self):
        """Return the default payload value for empty elements."""
        #
        # Since this is a rank-1 tensor, just return 0 as the default value
        #
        return 0
# -
# ## Set the DCoT tile size
#
# Set the target total tile size for the DCoT splitting, which is used in multiple cells below.
# +
#
# Set default parameter for DCoT combined tile size
#
dcot_size = 6  # target combined occupancy of each corresponding pair of tiles
def set_dcot_size(dcot_size_input):
    """Store the slider value in the module-level `dcot_size`."""
    global dcot_size
    dcot_size = dcot_size_input
    print("Run the cell below to create the split tensors after changing these sliders")
# Hook the slider up to set_dcot_size:
interactive(set_dcot_size,
            dcot_size_input=widgets.IntSlider(min=1, max=shape0, step=1, value=dcot_size))
# -
# ## DCoT Splitting of two tensors
#
# The cell below illustrates the result of DCoT splitting of the ```a``` and ```b``` tensors.
#
# Note how the sum of the occupancies of the payloads of matching coordinates in the two split tensors are nearly constant.
# +
#
# Get the a_i fiber
#
a_i = a.getRoot()
print("Tensor a")
displayTensor(a)
#
# Get the b_i fiber
#
b_i = b.getRoot()
print("Tensor b")
displayTensor(b)
#
# Create a DCoT object and fully populate the split tensors by
# draining its iterator
#
# TBD: Allow manifestation of DCoT output as a fiber (like &)
#
ab_dcot = DCoT(a, b, size=dcot_size)
cplist = [ e for e in ab_dcot]
ab_fiber = Fiber.fromCoordPayloadList(*cplist)
print("\nResult of DCoT as a fiber - text print because payloads are a tuple of fibers\n")
print(f"{ab_fiber:n*}")
print("\n")
print("Tensor a - split")
displayTensor(ab_dcot.getA())
print("Tensor b - split")
displayTensor(ab_dcot.getB())
# -
# ## DCoT tiled element-wise multiplication
#
# Iterate over the split DCoT to do element-wise multiplication
#
# Note that the tile being worked on is highlighted.
# +
#
# Get the a_i fiber
#
a_i = a.getRoot()
print("Tensor a")
displayTensor(a)
#
# Get the b_i fiber
#
b_i = b.getRoot()
print("Tensor b")
displayTensor(b)
#
# Create an empty z tensor and get its root fiber
#
z = Tensor(rank_ids=["I1", "I0"])
z_i1 = z.getRoot()
print("Tensor z - before")
displayTensor(z)
#
# Create a DCoT object to traverse
#
ab_dcot = DCoT(a, b, size=dcot_size)
a_dcot = ab_dcot.getA()
b_dcot = ab_dcot.getB()
canvas = createCanvas(a_dcot, b_dcot, z)
#
# Traverse the elements in the upper rank of the DCoT object;
# the split tensors are populated lazily as the iterator advances
#
for i1, (z_i0, (a_i0, b_i0)) in z_i1 << ab_dcot:
    #
    # Traverse elements in the intersection of the lower rank fibers
    #
    for i0, (z_ref, (a_val, b_val)) in z_i0 << (a_i0 & b_i0):
        #
        # Compute the product
        #
        z_ref <<= a_val * b_val
        #
        # Animation bookkeeping...
        #
        canvas.addActivity(
            [(i1,)], [(i1,)], [(i1,)],
            worker="tile")
        canvas.addFrame(
            [(i1, i0)], [(i1, i0)], [(i1,i0)])
#
# Show results
#
print("Tensor a_split")
displayTensor(a_dcot)
print("Tensor b_split")
displayTensor(b_dcot)
print("Tensor z - after")
displayTensor(z)
displayCanvas(canvas)
# -
# ## Parallel DCoT
#
# The code below runs two parallel PEs, each processing its own tile
#
# Utility function to display the active tiles in two PEs
#
def addTile(canvas, tiles, max_skew):
    """Highlight the given tiles on the canvas for `max_skew` cycles.

    Emits one `addActivity()` call per cycle (skew 0 .. max_skew-1) so the
    active tiles stay highlighted for the whole duration of the parallel
    work on them.

    canvas   -- animation canvas; must provide addActivity()
    tiles    -- list of top-rank coordinate tuples of the active tiles
    max_skew -- number of cycles the tiles stay highlighted
    """
    for c in range(max_skew):
        # Fix: worker was f"tile" (f-string with no placeholder); plain
        # "tile" matches the usage elsewhere in this notebook.
        canvas.addActivity(
            tiles, tiles, tiles,
            worker="tile",
            skew=c)
# +
#
# Get the a_i fiber
#
a_i = a.getRoot()
print("Tensor a")
displayTensor(a)
#
# Get the b_i fiber
#
b_i = b.getRoot()
print("Tensor b")
displayTensor(b)
#
# Create an empty z tensor and get its root fiber
#
z = Tensor(rank_ids=["I1", "I0"])
z_i1 = z.getRoot()
print("Tensor z - before")
displayTensor(z)
#
# Create a DCoT object to traverse
#
ab_dcot = DCoT(a, b, size=dcot_size)
a_dcot = ab_dcot.getA()
b_dcot = ab_dcot.getB()
canvas = createCanvas(a_dcot, b_dcot, z)
#
# Initialization: alternate tiles between two PEs (pe is the next PE to use)
#
pe=0
#
# Animation bookkeeping...
#
tiles = []
max_skew = 0
skew = 0
#
# Traverse the elements in the upper rank of the DCoT object
#
for i1, (z_i0, (a_i0, b_i0)) in z_i1 << ab_dcot:
    #
    # Animation bookkeeping...
    #
    tiles.append((i1,))
    #
    # Traverse elements in the intersection of the lower rank fibers
    #
    for i0, (z_ref, (a_val, b_val)) in z_i0 << (a_i0 & b_i0):
        #
        # Compute the product
        #
        z_ref <<= a_val * b_val
        #
        # Animation bookkeeping... (skew = cycle offset within this tile pair)
        #
        print(f"Skew = {skew}, i1 = {i1} and i0 = {i0}")
        canvas.addActivity(
            [(i1, i0)], [(i1, i0)], [(i1,i0)],
            worker=f"PE{pe}",
            skew=skew)
        skew += 1
    #
    # Determine next PE
    #
    pe = (pe+1)%2
    #
    # Animation bookkeeping...
    #
    max_skew = max(skew, max_skew)
    skew = 0
    #
    # Once both PEs have been given a tile, flush the frames for this pair
    #
    if pe == 0:
        addTile(canvas, tiles, max_skew)
        for c in range(max_skew):
            canvas.addFrame()
        tiles = []
        max_skew = 0
#
# Animation finalization (odd number of tiles leaves one PE's work pending)
#
if len(tiles) > 0:
    addTile(canvas, tiles, max_skew)
    canvas.addFrame()
#
# Show results
#
print("Tensor a_split")
displayTensor(a_dcot)
print("Tensor b_split")
displayTensor(b_dcot)
print("Tensor z - after")
displayTensor(z)
displayCanvas(canvas)
# -
# ## Testing area
#
# For running alternative algorithms
| notebooks/tiling/dynamic-co-tiling.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# language: python
# name: python3
# ---
import sys
sys.path.append('..')
import numpy as np
from dezero import *
import math

# Hyperparameters shared by both experiments below.
max_epoch = 5
batch_size = 100
hidden_size = 1000

train_set = datasets.MNIST(train=True)
test_set = datasets.MNIST(train=False)
train_loader = DataLoader(train_set, batch_size)
test_loader = DataLoader(test_set, batch_size, shuffle=False)


def _train_and_evaluate(model, optimizer):
    """Train `model` for `max_epoch` epochs with `optimizer`.

    After every epoch, prints the average training loss/accuracy and then
    the average test loss/accuracy (test pass runs under no_grad()).
    """
    for epoch in range(max_epoch):
        sum_loss, sum_acc = 0, 0
        for x, t in train_loader:
            y = model(x)
            loss = F.softmax_cross_entropy(y, t)
            acc = F.accuracy(y, t)
            model.cleargrads()
            loss.backward()
            optimizer.update()
            # Weight per-batch metrics by batch size so the epoch average is exact.
            sum_loss += float(loss.data) * len(t)
            sum_acc += float(acc.data) * len(t)
        print('epoch %d, loss %.2f, accuracy %.4f' %
              (epoch + 1, sum_loss / len(train_set), sum_acc / len(train_set)))
        sum_loss, sum_acc = 0, 0
        with no_grad():
            for x, t in test_loader:
                y = model(x)
                loss = F.softmax_cross_entropy(y, t)
                acc = F.accuracy(y, t)
                sum_loss += float(loss.data) * len(t)
                sum_acc += float(acc.data) * len(t)
        # BUG FIX: test metrics were previously divided by len(train_set),
        # understating test loss/accuracy; normalize by the test-set size.
        print('test loss %.2f, accuracy %.4f' %
              (sum_loss / len(test_set), sum_acc / len(test_set)))


# Experiment 1: one hidden layer (MLP's default activation).
model = MLP((hidden_size, 10))
optimizer = optimizers.SGD().setup(model)
_train_and_evaluate(model, optimizer)

# Experiment 2: two hidden layers with ReLU activation.
model = MLP((hidden_size, hidden_size, 10), activation=F.relu)
optimizer = optimizers.SGD().setup(model)
_train_and_evaluate(model, optimizer)
| CHAPTER04/step51.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# language: python
# name: python3
# ---
# # search with ami
#
#
# ### define the parameters of the search
#
#
# +
# specific problem
PROJECT = "funders"                      # output directory name for getpapers
EPMC = "viral epidemics"                 # query string sent to europepmc.org
HITS = 100                               # maximum number of papers to download
DICTIONARIES = "drugs disease country"   # space-separated ami dictionaries
# This will be user-specific. EDIT
# where is your workspace?
HOME = "/Users/pm286/"
WORK = HOME + "workspace/work/search/"
# and where is ami3 distrib (DEMO ONLY)?
AMI3 = HOME + "workspace/cmdev/ami3/"
# where is the dictionary directory?
# NOTE(review): empty suffix makes AMI_DICT identical to HOME — confirm the intended path.
AMI_DICT = HOME + ""
# NOTE(review): this silently overwrites the AMI3 value assigned above — confirm which is wanted.
AMI3 = HOME + "workspace/cmdev/ami3/src/ipynb/work/search/"
# (DEMO ONLY) copy so we don't contaminate AMI3
# ! cp -R $AMI3 $WORK
#specific project
PROJECT_WORK = WORK + PROJECT + "/"
# NOTE(review): "! cd" runs in a subshell and does not change the notebook's working directory.
# ! cd $WORK
print ("working in: " + WORK + "\nfiles: \n")
# ! ls
# -
# ## search EPMC with getpapers
# This needs
# * a search query (EPMC) to send to europepmc.org
# * a project name (PROJECT) which will create a directory under your current directory
# * maximum hits (HITS)
# ! getpapers -q "$EPMC" -o $PROJECT -x -k $HITS
# ## `ami search` on results of `getpapers`
#
# This requires one or more dictionaries (space-separated) in DICTIONARIES
# It can transform/search 1-3 papers/sec and is quicker the second time around.
# ! ami -p $PROJECT search --dictionary $DICTIONARIES
# ls $PROJECT
# [dashboard](drugs_disease_country/full.dataTables.html)
# +
## cooccurrence plots
### First, list the plots
# -
# ! ls $PROJECT/__cooccurrence/
# ! ls $PROJECT/__cooccurrence/allPlots.svg
# this is a kludge until we work out how to display the file
# ! cp $PROJECT/__cooccurrence/allPlots.svg tempAllPlots.svg
# ### display the all-vs-all plot
# *WARNING uses a temp file - we need to find a generic way with `display()`
#
# ## all-by-all plots
# <img src="tempAllPlots.svg">
#
# licence
# 
#
#
| src/ipynb/work/search/.ipynb_checkpoints/funders-checkpoint.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 2
# language: python
# name: python2
# ---
# # Certificate Editor
#
# This notebook uses [cryptography](https://cryptography.io/) to take an existing X.509 certificate and make edits to it, while preserving the other characteristics of the certificate. In particular, this preserves the currently existing certificate private key.
#
# By default, this code takes a certificate and changes its expiry date to be thirty years in the future from today, but in principle it can make other changes.
#
# The script has the following limitations:
#
# - It only works for self-signed certificates. This isn't a fundamental limitation, just done for simplicity's sake.
# +
# Imports and other basic setup.
import datetime
from cryptography import x509
from cryptography.hazmat.backends import default_backend
from cryptography.hazmat.primitives import serialization
# How far to push the expiry date into the future (approximation: ignores leap days).
THIRTY_YEARS = datetime.timedelta(days=365*30)
# +
# User-controlled settings. These settings adjust what certificate is used and what gets edited about it.
CERT_PATH = "client.pem"     # existing self-signed certificate (PEM)
KEY_PATH = "client.key"      # its private key (PEM)
# Passphrase for the private key as bytes, or None for an unencrypted key.
# BUG FIX: the original "<PASSWORD>" was a redaction placeholder, not valid Python.
KEY_PASSWORD = None
OUTPUT_PATH = "newcert.pem"  # where the edited certificate is written
# Certificate attributes to override; keys are CertificateBuilder attribute names.
# Default: push the expiry date thirty years past today.
NEW_PARAMETERS = {
    "not_valid_after": datetime.datetime.today() + THIRTY_YEARS
}
# Extension overrides, keyed by extension value class (e.g. x509.BasicConstraints).
NEW_EXTENSIONS = {}
# +
# The actual logic of the code. First, define all the certificate attributes we'll try to set.
# We deliberately exclude extensions here because they require more work.
# The attributes are a dictionary of "certificate builder name" to "certificate name", to work
# around the fact that cryptography has different names for most of these things.
attrs = {
    'subject_name': 'subject',
    'issuer_name': 'issuer',
    'not_valid_before': 'not_valid_before',
    'not_valid_after': 'not_valid_after',
    # BUG FIX: modern cryptography exposes the serial as `serial_number`; the old
    # `serial` alias was deprecated and later removed, which made the getattr()
    # fallback in build_certificate silently skip copying the serial.
    'serial_number': 'serial_number',
}
def build_certificate(current_cert, current_key):
    """Return a new certificate cloning `current_cert` with edits applied.

    Attributes listed in `attrs` are copied from `current_cert` unless
    overridden in NEW_PARAMETERS.  Extensions are copied unless overridden
    in NEW_EXTENSIONS (keyed by extension value class); remaining entries
    in NEW_EXTENSIONS are added as brand-new extensions.  The result is
    signed with `current_key` using the original signature hash algorithm.
    """
    builder = x509.CertificateBuilder()
    # Apply the attributes. We have to do a weird getattr() dance here because the various
    # builder attributes are actually functions.
    for attr in attrs:
        if attr in NEW_PARAMETERS:
            builder = getattr(builder, attr)(NEW_PARAMETERS[attr])
            continue
        try:
            old_attr = getattr(current_cert, attrs[attr])
        except AttributeError:
            # Attribute not present on this cryptography version; skip it.
            continue
        builder = getattr(builder, attr)(old_attr)
    # Then the extensions. Work on a local copy so this function does not
    # destroy the module-level NEW_EXTENSIONS setting (the original del'd
    # entries from the global dict as a side effect).
    pending = dict(NEW_EXTENSIONS)
    for extension in current_cert.extensions:
        ext_class = extension.value.__class__
        if ext_class not in pending:
            builder = builder.add_extension(extension.value, critical=extension.critical)
        else:
            # BUG FIX: the original indexed NEW_EXTENSIONS with the extension
            # *object* although the dict is keyed by class, and omitted the
            # mandatory `critical` argument to add_extension().  Preserve the
            # old extension's criticality for the replacement.
            builder = builder.add_extension(pending.pop(ext_class), critical=extension.critical)
    # Then add any genuinely new extensions (not present on the old cert).
    # NOTE(review): brand-new extensions default to non-critical — adjust if
    # a critical extension must be introduced.
    for value in pending.values():
        builder = builder.add_extension(value, critical=False)
    # Finally, set the key and sign it.
    builder = builder.public_key(current_key.public_key())
    signature_algorithm = current_cert.signature_hash_algorithm
    return builder.sign(
        private_key=current_key,
        algorithm=signature_algorithm,
        backend=default_backend(),
    )
# +
# Finally, a code block that actually executes the transformation:
# load the existing certificate and key, build the edited certificate, write it out.
with open(CERT_PATH, 'rb') as f:
    current_cert = x509.load_pem_x509_certificate(f.read(), default_backend())
with open(KEY_PATH, 'rb') as f:
    # KEY_PASSWORD must be None for an unencrypted key, or the passphrase as bytes.
    current_key = serialization.load_pem_private_key(f.read(), KEY_PASSWORD, default_backend())
new_cert = build_certificate(current_cert, current_key)
with open(OUTPUT_PATH, 'wb') as f:
    f.write(new_cert.public_bytes(serialization.Encoding.PEM))
| certificate_editor.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# name: python3
# ---
# + [markdown] id="view-in-github" colab_type="text"
# <a href="https://colab.research.google.com/github/huggingface/transformers/blob/add_benchmark_notebook/05_benchmark.ipynb" target="_parent"><img src="https://colab.research.google.com/assets/colab-badge.svg" alt="Open In Colab"/></a>
# + [markdown] id="jG-SjOQTskcX" colab_type="text"
# ## **How to benchmark models with Transformers**
#
# With ever-larger language models, it is no longer enough to just
# compare models on their performance on a specific task. One should always be aware of the computational cost that is attached to a specific model. For a given computation environment (*e.g.* type of GPU), the computational cost of training a model or deploying it in inference usually depends only on **the required memory** and **the required time**.
#
# Being able to accurately benchmark language models on both *speed* and *required memory* is therefore very important.
#
# HuggingFace's Transformer library allows users to benchmark models for both Tensorflow 2 and PyTorch using the `PyTorchBenchmark` and `TensorflowBenchmark` classes.
#
# The currently available features for `PyTorchBenchmark` are summarized in the following table.
#
#
# | | CPU | CPU + torchscript | GPU | GPU + torchscript | GPU + FP16 | TPU |
# :-- | :--- | :--- | :--- | :--- | :--- | :--- |
# **Speed - Inference** | ✔ | ✔ | ✔ | ✔ | ✔ | ✔ |
# **Memory - Inference** | ✔ | ✔ | ✔ | ✔ | ✔ | ✘ |
# **Speed - Train** | ✔ | ✘ | ✔ | ✘ | ✔ | ✔ |
# **Memory - Train** | ✔ | ✘ | ✔ | ✘ | ✔ | ✘ |
#
#
# * *FP16* stands for mixed-precision meaning that computations within the model are done using a mixture of 16-bit and 32-bit floating-point operations, see [here](https://pytorch.org/docs/stable/nn.html#torch.nn.Module.half) for more detail.
#
# * *torchscript* corresponds to PyTorch's torchscript format, see [here](https://pytorch.org/docs/stable/jit.html).
#
# The currently available features for `TensorflowBenchmark` are summarized in the following table.
#
# | | CPU | CPU + eager execution | GPU | GPU + eager execution | GPU + XLA | GPU + FP16 | TPU |
# :-- | :--- | :--- | :--- | :--- | :--- | :--- | :--- |
# **Speed - Inference** | ✔ | ✔ | ✔ | ✔ | ✔ | ✘ | ✔ |
# **Memory - Inference** | ✔ | ✔ | ✔ | ✔ | ✔ | ✘ | ✘ |
# **Speed - Train** | ✘ | ✘ | ✘ | ✘ | ✘ | ✘ | ✘ |
# **Memory - Train** | ✘ | ✘ | ✘ | ✘ | ✘ | ✘ | ✘ |
#
# * *eager execution* means that the function is run in the eager execution environment of Tensorflow 2, see [here](https://www.tensorflow.org/guide/eager).
#
# * *XLA* stands for Tensorflow's Accelerated Linear Algebra (XLA) compiler, see [here](https://www.tensorflow.org/xla)
#
# * *FP16* stands for Tensorflow's mixed-precision package and is analogous to PyTorch's FP16 feature, see [here](https://www.tensorflow.org/guide/mixed_precision).
#
# ***Note***: In roughly 1–2 weeks it will also be possible to benchmark training in Tensorflow.
#
#
# This notebook will show the user how to use `PyTorchBenchmark` and `TensorflowBenchmark` for two different scenarios:
#
# 1. **Inference - Pre-trained Model Comparison** - *A user wants to implement a pre-trained model in production for inference. She wants to compare different models on speed and required memory.*
#
# 2. **Training - Configuration Comparison** - *A user wants to train a specific model and searches for the most effective model configuration.*
#
# + [markdown] id="j-jvAvZ1-GIh" colab_type="text"
# ### **Inference - Pre-trained Model Comparison**
#
# Let's say we want to employ a question-answering model in production. The questions are expected to be of the same format as in **SQuAD v2**, so that the model to choose should have been fine-tuned on this dataset.
#
# HuggingFace's new dataset [webpage](https://huggingface.co/datasets) lets the user see all relevant information about a dataset and even links the models that have been fine-tuned on this specific dataset. Let's check out the dataset webpage of SQuAD v2 [here](https://huggingface.co/datasets/squad_v2).
#
# Nice, we can see that there are 7 available models.
#
# 
#
# Let's assume that we have decided to restrict our pipeline to "encoder-only" models so that we are left with:
#
# - `a-ware/roberta-large-squad-classification`
# - `a-ware/xlmroberta-squadv2`
# - `aodiniz/bert_uncased_L-10_H-512_A-8_cord19-200616_squad2`
# - `deepset/roberta-base-squad2`
# - `mrm8488/longformer-base-4096-finetuned-squadv2`
#
# Great! In this notebook, we will now benchmark these models on both peak memory consumption and inference time to decide which model should be employed in production.
#
# ***Note***: None of the models has been tested on performance so that we will just assume that all models perform more or less equally well. The purpose of this notebook is not to find the best model for SQuAD v2, but to showcase how Transformers benchmarking tools can be leveraged.
#
# First, we assume to be limited by the available GPU on this google colab, which in this copy amounts to 16 GB of RAM.
# + [markdown] id="2l9C7d7K5-G4" colab_type="text"
# In a first step, we will check which models are the most memory-efficient ones.
# Let's make sure 100% of the GPU is available to us in this notebook.
# + id="M7cQmgM5TvlO" colab_type="code" cellView="form" colab={"base_uri": "https://localhost:8080/", "height": 67} outputId="2797c14e-a62d-42cc-97a6-6c61b015d569"
#@title Check available memory of GPU
# Check that we are using 100% of GPU
# memory footprint support libraries/code
# !ln -sf /opt/bin/nvidia-smi /usr/bin/nvidia-smi
# !pip -q install gputil
# !pip -q install psutil
# !pip -q install humanize
import psutil
import humanize
import os
import GPUtil as GPU
GPUs = GPU.getGPUs()
# XXX: only one GPU on Colab and isn’t guaranteed
gpu = GPUs[0]
def printm():
    """Print free host RAM, this process's RSS, and GPU memory stats (free/used/util/total, MB)."""
    process = psutil.Process(os.getpid())
    print("Gen RAM Free: " + humanize.naturalsize( psutil.virtual_memory().available ), " | Proc size: " + humanize.naturalsize( process.memory_info().rss))
    print("GPU RAM Free: {0:.0f}MB | Used: {1:.0f}MB | Util {2:3.0f}% | Total {3:.0f}MB".format(gpu.memoryFree, gpu.memoryUsed, gpu.memoryUtil*100, gpu.memoryTotal))
printm()
# + id="NuS2CKuQ4qSk" colab_type="code" colab={}
# If GPU RAM Util > 0% => crash notebook on purpose
# # !kill -9 -1
# + [markdown] id="ikdYDXsj6Nzv" colab_type="text"
# Looks good! Now we import `transformers` and download the scripts `run_benchmark.py`, `run_benchmark_tf.py`, and `plot_csv_file.py` which can be found under `transformers/examples/benchmarking`.
#
# `run_benchmark_tf.py` and `run_benchmark.py` are very simple scripts leveraging the `PyTorchBenchmark` and `TensorflowBenchmark` classes, respectively.
# + id="Dylftiyd1IG1" colab_type="code" cellView="both" colab={}
# install transformers
# !pip uninstall -y transformers
# !pip install -q git+https://github.com/huggingface/transformers.git
# install py3nvml to track GPU memory usage
# !pip install -q py3nvml
# !rm -f run_benchmark.py
# !rm -f run_benchmark_tf.py
# !rm -f plot_csv_file.py
# !wget https://raw.githubusercontent.com/huggingface/transformers/master/examples/benchmarking/run_benchmark.py -qq
# !wget https://raw.githubusercontent.com/huggingface/transformers/master/examples/benchmarking/run_benchmark_tf.py -qq
# !wget https://raw.githubusercontent.com/huggingface/transformers/master/examples/benchmarking/plot_csv_file.py -qq
# import pandas to pretty print csv files
import pandas as pd
# + [markdown] id="C4nz5nGFkOrK" colab_type="text"
# Information about the input arguments to the *run_benchmark* scripts can be accessed by running `!python run_benchmark.py --help` for PyTorch and `!python run_benchmark_tf.py --help` for Tensorflow.
# + id="zu7Oufe0jcAj" colab_type="code" colab={"base_uri": "https://localhost:8080/", "height": 1000} outputId="bc52dea5-b721-410c-cf3b-8a7b983a558e"
# !python run_benchmark.py --help
# + [markdown] id="Q_3TZshjcrjP" colab_type="text"
# Great, we are ready to run our first memory benchmark. By default, both the *required memory* and *time* for inference is enabled. To disable benchmarking on *time*, we add `--no_speed`.
#
# The only required parameter is `--models` which expects a list of model identifiers as defined on the [model hub](https://huggingface.co/models). Here we add the five model identifiers listed above.
#
# Next, we define the `sequence_lengths` and `batch_sizes` for which the peak memory is calculated.
#
# Finally, because the results should be stored in a *CSV* file, the option `--save_to_csv` is added and the path to save the results is added via the `--inference_memory_csv_file` argument.
# Whenever a benchmark is run, the environment information, *e.g.* GPU type, library versions, ... can be saved using the `--env_info_csv_file` argument.
# + id="ykJqt7MEbHIq" colab_type="code" colab={}
# create plots folder in content
# !mkdir -p plots_pt
# + id="TSJgpQxBe-Fj" colab_type="code" colab={}
# run benchmark
# !python run_benchmark.py --no_speed --save_to_csv \
# --models a-ware/roberta-large-squad-classification \
# a-ware/xlmroberta-squadv2 \
# aodiniz/bert_uncased_L-10_H-512_A-8_cord19-200616_squad2 \
# deepset/roberta-base-squad2 \
# mrm8488/longformer-base-4096-finetuned-squadv2 \
# --sequence_lengths 32 128 512 1024 \
# --batch_sizes 32 \
# --inference_memory_csv_file plots_pt/required_memory.csv \
# --env_info_csv_file plots_pt/env.csv >/dev/null 2>&1 # redirect all prints
# + [markdown] id="ESHrlnKik396" colab_type="text"
# Under `plots_pt`, two files are now created: `required_memory.csv` and `env.csv`. Let's check out `required_memory.csv` first.
# + id="rPg_7fPnuDUa" colab_type="code" colab={"base_uri": "https://localhost:8080/", "height": 639} outputId="b6272763-7235-43c6-c457-0a4a13bb02e5"
df = pd.read_csv('plots_pt/required_memory.csv')
df
# + [markdown] id="o2LnaVpyW9TB" colab_type="text"
# Each row in the csv file lists one data point showing the *peak memory* usage for a given model, batch_size and sequence_length. As can be seen, some values have a *NaN* result meaning that an *Out-of-Memory* Error occurred. To better visualize the results, one can make use of the `plot_csv_file.py` script.
#
# Before, let's take a look at the information about our computation environment.
# + id="y6n49pbIXI6E" colab_type="code" colab={"base_uri": "https://localhost:8080/", "height": 639} outputId="495f011c-87c9-43a1-e1d4-a6501c327e76"
df = pd.read_csv('plots_pt/env.csv')
df
# + [markdown] id="z316Xf2oXTZz" colab_type="text"
# We can see all relevant information here: the PyTorch version, the Python version, the system, the type of GPU, and available RAM on the GPU, etc...
#
# **Note**: A different GPU is likely assigned to a copy of this notebook, so that all of the following results may be different. It is very important to always include the environment information when benchmarking your models for both reproducibility and transparency to other users.
#
# Alright, let's plot the results.
# + id="yHYUqRzWy8sp" colab_type="code" colab={"base_uri": "https://localhost:8080/", "height": 514} outputId="22499f33-bafc-42b3-f1b7-fcb202df9cd2"
# plot graph and save as image
# !python plot_csv_file.py --csv_file plots_pt/required_memory.csv --figure_png_file=plots_pt/required_memory_plot.png --no_log_scale --short_model_names a-ware-roberta a-aware-xlm aodiniz-bert deepset-roberta mrm8488-long
# show image
from IPython.display import Image
Image('plots_pt/required_memory_plot.png')
# + [markdown] id="RKZhRMmJmNH_" colab_type="text"
# At this point, it is important to understand how the peak memory is measured. The benchmarking tools measure the peak memory usage the same way the command `nvidia-smi` does - see [here](https://developer.nvidia.com/nvidia-system-management-interface) for more information.
# In short, all memory that is allocated for a given *model identifier*, *batch size* and *sequence length* is measured in a separate process. This way it can be ensured that there is no previously unreleased memory falsely included in the measurement. One should also note that the measured memory even includes the memory allocated by the CUDA driver to load PyTorch and Tensorflow and is, therefore, higher than library-specific memory measurement function, *e.g.* this one for [PyTorch](https://pytorch.org/docs/stable/cuda.html#torch.cuda.max_memory_allocated).
#
# Alright, let's analyze the results. It can be noted that the models `aodiniz/bert_uncased_L-10_H-512_A-8_cord19-200616_squad2` and `deepset/roberta-base-squad2` require significantly less memory than the other three models. Besides `mrm8488/longformer-base-4096-finetuned-squadv2` all models more or less follow the same memory consumption pattern with `aodiniz/bert_uncased_L-10_H-512_A-8_cord19-200616_squad2` seemingly being able to better scale to larger sequence lengths.
# `mrm8488/longformer-base-4096-finetuned-squadv2` is a *Longformer* model, which makes use of *LocalAttention* (check this blog post to learn more about local attention) so that the model scales much better to longer input sequences.
#
# For the sake of this notebook, we assume that the longest required input will be less than 512 tokens so that we settle on the models `aodiniz/bert_uncased_L-10_H-512_A-8_cord19-200616_squad2` and `deepset/roberta-base-squad2`.
#
# To better understand how many API requests of our *question-answering* pipeline can be run in parallel, we are interested in finding out how many batches the two models run out of memory.
# + id="9Nwmb57M4wIG" colab_type="code" colab={"base_uri": "https://localhost:8080/", "height": 356} outputId="4c074607-5200-4cca-bbd5-c39d32ce0451"
# !python run_benchmark.py --no_speed --save_to_csv \
# --inference_memory_csv_file plots_pt/required_memory_2.csv \
# --env_info_csv_file plots_pt/env.csv \
# --models aodiniz/bert_uncased_L-10_H-512_A-8_cord19-200616_squad2 \
# deepset/roberta-base-squad2 \
# --sequence_lengths 512 \
# --batch_sizes 64 128 256 512\
# --no_env_print
# + [markdown] id="P4JFKLZXqmss" colab_type="text"
# Let's plot the results again, this time changing the x-axis to `batch_size` however.
# + id="tNtvHpE67pgH" colab_type="code" colab={"base_uri": "https://localhost:8080/", "height": 514} outputId="092c4dac-5002-4603-8eba-cd4bca727744"
# plot graph and save as image
# !python plot_csv_file.py --csv_file plots_pt/required_memory_2.csv \
# --figure_png_file=plots_pt/required_memory_plot_2.png \
# --no_log_scale \
# --short_model_names aodiniz-bert deepset-roberta \
# --plot_along_batch
# show image
from IPython.display import Image
Image('plots_pt/required_memory_plot_2.png')
# + [markdown] id="bdoTRF7Yq8oV" colab_type="text"
# Interesting! `aodiniz/bert_uncased_L-10_H-51` clearly scales better for higher batch sizes and does not even run out of memory for 512 tokens.
#
# For comparison, let's run the same benchmarking on Tensorflow.
# + id="752y4onm-gpy" colab_type="code" colab={"base_uri": "https://localhost:8080/", "height": 726} outputId="a65c4bc1-f88e-46ae-cb80-27e29a0a1954"
# create plots folder in content
# !mkdir -p plots_tf
# !TF_CPP_MIN_LOG_LEVEL=3 python run_benchmark_tf.py --no_speed --save_to_csv \
# --inference_memory_csv_file plots_tf/required_memory_2.csv \
# --env_info_csv_file plots_tf/env.csv \
# --models aodiniz/bert_uncased_L-10_H-512_A-8_cord19-200616_squad2 \
# deepset/roberta-base-squad2 \
# --sequence_lengths 512 \
# --batch_sizes 64 128 256 512 \
# --no_env_print \
# + [markdown] id="3h5JqW2osAQ7" colab_type="text"
# Let's see the same plot for Tensorflow.
# + id="hkw-EOOvA52R" colab_type="code" colab={"base_uri": "https://localhost:8080/", "height": 514} outputId="3947ccf0-b91c-43bf-8569-d6afe0232185"
# plot graph and save as image
# !python plot_csv_file.py --csv_file plots_tf/required_memory_2.csv --figure_png_file=plots_tf/required_memory_plot_2.png --no_log_scale --short_model_names aodiniz-bert deepset-roberta --plot_along_batch
# show image
from IPython.display import Image
Image('plots_tf/required_memory_plot_2.png')
# + [markdown] id="ybqol62LsVrF" colab_type="text"
# The model implemented in Tensorflow requires more memory than the one implemented in PyTorch. Let's say for whatever reason we have decided to use Tensorflow instead of PyTorch.
#
# The next step is to measure the inference time of these two models. Instead of disabling time measurement with `--no_speed`, we will now disable memory measurement with `--no_memory`.
# + id="m8qfllt9uPZg" colab_type="code" colab={"base_uri": "https://localhost:8080/", "height": 302} outputId="b185f547-fbe6-4287-b8a0-6229d3eec377"
# !TF_CPP_MIN_LOG_LEVEL=3 python run_benchmark_tf.py --no_memory --save_to_csv \
# --inference_time_csv_file plots_tf/time_2.csv \
# --env_info_csv_file plots_tf/env.csv \
# --models aodiniz/bert_uncased_L-10_H-512_A-8_cord19-200616_squad2 \
# deepset/roberta-base-squad2 \
# --sequence_lengths 8 32 128 512 \
# --batch_sizes 256 \
# --no_env_print \
# + id="-bPClv873lrW" colab_type="code" colab={"base_uri": "https://localhost:8080/", "height": 514} outputId="152f14c7-288a-4471-9cc0-5108cb24804c"
# plot graph and save as image
# !python plot_csv_file.py --csv_file plots_tf/time_2.csv --figure_png_file=plots_tf/time_plot_2.png --no_log_scale --short_model_names aodiniz-bert deepset-roberta --is_time
# show image
from IPython.display import Image
Image('plots_tf/time_plot_2.png')
# + [markdown] id="f9sIjRWd4Me1" colab_type="text"
# Ok, this took some time... time measurements take much longer than memory measurements because the forward pass is called multiple times for stable results. Timing measurements leverage Python's [timeit module](https://docs.python.org/2/library/timeit.html#timeit.Timer.repeat) and run 10 times the value given to the `--repeat` argument (defaults to 3), so in our case 30 times.
#
# Let's focus on the resulting plot. It becomes obvious that `aodiniz/bert_uncased_L-10_H-51` is around twice as fast as `deepset/roberta-base-squad2`. Given that the model is also more memory efficient and assuming that the model performs reasonably well, for the sake of this notebook we will settle on `aodiniz/bert_uncased_L-10_H-51`. Our model should be able to process input sequences of up to 512 tokens. Latency time of around 2 seconds might be too long though, so let's compare the time for different batch sizes and using Tensorflows XLA package for more speed.
# + id="aPeMsHJb3t2g" colab_type="code" colab={"base_uri": "https://localhost:8080/", "height": 202} outputId="56276801-6d56-444c-8ac8-75471136aa84"
# !TF_CPP_MIN_LOG_LEVEL=3 python run_benchmark_tf.py --no_memory --save_to_csv \
# --inference_time_csv_file plots_tf/time_xla_1.csv \
# --env_info_csv_file plots_tf/env.csv \
# --models aodiniz/bert_uncased_L-10_H-512_A-8_cord19-200616_squad2 \
# --sequence_lengths 512 \
# --batch_sizes 8 64 256 \
# --no_env_print \
# --use_xla
# + [markdown] id="_KrzL6y_6Z2T" colab_type="text"
# First of all, it can be noted that XLA reduces latency time by a factor of ca. 1.3 (which is more than observed for other models by Tensorflow [here](https://www.tensorflow.org/xla)). A batch size of 64 looks like a good choice. More or less half a second for the forward pass is good enough.
#
# Cool, now it should be straightforward to benchmark your favorite models. All the inference time measurements can also be done using the `run_benchmark.py` script for PyTorch.
# + [markdown] id="Drht35ylINuK" colab_type="text"
# ### **Training - Configuration Comparison**
#
# Next, we will look at how a model can be benchmarked on different configurations. This is especially helpful when one wants to decide how to most efficiently choose the model's configuration parameters for training.
# In the following different configurations of a *Bart MNLI* model will be compared to each other using `PyTorchBenchmark`.
#
# Training in `PyTorchBenchmark` is defined by running one forward pass to compute the loss: `loss = model(input_ids, labels=labels)[0]` and one backward pass to compute the gradients `loss.backward()`.
#
# Let's see how to most efficiently train a Bart MNLI model from scratch.
# + id="YTKW0Ml3Wpwq" colab_type="code" colab={}
# Imports
from transformers import BartConfig, PyTorchBenchmark, PyTorchBenchmarkArguments
# + [markdown] id="6Uw92tMRq6MV" colab_type="text"
# For the sake of the notebook, we assume that we are looking for a more efficient version of Facebook's `bart-large-mnli` model.
# Let's load its configuration and check out the important parameters.
# + id="nukyLU7iXBzN" colab_type="code" colab={"base_uri": "https://localhost:8080/", "height": 637, "referenced_widgets": ["975f42d7b55c4d0caf229cd4c16df5d2", "69b36685703342eaa80b6f0e01f94e04", "<KEY>", "a6c3647736554beea36db798827203b2", "<KEY>", "eed2ce14188a453ca296601ca39133b6", "<KEY>", "<KEY>"]} outputId="ae4ecae5-bd30-4eb4-e4b3-34447036e98d"
BartConfig.from_pretrained("facebook/bart-large-mnli").to_diff_dict()
# + [markdown] id="3t4ZOmg5sTrx" colab_type="text"
# Alright! The important configuration parameters are usually the number of layers `config.encoder_num_layers` and `config.decoder_num_layers`, the model's hidden size: `config.d_model`, the number of attention heads `config.encoder_attention_heads` and `config.decoder_attention_heads` and the vocabulary size `config.vocab_size`.
#
# Let's create 4 configurations different from the baseline and see how they compare in terms of peak memory consumption.
# + id="qA0d1RvGYAEE" colab_type="code" colab={}
# Baseline: the stock facebook/bart-large-mnli configuration.
config_baseline = BartConfig.from_pretrained("facebook/bart-large-mnli")
# Each variant changes exactly one dimension relative to the baseline:
# smaller hidden size:
config_768_hidden = BartConfig.from_pretrained("facebook/bart-large-mnli", d_model=768)
# fewer attention heads (encoder and decoder):
config_8_heads = BartConfig.from_pretrained("facebook/bart-large-mnli", decoder_attention_heads=8, encoder_attention_heads=8)
# smaller vocabulary:
config_10000_vocab = BartConfig.from_pretrained("facebook/bart-large-mnli", vocab_size=10000)
# fewer layers (encoder and decoder):
config_8_layers = BartConfig.from_pretrained("facebook/bart-large-mnli", encoder_layers=8, decoder_layers=8)
# + [markdown] id="RhefJji1rU07" colab_type="text"
# Cool, now we can benchmark these configs against the baseline config. This time, instead of using the benchmarking script we will directly use the `PyTorchBenchmark` class. The class expects the argument `args` which has to be of type `PyTorchBenchmarkArguments` and optionally a list of configs.
#
# First, we define the `args` and give the different configurations appropriate model names. The model names must be in the same order as the configs that are directly passed to `PyTorchBenchMark`.
#
# If no `configs` are provided to `PyTorchBenchmark`, it is assumed that the model names `["bart-base", "bart-768-hid", "bart-8-head", "bart-10000-voc", "bart-8-lay"]` correspond to official model identifiers and their corresponding configs are loaded as was shown in the previous section.
#
# It is assumed that the model will be trained on half-precision, so we add the option `fp16=True` for the following benchmarks.
# + id="Lv_WvM2jr79r" colab_type="code" colab={"base_uri": "https://localhost:8080/", "height": 554} outputId="939dc355-036f-45ad-c996-e6cb136c7a59"
# define args: memory-only (no_speed) fp16 training benchmark over all five candidate configs
args = PyTorchBenchmarkArguments(models=["bart-base", "bart-768-hid", "bart-8-head", "bart-10000-voc", "bart-8-lay"],
                                 no_speed=True,
                                 no_inference=True,
                                 training=True,
                                 train_memory_csv_file="plots_pt/training_mem_fp16.csv",
                                 save_to_csv=True,
                                 env_info_csv_file="plots_pt/env.csv",
                                 sequence_lengths=[64, 128, 256, 512],
                                 batch_sizes=[8],
                                 no_env_print=True,
                                 fp16=True) # let's train on fp16
# create benchmark; model names above map 1:1 (in order) onto these configs
benchmark = PyTorchBenchmark(configs=[config_baseline, config_768_hidden, config_8_heads, config_10000_vocab, config_8_layers], args=args)
# run benchmark
result = benchmark.run()
# + [markdown] id="DJWs_tDjxzuO" colab_type="text"
# Nice, let's plot the results again.
# + id="0r-r-R1lxEr0" colab_type="code" colab={"base_uri": "https://localhost:8080/", "height": 514} outputId="5dbeb7f7-c996-4db2-a560-735354a5b76f"
# plot graph and save as image
# !python plot_csv_file.py --csv_file plots_pt/training_mem_fp16.csv --figure_png_file=plots_pt/training_mem_fp16.png --no_log_scale
# show image
# Render the PNG produced by the plotting script above inline in the notebook.
from IPython.display import Image
Image('plots_pt/training_mem_fp16.png')
# + [markdown] id="5xTuRPBCx-dw" colab_type="text"
# As expected the model of the baseline config requires the most memory.
#
# It is interesting to see that the "bart-8-head" model initially requires more memory than `bart-10000-voc`, but then clearly outperforms `bart-10000-voc` at an input length of 512.
# Less surprising is that the "bart-8-lay" is by far the most memory-efficient model when reminding oneself that during the forward pass every layer has to store its activations for the backward pass.
#
# Alright, given the data above, let's say we narrow our candidates down to only the "bart-8-head" and "bart-8-lay" models.
#
# Let's compare these models again on training time.
# + id="c9xSoCUZ0Hlz" colab_type="code" colab={"base_uri": "https://localhost:8080/", "height": 269} outputId="7054af8a-3050-4aca-f503-e229ed365cb0"
# define args
# Benchmark training *speed* only (memory profiling off) for the two finalist
# configurations; repeat=1 and single-process mode trade accuracy for runtime.
args = PyTorchBenchmarkArguments(models=["bart-8-head", "bart-8-lay"],
                                 no_inference=True,
                                 training=True,
                                 no_memory=True,
                                 train_time_csv_file="plots_pt/training_speed_fp16.csv",
                                 save_to_csv=True,
                                 env_info_csv_file="plots_pt/env.csv",
                                 sequence_lengths=[32, 128, 512],
                                 batch_sizes=[8],
                                 no_env_print=True,
                                 repeat=1, # to make speed measurement faster but less accurate
                                 no_multi_process=True, # google colab has problems with multi processing
                                 fp16=True
                                 )
# create benchmark
benchmark = PyTorchBenchmark(configs=[config_8_heads, config_8_layers], args=args)
# run benchmark
result = benchmark.run()
# + [markdown] id="UseFqLiuRQuX" colab_type="text"
# The option `no_multi_process` disabled multi-processing here. This option should in general only be used for testing or debugging. Enabling multi-processing is crucial to ensure accurate memory consumption measurement, but is less important when only measuring speed. The main reason it is disabled here is that google colab sometimes raises "CUDA initialization" due to the notebook's environment.
# This problem does not arise when running benchmarks outside of a notebook.
#
# Alright, let's plot the last speed results as well.
# + id="8c6fjmWLU0Rx" colab_type="code" colab={"base_uri": "https://localhost:8080/", "height": 514} outputId="8a4b4db7-abed-47c4-da61-c3b1ccae66f1"
# plot graph and save as image
# !python plot_csv_file.py --csv_file plots_pt/training_speed_fp16.csv --figure_png_file=plots_pt/training_speed_fp16.png --no_log_scale --is_time
# show image
# Display the generated training-speed plot inline.
from IPython.display import Image
Image('plots_pt/training_speed_fp16.png')
# + [markdown] id="b6T7I4lnVCpk" colab_type="text"
# Unsurprisingly, "bart-8-lay" is faster than "bart-8-head" by a factor of ca. 1.3. It might very well be that reducing the layers by a factor of 2 leads to much more performance degradation than reducing the number of heads by a factor of 2.
# For more information on computational efficient Bart models, check out the new *distilbart* model [here](https://huggingface.co/models?search=distilbart)
# + [markdown] id="S4cG0NwfNugm" colab_type="text"
# Alright, that's it! Now you should be able to benchmark your favorite models on your favorite configurations.
#
# Transparency for the computational cost of a model is becoming more and more important. Feel free to share your results with the community on a shared spreadsheet or by tweeting us @huggingface 🤗.
| notebooks/05-benchmark.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: 'Python 3.9.7 64-bit (''compasdev'': conda)'
# language: python
# name: python3
# ---
# ## import libraries
from compas_vol.primitives import VolBox, VolCylinder, VolPlane
from compas_vol.modifications import Overlay, Shell
from compas_vol.combinations import Intersection, Union, Subtraction
from compas_vol.microstructures import TPMS
from compas.geometry import Box, Frame, Point, Plane, Cylinder, Circle
import numpy as np
import meshplot as mp
from skimage.measure import marching_cubes
from compas_vol.utilities import bbox_edges
# ## create volumetric object (CSG tree)
# +
# Handle blank: a 250 x 30 x 10 box unioned with a round cap centred at x = +125
# (1.5 is presumably an edge-rounding radius for the box — confirm with VolBox docs).
shaft = VolBox(Box(Frame.worldXY(), 250, 30, 10), 1.5)
cyl_plane = Plane((125,0,0),(0,0,1))
roundcap = VolCylinder(Cylinder(Circle(cyl_plane, 15), 10))
handle = Union(shaft, roundcap)
# Porous infill: a shelled gyroid TPMS, modulated along x via plane-distance overlays.
gyroid = TPMS(tpmstype='Gyroid', wavelength=5.0)
shell = Shell(gyroid, 2.0, 0.5)
ol_plane = VolPlane(Plane((0,0,0), (1,0,0)))
thicken_tpms = Overlay(shell, ol_plane, 0.005)
taper = Overlay(handle, ol_plane, -0.01)
porous_handle = Intersection(thicken_tpms, taper)
# Reinforce a solid ring around the cap, then subtract the inner hole.
solid_outer = VolCylinder(Cylinder(Circle(cyl_plane, 12), 13))
void_inner = VolCylinder(Cylinder(Circle(cyl_plane, 10), 20))
hole_reinforce = Union(porous_handle, solid_outer)
wrench = Subtraction(hole_reinforce, void_inner)
# -
# ## workspace (dense grid)
# workspace initialization
# Sampling-box bounds (lower, upper) for each axis.
(lbx, ubx), (lby, uby), (lbz, ubz) = (-145.0, 145.0), (-18.0, 18.0), (-8.0, 8.0)
# Number of samples along each axis.
nx, ny, nz = 580, 72, 32
# Open (broadcastable) coordinate grids spanning the box.
x, y, z = np.ogrid[lbx:ubx:nx*1j, lby:uby:ny*1j, lbz:ubz:nz*1j]
# Voxel edge length per axis (used as marching-cubes spacing below).
gx, gy, gz = (ubx - lbx) / nx, (uby - lby) / ny, (ubz - lbz) / nz
# ## sample at discrete interval
# Evaluate the signed-distance field of the CSG tree on the dense grid.
dm = wrench.get_distance_numpy(x, y, z)
# ## generate isosurface (marching cube)
# Extract the zero level set; `spacing` maps voxel indices to world units.
v, f, n, l = marching_cubes(dm, 0, spacing=(gx, gy, gz))
v += [lbx,lby,lbz]  # shift vertices from grid space into world coordinates
# ## display mesh
p = mp.plot(v, f, c=np.array([0,0.57,0.82]), shading={"flat":False, "roughness":0.4, "metalness":0.01, "reflectivity":1.0})
vs,ve = bbox_edges(lbx,ubx,lby,uby,lbz,ubz)
p.add_lines(np.array(vs), np.array(ve))
| examples/notebooks/wrench.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# language: python
# name: python3
# ---
# # Piston expander
# This example explains how to use properly PDSim to simulate a piston expander. The same methodology can be readily applied to other positive displacement machines.
## COMMON IMPORTS ##
from __future__ import division, print_function
from math import pi, cos, sin
from timeit import default_timer
import os, sys
import matplotlib.pyplot as plt, numpy as np
# %matplotlib inline
#From PDSim we import the different elements that allows us to built a positive displacement simulation code.
from PDSim.flow.flow import FlowPath
from PDSim.flow import flow_models
from PDSim.misc.datatypes import empty_arraym
from PDSim.core.containers import ControlVolume, Tube
from PDSim.core.core import PDSimCore
# We need also to import CoolProp as property library to define suction and discharge states of the expander
# (or compressor).
from CoolProp import State
from CoolProp import CoolProp as CP
# +
# A PDSimCore subclass holding the piston expander model.
class PistonExpander(PDSimCore):
    """Single-cylinder piston expander built on the PDSimCore machinery."""

    #: Displacement of the cylinder above the dead volume [m^3]
    Vdisp = 20e-6
    #: Dead volume of the cylinder at TDC [m^3]
    Vdead = 3e-6
    #: Rotational speed [rad/s]
    omega = 377

    def __init__(self):
        # Initialise the PDSimCore base machinery this model relies on.
        PDSimCore.__init__(self)

    def V_dV(self, theta):
        """Chamber volume and its derivative w.r.t. crank angle *theta* [rad]."""
        half_disp = self.Vdisp / 2
        V = self.Vdead + half_disp * (1 - cos(theta))
        dVdtheta = half_disp * sin(theta)
        return V, dVdtheta

    def Suction(self, FlowPath):
        """Suction-port mass flow; the port is open only for theta in [0, pi/4]."""
        if not (0 <= self.theta <= pi/4):
            # Port closed: zero area, zero flow.
            FlowPath.A = 0.0
            return 0
        FlowPath.A = pi*0.006**2/4*(1-cos(8*self.theta))/2
        return flow_models.IsentropicNozzle(FlowPath.A,
                                            FlowPath.State_up,
                                            FlowPath.State_down)

    def Discharge(self, FlowPath):
        """Discharge-port mass flow; the port is open only for theta in [pi, 7*pi/4]."""
        if not (pi <= self.theta <= 7*pi/4):
            # Port closed: zero area, zero flow.
            FlowPath.A = 0.0
            return 0
        FlowPath.A = pi*0.006**2/4*(1-cos(4*self.theta))/2
        return flow_models.IsentropicNozzle(FlowPath.A,
                                            FlowPath.State_up,
                                            FlowPath.State_down)

    def TubeCode(self, Tube):
        """Heat transfer model for the connecting tubes (isothermal wall at the lump temperature)."""
        Tube.Q = flow_models.IsothermalWallTube(Tube.mdot,
                                                Tube.State1,
                                                Tube.State2,
                                                Tube.fixed,
                                                Tube.L,
                                                Tube.ID,
                                                T_wall=self.Tlumps[0])

    def heat_transfer_callback(self, theta):
        # In-chamber heat transfer is neglected: zero for every control volume.
        return empty_arraym(self.CVs.N)

    def mechanical_losses(self):
        # Mechanical losses are neglected for simplicity.
        return 0

    def ambient_heat_transfer(self):
        """Shell-to-ambient heat transfer [kW] with a constant film coefficient."""
        return self.h_shell * self.A_shell * (self.Tamb - self.Tlumps[0])

    def lump_energy_balance_callback(self):
        """Energy-balance residual [kW] of the single lumped shell temperature."""
        # Mechanical losses are added to the lump
        self.Wdot_mechanical = self.mechanical_losses()  # [kW]
        # Heat transfer between the shell and the ambient
        self.Qamb = self.ambient_heat_transfer()  # [kW]
        return self.Wdot_mechanical + self.Qamb

    def step_callback(self, theta, h, Itheta):
        """Record the crank angle for the flow callbacks; keep the step size *h*."""
        self.theta = theta
        return False, h
# -
# We have completely defined the class that hold the general piston expander model.
# Now we can actually define a function to run the model
def Expander():
    """Build, configure and solve the piston expander model; return the solved instance."""
    expander = PistonExpander() #Instantiate the class
    # We specify the working fluid, the inlet state conditions (temperature and pressure in this case),
    # and the outlet state for which the pressure is specified and the temperature is guessed.
    # Last, we need to provide a guess for the inlet mass flow rate. The model calculates the actual
    # mass flow rate through the machine as well as the discharge temperature.
    Ref = 'Nitrogen'
    inletState = State.State(Ref,dict(T = 298.15, P = 501.325))
    outletState = State.State(Ref,dict(T = 200, P = inletState.p/10))
    # Guess: displacement volume swept once per revolution at inlet density.
    mdot_guess = inletState.rho*expander.Vdisp*expander.omega/(2*pi)
    # The piston expander has only one working chamber and therefore we add one control volume
    expander.add_CV(ControlVolume(key='A',
                                  initialState=inletState.copy(),
                                  VdVFcn=expander.V_dV,)
                    )
    # We define the necessary constants for ambient heat transfer
    expander.h_shell = 0.010 #[kW/m2/K]
    expander.A_shell = pi*10*2*(0.0254**2) #[m2]
    expander.Tamb = 298 #[K]
    expander.Wdot_parasitic = 0.01 #Parasitic losses [kW]
    """
    We add the inlet and outlet tubes. The states of the tube are defined as:
    inlet tube:
    __________________
    inlet.1 inlet.2
    __________________
    outlet tube:
    __________________
    outlet.2 outlet.1
    __________________
    """
    #Add the inlet tube
    expander.add_tube(Tube(key1 = 'inlet.1',
                           key2 = 'inlet.2',
                           L = 0.03,
                           ID = 0.01,
                           mdot = mdot_guess,
                           State1 = inletState.copy(),
                           fixed = 1,
                           TubeFcn = expander.TubeCode)
                      )
    #Add the outlet tube
    expander.add_tube(Tube(key1 = 'outlet.1',
                           key2 = 'outlet.2',
                           L = 0.03,
                           ID = 0.01,
                           mdot = mdot_guess,
                           State2 = outletState.copy(),
                           fixed = 2,
                           TubeFcn = expander.TubeCode)
                      )
    # We define flow paths to connect the nodes of the tubes with the suction or discharge states
    expander.add_flow(FlowPath(key1='inlet.2',key2='A',MdotFcn=expander.Suction))
    expander.add_flow(FlowPath(key1='outlet.1',key2='A',MdotFcn=expander.Discharge))
    # We connect together the energy balance of the expander shell
    t1=default_timer()
    expander.connect_callbacks(step_callback = expander.step_callback,
                               endcycle_callback=expander.endcycle_callback, # Provided by PDSimCore
                               heat_transfer_callback=expander.heat_transfer_callback,
                               lumps_energy_balance_callback = expander.lump_energy_balance_callback)
    # We choose the solver and the integration options
    expander.EulerN = 5000  # number of Euler integration steps per revolution
    expander.solve(key_inlet='inlet.1',
                   key_outlet='outlet.2',
                   solver_method = 'Euler',
                   OneCycle = False,
                   eps_cycle = 1e-10,
                   UseNR = True,
                   plot_every_cycle = False,
                   eps_energy_balance = 1e-3
                   )
    print('time taken',default_timer()-t1,'s')
    return expander
# +
#Finally, we can run the piston expander model and have access to the variables for plotting
piston = Expander()
#We plot out the PV diagram
p = piston.p.T #[kPa]
V = piston.V.T*1e6 #[cm^3]
plt.plot(V,p, 'b-',lw = 2)
# Dashed horizontal lines mark the supply and discharge pressures.
plt.plot([0,100],[piston.inlet_state.p,piston.inlet_state.p],'k--',lw = 2)
plt.plot([0,100],[piston.outlet_state.p,piston.outlet_state.p],'k--',lw = 2)
plt.xlabel(r'V [cm$^3$]',fontsize = 10)
plt.ylabel(r'p [kPa]',fontsize = 10)
plt.xlim(0,30)
lb = plt.ylim(0,600)
| doc/notebooks/PistonExpander_IJR.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# language: python
# name: python3
# ---
# +
#some imports
import pandas as pd
#for visualization
import matplotlib.pyplot as plt
#to download file
import urllib.request
# %matplotlib inline
# -
# File available here:http://koza.if.uj.edu.pl/~krzemien/machine_learning2021/materials/datasets/data1.csv
url='http://koza.if.uj.edu.pl/~krzemien/machine_learning2021/materials/datasets/data1.csv'
# Download the CSV next to the notebook, then load it with pandas.
urllib.request.urlretrieve(url,'data1.csv')
input_file = 'data1.csv'
data = pd.read_csv(input_file)
# Quick-look inspection of the freshly loaded frame.
data.head()
data.dtypes
print(data.columns.values)
data.info()
data.describe()
# Row counts: for the dataSet==0 group and overall.
data[data['dataSet']==0].count()
data.count()
# Mean of y per dataSet value.
data[['y','dataSet']].groupby(['dataSet'],as_index=False).mean().sort_values(by='dataSet')
plt.figure()
data.plot()
data.plot(x='x', y='y', style='o')
| notebooks/intro/simple_load_data.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# language: python
# name: python3
# ---
# + deletable=true editable=true
import boto3
# + deletable=true editable=true
# High-level S3 resource using the default boto3 credential chain.
s3 = boto3.resource('s3')
# + deletable=true editable=true
# List every bucket visible to these credentials.
for bucket in s3.buckets.all():
    print(bucket.name)
# + deletable=true editable=true
# Upload the local NetCDF file to the 'aceena' bucket.  A context manager
# guarantees the handle is closed even if put_object raises, instead of
# relying on a separate close() call in a later cell.
# NOTE(review): the absolute user path is machine-specific — parameterise it
# before reusing this notebook.
with open('/Users/scollis/test.nc', 'rb') as data:
    s3.Bucket('aceena').put_object(Key='test.nc', Body=data)
# + deletable=true editable=true
| notebooks/scratch.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# language: python
# name: python3
# ---
# +
# %pylab inline
import numpy as np
from tqdm import tqdm
import sys
sys.path.append('../../core/')
from utils import *
# -
# Loss functions compared in this study ('MSE' is the baseline).
loss_types = ['MSE', 'RE', 'OW','AOW']
# Values of the tau hyper-parameter swept over.
taus = [0.5,1.0,1.5,2.0]
n_restarts = 20  # independent training restarts per configuration
n_samples = 20   # best-validation-loss runs used for the alpha curves
# Threshold grid omega, log-spaced over [1e-4, 1e-1].
Omega = np.logspace(-4,-1,31)
path = '../../saved_results/square/'
# +
# The expensive post-processing below is cached: flip the guard to True to
# recompute from the raw result files; otherwise load the saved Alpha dict.
if False:
    Results = {}

    def compute_output_dist(D):
        """Approximate the density at each entry of D from a histogram of D."""
        D_hist, D_bins = np.histogram(D, density = True, bins='auto')
        D_order = np.argsort(D)
        D_sorted = np.sort(D)
        # First index of each histogram bin within the sorted data.
        inds = [np.min(np.where(D_sorted >= e)) for e in D_bins[:-1]] + [len(D)]
        D_density = np.zeros_like(D)
        for j in range(len(D_bins)-1):
            # Assign each sample the density of the bin it falls into.
            D_density[D_order[inds[j]:inds[j+1]]] = D_hist[j]
        return D_density

    # Load every (loss, tau) result file; derive restart-averaged predictions,
    # per-restart errors and the output density.
    for loss_type in loss_types:
        for tau in taus:
            filename = path+'results_loss_'+loss_type+'_tau'+str(tau)+'.npy'
            Results[loss_type, tau] = np.load(filename, allow_pickle=True).item()
            Results[loss_type,tau]['NN_mean'] = np.mean(np.vstack([Results[loss_type, tau]['NN_'+str(j+1)]
                                                                   for j in range(n_restarts)]), axis=0)
            Results[loss_type,tau]['error'] = np.vstack([Results[loss_type, tau]['true'].flatten()-
                                                         Results[loss_type, tau]['NN_'+str(j+1)]
                                                         for j in range(n_restarts)])
            Results[loss_type,tau]['density'] = compute_output_dist(Results[loss_type, tau]['true'])

    Alpha = {}
    sample_rate = 10  # subsample the test section to speed up the AUC grid
    for tau in taus:
        m = len(Results[loss_types[0],tau]['true'])  # NOTE: m is unused below
        m_tv = int(0.6*len(Results[loss_types[0],tau]['true']))  # train+val split point
        for loss_type in loss_types:
            Alpha[loss_type,tau] = np.zeros((n_samples, len(Omega)))
            true = Results[loss_type, tau]['true'][m_tv::sample_rate].flatten()
            for i in tqdm(range(n_samples)):
                # i-th best restart by validation loss.
                ind = np.argsort(Results[loss_type, tau]['val_loss'])[i]
                pred = Results[loss_type, tau]['NN_'+str(ind+1)][m_tv::sample_rate].flatten()
                # get_AUC_grid comes from the star import of ../../core/utils.
                gAUC = get_AUC_grid(true, pred, Q=Omega)[1]
                Alpha[loss_type,tau][i,...] = gAUC
    np.save(path + 'Alpha.npy', Alpha)
else:
    Alpha = np.load(path + 'Alpha.npy', allow_pickle=True).item()
# +
figure(figsize = (15,4.5))
label_fs = 18
alpha = 0.1
pct = 10  # percentile band: shade the 10th-90th percentile range across samples
# Panel grid: one column per tau, one row per non-MSE loss; each panel compares
# that loss (red) against the MSE baseline (blue) on log-log axes.
for i in range(len(taus)):
    for j in range(len(loss_types)-1):
        ax=subplot2grid((3,len(taus)),(j,i))
        plot(Omega, np.mean(Alpha[loss_types[0],taus[i]],axis=0), 'b', label='MSE')
        fill_between(Omega,
                     np.percentile(Alpha[loss_types[0],taus[i]],pct,axis=0),
                     np.percentile(Alpha[loss_types[0],taus[i]],100-pct,axis=0),
                     color='b', alpha=0.1)
        plot(Omega, np.mean(Alpha[loss_types[j+1],taus[i]],axis=0), 'r', label=loss_types[j+1])
        fill_between(Omega,
                     np.percentile(Alpha[loss_types[j+1],taus[i]],pct,axis=0),
                     np.percentile(Alpha[loss_types[j+1],taus[i]],100-pct,axis=0),
                     color='r', alpha=0.1)
        xlim(Omega[0], Omega[-1])
        yscale('log')
        xscale('log')
        ylim([0.005,1])
        # Only the bottom row gets an x label; tick labels are hidden everywhere
        # (they are re-drawn manually with text() below).
        if j==2:
            xlabel(r'$\omega$', fontsize = label_fs)
            xticks(fontsize = 0)
        else:
            xticks(fontsize=0)
        if j==0:
            title(r'$\tau=$'+str(taus[i]), fontsize = label_fs)
            # legend(loc = 'upper left', fontsize = label_fs-6, ncol=2)
        if i==0:
            label = r''+loss_types[j+1]+'\n$\\alpha ( \omega )$'
            ylabel(label, fontsize = label_fs)
            yticks([0.01, 0.1, 1], fontsize = label_fs-2)
        else:
            yticks([], fontsize=0)
        if i == len(taus)-1:
            legend(loc = 'lower right', fontsize = label_fs-6, ncol=2, bbox_to_anchor=(1.022, -0.07))
        if j == 2:
            # Manually placed x tick labels for the bottom row.
            text(10**-4.2, 10**-2.9, r'$10^{-4}$', fontsize = label_fs-2)
            text(10**-3.2, 10**-2.9, r'$10^{-3}$', fontsize = label_fs-2)
            text(10**-2.2, 10**-2.9, r'$10^{-2}$', fontsize = label_fs-2)
            # text(10**-1.2, 10**-3.7, r'$10^{-1}$', fontsize = label_fs-2)
tight_layout()
# -
| code/notebooks/Square/Square alpha.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# language: python
# name: python3
# ---
# +
# %matplotlib widget
# # %matplotlib inline
from sympy import *
from IPython.display import Latex
import matplotlib.pyplot as plt
plt.style.use('dark_background')
init_printing(use_latex=True)  # pretty-print SymPy expressions as LaTeX
# Declare the symbols used throughout the demo.
_ = var('c k t x y z')
# -
# Trigonometric identity: sin^2(x) + cos^2(x) simplifies to 1.
lhs = sin(x)**2 + cos(x)**2
Eq(lhs, simplify(lhs))
# Solve a quadratic written in completed-square form.
p = (x + 1)**2 - 1
p, solve(p, x)
# Factor a quadratic.
lhs = x**2 - 7*x + 12
Eq(lhs, factor(lhs))
# Linear system x + y = 4, x - y = 2 solved symbolically ...
eqs = [
    Eq(x + y, 4),
    Eq(x - y, 2) ]
(eqs), solve(eqs)
# ... and the same system via an LU solve on the coefficient matrix.
A = Matrix([[1, 1], [1, -1]])
b = Matrix([4, 2])
X = MatrixSymbol('x', 2, 1)
X = A.LUsolve(b)
Eq(MatMul(A, X), b)
# Gaussian integral over the whole plane (evaluates to pi).
expr = Integral(exp(-x**2 - y**2), (x, -oo, oo), (y, -oo, oo))
Eq(expr, expr.doit())
# First-order linear ODE f'(x) = k*f(x).
f = Function('f')
eq = Eq(f(x).diff(x), k * f(x))
eq, dsolve(eq)
# 2-D and 3-D plotting.
plot(sin(x)**2, (x, -3, 1))
from sympy.plotting import plot3d
plot3d(sin(x*10)*cos(y*5) - x*y)
| py/jupyter/sympy_demo.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# language: python
# name: python3
# ---
import pandas as pd
import numpy as np
import matplotlib.pyplot as plt
df=pd.read_csv("Accidents0515.csv");df.head()
df.Accident_Severity.nunique()
df.columns
df.isna().sum()
# Drop identifier/location/date columns not useful as mining features.
# (Note: "Accident_Index" appears twice in this list; the duplicate is harmless.)
df.drop(["Accident_Index","LSOA_of_Accident_Location","Accident_Index",'Local_Authority_(Highway)',"Date"],axis=1,inplace=True)
df.dropna(inplace=True)
df.isna().sum()
df.dtypes
# Keep only the condition/context columns for the pattern mining below.
df= df[['Number_of_Vehicles', 'Day_of_Week',
        'Time', 'Road_Type', 'Speed_limit',
        'Junction_Detail', 'Junction_Control', 'Light_Conditions',
        'Weather_Conditions', 'Road_Surface_Conditions',
        'Special_Conditions_at_Site', 'Carriageway_Hazards',
        'Urban_or_Rural_Area']]
df.head()
df["Time"]
# Keep only the hour of the "HH:MM" time string as an integer.
df.Time = df.Time.str[:2].astype(int)
df.head()
# Quantize coordinates to 4 decimal places.  Longitude/Latitude were excluded
# by the column projection above, so guard the access: the original
# unconditional `df.Longitude` would raise an AttributeError here.
for _coord in ("Longitude", "Latitude"):
    if _coord in df.columns:
        df[_coord] = np.floor_divide(df[_coord], 0.0001) / 10000
df.head()
# Work on the first 10k rows as a raw numpy record array.
records=df.head(10000).values
records[0:5]
records.astype(bool)  # NOTE(review): result is discarded — this line has no effect
# +
from mlxtend.preprocessing import TransactionEncoder
from mlxtend.frequent_patterns import apriori
te = TransactionEncoder()
# -
# Cluster the records into two groups with mlxtend's k-means.
from mlxtend.cluster import Kmeans
km = Kmeans(k=2, max_iter=50, random_seed=1, print_progress=3)
km.fit(X=records,init_params=True)
# +
y_clust = km.predict(records)
# Scatter the first two feature columns, coloured by cluster assignment.
plt.scatter(records[y_clust == 0, 0],
            records[y_clust == 0, 1],
            s=50,
            c='black',
            marker='s',
            label='cluster 1')
plt.scatter(records[y_clust == 1,0],
            records[y_clust == 1,1],
            s=50,
            c='orange',
            marker='o',
            label='cluster 2')
# (Commented-out scatter calls for clusters 3-5 removed: k=2 above, so only
# two clusters exist.  Restore analogous calls if k is increased.)
# Overlay the fitted centroids.
plt.scatter(km.centroids_[:,0],
            km.centroids_[:,1],
            s=250,
            marker='*',
            c='red',
            label='centroids')
# plt.legend(loc='lower left', scatterpoints=1)
plt.grid()
plt.show()
# -
y_clust
records=records.astype(int)
records
# +
# te_ary = te.fit(records).transform(records)
# te_ary
# -
# NOTE(review): `te` was never fitted, so `columns_` only exists if the
# commented-out fit above is run first — confirm before executing.
te.columns_
print(te.columns_)
df = pd.DataFrame(records, columns=te.columns_)
print (df)
from mlxtend.frequent_patterns import apriori
# Mine frequent itemsets (support >= 0.5) and derive association rules.
frequent_itemsets = apriori(df.astype(bool), min_support=0.5, use_colnames=True)
frequent_itemsets['length'] = frequent_itemsets['itemsets'].apply(lambda x: len(x))
print(frequent_itemsets)
from mlxtend.frequent_patterns import association_rules
# NOTE(review): the confidence-based rules are immediately overwritten by the
# lift-based call on the next line.
rules=association_rules(frequent_itemsets, metric="confidence", min_threshold=0.7)
rules = association_rules(frequent_itemsets, metric="lift", min_threshold=1.01)
# print(list(rules))
rules1=rules[['antecedents','consequents','support','confidence','lift']]
print(rules1)
| backend/AccidentPreprocessing.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# language: python
# name: python3
# ---
# # Training Mask RCNN to instance-segment food images
#
# ### Downloading the Dataset with necessary annotations
# + colab={"base_uri": "https://localhost:8080/", "height": 53} colab_type="code" id="UzFtYDNzUHrS" outputId="7f8b1778-fd4d-46f6-82e4-6160d1454589"
# %cd Mask_RCNN/
# !git clone https://github.com/binayakpokhrel/datasets
# -
# ### Cloning the main application folder from github
# + colab={"base_uri": "https://localhost:8080/", "height": 161} colab_type="code" id="2vamcw_TWOdQ" outputId="91889fb2-deb8-4f25-cfb3-97f8f1fe1f48"
# %cd /content/Mask_RCNN/samples/
# # !rm -R food_mask
# !git clone https://github.com/binayakpokhrel/food_mask
% cd food_mask
# -
# ### Imports and refrences
# +
import os
import sys
ROOT_DIR = os.path.abspath("../../")  # Mask_RCNN repository root
sys.path.append(ROOT_DIR) # To find local version of the library
from samples.food_mask import food
import mrcnn.model as modellib
from mrcnn import utils
from mrcnn import visualize
MODEL_DIR = os.path.join(ROOT_DIR, "logs")  # where checkpoints are written
# -
config = food.FoodConfig()
FOOD_DIR = os.path.join(ROOT_DIR, "datasets/food")
# +
# Load and inspect the training split.
dataset = food.FoodDataset()
dataset.load_food(FOOD_DIR, "train")
# Must call before using the dataset
dataset.prepare()
print("Image Count: {}".format(len(dataset.image_ids)))
print("Class Count: {}".format(dataset.num_classes))
for i, info in enumerate(dataset.class_info):
    print("{:3}. {:50}".format(i, info['name']))
# -
# Build Mask R-CNN in training mode.
model = modellib.MaskRCNN(mode="training", config=config,
                          model_dir=MODEL_DIR)
# %cd /content/Mask_RCNN
# !mkdir logs
# %cd logs
# !wget https://github.com/matterport/Mask_RCNN/releases/download/v2.0/mask_rcnn_coco.h5
#uncomment below code to use the pre-trained weights
# # !wget --load-cookies /tmp/cookies.txt "https://docs.google.com/uc?export=download&confirm=$(wget --quiet --save-cookies /tmp/cookies.txt --keep-session-cookies --no-check-certificate 'https://docs.google.com/uc?export=download&id=18RC3W2jnejGjCCOl1Lo0-zTTBcApArNP' -O- | sed -rn 's/.*confirm=([0-9A-Za-z_]+).*/\1\n/p')&id=18RC3W2jnejGjCCOl1Lo0-zTTBcApArNP" -O model_88.h5 && rm -rf /tmp/cookies.txt
COCO_MODEL_PATH='/content/Mask_RCNN/logs/mask_rcnn_coco.h5'
# Which weights to start from: "imagenet", "coco", or "last" (resume training).
init_with = "coco"
if init_with == "imagenet":
    model.load_weights(model.get_imagenet_weights(), by_name=True)
elif init_with == "coco":
    # skipping the layers different due to the class numbers
    model.load_weights(COCO_MODEL_PATH, by_name=True,
                       exclude=["mrcnn_class_logits", "mrcnn_bbox_fc",
                                "mrcnn_bbox", "mrcnn_mask"])
elif init_with == "last":
    # Load the last model you trained and continue training
    model.load_weights("/content/Mask_RCNN/logs/food20190224T0936/mask_rcnn_food_0003.h5", by_name=True)
# +
# Training and validation splits.
dataset_train = food.FoodDataset()
dataset_train.load_food(FOOD_DIR, "train")
dataset_train.prepare()
dataset_val = food.FoodDataset()
dataset_val.load_food(FOOD_DIR,"val")
dataset_val.prepare()
# -
# Train only the randomly initialised head layers.
model.train(dataset_train, dataset_val,
            learning_rate=config.LEARNING_RATE,
            epochs=200,
            layers='heads')
# ## Detection using inferenceConfig
# First we need to load a model weight in inference mode
# +
class InferenceConfig(food.FoodConfig):
    # Run inference one image at a time on a single GPU.
    GPU_COUNT = 1
    IMAGES_PER_GPU = 1

inference_config = InferenceConfig()
#create the model in inference mode
model = modellib.MaskRCNN(mode="inference",
                          config=inference_config,
                          model_dir=MODEL_DIR)
model_path='/content/Mask_RCNN/logs/model_88.h5'
print(model_path)
print("Loading weights from ", model_path)
model.load_weights(model_path, by_name=True)
# -
# #### Detection using the trained model. Here we are choosing a random image from validation dataset and giving it as an input to the trained model.
# +
import random  # `random` is used below but was never imported in this notebook

# Pick a random validation image and visualise its ground-truth instances.
image_id = random.choice(dataset_val.image_ids)
image = dataset_val.load_image(image_id)
mask, class_ids = dataset_val.load_mask(image_id)
# Compute Bounding box
bbox = utils.extract_bboxes(mask)
# Display image and additional stats
print("image_id ", image_id, dataset_val.image_reference(image_id))
# `log` lives in mrcnn.model; the bare name `log` was undefined in this notebook.
modellib.log("image", image)
modellib.log("mask", mask)
modellib.log("class_ids", class_ids)
modellib.log("bbox", bbox)
# Display image and instances
visualize.display_instances(image, bbox, mask, class_ids, dataset_val.class_names)
original_image=image
| .ipynb_checkpoints/image_based_food_calorie_estimation-checkpoint.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# language: python
# name: python3
# ---
# # Compute fixedpoint of $f(x, y)= [x^2 + y^3; xy - 0.5]$
#
# **<NAME>, PhD**
#
# This demo is based on the original Matlab demo accompanying the <a href="https://mitpress.mit.edu/books/applied-computational-economics-and-finance">Computational Economics and Finance</a> 2001 textbook by <NAME> and <NAME>.
#
# Original (Matlab) CompEcon file: **demslv04.m**
#
# Running this file requires the Python version of CompEcon. This can be installed with pip by running
#
# # !pip install compecon --upgrade
#
# <i>Last updated: 2021-Oct-01</i>
# <hr>
#
# ## About
#
# Compute fixedpoint of
#
# \begin{equation}
# f(x, y)= \begin{bmatrix}x^2 + y^3 \\ xy - 0.5 \end{bmatrix}
# \end{equation}
#
# using Newton, Broyden, and function iteration methods.
#
# Initial values generated randomly. Some algorithms may fail to converge, depending on the initial value.
#
# True fixedpoint is $x = -0.09$, $y=-0.46$.
import numpy as np
import pandas as pd
from compecon import NLP, tic, toc
np.random.seed(12)
# ### Set up the problem
# +
def g(z):
    """Fixed-point map g(x, y) = (x**2 + y**3, x*y - 0.5)."""
    first, second = z
    top = first ** 2 + second ** 3
    bottom = first * second - 0.5
    return np.array([top, bottom])
problem_as_fixpoint = NLP(g, maxit=1500)
# -
# ### Equivalent Rootfinding Formulation
# +
def f(z):
    """Rootfinding form of the fixed-point problem.

    Returns the residual x - g(x) and its Jacobian, both as numpy arrays.
    """
    u, v = z
    residual = np.array([u - u ** 2 - v ** 3,
                         v - u * v + 0.5])
    jacobian = np.array([[1 - 2 * u, -3 * v ** 2],
                         [-v, 1 - u]])
    return residual, jacobian
problem_as_zero = NLP(f, maxit=1500)
# -
# ### Randomly generate starting point
# One random starting point shared by all three methods.
xinit = np.random.randn(2)
# ### Compute fixed-point using Newton method
t0 = tic()
z1 = problem_as_zero.newton(xinit)
t1 = 100 * toc(t0)  # elapsed time in hundredths of a second
n1 = problem_as_zero.fnorm
# ### Compute fixed-point using Broyden method
t0 = tic()
z2 = problem_as_zero.broyden(xinit)
t2 = 100 * toc(t0)
n2 = problem_as_zero.fnorm
# ### Compute fixed-point using function iteration
t0 = tic()
z3 = problem_as_fixpoint.fixpoint(xinit)
t3 = 100 * toc(t0)
n3 = np.linalg.norm(problem_as_fixpoint.fx - z3)
#
# + pycharm={"name": "#%%\n"}
# Summarise timing, residual norm and solutions of the three methods.
print('Hundredths of seconds required to compute fixed-point of ')
print('\n\t\tg(x1,x2)=[x1^2+x2^3; x1*x2-0.5]')
print('\nusing Newton, Broyden, and function iteration methods, starting at')
print('\n\t\tx1 = {:4.2f} x2 = {:4.2f}\\n\\n'.format(*xinit))
pd.DataFrame({
    'Time': [t1, t2, t3],
    'Norm of f': [n1, n2, n3],
    'x1': [z1[0], z2[0], z3[0]],
    'x2': [z1[1], z2[1], z3[1]]},
    index=['Newton', 'Broyden', 'Function']
)
| _build/jupyter_execute/notebooks/slv/04 Compute fixedpoint of f(x1,x2)= [x1 2+x2 3; x1 x2 - 0.5].ipynb |
# -*- coding: utf-8 -*-
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .sh
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Bash
# language: bash
# name: bash
# ---
# <a href="https://www.pythonista.io"> <img src="img/pythonista.png"></a>
# # Desarrollo Dirigido por Pruebas (*TDD*).
#
# Es una práctica cuyo objetivo es lograr un código limpio, seguro y que funcione.
#
# La idea es que los requerimientos sean traducidos en pruebas, de este modo, cuando las pruebas pasen, se garantizará que el software cumple con los requisitos que se han establecido.
# ## Conceptos relacionados.
# ### Deuda técnica.
#
# Es el costo que implica retrabajar un artefacto por causas de su pobre desarrollo en etapas tempranas del ciclo de vida.
# ### Desarrollo que prioriza las pruebas (*Test First Development*).
#
# Consiste en crear pruebas para cada funcionalidad que debe cumplir el producto de software.
# ### Refactorización.
#
# Es la acción de reescribir el código de un componente de software sin afectar su comportamiento.
# ## Ciclo de *TDD*.
# <img src="img/01/TDD_Lifecycle.png" width="800">
#
# Fuente: https://upload.wikimedia.org/wikipedia/commons/0/0b/TDD_Global_Lifecycle.png
# ### Aseguramiento de la calidad (*QA*).
#
# El concepto de *QA* implica una serie de métodos y actividades cuyo objetivo es garantizar que el software cumple con las especificaciones requeridas. Las actividades de *QA* también requieren de la realización de pruebas y análisis del código y los binarios durante el ciclo de vida de desarrollo de software.
# ## Tipos de pruebas.
# ### Pruebas funcionales.
#
# Fuente: https://hackr.io/blog/types-of-software-testing
#
# * **Pruebas unitarias.** Evalúan el correcto funcionamiento de cada componente de código.
# * **mock** es un objeto que simula las características de un objeto real y que es utiizado para realizar las pruebas unitarias.
# * **stub** es un objeto que permite simular una entidad con la que se relacionará un objeto durante la prueba.
# * **code coverage** es el procentaje de código que se ejecuta durante las pruebas unitarias.
# * **Pruebas de integración.** Evalúan el funcionamiento del sistema a partir de la inclusión de nuevos componentes.
# * **Pruebas de extremo a extremo**. Evalúa que cada componente de software de un proceso funciones correctamente de principio a fin.
# * **Pruebas de humo**. Son pruebas no exhaustivas que se le hacen a un producto de software previo a su liberación a producción para validar que las funcionalidades primordiales funcionan correctamente.
# * **Pruebas de sanidad**. Son pruebas superficiales de los puntos más relevantes de un producto de software.
# * **Pruebas de regresión**. Son pruebas exhaustivas que se hacen para validar que el sistema funciona correctamente después de cierto tiempo de estar en producción.
# * **Pruebas de aceptación o (*UAT*)**. Son pruebas que validan al funcionalidad del producto ante el cliente.
# * **Pruebas de caja blanca**. Son pruebas que inciden en la lógica interna de los componentes.
# * **Pruebas de caja negra**. Son pruebas que inciden en el comportamiento de los componentes.
# * **Pruebas de interfaz**. Son pruebas sobre los puntos de acceso de los componentes de un sistema.
# ### Pruebas no funcionales.
#
# * **Pruebas de rendimiento**. Evalúan el uso de recursos de un sistema.
# * **Pruebas de seguridad**. Evalúan las posibles vulnerabildiades de un sistema.
# * **Pruebas de carga**. Evalúan la capacidad de un sistema ante cierto tipo de cargas de uso.
# * **Pruebas de fallo**. Evalúan la resiliencia de un sistema.
# * **Pruebas de compatibilidad**. Evalúan si un sistema puede interactuar con otros.
# * **Pruebas de usabilidad**. Evalúan la experiencia general del uso de un sistema.
# * **Pruebas de escalabilidad**. Evalúan la capacidad de crecer en cuestión de recursos de un sistema.
# * **Pruebas de volumen**. Evalúan la capacidad de un sistema de gestionar grandes cantidades de datos.
# * **Pruebas de estrés**. Evalúan el comportamiento de un sistema cuando se encuentra al límite de sus capacidades.
# * **Pruebas de mantenibilidad**. Evalúan la facilidad de mantenimiento de un sistema.
# * **Pruebas de cumplimiento (compliance)**. Evalúan si un sistema cumple con la normatividad aplicable.
# * **Pruebas de eficiencia**. Evalúan si el sistema cumple con los objetivos funcionales, en tiempo razonable.
# * **Pruebas de confiabilidad** Evalúan la disponibilidad y madurez de los procesos dentro del sistema.
# * **Pruebas de resistencia.** Evalúa la operación del sistema con carga en tiempos largos.
# * **Pruebas de recuperación ante desastres**. Evalúa la capacidad de un sistema para restablecer sus funcionalidades en caso de una emergencia crítica, así como de mitigar riesgos.
# * **Pruebas de localización**. Evalúa la capacidad de un sistema de poder ofrecer servicios homogéneos a usuarios de diversas zonas geográficas.
# * **Pruebas de internacionalización**. Evalúa la capacidad de un sistema de poder ofrecer servicios homogéneos a usuarios de diversas zonas geográficas a nivel internacional.
#
# <p style="text-align: center"><a rel="license" href="http://creativecommons.org/licenses/by/4.0/"><img alt="Licencia Creative Commons" style="border-width:0" src="https://i.creativecommons.org/l/by/4.0/80x15.png" /></a><br />Esta obra está bajo una <a rel="license" href="http://creativecommons.org/licenses/by/4.0/">Licencia Creative Commons Atribución 4.0 Internacional</a>.</p>
# <p style="text-align: center">© <NAME>. 2022.</p>
| 01_desarrollo_dirigido_por_pruebas.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# language: python
# name: python3
# ---
# + deletable=true editable=true
import pandas as pd
import numpy as np
from sqlalchemy import create_engine
# + deletable=true editable=true
# Oldest 1000 ETH-USD order-book snapshots from the local gdax Postgres DB.
engine = create_engine("postgresql://localhost:5432/gdax")
df = pd.read_sql_query("select * from orderbook where pair_string LIKE 'ETH-USD' order by created_at asc limit 1000", con=engine)
# + deletable=true editable=true
#[n['asks'] for n in df['data'][:10]]
# Each `data` cell is a dict with 'asks'/'bids' lists of
# [price, size, num-orders] entries stored as strings — see the example
# format cell below. Everything is coerced to float here.
asks = df['data'].apply(lambda n: n['asks'])
asks = asks.apply(lambda book: [[float(order_detail) for order_detail in order] for order in book])
bids = df['data'].apply(lambda n: n['bids'])
bids = bids.apply(lambda book: [[float(order_detail) for order_detail in order] for order in book])
# Mid price per snapshot: midpoint of the best bid and the best ask
# (assumes bids/asks are sorted best-first — TODO confirm against the feed).
marketPrices = df['data'].apply(lambda n: (float(n['bids'][0][0]) + float(n['asks'][0][0]))/2)
# + [markdown] deletable=true editable=true
# Example format
#
# df.iloc[0][2]['asks'][0]
#
# [ price, size, num-orders ],
#
# ['894.89', '29.7307291', 22]
# + deletable=true editable=true
len(asks)
def getSplitVWAP(book, n_buckets=5):
    """Split an order book into `n_buckets` equal slices (top of book first)
    and return the VWAP summary of each slice.

    `n_buckets` generalizes the previously hard-coded 5; note that np.split
    requires len(book) to be divisible by n_buckets.
    """
    return [getBookVWAP(bucket) for bucket in np.split(np.array(book), n_buckets)]
def getBookVWAP(book):
    """Return [volume-weighted average price, total size] for a list of
    [price, size, ...] orders."""
    prices = np.array([order[0] for order in book])
    sizes = np.array([order[1] for order in book])
    return [(prices * sizes).sum() / sizes.sum(), sizes.sum()]
# + deletable=true editable=true
# Bucketed VWAPs for every snapshot (bound-method call instead of the
# unbound `pd.Series.apply(series, fn)` form — same result).
asks_VWAP = asks.apply(getSplitVWAP)
bids_VWAP = bids.apply(getSplitVWAP)
# + deletable=true editable=true
asks_VWAP
# + deletable=true editable=true
bids_VWAP
# + deletable=true editable=true
marketPrices
# + deletable=true editable=true
# Element-wise list concatenation: 5 bid buckets followed by 5 ask buckets.
data = bids_VWAP + asks_VWAP
# + deletable=true editable=true
data[0]
from keras.models import Sequential
from keras.layers import Dense, Activation, LSTM
# + deletable=true editable=true
# Single-layer LSTM regressor: 5 time steps x 2 features -> next market price.
model = Sequential()
model.add(LSTM(32, input_shape=(5, 2)))
model.add(Dense(1))
# Predicting a continuous price is regression: the original compiled with
# sparse_categorical_crossentropy + accuracy, which are classification
# objectives and meaningless for a single linear output unit.
model.compile(loss='mean_squared_error',
              optimizer='Adam',
              metrics=['mae'])
# -
# NOTE(review): each `data` row holds 10 [vwap, size] pairs (5 bid + 5 ask),
# so np.array(data) should be (1000, 10, 2); reshaping to (200, 5, 2) looks
# inconsistent with that — verify against the notebook's actual run.
time_series = np.array(data).reshape(200,5,2)
# One target price per 5-snapshot window.
price_array = np.array(marketPrices[0:-1:5]).reshape(200,1)
price_array[0]
model.fit(time_series, price_array, batch_size=1024, epochs=1, validation_split=0.2)
time_series[0]
| Orderbook to Price.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# language: python
# name: python3
# ---
# # Assignment 2: “Text Mining with Simpsons ”
# Due: Monday 2pm, December 14, 2020, via Moodle
# <NAME>
#
# <NAME>
#
# <NAME>
# ### Submission guidelines
#
# - Solutions need to be uploaded as a single Jupyter notebook. You will find many provided codes in the notebook, your task is to fill in the missing cells.
# - For the written solution use LaTeX in markdown inside the same notebook. Do *not* hand in a seperate file for it.
# - Download the .zip file containing the dataset but do *not* upload it with your solution.
# - It is sufficient if one person per group uploads the solution to Moodle, but make sure that the names of
# all team members are given on the PDF and in the source code.
# --------
# The Simpson show is the world’s longest-running animated sitcom. The show revolves around the lives of the Simpson family, which consists of Homer, Marge, Bart, Lisa, and the little Maggie.
# For this notebook, you should download the dataset that contains the characters, locations, episode details, and script lines for approximately 600 Simpsons episodes, dating back to 1989.
# In the following exercises, we will explore the data and use visualizations to gain some insight into the show. The Zip file alongside this notebook should contain the following files:
#
# ` simpsons_characters.csv, simpsons_locations.csv, simpsons_script_lines.csv, simpsons_episodes.csv`
#
# Take some time and familiarize yourself with the data. `simpsons_characters.csv` contains the character-names and their gender.
# `simpsons_locations.csv` contains information about locations in the shows,
# and `simpsons_episodes.csv` has information about each episode including their title, rating, and views.
# Finally, `simpsons_script_lines.csv` lists all the lines in the show, who said the line and where it was said. Be aware that this data set is not cleaned and, hence, includes faulty data as well as inconsistent data types.
# Import the needed packages:
import pandas as pd
import numpy as np
import matplotlib
from matplotlib import pyplot as plt
from os import path
# %matplotlib inline
# Let's read the data and get started!
# ### Task 1: Important Characters (3 + 2 + 2 + 4 = 11 point)
# ### Sub-Task 1:
# Load the four datasets in the respective variables using pandas (pd),
# make sure to set the type of the `character_id` to integer and drop the line that has the non-numeric character ID `142024`.
# +
# The three clean tables can be typed directly at read time.
character_dtypes = {
    'id': 'int64',
    'name': 'object',
    'normalized_name': 'object',
    'gender': 'category',
}
df_characters = pd.read_csv('data/simpsons_characters.csv', dtype=character_dtypes)

episode_dtypes = {
    'id': 'int64',
    'image_url': 'object',
    'imdb_rating': 'float',
    'imdb_votes': 'float',
    'number_in_season': 'int64',
    'number_in_series': 'int64',
    'original_air_date': 'object',
    'original_air_year': 'int64',
    'production_code': 'object',
    'season': 'int64',
    'title': 'object',
    'us_viewers_in_millions': 'float',
    'video_url': 'object',
    'views': 'float',
}
df_episodes = pd.read_csv('data/simpsons_episodes.csv', dtype=episode_dtypes)

location_dtypes = {
    'id': 'int64',
    'name': 'object',
    'normalized_name': 'object',
}
df_locations = pd.read_csv('data/simpsons_locations.csv', dtype=location_dtypes)

# The script lines are messy: read untyped, drop the row holding the
# non-numeric character id (position 142024), then cast column by column.
df_script = pd.read_csv('data/simpsons_script_lines.csv')
df_script = df_script.drop(df_script.index[142024])
df_script = df_script.astype({
    'id': 'int64',
    'episode_id': 'int64',
    'number': 'int64',
    'raw_text': 'object',
    'timestamp_in_ms': 'int64',
    'speaking_line': 'bool',
    'character_id': 'float',
    'location_id': 'Int64',
    'raw_character_text': 'object',
    'raw_location_text': 'object',
    'spoken_words': 'object',
    'normalized_text': 'object',
    # 'word_count': 'Int64'  # cleaning is hard; handled later by conv()
})
# object -> nullable Int64 must go via float first,
# see https://github.com/pandas-dev/pandas/issues/25472
df_script.character_id = df_script.character_id.astype('Int64')
df_script.dropna(subset=["character_id"], inplace=True)
# -
# Peek at the top rows of each table.
df_characters.head()
df_locations.head()
df_script.head()
df_episodes.head()
# Let's take a look at the important characters of the show. To start we detect the main characters by the number of spoken words/lines throughout the show.
# Extract top 10 characters based on the number of spoken words (word count) as well as the 10 most prominent characters regarding the number of lines they head during the show.
# Compare both results by drawing two bar plots: For the first plot, the x-axis should show the name of the top 10 characters with regard to the word count and the y-axis should display the number of spoken words.
# In the second plot, you should do the same but this time considering the number of lines for each character (,i.e., the characters displayed on the x-axis can be different for the first and second bar plot). You might notice that there is a column with `word_count` in the `scripts` data frame, but cleaning it up might be tricky. To find the sum of all values, first remove the `nan` and any other string characters in that column; you can use the `conv` function provided.
# +
def conv(row):
    """Coerce a raw word-count cell to int; return np.nan when unparseable.

    Catches only ValueError/TypeError (bad string, None, NaN) instead of the
    original bare `except`, which would also swallow KeyboardInterrupt and
    hide genuine bugs.
    """
    try:
        return int(row)
    except (ValueError, TypeError):
        return np.nan
# Clean the word counts, then rank characters by total words spoken.
df_script["word_count"] = df_script["word_count"].apply(conv)
df_script.dropna(subset=["word_count"], inplace=True)
top_characters_words = df_script.groupby("character_id")["word_count"].sum()
merge_data_words_sub = (
    pd.merge(top_characters_words, df_characters, how="inner",
             left_on="character_id", right_on="id")
    .sort_values(by=["word_count"], ascending=False)
)
merge_data_words_sub.head(10)
# -
# Rank characters by the number of script lines (one row per line).
top_characters_sentences = df_script.groupby("character_id").size()
merge_data_sentences_sub = (
    pd.merge(top_characters_sentences.rename("sentence_count"), df_characters,
             how="inner", left_on="character_id", right_on="id")
    .sort_values(by=["sentence_count"], ascending=False)
)
merge_data_sentences_sub.head(10)
# +
#### Word-count plot: top-10 characters by total words spoken ####
df_first_n_items_by_words = merge_data_words_sub[["word_count", "name", "id"]].head(10)
df_first_n_items_by_words.plot.bar(x="name", y="word_count")
# +
#### Line-count plot: top-10 characters by number of script lines ####
df_first_n_items_by_sentences = merge_data_sentences_sub[["sentence_count", "name", "id"]].head(10)
df_first_n_items_by_sentences.plot.bar(x="name", y="sentence_count")
# -
# If you have done the exercise correctly you should see that the top-4 characters of the show (Homer, Marge, Bart, and <NAME>) have the most dialogs (lines) in the show but their distribution differs when we look at the word count.
# Seems like some characters speak in long sentences.
# ### Sub-Task 2: Common Words and Word Cloud
# Let's examine the dialogues and look at the top 20 common words, draw a bar plot in the same way as above to show the frequencies.
# To generate a meaningful output use the preprocessing pipelines explained in the lecture:
# - use regex to remove non-alphabetic characters and also remove `'` since the tokenizer will treat it as a new token (anything that is not a number or alphabet including punctuations)
# - lowercase all words
# - remove stopwords based on spaCy's stopword list
# - tokenize the `spoken_words` (remove single characters produced by the tokenizer)
# - perform stemming
#
# In this exercise, we require you to use SpaCy for all language processing steps except for stemming.
# This is due to the lack of a stemmer in SpaCy's library that only provides a tool for lemmatization.
# Hence, for Stemming we are going to use one of the stemmers provided by NLTK.
#
import spacy
import re
from collections import Counter
from matplotlib import pyplot as plt
from nltk.stem.snowball import SnowballStemmer
# +
nlp = spacy.load("en_core_web_sm")  ### load en_core_web_sm
nlp.disable_pipes('tagger', 'parser', 'ner')  # only the tokenizer is needed here
stopwords = spacy.lang.en.stop_words.STOP_WORDS  # spaCy's English stop-word list
stemmer = SnowballStemmer("english")  # NLTK stemmer (spaCy only lemmatizes)
# Lowercase and strip everything that is not a letter or a space (this also
# removes apostrophes, as the task requires). regex=True is passed
# explicitly: since pandas 2.0 the default for str.replace is literal
# matching, which would silently break this cleaning step.
df_script["spoken_words"] = df_script["spoken_words"].str.lower().str.replace('[^a-zA-Z ]', '', regex=True)
df_script.dropna(subset=["spoken_words"], inplace=True)
def tokenize(input_string):
    """Tokenize one line with spaCy and return the surface token texts."""
    return [token.text for token in nlp(input_string)]

df_script['all_tokens'] = df_script['spoken_words'].apply(tokenize)
def remove_stopwords(input_list_of_tokens):
    """Drop every token that appears in spaCy's stop-word list."""
    return [token for token in input_list_of_tokens if token not in stopwords]

df_script['tokens_without_stopwords'] = df_script['all_tokens'].apply(remove_stopwords)
def remove_single_characters(input_list_of_tokens):
    """Drop one-character tokens (tokenizer debris); keep everything else."""
    return [token for token in input_list_of_tokens if len(token) != 1]
df_script['cleaned_tokens'] = df_script['tokens_without_stopwords'].apply(remove_single_characters)

def perform_stemming(input_list_of_tokens):
    """Snowball-stem every token in the list."""
    return [stemmer.stem(token) for token in input_list_of_tokens]

df_script['stems'] = df_script['cleaned_tokens'].apply(perform_stemming)
df_script.head()
# -
#### Count the stems across all scripts and report the 20 most common ####
counter = Counter(df_script['stems'].explode())
twenty_most_common = counter.most_common(20)
print(twenty_most_common)
#### Bar plot: words on the x axis, frequencies on the y axis ####
words, counts = zip(*twenty_most_common)
plt.bar(words, counts)
plt.xticks(range(len(words)), words, rotation='vertical')
plt.show()
# Generate a word cloud to visualize the frequencies:
# A word cloud is a graphical representation of frequently used words in the normalized text.
# The height of each word in this picture is an indication of the frequency of occurrence of the word in the entire text.
# You will need to install the package `wordcloud`.
# To achieve a homogeneous output, set the `max_words` to 100 and `max_font_size` to 60.
# Make sure the same word does not appear more than once.
from wordcloud import WordCloud
# +
# Build the cloud from the actual frequencies: joining the 100 words into a
# plain string gives every word an equal count, so the rendered sizes would
# not reflect frequency as the task requires. generate_from_frequencies also
# guarantees each word appears exactly once. Non-string keys (NaN from lines
# whose cleaned token list was empty) are filtered out.
hundred_most_common = {word: count for word, count in counter.most_common(100)
                       if isinstance(word, str)}
wordcloud = WordCloud(max_words=100, max_font_size=60).generate_from_frequencies(hundred_most_common)
fig = plt.figure(1, figsize=(12, 12))
plt.axis("off")
plt.imshow(wordcloud, interpolation="bilinear")
# -
# ### Sub-Task 3: Common Named Entities
# Apply named entity recognition using SpaCy and generate a word cloud of the top 50
# named entities that have the type `PERSON`, using the same approach
# for visualization as above. Take into account that you cannot simply use the
# output of the previous step and need to undo the stemming. Since the named entity
# recognition takes quite some time, use only the first `10000` scripts.
# +
# Reload the full pipeline: NER needs the components disabled earlier.
nlp = spacy.load("en_core_web_sm")

# Collect every PERSON entity from the first 10,000 script lines.
named_entities = [entity.text
                  for script in df_script.spoken_words.tolist()[:10000]
                  for entity in nlp(script).ents
                  if entity.label_ == "PERSON"]

counter = Counter(named_entities)
fifty_most_common_person_entities = counter.most_common(50)
fifty_most_common_person_entities_as_string = " ".join(
    word for word, _count in fifty_most_common_person_entities)
print(fifty_most_common_person_entities_as_string)
wordcloud = WordCloud().generate(fifty_most_common_person_entities_as_string)
fig = plt.figure(1, figsize=(12, 12))
plt.axis("off")
plt.imshow(wordcloud, interpolation="bilinear")
# -
# -
# How well does the named entity recognition work? Do you notice some strange "people" in there?
# The named entity recognition works surprisingly well, even if some outliners are detectable like "haw", "moon", "hey" or "j".
# ### Sub Task 4 : Sophistication of Speech
#
# Take into account the top 10 characters from the first subtask (based on line count), it is interesting to see whether there are big differences in the sophistication of speech or vocabulary size between the characters.
# To measure this, we turn to the Flesch Reading Ease. Although designed for written text we use it here on spoken words.
# This measures indicates how difficult a passage in English is to understand and is based on a ranking scale of 0-100, where higher scores indicate material that is easier to read; lower numbers mark passages that are more difficult to read.
# For more information look at the [wiki page](https://en.wikipedia.org/wiki/Flesch%E2%80%93Kincaid_readability_tests).
#
# $206.835 - 1.015 \times \frac{total words}{total sentences} - 84.6 \times \frac{total syllables}{total words}$
#
# For syllable counts, download `cmudict` from the NLTK library and count the vowel sounds from the Carnegie Mellon Pronouncing Dictionary (cmudict).
# Then count vowel sounds in each word, if a word is not in the dictionary do not count it in.
# CMUdict marks each vowel with a stress tag, 0 (unstressed), 1 (stressed), or 2 (secondary stress).
# +
def keep_rows_by_character_id(character_ids, df=None):
    """Filter script lines down to the given character ids.

    `df` defaults to the module-level `df_script` for backward compatibility;
    passing it explicitly removes the hidden global dependency.
    """
    if df is None:
        df = df_script
    return df.loc[df['character_id'].isin(character_ids)]
# Restrict the scripts to the ten characters with the most lines.
df_script_top_ten = keep_rows_by_character_id(df_first_n_items_by_sentences["id"].head(10))

def keep_needed_columns(df, list_of_needed_columns):
    """Project a DataFrame onto the given columns, in the given order."""
    return df[list_of_needed_columns]

df_script_top_ten_final = keep_needed_columns(
    df_script_top_ten, ["character_id", "cleaned_tokens", "word_count", "raw_text"])
print(df_script.shape)
print(df_script_top_ten_final.shape)
df_script_top_ten_final.head()
# +
import nltk
nltk.download('cmudict')
from nltk.corpus import cmudict
# Tokenizer-only pipeline plus a rule-based sentence splitter.
nlp = spacy.load("en_core_web_sm", disable=["tagger", "parser", "ner"]) # load the same spacy model again, with tagger, parser and ner disabled.
# NOTE(review): create_pipe/add_pipe(obj) is the spaCy 2.x API; spaCy 3.x
# uses nlp.add_pipe('sentencizer') — confirm the pinned spaCy version.
nlp.add_pipe(nlp.create_pipe('sentencizer'))
# word -> list of pronunciations, each a list of stress-tagged ARPAbet phonemes.
phoneme_dict = cmudict.dict()
def syllable_counter(word):
    """Syllables in `word` = vowel phonemes (those ending in a stress digit)
    of its first CMUdict pronunciation; 0 if the word is not in the dict."""
    pronunciations = phoneme_dict.get(word.lower())
    if not pronunciations:
        return 0
    first = pronunciations[0]
    return sum(1 for phoneme in first if phoneme[-1].isdigit())
def total_sylls(tokens):
    """Total syllable count over one script line's tokens."""
    return sum(syllable_counter(token) for token in tokens)
def sentence_count(raw_text):
    """Number of sentences the spaCy sentencizer finds in one raw line."""
    return sum(1 for _ in nlp(raw_text).sents)
# Per-line syllable and sentence counts for the top-10 characters.
# NOTE(review): df_script_top_ten is a filtered view — these assignments may
# trigger SettingWithCopyWarning; verify it was copied upstream if needed.
df_script_top_ten['syllable_count'] = df_script_top_ten["cleaned_tokens"].apply(total_sylls)
df_script_top_ten['sentence_count'] = df_script_top_ten["raw_text"].apply(sentence_count)
#df_script_top_ten.head()
# Aggregate word/syllable/sentence totals per character.
top_characters_counts = df_script_top_ten.groupby('character_id').agg('sum')
top_characters_counts = top_characters_counts[["word_count", "syllable_count", "sentence_count"]]
top_characters_counts.head(10)
# +
def calculate_flesch_readability(df_row):
    """Flesch Reading Ease for one row: higher means easier to read."""
    words_per_sentence = df_row.word_count / df_row.sentence_count
    syllables_per_word = df_row.syllable_count / df_row.word_count
    return 206.835 - 1.015 * words_per_sentence - 84.6 * syllables_per_word
# Score each of the top-10 characters and plot, easiest-to-read first.
top_characters_counts['Flesch_readability'] = top_characters_counts.apply(calculate_flesch_readability, axis=1)
#top_characters_counts.head()
# `character_id` is the groupby index level here; pandas merge accepts
# index level names in left_on (pandas 0.23+).
merge_characters_flesch = pd.merge(top_characters_counts, df_characters, how="inner", left_on="character_id", right_on="id") ### merge with df_characters to find the character names
merge_characters_flesch_sorted = merge_characters_flesch.sort_values(by=['Flesch_readability'], ascending=False)
merge_characters_flesch_sorted.head()
### draw plot
df_flesch_readability = merge_characters_flesch_sorted[["Flesch_readability", "name", "id"]].head(10)
#df_first_n_items.head()
df_flesch_readability.plot.bar(x='name', y='Flesch_readability')
# -
# ## Task 2: TF-IDF ( 3 + 4 = 7 points)
#
# ## Sub Task 1:
# Despite all the preprocessing, so far the top words we looked at are not so informative.
# We wish to find out the important words that are spoken by the top characters.
# For example, the youngest of the family should have "mom" as an important word.
# We looked at the Term Frequency - Inverse Document Frequency (TF-IDF) weighting in the lecture, so let's use that here to detect the most important words per character.
# In this case, each script line is a document for a certain character who spoke the words.
# Use `CountVectorizer` and `TfidfTransformers` from scikit-learn, and use the scikit-learn stop word list to remove stop words and remove the words with a frequency less than 5 from the analysis.
# Then plot the TF-IDF values for the top 10 words for Homer, Marge, Bart, and <NAME> as a bar plot.
from sklearn.feature_extraction.text import CountVectorizer
from sklearn.feature_extraction.text import TfidfTransformer
#del df_script['id']
#df_script = df_script.rename(columns = {'character_id':'id'}) # to merge easily
df_script.head()
# Attach character name/gender to every script line via the character id.
merged_data = pd.merge(df_script, df_characters, how="inner", left_on="character_id", right_on="id") #merge the dataframes to the characters to get the names and drop nan values
merged_data.dropna(subset=["stems"], inplace=True)
#merged_data[merged_data['stems'] == np.nan].head()
merged_data.head()
# +
def calc_tfidf(vectorizer, count_vectorized):
    """TF-IDF-weight a fitted CountVectorizer's count matrix.

    Returns a dense DataFrame whose columns are the vectorizer vocabulary.
    NOTE(review): get_feature_names was removed in scikit-learn 1.2 in
    favour of get_feature_names_out — confirm the pinned sklearn version.
    """
    transformer = TfidfTransformer(smooth_idf=True, use_idf=True)
    weighted = transformer.fit_transform(count_vectorized)
    return pd.DataFrame(weighted.toarray(), columns=vectorizer.get_feature_names())
def sum_tfidf_over_all_docs(df_tf_idf, number_top_n_words, visualize=True):
    """Sum TF-IDF weights over all documents and return the top-n words.

    NOTE(review): the print below reads the module-level `character_id`
    that happens to be set by the caller's loop — fragile; it raises
    NameError if called outside that loop.
    """
    # Column-wise sum: one aggregate weight per vocabulary word.
    df_tf_idf_sum_over_all_docs = df_tf_idf.sum()
    df_tf_idf_sum_over_all_docs_top_n = df_tf_idf_sum_over_all_docs.sort_values(ascending = False).iloc[:number_top_n_words]
    if visualize == True:
        print("\nTop ten words for {}:".format(character_id[1]))
        print(df_tf_idf_sum_over_all_docs_top_n)
        df_tf_idf_sum_over_all_docs_top_n.plot.bar()
        plt.show()
    return df_tf_idf_sum_over_all_docs_top_n
def calc_tfidf_scores(list_of_docs, number_top_n_words, count_vectorizer_max_features=None):
    """Vectorize the documents, TF-IDF weight them, and report the top words.

    English stop words and words appearing in fewer than 5 documents are
    dropped. The original also materialized the raw counts into a dense
    DataFrame (`df_tf`) that was never used — removed to save time/memory.
    """
    vectorizer = CountVectorizer(stop_words='english', min_df=5, max_features=count_vectorizer_max_features)
    count_vectorized = vectorizer.fit_transform(list_of_docs)
    df_tf_idf = calc_tfidf(vectorizer, count_vectorized)
    sum_tfidf_over_all_docs(df_tf_idf, number_top_n_words)
def get_character_items(input_df, character_id):
    """Return the `stems` Series for all lines spoken by one character."""
    return input_df.loc[input_df['character_id'] == character_id, 'stems']
# For each main character, build a corpus of space-joined stems and report
# the ten highest summed TF-IDF words. (`character_id` is also read by
# sum_tfidf_over_all_docs for the plot title.)
for character_id in [[2, '<NAME>'], [1, '<NAME>'], [8, '<NAME>'], [9, '<NAME>']]:
    corpus_list = get_character_items(merged_data, character_id[0])
    corpus = corpus_list.apply(" ".join)
    calc_tfidf_scores(corpus, 10)
# -
# If you did the exercise correctly, "mom" and "dad" should be among the top words for the childern and "homer" should be the top word for Marge, since this is what she calls her husband.
# ## Sub Task 2:
# If we consider the spoken words from each character a document of its own, we can generate 4 documents (by concatenation of all dialogs) for Homer, Marge, Bart, and <NAME>, and create document vectors from those.
# Let's take a look at how the values in these vectors are distributed. Use `sns.heatmap` from the seaborn package to show the vectors of the 4 documents for the top-20 words (set the `max_features` parameter of the `CountVectorizer` to 20).
# Compare it with the heatmap of only term frequencies of the documents. Use `fmt='g'` to print out the correct format.
import seaborn as sns
# +
# prepare data -> 4 docs
# Concatenate all of each character's stemmed lines into one document,
# then count-vectorize over the 20 most frequent terms.
list_of_docs = []
for character_id in [[2, '<NAME>'], [1, '<NAME>'], [8, '<NAME>'], [9, '<NAME>']]:
    ##### Your code ######
    corpus_list = get_character_items(merged_data, character_id[0])
    corpus = corpus_list.apply(lambda cell: " ".join(cell))
    list_of_docs.append(' '.join(corpus))
character_list = ['<NAME>', '<NAME>', '<NAME>', '<NAME>']
vectorizer = CountVectorizer(stop_words='english', max_features=20)
count_vectorized = vectorizer.fit_transform(list_of_docs)
df_tf = pd.DataFrame(count_vectorized.toarray(), columns=vectorizer.get_feature_names())
#df_tf.index = character_list
df_tf_idf = calc_tfidf(vectorizer, count_vectorized)
#df_tf_idf.index = character_list
df_tf_idf.head()
# -
# TF-IDF heatmap of the four documents over the top-20 vocabulary.
fig, ax = plt.subplots(figsize=(17, 6))
sns.heatmap(df_tf_idf, annot=True, cbar=False, ax=ax,
            xticklabels=df_tf_idf.columns, fmt='g')
# Raw term-frequency heatmap for comparison.
fig, ax = plt.subplots(figsize=(17, 6))
sns.heatmap(df_tf, annot=True, cbar=False, ax=ax,
            xticklabels=df_tf.columns, fmt='g')
# Based on the TF_IDF vectors of the top-20 words, which characters are similar?
from sklearn.metrics.pairwise import cosine_similarity
# Pairwise cosine similarity between the four characters' TF-IDF vectors.
names = ['<NAME>', '<NAME>', '<NAME>', '<NAME>']
for i, name in enumerate(names):
    for j in range(i + 1, len(names)):
        sim = cosine_similarity(df_tf_idf.iloc[[i]], df_tf_idf.iloc[[j]])
        print('{} to {}: {}'.format(name, names[j], sim))
# # Task 3: Topics ( 4 + 3 = 7 points)
# It is interesting to inspect the topics often discussed in the show. We look at SVD (LSA) and NMF for topic detection.
# ## Sub Task1:
# Use `NMF` from scikit-learn to detect the topics in the scripts, and use the text cleaning steps from the first task:
# - use regex to remove non-alphabetic characters and also remove `'` since the tokenizer will treat it as a new token (anything that is not a number or alphabet including punctuations)
# - lowercase all words
# - remove stopwords based on spaCy's stopword list for English
# - tokenize the spoken_words (remove single characters produced by the tokenizer)
# - perform stemming
#
# set the `max_features` of `CountVectorizer` to 10,000 and `random_state` of the `NMF` to 42, `n_components=10`.
from sklearn.decomposition import NMF
# spaCy's English stop-word list (re-bound here so the cell is self-contained).
stopwords = spacy.lang.en.stop_words.STOP_WORDS # load spacy's stop word list
#df_script.head()
def show_topics(components, num_top_words, vocab):
    """For each topic component, return its top words as one joined string.

    Words are ranked by descending component weight.
    """
    summaries = []
    for component in components:
        top_idx = np.argsort(component)[::-1][:num_top_words]
        summaries.append(' '.join(vocab[i] for i in top_idx))
    return summaries
# +
# => alread done above reuse df_script
#nlp = spacy.load(#### Your Code ####
#porter = #### initlize the stemmer ####
#df_script.dropna(inplace=True)
#df_script.spoken_words =#### Your Code ####
# -
# Non-negative matrix factorization with 10 latent topics.
nmf = NMF(random_state=42, n_components=10)
# Use the `show_topics()` method to show the top-20 words for the top 2 topics.
# +
#### Your Code ####
# Count-vectorize the stemmed scripts (10k-word vocabulary) and fit NMF.
vectorizer = CountVectorizer(stop_words='english', max_features=10000)
corpus_list = df_script["stems"]
corpus = corpus_list.apply(lambda cell: " ".join(cell))
tf = vectorizer.fit_transform(list(corpus))
nmf.fit(tf)
n_top_words = 20
print("\nTopics in NMF model:")
# NOTE(review): get_feature_names was removed in scikit-learn 1.2
# (get_feature_names_out) — confirm the pinned sklearn version.
tfidf_feature_names = vectorizer.get_feature_names()
show_topics(nmf.components_, n_top_words, tfidf_feature_names)
# -
# ## Sub Task 2:
# The SVD algorithm factorizes a matrix into one matrix with orthogonal columns and one with orthogonal rows (along with a diagonal matrix, which contains the relative importance of each factor).
# Latent Semantic Analysis (LSA) uses SVD. Here we use the 'TruncatedSVD' method from 'sklearn' to look at the topics.
# This is faster than SVD since we focus only on the largest singular values.
# Use the cleaned documents from the subtask before, with `random_state=42`, `n_components=10`, and `n_iter=7`.
from sklearn.decomposition import TruncatedSVD
# LSA via truncated SVD: keep the 10 largest singular values/components.
svd = TruncatedSVD(n_components=10, n_iter=7, random_state=42)
svd.fit(tf)
# Use the `show_topics` method to show the top-20 words for the top-2 topics.
n_top_words = 20
topics = show_topics(svd.components_, n_top_words, tfidf_feature_names)
print(topics[:2])
# As you can see there is not much to make out of these topics. But topic models also give us document representations, so let's compare Homer, Marge, Bart, and <NAME> based on their SVD vectors.
# Use the same approach as the TF_IDF Task, but apply the pre-processing steps mentioned in the previous subtask.
# Notice that if you use the default parameters for the SVD you might encounter a strange shape in the transformed data.
# This happens because the underlying algorithm is set to 'randomized_svd'.
# Investigate how to solve this problem and run the algorithm with `n_components=2, n_iter=7, random_state=40`.
# Hint: You need one extra parameter to overcome this problem.
# Transform the TF_IDF vectors for the 4 subsets to the SVD and compute the cosine similarity.
# One corpus (Series of space-joined stem strings) per main character.
subsets = {}
for name in ['<NAME>', '<NAME>', '<NAME>', '<NAME>']:
    corpus_list_one_character = merged_data.loc[merged_data['name'] == name, 'stems']
    corpus_one_character = corpus_list_one_character.apply(" ".join)
    subsets[name] = corpus_one_character
# +
### Your Code ###
# One concatenated document per character, count-vectorized over 10k terms.
subsets_as_list = [" ".join(subsets[name].tolist()) for name in subsets]
vectorizer = CountVectorizer(analyzer='word', max_features=10000)
count_vectorized = vectorizer.fit_transform(subsets_as_list)
# algorithm='arpack' is the "extra parameter" the task hints at: the default
# 'randomized' solver produces the strange transform shape mentioned above.
svd = TruncatedSVD(n_components=2, n_iter=7, random_state=40, algorithm='arpack') ### Your Code ###
svd.fit(count_vectorized.toarray().astype(float))
new_weights = svd.transform(count_vectorized) ### Your Code ###
print(svd.components_)
print(new_weights)
# -
# Pairwise cosine similarity of the characters' 2-D SVD representations.
names = ['<NAME>', '<NAME>', '<NAME>', '<NAME>']
for i, name in enumerate(names):
    for j in range(i + 1, len(names)):
        sim = cosine_similarity([new_weights[i]], [new_weights[j]])
        print('{} to {}: {}'.format(name, names[j], sim))
# ## Task 4: Mathematical Concepts ( 1 + 2 + 2 = 5 points)
# Answer the following questions in the notebook, use markdown or latex to fill in the cells for the answers:
# ## Sub Task 1:
# What is the relationship between PCA and SVD?
# PCA (Principal component analysis), e.g. used for dimensionality reduction or feature extraction, is a technique where a dataset with many correlated coordinates is transformed into a dataset with fewer uncorrelated coordinates (=principal components).
#
# SVD (Singular value decomposition) is a method for calculating principal components of a dataset. So a PCA can be calculated by using SVD.
# ## Sub Task 2:
# Find the Singular value decomposition of $A = \left[ \begin{matrix}1&-1\\ -2&2\\ 2&-2\end{matrix} \right]$.
#
# **1. Formulas**
#
# Matrix A can be decomposed into: $A = U * \Sigma * V^T$
# With the elements:
# * $A\in \mathbb{R}^{3x2} (general: A\in \mathbb{R}^{mxn})$
# * $U\in \mathbb{R}^{3x3} (general: U\in \mathbb{R}^{mxm})$
# * $V\in \mathbb{R}^{2x2} (general: V\in \mathbb{R}^{nxn})$
# * $\Sigma \in \mathbb{R}^{3x2} (general: V\in \mathbb{R}^{mxn})$
#
# $V^T = eigenvectors(A^TA)^T = \left[\begin{matrix} v_1 \\ v_2 \end{matrix}\right]$
#
# $U = \left[\begin{matrix} \frac{1}{\sigma_1}Av_1 & u_2 & u_3 \end{matrix}\right]$, where the columns belonging to zero singular values are completed with an orthonormal basis of $NS(A^T)$ (normalized null-space vectors of $A^T$)
#
# $\Sigma = \left[\begin{matrix} \sigma_1 & 0 \\ 0 & \sigma_2 \\ 0 & 0 \end{matrix}\right]$
# **1. Calculate $V^T$**
#
# $A^TA = \left[\begin{matrix} 1 & -2 & 2 \\ -1 & 2 & -2 \end{matrix}\right] * \left[\begin{matrix} 1 & -1 \\ -2 & 2 \\ 2 & -2 \end{matrix}\right] = \left[\begin{matrix} 9 & -9 \\ -9 & 9 \end{matrix}\right]$
#
# Find the eigenvalues:
#
# $ \left[ \begin{matrix} 9-\lambda & -9 \\ -9 & 9-\lambda \end{matrix} \right] = 0$
#
# $ (9-\lambda) * (9-\lambda) - (-9) * (-9) = 0 $
# $\lambda^2 - 18\lambda=0 $
#
# $ \lambda_1 = 18 \;\Rightarrow\; \sigma_1 = \sqrt{\lambda_1} = \sqrt{18}$
# $ \lambda_2 = 0 \;\Rightarrow\; \sigma_2 = \sqrt{\lambda_2} = 0$
#
# Find the eigenvectors:
#
# For $ \lambda_1 = 18$
# $\left[\begin{matrix} 9-18 & -9 \\ -9 & 9-18 \end{matrix}\right] * \left[\begin{matrix} x_1 \\ x_2 \end{matrix}\right] = \left[\begin{matrix} 0 \\ 0 \end{matrix}\right]$ Solution of the linear equation system = $\left[\begin{matrix} 1 \\ -1 \end{matrix}\right]$
#
# For $ \lambda_2 = 0$
# $\left[\begin{matrix} 9-0 & -9 \\ -9 & 9-0 \end{matrix}\right] * \left[\begin{matrix} x_1 \\ x_2 \end{matrix} \right] = \left[\begin{matrix} 0 \\ 0 \end{matrix}\right]$ Solution of the linear equation system = $\left[\begin{matrix} 1 \\ 1 \end{matrix}\right]$
#
# The eigenvectors are:
# $v_1 = \left[\begin{matrix} \frac{1}{\sqrt{2}} \\ \frac{-1}{\sqrt{2}} \end{matrix}\right]$
# $v_2 = \left[\begin{matrix} \frac{1}{\sqrt{2}} \\ \frac{1}{\sqrt{2}} \end{matrix}\right]$
#
# $V^T = \left[\begin{matrix} \frac{1}{\sqrt{2}} & \frac{-1}{\sqrt{2}} \\ \frac{1}{\sqrt{2}} & \frac{1}{\sqrt{2}} \end{matrix}\right]$
# **2. Calculate U**
#
# $U = \left[\begin{matrix} \frac{1}{3} & \frac{-10}{3\sqrt{17}} & \frac{2}{\sqrt{17}} \\ \frac{-2}{3} & \frac{2}{3\sqrt{17}} & \frac{3}{\sqrt{17}} \\ \frac{2}{3} & \frac{7}{3\sqrt{17}} & \frac{2}{\sqrt{17}} \end{matrix}\right]$ — the middle column $u_2 = \frac{1}{3\sqrt{17}}(-10, 2, 7)^T$ completes an orthonormal basis of $NS(A^T)$; a zero column would make $U$ non-orthogonal.
#
# **3. Calculate $\Sigma$**
#
# $\Sigma = \left[\begin{matrix}\sqrt{18} & 0 \\ 0 & 0 \\ 0 & 0 \end{matrix}\right]$
#
# **4. Proof**
#
# $U\Sigma V^T$ gives again the original matrix A
# ## Sub Task 3:
#
# Consider the term-document matrix
#
# | | d1 | d2 | d3 | d4 | d5 | d6 |
# |--------|----|----|----|----|----|----|
# | bank | 1 | 2 | 1 | 2 | 1 | 1 |
# | money | 0 | 0 | 0 | 2 | 1 | 2 |
# | river | 2 | 0 | 2 | 0 | 0 | 0 |
# | ship | 2 | 1 | 1 | 0 | 0 | 0 |
# | water | 1 | 2 | 2 | 0 | 0 | 0 |
# | invest | 0 | 0 | 0 | 1 | 2 | 0 |
# | loan | 0 | 0 | 0 | 1 | 1 | 1 |
#
# Use NumPy to compute the SVD for the matrix and write down the term matrix (U) and document matrix ($V^T$) and the singular values ($\Sigma$). Compute the following:
#
# - zero out everything but the 2 largest singular values of $\Sigma$ and compute the new term-document matrix.
# - detect the two topics given the new $\Sigma$ and show the top-3 words for each. What are these topics?
# - what is the most similar document to each document, using the cosine similarity and reduced representation?
#
#
import numpy as np

# Term-document matrix for the task, entered as given (6x7; note it is the
# transpose of the 7x6 table above -- rows here correspond to the table's
# columns).
A = np.array([[1, 0, 2, 2, 1, 0, 0],
              [2, 0, 0, 1, 2, 0, 0],
              [1, 0, 2, 1, 2, 0, 0],
              [2, 2, 0, 0, 0, 1, 1],
              [1, 1, 0, 0, 0, 2, 1],
              [1, 2, 0, 0, 0, 0, 1]])

# Full SVD: u is (6, 6), vh is (7, 7), and s holds the min(6, 7) = 6
# singular values already sorted in descending order.
u, s, vh = np.linalg.svd(A, full_matrices=True) ### Your Code ####

# Keep only the two largest singular values and zero out the rest.
# Padding up to len(s) works for any matrix shape; the original hard-coded
# four zeros and swapped the row/column slice bounds below, which only
# happened to work because A has more columns than rows.
two_largest_singular_values = list(s[:2])
two_largest_singular_values.extend([0.0] * (len(s) - 2))
print(two_largest_singular_values)

# Rebuild a rectangular Sigma of A's shape with the truncated spectrum on
# its main diagonal.
Sigma = np.zeros((A.shape[0], A.shape[1]))
Sigma[:len(s), :len(s)] = np.diag(two_largest_singular_values)

# Rank-2 approximation of A.
new_reconstruction = u.dot(Sigma.dot(vh))
print(new_reconstruction.round(1))
# +
# Topic detection on the rank-2 reconstruction.
from sklearn.decomposition import TruncatedSVD  # the original used TruncatedSVD without importing it

vocab = ['bank', 'money', 'river', 'ship', 'water', 'invest', 'loan']
svd = TruncatedSVD(n_components=5, n_iter=7, random_state=42)
svd.fit(new_reconstruction)
n_top_words = 3
# NOTE(review): show_topics is not defined in this notebook -- presumably a
# helper that maps each SVD component to its highest-weighted vocab words;
# confirm it is provided elsewhere.
topics = show_topics(svd.components_, n_top_words, vocab)
# The two strongest components correspond to the two topics.
top_words = topics[:2] ### Your Code ###
print(top_words)
# +
# Pairwise document similarity in the reduced (rank-2) representation.
# The original called cosine_similarity() without ever importing it (only
# the pairwise *module* was imported), which raises a NameError.
from sklearn.metrics.pairwise import cosine_similarity
from sklearn.metrics import pairwise
from scipy import sparse

# Each column of the reconstruction is one document vector; round once,
# outside the loops, and flatten every column to a 1-D vector.
reduced = new_reconstruction.round(1)
documents = [reduced[:, i] for i in range(reduced.shape[1])]
print("Document similarities:")
for i, document1 in enumerate(documents):
    for j, document2 in enumerate(documents):
        print('d{} to d{}: {}'.format(i+1, j+1, cosine_similarity([document1], [document2])))
# -
| assignments/2/ITA_assignment_2.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# language: python
# name: python3
# ---
# ## THE SPARKS FOUNDATION INTERNSHIP
#
# # Task 7: Stock Market Prediction using Numerical and Textual Analysis
#
#
# # Author : <NAME>
# ### **Problem Statement**: Create a hybrid model for stock price/performance prediction using numerical analysis of historical stock prices, and sentiment analysis of news headlines.
# **Stock to analyze and predict** - SENSEX (S&P BSE SENSEX)
#
# ## *Importing the libraries*
import os
import pandas as pd
import numpy as np
import seaborn as sns
import matplotlib.pyplot as plt
# %matplotlib inline
from sklearn.model_selection import train_test_split
from sklearn.feature_extraction.text import CountVectorizer
from sklearn.tree import DecisionTreeClassifier
from sklearn.ensemble import RandomForestClassifier
#from sklearn.model_selection import GridSearchCV
from sklearn.ensemble import RandomForestRegressor, AdaBoostRegressor
from sklearn.tree import DecisionTreeRegressor
from sklearn.metrics import mean_squared_error
from sklearn import metrics
import datetime
# #!pip install textblob
from textblob import TextBlob
# #!pip install lightgbm
import lightgbm
import nltk
from nltk.sentiment.vader import SentimentIntensityAnalyzer as vader
from pandas.plotting import andrews_curves
from wordcloud import WordCloud
import warnings
warnings.filterwarnings('ignore')
# ### ***Importing and reading the dataset***
# ###### *Dataset_1(Stock Price)*
# Load the historical SENSEX price data (daily OHLCV rows; the ^BSESN
# filename suggests a Yahoo Finance download -- TODO confirm source).
stock_price = pd.read_csv('^BSESN.csv')
stock_price.head()
#To check the no. of rows and columns.
stock_price.shape
#To check the no. of elements.
stock_price.size
#Checking null count for each column
stock_price.isnull().sum()
# dropping duplicates in data
stock_price = stock_price.drop_duplicates()
#Converting Date column to datetime datatype
stock_price['Date'] = pd.to_datetime(stock_price['Date'], format= '%Y-%m-%d')
stock_price.info()
# ### *Visualizing Data*
# +
#statistical analysis
# Central-tendency summary of the adjusted close, rounded to 2 decimals.
data = round(stock_price['Adj Close'],2)
mean = data.mean()
median = data.median()
mode = data.mode()  # mode() returns a Series; mode[0] is the most frequent value
print('Mean: ',mean,'\nMedian: ',median,'\nMode: ',mode[0])
plt.figure(figsize=(11,4))
plt.hist(data,bins=100,color='silver')
plt.axvline(mean,color='r',label='Mean')
plt.axvline(median,color='g',label='Median')
plt.axvline(mode[0],color='b',label='Mode')
plt.xlabel('Data(Adj Close)')
plt.ylabel('Frequency')
plt.legend()
plt.show()
# -
# Label each day by whether the NEXT day's volume rises or falls, and add
# day-over-day percentage returns (the row made NaN by pct_change is
# dropped right after).
stock_price['Fluctuations'] = np.where(stock_price['Volume'].shift(-1) > stock_price['Volume'],'Rise','Fall')
stock_price['Returns'] = stock_price['Adj Close'].pct_change()
stock_price = stock_price.dropna()
stock_price.head()
# Adjusted close plotted over the row index.
fig, ax = plt.subplots()
fig.set_size_inches(9, 5)
sns.lineplot(stock_price.index ,stock_price['Adj Close'])
# +
#Moving Average
# 50-day rolling mean and standard deviation of the close, overlaid on
# the raw close series.
TITLE='Moving Average of Stock Data'
close = stock_price['Close']
m_avg = close.rolling(window = 50).mean()
std = close.rolling(window = 50).std()
plt.figure(figsize=(10,6))
plt.title(TITLE)
stock_price['Close'].plot(label = 'Close')
m_avg.plot(label = 'Rolling Mean')
std.plot(label = 'Rolling Standard Deviation')
plt.legend()
# -
#Plotting returns
plt.figure(figsize = (10,6))
stock_price['Returns'].plot(label='Return')
plt.title("Returns")
#Heatmap
# Pairwise correlations between the numeric price/volume/return columns.
f,ax = plt.subplots(figsize=(7,4))
sns.heatmap(stock_price[['Open', 'High', 'Low', 'Adj Close', 'Volume', 'Returns']].corr(),
            annot=True, linewidths=.5, fmt= '.3f',ax=ax)
fig, ax = plt.subplots()
fig.set_size_inches(12, 6)
ax = sns.lineplot(data=stock_price[['Open', 'Low', 'High', 'Close']])
#Andrew Curve
# Andrews curves: visual check of whether Open/Close separate the
# Rise/Fall classes.
andrews_curves(stock_price[['Open', 'Close', 'Fluctuations']], "Fluctuations",colormap='Set2')
# Histograms
# One histogram per numeric column.
h=stock_price.hist()
plt.show()
#plt.h
# #### *Dataset_2(News Headlines)*
# Load the India news headlines dataset (columns: publish_date,
# headline_category, headline_text).
news_headlines= pd.read_csv('india-news-headlines.csv')
news_headlines.head()
news_headlines.shape
news_headlines.size
news_headlines.isnull().sum()
# Normalise the date column name and dtype so it can be joined with the
# price data later.
news_headlines.rename(columns = {'publish_date':'Date'}, inplace = True)
news_headlines['Date'] = pd.to_datetime(news_headlines['Date'], format= '%Y%m%d')
news_headlines.info()
news_headlines.drop('headline_category', axis= 1, inplace= True)
news_headlines.head()
# dropping duplicates in data
news_headlines=news_headlines.drop_duplicates()
#converting news data to lowercase
news_headlines['headline_text'] = news_headlines['headline_text'].apply(lambda x: " ".join(x.lower() for x in x.split()))
news_headlines['headline_text'].head()
news_headlines['headline_text'].value_counts()
# Keep only letters and apostrophes; everything else becomes a space.
news_headlines.replace("[^a-zA-Z']"," ",regex=True,inplace=True)
news_headlines.head()
#Word Cloud
# Built from the last 1000 headlines only.
headlines = ' '.join(news_headlines['headline_text'].str.lower().values[-1000:])
wordcloud = WordCloud(max_font_size=None,width = 2500, height = 1200).generate(headlines)
plt.figure(figsize = (12, 16))
plt.imshow(wordcloud)
plt.title('Top words in headline',size=30)
plt.axis("off")
plt.show()
# # Merging Data
# +
#news_headlines.rename(columns = {'publish_date':'Date'}, inplace = True)
# Inner-join headlines with prices on the normalised Date column -- only
# trading days that also have headlines survive.
data_merged = pd.merge(stock_price, news_headlines, how='inner', on='Date')
data_merged.dropna()  # NOTE(review): result is discarded; dropna is redone below
# -
# Restrict to 2015 onwards, then clean the merged frame.
data_merged = data_merged[data_merged['Date']>='2015-01-01']
data_merged = data_merged.dropna(how='any',axis=0) #dropping null entries in our data
data_merged = data_merged.drop_duplicates() #dropping duplicate values
data_merged
#Resetting Index
data_merged = data_merged.reset_index()
del data_merged['index']
data_merged
# Recheck the count
print(data_merged.count())
# # Sentiment Analysis
# TextBlob polarity for each headline (sentiment[0] is the polarity
# component of the (polarity, subjectivity) tuple).
data_merged['sentiment'] = data_merged['headline_text'].apply(lambda x: TextBlob(x).sentiment[0])
data_merged.head()
# VADER polarity scores: a dict with 'compound', 'pos', 'neg', 'neu'.
sid = vader()
data_merged['x'] = data_merged['headline_text'].apply(sid.polarity_scores)
#data_merged.head()
# Expand the score dict into one column per component.  (The original
# lambdas named their parameter `dict`, shadowing the builtin.)
data_merged['compound'] = data_merged['x'].apply(lambda scores: scores['compound'])
data_merged['positive'] = data_merged['x'].apply(lambda scores: scores['pos'])
data_merged['negative'] = data_merged['x'].apply(lambda scores: scores['neg'])
data_merged['neutral'] = data_merged['x'].apply(lambda scores: scores['neu'])
data_merged = data_merged.drop('x',axis=1)
data_merged
#Price Indication (Price Movement)
# Binary target: 1 if the next day's close is higher than today's, else 0.
data_merged['Price Indication'] = np.where(data_merged['Close'].shift(-1)>data_merged['Close'],1,0)
data_merged.head()
# ## Training and testing
# Features: raw price columns plus all sentiment scores; target: the
# binary next-day price movement built above.
x = data_merged[['sentiment','Open','High','Low','Close','Adj Close','compound','positive','negative','neutral']]
y = data_merged['Price Indication']
# +
#Dividing data into Training and Test sets with ratio 80% AND 20%
x_train,x_test,y_train,y_test=train_test_split(x,y,test_size=0.2,random_state=0)
print(x_train.shape)
print(x_test.shape)
# +
#DecisionTreeRegressor Model
# NOTE(review): the target is binary, so these are regressors fitted on a
# 0/1 label; the reported MSE is effectively a Brier-style score.  A
# classifier may be the more natural choice -- confirm intent.
dtr = DecisionTreeRegressor()
dtr.fit(x_train, y_train)
predictions = dtr.predict(x_test)
print('Mean Squared error: ',mean_squared_error(predictions,y_test))
# +
#LGBMRegressor Model
gbm = lightgbm.LGBMRegressor()
gbm.fit(x_train, y_train)
predictions = gbm.predict(x_test)
print('Mean Squared error: ',mean_squared_error(predictions, y_test))
# +
#AdaBoostRegressor Model
adb = AdaBoostRegressor()
adb.fit(x_train, y_train)
predictions = adb.predict(x_test)
print('Mean Squared error: ',mean_squared_error(predictions, y_test))
# -
# ### Conclusion
# **Mean Squared error** of
# >**DecisionTreeRegressor Model** is 0.001274746134486371 <br>
# >**LGBMRegressor Model** is 0.0007688320896342009 <br>
# >**AdaBoostRegressor Model** is 0.0007676971314068422
#
# It shows that the ***AdaBoostRegressor Model*** performs better than the others.
# ### ***Task Completed***
| Data Science & Business Analytics Tasks/Task7_Stock Market Prediction using Numerical and Textual Analysis.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# language: python
# name: python3
# ---
from bs4 import BeautifulSoup as BSoup
import csv
import re
import requests
import random
# +
# Locations of the cached CSV data files, all under a single data directory.
data_dir = 'data'
sports_filename = 'sports.csv'
sports_path = f'{data_dir}/{sports_filename}'
events_filename = 'events.csv'
events_path = f'{data_dir}/{events_filename}'
results_filename = 'results.csv'
results_path = f'{data_dir}/{results_filename}'
# -
# ## Reloading Data from Files
#
# Run this block below if data files already exist and you just simply want to reload them into their respective dictionaries
# Sports Data
# Reload the cached sports rows; `with` closes each file handle (the
# original passed open() straight to DictReader and never closed it), and
# newline='' is the documented way to open files for the csv module.
with open(sports_path, 'r', newline='') as sports_file:
    sports_info = list(csv.DictReader(sports_file))
# Events Data
with open(events_path, 'r', newline='') as events_file:
    events_info = list(csv.DictReader(events_file))
# Results Data
# Group the result rows by event: {event_id: [row, ...]}.
results = {}
with open(results_path, 'r', newline='') as results_file:
    for line in csv.DictReader(results_file):
        # Find event for results and remove it from the dictionary
        event = line.pop('event')
        # Append event results to list associated with event
        results.setdefault(event, []).append(line)
# ## Gathering Data
#
# I'm scraping the data from https://www.olympic.org/ via BeautifulSoup4
# Base is used to reference complete urls
url_base = 'https://www.olympic.org'
# Main page has reference to all sports (links & image representations)
url_main = 'https://www.olympic.org/pyeongchang-2018'
# ### Sports & Event Data
# #### Reference page to list all sports
# Get document to be passed in for soup (better/cleaner practice)
request_main = requests.get(url_main)
text_main = request_main.text
soup_main = BSoup(text_main, 'html.parser')
# +
# Get the image container and the name/link container (comes in pairs)
sports_section = soup_main.find_all('section', {'class':'game-results-box'})[0]
sports_section = sports_section.find_all('ul', {'class':['countries','games2018-2']})[0]
sports_list = sports_section.find_all('li', {'class':'box'})
# Dictionary for the sports: collect {'id', 'page', 'name'} for every
# sport listed on the games page.
sports_info = []
for item in sports_list:
    sport_name = item.a.text.strip()
    # Link has full url address
    sport_link = '{base}{sport}'.format(base=url_base, sport=item.a['href'])
    # ID for sport will be what is used by website to define the sport's pages
    sport_id = sport_name.lower().replace(' ','-')
    # Save each sport into list of dictionary info
    sport_dict = {'id': sport_id, 'page': sport_link, 'name': sport_name}
    sports_info.append(sport_dict)
# -
# TEST
# Sanity-check: print every scraped sport.
print('Number of sports: {}'.format(len(sports_info)))
print('====================')
for sport in sports_info:
    for k, v in sport.items():
        print(k,v)
    print('-----------')
# #### Reference to events in each sport
# +
# For each sport, get the different events
# Save all event info into a list of events for the sport
# (each entry: {'id', 'name', 'sport_id', 'sex', 'page'}).
events_info = []
for sport in sports_info:
    # Get document to be passed in for soup (better/cleaner practice)
    request_sport = requests.get(sport['page'])
    soup_main = BSoup(request_sport.text, 'html.parser')
    # Find the main section for all events in sports
    main_section = soup_main.find_all('div', {'class':'main-holder'})[0]
    # Find the event sections on this main page
    event_sections = main_section.find_all('section', {'class':'event-box'})
    # Get the event names & info for each event section
    for event in event_sections:
        name = event.a.text.strip()
        page = '{base}{link}'.format(base=url_base, link=event.a['href'])
        # The ID is the sport & the name used for webpage ref for event
        # We trade brevity for clarity in the ID naming convention
        event_id = re.search('[^/]+$', event.a['href']).group()
        event_id = '{sport}-{event}'.format(sport=sport['id'], event=event_id)
        # Get sex by seeing if it's men's, women's, or mixed (definitions checked)
        sex_categories = {'mixed':'mixed', 'gundersen':'men', 'man':'men', 'men':'men',
                          'mens':'men', 'women':'women', 'womens':'women', 'ladies':'women'}
        # Default to mixed event
        event_sex = 'mixed'
        is_assigned = False
        # Loop over each category (time consuming but necessary)
        for sex in sex_categories.keys():
            # Check if any of the '-'-separated tokens of the id is the sex-term
            if sex in event_id.split('-'):
                # If there was more than one label applied, it's a mixed event
                if is_assigned:
                    event_sex = 'mixed'
                    break
                event_sex = sex_categories[sex]
                is_assigned = True
        # Save list of info dictionary for each event
        event_dict = {'id': event_id, 'name': name, 'sport_id':sport['id'], 'sex':event_sex, 'page': page}
        events_info.append(event_dict)
# -
# TEST
# Spot-check five random events, then list every event name.
print('Number of events: {}'.format(len(events_info)))
print('====================')
for event in random.choices(events_info, k=5):
    for k, v in event.items():
        print(k,v)
    print('-----------')
for i,event in enumerate(events_info):
    print(i,event['name'])
# #### Reference to results for each event
# Scrape per-competitor results for every (non-team) event.
results = {}
team_events = []
# Replace ranking for Gold, Silver, Bronze, and no ranking (-1)
medal_ranks = {'G':1, 'S':2, 'B':3}
for event in events_info:
    # Get document to be passed in for soup (better/cleaner practice)
    request_event = requests.get(event['page'])
    soup_results = BSoup(request_event.text, 'html.parser')
    # Find the main section and table for all events in sports.
    # The original used a bare `except` and then fell through: an event
    # page without a results table either crashed (first iteration) or
    # silently reused the PREVIOUS event's table.  Skip such events.
    try:
        results_section = soup_results.find_all('section', {'class':'table-box'})[0]
    except IndexError:
        print('No results found: {}'.format(event['page']))
        continue
    results_table = results_section.find_all('table')[0]
    # Get the headers for the results
    headers = results_table.find('thead')
    result_headers = [h.text.lower() for h in headers.find_all('th')]
    # If team event, save to do different processing
    if 'team' in result_headers:
        team_events.append(event)
        continue
    # Get the results
    competitors = results_table.find('tbody')
    # Ignore team events' with special tier in tbody
    competitors = competitors.find_all('tr', {'class': None})
    # Each competitor (can be a team) has a result line
    competitors_info = []
    for c in competitors:
        # Get competition info from each row's cells
        c = c.find_all('td')
        # Save the competitor and remove extra new lines (was used on website formatting)
        c_dict = {h: cc.text.lower().strip().replace('\n','').replace('\r','') for h,cc in zip(result_headers,c)}
        # Replace rank as number (integers)
        ranking = c_dict.get('rank')
        try:
            # Get the integer rank if there is a decimal
            c_dict['rank'] = int(ranking.split('.')[0])
        except (AttributeError, ValueError):
            # Not a plain number: a medal letter (G/S/B) or no ranking (-1)
            c_dict['rank'] = medal_ranks.get(ranking, -1)
        competitors_info.append(c_dict)
    # Save all event results into one dictionary
    results[event['id']] = competitors_info
# TEST
# Print one event's results as a sanity check.
for r,dicts in results.items():
    print(r)
    print(dicts)
    break
# ## Save Data into CSVs
# ### Sports Data
# Create CSV file from list of sports dictionaries.
# newline='' is required when handing a file object to csv.writer;
# without it the module emits blank rows on Windows (same fix applied to
# all three files below).
with open(sports_path, 'w', encoding='utf-8', newline='') as sports_csv:
    writer = csv.writer(sports_csv)
    # Headers
    headers = ['id', 'name', 'page']
    writer.writerow(headers)
    # Get each dictionary assoc. with the sport
    for sport in sports_info:
        # Use only the headers (in order) to write row
        row = [sport[key] for key in headers]
        writer.writerow(row)
# ### Events Data
# Create CSV file from list of events dictionaries
with open(events_path, 'w', encoding='utf-8', newline='') as events_csv:
    writer = csv.writer(events_csv)
    # Headers
    headers = ['id', 'name', 'sport_id', 'sex', 'page']
    writer.writerow(headers)
    # Get each dictionary assoc. with the event
    for event in events_info:
        # Use only the headers (in order) to write row
        row = [event[key] for key in headers]
        writer.writerow(row)
# ### Results Data
# Header w/ event in front
result_headers.insert(0, 'event')
# Create CSV file from the per-event results dictionary
with open(results_path, 'w', encoding='utf-8', newline='') as results_csv:
    writer = csv.writer(results_csv)
    writer.writerow(result_headers)
    # Get each dictionary assoc. with each event
    for event,event_results in results.items():
        # Iterate over each result of event
        for result in event_results:
            # Use only the headers (in order) to write row
            row = [result.get(key) for key in result_headers[1:]]
            row.insert(0,event)
            writer.writerow(row)
| data_scrape.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# language: python
# name: python3
# ---
# + [markdown] papermill={"duration": 0.016695, "end_time": "2021-06-19T01:09:33.642309", "exception": false, "start_time": "2021-06-19T01:09:33.625614", "status": "completed"} tags=[]
# # Importing Data
# + papermill={"duration": 2.010429, "end_time": "2021-06-19T01:09:35.667927", "exception": false, "start_time": "2021-06-19T01:09:33.657498", "status": "completed"} tags=[]
import pandas as pd
# IMDb genre-classification dataset; each line is
# "id ::: title ::: genre ::: summary", and the multi-character " ::: "
# separator requires the python parsing engine.
train_df = pd.read_csv("/kaggle/input/genre-classification-dataset-imdb/Genre Classification Dataset/train_data.txt",
                      engine="python",
                      sep=" ::: ",
                      names=["id", "movie", "genre", "summary"])
# The "solution" variant of the test file includes the genre labels, so
# it can be used as a labelled test set.
test_df = pd.read_csv("/kaggle/input/genre-classification-dataset-imdb/Genre Classification Dataset/test_data_solution.txt",
                      engine="python",
                      sep=" ::: ",
                      names=["id", "movie", "genre", "summary"])
# + papermill={"duration": 0.036275, "end_time": "2021-06-19T01:09:35.719808", "exception": false, "start_time": "2021-06-19T01:09:35.683533", "status": "completed"} tags=[]
# Viewing the first five training rows
train_df.head()
# + papermill={"duration": 0.02793, "end_time": "2021-06-19T01:09:35.763791", "exception": false, "start_time": "2021-06-19T01:09:35.735861", "status": "completed"} tags=[]
# Viewing the first five test rows
test_df.head()
# + [markdown] papermill={"duration": 0.015774, "end_time": "2021-06-19T01:09:35.795450", "exception": false, "start_time": "2021-06-19T01:09:35.779676", "status": "completed"} tags=[]
# # Data Manipulation
# + [markdown] papermill={"duration": 0.026082, "end_time": "2021-06-19T01:09:35.837585", "exception": false, "start_time": "2021-06-19T01:09:35.811503", "status": "completed"} tags=[]
# ## Shuffling the data
# + papermill={"duration": 0.050484, "end_time": "2021-06-19T01:09:35.915079", "exception": false, "start_time": "2021-06-19T01:09:35.864595", "status": "completed"} tags=[]
# sample(frac=1) returns all rows in random order, so the later
# train/validation split is not order-biased.
train_shuffled = train_df.sample(frac=1)
# + [markdown] papermill={"duration": 0.015492, "end_time": "2021-06-19T01:09:35.946879", "exception": false, "start_time": "2021-06-19T01:09:35.931387", "status": "completed"} tags=[]
# ## Splitting the data
# + [markdown] papermill={"duration": 0.015618, "end_time": "2021-06-19T01:09:35.978499", "exception": false, "start_time": "2021-06-19T01:09:35.962881", "status": "completed"} tags=[]
#
# + papermill={"duration": 0.909318, "end_time": "2021-06-19T01:09:36.903702", "exception": false, "start_time": "2021-06-19T01:09:35.994384", "status": "completed"} tags=[]
# Split the data using train_test_split from sklearn
# 90% train / 10% validation; the labelled solution file is the test set.
from sklearn.model_selection import train_test_split
X_train, X_val, y_train, y_val = train_test_split(train_shuffled["summary"],
                                                  train_shuffled["genre"],
                                                  test_size=0.1)
X_test, y_test = test_df["summary"], test_df["genre"]
# + [markdown] papermill={"duration": 0.015897, "end_time": "2021-06-19T01:09:36.935899", "exception": false, "start_time": "2021-06-19T01:09:36.920002", "status": "completed"} tags=[]
# ## One-Hot Encoding (Labels)
# + papermill={"duration": 0.084309, "end_time": "2021-06-19T01:09:37.035860", "exception": false, "start_time": "2021-06-19T01:09:36.951551", "status": "completed"} tags=[]
from sklearn.preprocessing import OneHotEncoder
# NOTE(review): `sparse=False` is deprecated in newer scikit-learn in
# favour of `sparse_output=False`; confirm the pinned sklearn version.
ohe = OneHotEncoder(sparse=False)
ohe.fit(train_shuffled["genre"].to_numpy().reshape(-1,1)) # Fit the encoder to genre of training data
# One-hot encode the genre labels of each split with the shared encoder.
train_ohe = ohe.transform(y_train.to_numpy().reshape(-1, 1))
val_ohe = ohe.transform(y_val.to_numpy().reshape(-1,1))
test_ohe = ohe.transform(y_test.to_numpy().reshape(-1,1))
# + [markdown] papermill={"duration": 0.016416, "end_time": "2021-06-19T01:09:37.068665", "exception": false, "start_time": "2021-06-19T01:09:37.052249", "status": "completed"} tags=[]
# ## List (Summary)
# + papermill={"duration": 0.026977, "end_time": "2021-06-19T01:09:37.113582", "exception": false, "start_time": "2021-06-19T01:09:37.086605", "status": "completed"} tags=[]
# Plain python lists of the summary strings.
train_sentences = X_train.tolist()
val_sentences = X_val.tolist()
test_sentences = X_test.tolist()
# + [markdown] papermill={"duration": 0.016119, "end_time": "2021-06-19T01:09:37.145541", "exception": false, "start_time": "2021-06-19T01:09:37.129422", "status": "completed"} tags=[]
# # Universal Sentence Encoder (Embedding Layer)
# + papermill={"duration": 29.044879, "end_time": "2021-06-19T01:10:06.206789", "exception": false, "start_time": "2021-06-19T01:09:37.161910", "status": "completed"} tags=[]
import tensorflow_hub as hub
# Universal Sentence Encoder v4 as a frozen Keras layer: maps a batch of
# raw strings to fixed-size embeddings (512-dim per the USE docs -- confirm).
embedding_layer = hub.KerasLayer("https://tfhub.dev/google/universal-sentence-encoder/4",
                                        trainable=False,
                                        name="universal_sentence_encoder")
# + [markdown] papermill={"duration": 0.016206, "end_time": "2021-06-19T01:10:06.240261", "exception": false, "start_time": "2021-06-19T01:10:06.224055", "status": "completed"} tags=[]
# # Creating Dataset
# + papermill={"duration": 0.095939, "end_time": "2021-06-19T01:10:06.352789", "exception": false, "start_time": "2021-06-19T01:10:06.256850", "status": "completed"} tags=[]
from tensorflow.data import Dataset as tfd
import tensorflow as tf
AUTOTUNE = tf.data.AUTOTUNE
# Batched, prefetching pipelines of (summary string, one-hot genre) pairs.
train_dataset = tfd.from_tensor_slices((X_train, train_ohe)).batch(32).prefetch(AUTOTUNE)
val_dataset = tfd.from_tensor_slices((X_val, val_ohe)).batch(32).prefetch(AUTOTUNE)
test_dataset = tfd.from_tensor_slices((X_test, test_ohe)).batch(32).prefetch(AUTOTUNE)
train_dataset, val_dataset, test_dataset
# + [markdown] papermill={"duration": 0.017057, "end_time": "2021-06-19T01:10:06.386849", "exception": false, "start_time": "2021-06-19T01:10:06.369792", "status": "completed"} tags=[]
# # Model
# + papermill={"duration": 0.034949, "end_time": "2021-06-19T01:10:06.439070", "exception": false, "start_time": "2021-06-19T01:10:06.404121", "status": "completed"} tags=[]
# Number of distinct genres = size of the softmax output layer below.
classes = len(train_shuffled["genre"].value_counts())
# + papermill={"duration": 0.655303, "end_time": "2021-06-19T01:10:07.110985", "exception": false, "start_time": "2021-06-19T01:10:06.455682", "status": "completed"} tags=[]
# Build the model
# String input -> USE embedding -> 512-unit ReLU -> softmax over genres.
from tensorflow.keras import layers
inputs = layers.Input(shape=[], dtype="string")
x = embedding_layer(inputs)
x = layers.Dense(512, activation="relu")(x)
outputs = layers.Dense(classes, activation="softmax")(x)
model = tf.keras.Model(inputs, outputs)
# + papermill={"duration": 0.03317, "end_time": "2021-06-19T01:10:07.161593", "exception": false, "start_time": "2021-06-19T01:10:07.128423", "status": "completed"} tags=[]
# Compile the model
# Categorical cross-entropy matches the one-hot labels built above.
model.compile(loss="categorical_crossentropy",
              optimizer=tf.keras.optimizers.Adam(),
              metrics=["accuracy"])
# + papermill={"duration": 0.033256, "end_time": "2021-06-19T01:10:07.211864", "exception": false, "start_time": "2021-06-19T01:10:07.178608", "status": "completed"} tags=[]
model.summary()  # layer shapes and parameter counts
# + papermill={"duration": 23.090604, "end_time": "2021-06-19T01:10:30.320287", "exception": false, "start_time": "2021-06-19T01:10:07.229683", "status": "completed"} tags=[]
# Quick experiment: only 10% of the train/val batches are used per epoch.
history = model.fit(train_dataset,
                    steps_per_epoch=int(0.1*len(train_dataset)),
                    epochs=5,
                    validation_data=val_dataset,
                    validation_steps=int(0.1*len(val_dataset)))
# + papermill={"duration": 36.809317, "end_time": "2021-06-19T01:11:07.225497", "exception": false, "start_time": "2021-06-19T01:10:30.416180", "status": "completed"} tags=[]
model.evaluate(test_dataset)  # final loss/accuracy on the held-out test set
# + [markdown] papermill={"duration": 0.246896, "end_time": "2021-06-19T01:11:07.717883", "exception": false, "start_time": "2021-06-19T01:11:07.470987", "status": "completed"} tags=[]
# # Conclusion
#
# Accuracy: 57.88%
#
# Surprisingly, cleaning the data gives lower accuracy.
| film-genre-classification-using-nlp.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# language: python
# name: python3
# ---
# # Building programs with Python
# 1. [Analysing data](02-Python-1-Data.ipynb) - [solutions](02-Python-1-Data_Instructor.ipynb)
# 1. [Repeating Actions with Loops](02-Python-1-Loops.ipynb) - [solutions](02-Python-1-Loops_Instructor.ipynb)
# 1. [Storing Multiple Values in Lists](02-Python-1-Lists.ipynb) - [solutions](02-Python-1-Lists_Instructor.ipynb)
# 1. [Analyzing Data from Multiple Files](02-Python-1-MultiLists.ipynb) - [solutions](02-Python-1-MultiLists_Instructor.ipynb)
# 1. [Making Choices](02-Python-1-Logic.ipynb) - [solutions](02-Python-1-Logic_Instructor.ipynb)
| 02-Python1/index.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# language: python
# name: python3
# ---
# +
import tensorflow as tf
import numpy as np
from PIL import Image
import cv2
import colorsys
import random
from matplotlib.pyplot import imshow
# %matplotlib inline
# Detection configuration for the YOLOv3-tiny TFLite model.
input_size = 416  # model input resolution (416x416); must match the .tflite file
iou = 0.4 #iou threshold used by non-max suppression
score = 0.25 #score threshold below which detections are discarded
class_names = "./classes.names"  # one class label per line
image_path = "./test.jpg"
# -
def read_class_names(class_file_name):
    """Read one class label per line from *class_file_name*.

    Returns a dict mapping the zero-based line index to the label with
    its trailing newline stripped.
    """
    with open(class_file_name, 'r') as handle:
        return {index: line.strip('\n') for index, line in enumerate(handle)}
# +
# Load the test image; OpenCV reads BGR, so convert to RGB first.
original_image = cv2.imread(image_path)
original_image = cv2.cvtColor(original_image, cv2.COLOR_BGR2RGB)
# Resize to the model input resolution and scale pixels to [0, 1].
image_data = cv2.resize(original_image, (input_size, input_size))
image_data = image_data / 255.
# Add a leading batch dimension: (1, input_size, input_size, 3) float32.
images_data = np.asarray(image_data).astype(np.float32)
images_data = np.expand_dims(images_data, axis = 0)
# +
# Load the YOLOv3-tiny TFLite model (int8-quantised, per the filename)
# and allocate its tensors.
interpreter = tf.lite.Interpreter(model_path=r".\yolov3-tiny-416-int8.tflite")
interpreter.allocate_tensors()
input_details = interpreter.get_input_details()
output_details = interpreter.get_output_details()
input_shape = input_details[0]['shape']
# +
# Run inference and collect every output tensor of the model.
interpreter.set_tensor(input_details[0]['index'], images_data)
interpreter.invoke()
pred = [interpreter.get_tensor(output_details[i]['index']) for i in range(len(output_details))]
# -
def filter_boxes(box_xywh, scores, score_threshold=0.4, input_shape = tf.constant([416,416])):
    """Drop candidate boxes whose best class score is below the threshold.

    box_xywh holds batched (x_center, y_center, w, h) boxes -- presumably
    in pixels of the model input, given the division by input_shape below
    (confirm against the model export).  scores holds per-box class
    scores.  Returns (boxes, pred_conf) where boxes are normalised corner
    coordinates [y_min, x_min, y_max, x_max].
    """
    # The best class score of each box decides whether it survives.
    scores_max = tf.math.reduce_max(scores, axis=-1)
    mask = scores_max >= score_threshold
    class_boxes = tf.boolean_mask(box_xywh, mask)
    pred_conf = tf.boolean_mask(scores, mask)
    # Restore the batch dimension flattened away by boolean_mask.
    class_boxes = tf.reshape(class_boxes, [tf.shape(scores)[0], -1, tf.shape(class_boxes)[-1]])
    pred_conf = tf.reshape(pred_conf, [tf.shape(scores)[0], -1, tf.shape(pred_conf)[-1]])
    box_xy, box_wh = tf.split(class_boxes, (2, 2), axis=-1)
    input_shape = tf.cast(input_shape, dtype=tf.float32)
    # Swap to (y, x) ordering as expected by TF's NMS ops.
    box_yx = box_xy[..., ::-1]
    box_hw = box_wh[..., ::-1]
    # Convert center/size form to normalised min/max corners.
    box_mins = (box_yx - (box_hw / 2.)) / input_shape
    box_maxes = (box_yx + (box_hw / 2.)) / input_shape
    boxes = tf.concat([
        box_mins[..., 0:1],  # y_min
        box_mins[..., 1:2],  # x_min
        box_maxes[..., 0:1],  # y_max
        box_maxes[..., 1:2]  # x_max
    ], axis=-1)
    # return tf.concat([boxes, pred_conf], axis=-1)
    return (boxes, pred_conf)
# Filter low-scoring candidates, then run batched per-class NMS
# (at most 50 boxes per class and 50 total per image).
boxes, pred_conf = filter_boxes(pred[1], pred[0], score_threshold=0.25, input_shape=tf.constant([input_size, input_size]))
boxes, scores, classes, valid_detections = tf.image.combined_non_max_suppression(
    boxes=tf.reshape(boxes, (tf.shape(boxes)[0], -1, 1, 4)),
    scores=tf.reshape(
        pred_conf, (tf.shape(pred_conf)[0], -1, tf.shape(pred_conf)[-1])),
    max_output_size_per_class=50,
    max_total_size=50,
    iou_threshold=iou,
    score_threshold=score
)
# Plain numpy arrays for the drawing code below.
pred_bbox = [boxes.numpy(), scores.numpy(), classes.numpy(), valid_detections.numpy()]
def draw_bbox(image, bboxes, classes=None, show_label=True):
    """Draw detection boxes (and optional labels) onto *image* in place.

    Parameters
    ----------
    image : np.ndarray
        RGB image array of shape (H, W, 3).
    bboxes : sequence
        (boxes, scores, classes, num_boxes) as produced above, each with
        a leading batch dimension of 1; box coordinates are normalised
        [y_min, x_min, y_max, x_max].
    classes : dict, optional
        Mapping of class index -> label.  Loaded lazily from the
        `class_names` file when omitted; the original evaluated
        read_class_names() in the default argument, performing file I/O
        at function-definition time.
    show_label : bool
        Whether to draw the "label: score" text above each box.
    """
    if classes is None:
        classes = read_class_names(class_names)
    num_classes = len(classes)
    image_h, image_w, _ = image.shape
    # One evenly spaced hue per class, shuffled with a fixed seed so the
    # colour assignment is stable across runs.
    hsv_tuples = [(1.0 * x / num_classes, 1., 1.) for x in range(num_classes)]
    colors = list(map(lambda x: colorsys.hsv_to_rgb(*x), hsv_tuples))
    colors = list(map(lambda x: (int(x[0] * 255), int(x[1] * 255), int(x[2] * 255)), colors))
    random.seed(0)
    random.shuffle(colors)
    random.seed(None)
    out_boxes, out_scores, out_classes, num_boxes = bboxes
    for i in range(num_boxes[0]):
        # Skip invalid class ids; the original used `> num_classes`, which
        # let an id equal to num_classes through and indexed past `colors`.
        if int(out_classes[0][i]) < 0 or int(out_classes[0][i]) >= num_classes: continue
        coor = out_boxes[0][i]
        # Scale the normalised coordinates back to pixel space.
        coor[0] = int(coor[0] * image_h)
        coor[2] = int(coor[2] * image_h)
        coor[1] = int(coor[1] * image_w)
        coor[3] = int(coor[3] * image_w)
        fontScale = 0.5
        score = out_scores[0][i]
        class_ind = int(out_classes[0][i])
        bbox_color = colors[class_ind]
        bbox_thick = int(0.6 * (image_h + image_w) / 600)
        c1, c2 = (coor[1], coor[0]), (coor[3], coor[2])
        cv2.rectangle(image, c1, c2, bbox_color, bbox_thick)
        if show_label:
            bbox_mess = '%s: %.2f' % (classes[class_ind], score)
            t_size = cv2.getTextSize(bbox_mess, 0, fontScale, thickness=bbox_thick // 2)[0]
            c3 = (c1[0] + t_size[0], c1[1] - t_size[1] - 3)
            cv2.rectangle(image, c1, (np.float32(c3[0]), np.float32(c3[1])), bbox_color, -1) #filled
            cv2.putText(image, bbox_mess, (c1[0], np.float32(c1[1] - 2)), cv2.FONT_HERSHEY_SIMPLEX,
                        fontScale, (0, 0, 0), bbox_thick // 2, lineType=cv2.LINE_AA)
    return image
# Render the detections on the original (RGB) image and display it.
image = draw_bbox(original_image, pred_bbox)
image = Image.fromarray(image.astype(np.uint8))
image.show()
imshow(image)
# +
# image = cv2.cvtColor(np.array(image), cv2.COLOR_BGR2RGB)
# cv2.imwrite("./result.png", image)
# -
| Prototype/Step 2 - Converting to tflite/testing it out/(WORKS) Image tflite.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# language: python
# name: python3
# ---
# # HW3 - Group 31
# ## Libraries
import os
import csv
from bs4 import BeautifulSoup
import pandas as pd
import requests as rq
import time
import random
import unicodedata
import nltk
from nltk.corpus import stopwords
from nltk.tokenize import word_tokenize
from nltk.stem import *
import string
import re
from math import *
import json
from collections import defaultdict
from scipy import spatial
import heapq
from IPython.core.display import HTML
from IPython.display import display
import webbrowser
import os
# ### 1.1) Get the list of movies
# In this step we get the list of all movie URLs from the movies HTML files. Each member of the group used this code to build the movie list and then download the HTML pages.
def get_movieList(path):
    """Load the list of movie URLs from ``movies1.html`` in *path*.

    read_html parses the first table on the page; the 'Id' column is
    dropped, leaving the movie URLs.
    """
    # os.path.join instead of the original hard-coded "\\" separator, so
    # the code is not Windows-only.  read_html already returns DataFrames,
    # so the original pd.DataFrame(...) wrapper was redundant.
    movies = pd.read_html(os.path.join(path, "movies1.html"))[0]
    movies.drop('Id', inplace=True, axis = 1)
    return movies
path = os.getcwd() # directory that contains the Movies*.html list files
movies = get_movieList(path) # DataFrame of movie URLs parsed from movies1.html in `path`
# ### 1.2) Crawl Wikipedia
# Now, we crawl each wikipedia page to get html files
# +
def save_html(movies):
    """Download each movie's Wikipedia page and save it as article_<i>.html.

    movies : DataFrame with a 'URL' column.
    Side effects only (HTTP GETs + file writes); returns None.
    """
    for i in range(len(movies)):
        try:
            response = rq.get(movies.URL[i])
        except rq.exceptions.RequestException as e:
            # Assume we were rate-limited/blocked by Wikipedia: wait ~20
            # minutes, then retry this URL once.
            print(e)
            time.sleep(20 * 60 + 30)
            response = rq.get(movies.URL[i])
        soup = BeautifulSoup(response.text, 'html.parser')
        # 'with' guarantees the file is closed; utf-8 avoids
        # UnicodeEncodeError on non-ASCII article text.
        with open('article_' + str(i) + '.html', 'w', encoding='utf-8') as f:
            f.write(str(soup))
        # polite random delay between requests (fixes the original's
        # missing closing parenthesis, which was a SyntaxError)
        time.sleep(random.choice(range(1, 6)))

save_html(movies)
# -
# ### 1.3) Parse downloaded pages
# In this step, we should parse the HTML pages, extract the specific information we want, and then save it as TSV files
# After creating the function parse, contained in parser_utils, we just run the parser over the path that contains the complete database of the 30000 articles. The parser elaborates each article (wiki page) with the BeautilfulSoup package, and all the info needed (title, intro, ..., Language, Budget) are extracted from the bs object, and added to a tsv file for each article. The function doesn't return anything in the enviroment, but just creates a tsv file for each article, and stores it in another folder, that we called TSV, in the same path given as input. (full explanation of the parser can be found in parser_utils.py)
# +
import parser_utils
path1 = path + '\\Articles'  # Windows-style path to the downloaded article HTML files
# Per the description above: writes one TSV per article into a sibling
# TSV folder; returns nothing.
parser_utils.parse(path1)
# -
# ## 2) search engine
# ### preprocessing
# #### All the TSV Files were preprocessed by :
# 1) Tokenization
#
# 2) Removing stop words
#
# 3) Removing punctuation
#
# 4) Stemming
#
# 5) Removing [] , ""
#
# +
def clean(text):
    """Tokenize *text* and return a list of cleaned, stemmed tokens.

    Pipeline: lowercase -> word_tokenize -> drop English stop words ->
    drop punctuation tokens -> keep word-character runs -> Porter-stem ->
    strip leftover quote artifacts -> drop single-character tokens.
    """
    stop_words = set(stopwords.words('english'))
    stemmer = PorterStemmer()
    words = word_tokenize(text.lower())  # split the text into tokens
    no_stop = [w for w in words if w not in stop_words]  # remove stop words
    no_punct = [w for w in no_stop if w not in string.punctuation]
    tokens = []
    for word in no_punct:
        # keep only alphanumeric runs (re.findall cannot raise on a str,
        # so the original try/except was unnecessary)
        tokens += re.findall(r'\w+', word)
    tokens = [stemmer.stem(w) for w in tokens]  # stemming
    tokens = [t.replace("''", "").replace("``", "") for t in tokens]  # drop quote artifacts
    return [t for t in tokens if len(t) > 1]
# -
# ### 2.1) Conjunctive query
# #### 2.1.1) creating index
# In this section, we should first create a dictionary with all the words in our documents. The keys of this dictionary are integers(term_ids) and values are words.
# Another dictionary that we create is docwords which points each document to list of all words in that document.
# Another dictionary is tsvs which contains intro and plot section of each document.
# we save these dictionaries as json files to use afterwards in our code
# +
# this function save an object to desired path as a json file
# this function saves an object to the desired path as a json file
def savetojson(pathfile, obj):
    """Serialize *obj* as UTF-8 JSON to *pathfile*.

    ensure_ascii=False keeps non-ASCII characters human-readable in the
    file.  The 'with' block closes the file, so the original's explicit
    close() call was redundant; json.dump streams straight to the file.
    """
    with open(pathfile, "w", encoding="utf-8") as out_file:
        json.dump(obj, out_file, ensure_ascii=False)
def get_vocab_index(path) :
    """Scan the 30000 article TSVs under *path* and write three JSON files:

    tsvs.json              : doc number -> parsed tsv row
    WORDS\\DocWords.json   : 'document_<i>' -> cleaned token list (intro + plot)
    WORDS\\vocabulary.json : term id (as str) -> word
    """
    allwords = list()
    docwords = dict() # point each document to its containing words
    tsvs = dict()
    vocabulary = dict() # point each term id to a word
    for i in range(0,30000):
        with open(path+"\\TSV\\article_" + str(i) + ".tsv", encoding = "utf-8") as fd:
            rd = csv.reader(fd, delimiter="\t", quotechar='"')
            # keeps the LAST non-empty row; presumably each TSV holds a
            # single data row -- TODO confirm
            for row in rd:
                if row :
                    tsv = row
        text = ' '.join([tsv[1],tsv[2]]) #get intro and plot of each tsv file
        tsvs[i] = tsv
        cleared = clean(text)
        docwords['document_'+str(i)] = cleared
        allwords += cleared
    allwords = list(set(allwords)) # get the list of unique words
    for i in range(len(allwords)):
        vocabulary[str(i)] = allwords[i]
    savetojson(path+"\\tsvs.json", tsvs)
    savetojson(path + "\\WORDS\\DocWords.json", docwords)
    savetojson(path + "\\WORDS\\vocabulary.json", vocabulary)
# -
# Now, we should create inverted_index which points each term_id to the documents that contains that word. First we load vocabulary json file that we created in the previous step
# +
def get_inverted_index(path):
    """Build and save the inverted index (term id -> list of document names).

    Loads the vocabulary JSON from *path*, inverts it to a word -> term id
    map, then walks the global ``docwords`` (document -> token list) and
    records, for every term, each document containing it (no duplicates).
    The result is written to WORDS\\Inverted_index.json via savetojson().
    """
    inverted = defaultdict(list)
    with open(path + "\\WORDS\\vocabulary.json", encoding="utf-8") as fd:
        vocabulary = json.load(fd)
    # invert term_id -> word into word -> term_id for direct lookup
    reverse_voc = {v: k for k, v in vocabulary.items()}
    # for each document and each word in that document, append the
    # document to the term's posting list at most once
    for doc in docwords.keys():
        for word in docwords[doc]:
            if doc not in inverted[reverse_voc[word]]:
                inverted[reverse_voc[word]].append(doc)
    savetojson(path + "\\WORDS\\Inverted_index.json", inverted)
# -
# #### 2.1.2) execute query
# First, we get the query from user and replace each word with the term_id. If the word did not exist in vocabulary dictionary we assign NA to it
# +
def get_query():
    """Read a query string from the user and return its cleaned token list."""
    raw = input("Insert your query: ")
    return clean(raw)
def get_query_index(query):
    """Map each query token to its term id, or 'NA' when unknown.

    Uses the reverse vocabulary (word -> term id) for O(1) lookups instead
    of scanning vocabulary.values() once per token as the original did.
    NOTE(review): assumes the global reverse_voc is the exact inverse of
    vocabulary, as built elsewhere in this notebook -- confirm.
    """
    return [reverse_voc.get(word, 'NA') for word in query]
# -
# In this step, we should find the documents that contain all words of the query
def execute_query(query):
    """Return the set of documents containing every token of *query*.

    Returns an explanatory string instead of a set when the query is
    empty or contains a token absent from the vocabulary.
    """
    if len(query) == 0:
        return 'Please, insert text in your search'
    doc_sets = []
    for term_id in get_query_index(query):
        if term_id == 'NA':
            # a token missing from the vocabulary can match no document
            return "No match for your query"
        doc_sets.append(set(inverted_index[term_id]))
    # documents that appear in every term's posting list
    return set.intersection(*doc_sets)
# In this part we create some functions that we need to run and show the results
# +
def Linked_URL(val):
    """Wrap *val* in an anchor tag that opens in a new browser tab."""
    return f'<a target="_blank" href="{val}">{val}</a>'
def replacer(val):
    """Escape '$' as backslash-dollar so the HTML displayer does not
    interpret it (e.g. as math markup)."""
    # explicit '\\$': the original's '\$' was an invalid escape sequence
    # (same runtime string, but emits a DeprecationWarning)
    return val.replace('$', '\\$')
def Run_SE1():
    """Prompt for a query, run the conjunctive search, and open the
    results (Id, Title, Intro, clickable URL) in the browser via
    display.html.  Side effects only; returns None."""
    query = get_query()
    results = []
    for file in execute_query(query):
        docid = file.split('_')[1]  # 'document_<id>' -> '<id>'
        tsv = newdict[docid]
        results.append([docid,tsv[0],tsv[1],Movies[docid]]) #create movies file before
    df = pd.DataFrame(results, columns = ['Id','Title', 'Intro', 'Wikipedia Url'])
    f = open(path + '\\display.html','w', encoding = 'utf-8')
    # make URLs clickable and escape '$' in the intro text
    message = df.style.format({'Wikipedia Url': Linked_URL}).format({'Intro': replacer}).render()
    f.write(message)
    f.close()
    #Change path to reflect file location
    filename = path + '\\display.html'
    webbrowser.open_new_tab(filename) # for showing the results in the browser
# -
def get_results(query):
    """Run the conjunctive query and return a DataFrame of matches
    (Id, Title, Intro, Wikipedia Url).

    NOTE(review): execute_query may return an error *string* for empty or
    unmatched queries; iterating that here would loop over characters and
    fail at the split below -- confirm callers pre-validate the query.
    """
    results = []
    for file in execute_query(query):
        docid = file.split('_')[1]  # 'document_<id>' -> '<id>'
        tsv = newdict[docid]
        results.append([docid,tsv[0],tsv[1],Movies[docid]]) #create movies file before
    result = pd.DataFrame(results, columns = ['Id','Title', 'Intro', 'Wikipedia Url'])
    return result
# ### 2.2) Conjunctive query & Ranking score
# In this part we should give scores based on cosine similarity
#
def cosine_similarity(a, b):
    """Cosine similarity between vectors a and b (1 - cosine distance)."""
    return 1 - spatial.distance.cosine(a, b)
# #### 2.2.1) Inverted index
# what we need now is to calculate the IDF and TF - IDF, according to the formulas:
# - $TF = \frac{N_{(x,y)}}{N_{(*,y)}}$
# - $IDF = log[1 + (\frac{D}{D_x})]$ <fr>
#
# Where:
# - $N_{(x,y)}$ is the number of times that the word $X$ is in the document $D_y$;
# - $N_{(*,y)}$ is the total number of the words in the document;
# - $D$ is the total number of documents;
# - $D_x$ is the number of documents in which the word $X$ appears at least once.
# Load the inverted index and per-document token lists built earlier.
with open(path + "\\WORDS\\Inverted_index.json", encoding="utf-8") as fd:
    inverted_index = json.load(fd)  # fixed: missing closing parenthesis
with open(path + "\\WORDS\\DocWords.json", encoding="utf-8") as fd:
    docwords = json.load(fd)
IDFs = dict()
inv_ind_tfIDF = defaultdict(list)
for term in inverted_index.keys():
    # IDF = log(1 + D / D_x) with D = 30000 total documents
    IDFs[term] = log(1 + 30000 / len(inverted_index[term]))
    for doc in inverted_index[term]:
        # TF = occurrences of the word in the doc / doc length
        tf = docwords[doc].count(vocabulary[term]) / len(docwords[doc])
        tfidf = tf * IDFs[term]
        inv_ind_tfIDF[term].append((doc, round(tfidf, 3)))  # fixed: missing ')'
savetojson(path + "\\WORDS\\TfIdf_inv_index.json", inv_ind_tfIDF)
# #### 2.2.2)Execute the query
# We start calculating the tfidf values for words searched in the query, previously cleaned and tokenized.
def querytf(query):
    """Return term-frequency weights for the (cleaned) query tokens.

    Each occurrence of a word contributes 1/len(query) to its term id.
    Raises KeyError when a word is missing from reverse_voc (unchanged
    from the original, where the lookup sat outside the try block).
    """
    qtf = dict()
    weight = 1 / len(query)  # hoisted: loop-invariant
    for word in query:
        term_id = reverse_voc[word]
        # dict.get replaces the original's bare try/except initialization
        qtf[term_id] = qtf.get(term_id, 0) + weight
    return qtf
# With execute_SE2 we just take the results from the conjunctive query, get their ids, and then create a dictionary (wordtf) that stores the list of tfidf values for words contained in each document of the conjunctive query result.
# +
def execute_SE2(query) :
    """For each document matching *query*, collect the tf-idf values of
    the query terms in that document (doc id -> list of tf-idf scores)."""
    results = get_results(query)
    ids = get_query_index(query)
    wordtf = defaultdict(list)
    for i in results['Id']:
        doc = 'document_'+str(i)
        # linear scan of each term's posting list for this document
        for term in ids:
            for docs in inv_ind_tfIDF[term]:
                if docs[0] == doc:
                    wordtf[i].append(docs[1])
    return(wordtf)
# -
# Run_SE2 will just take the functions defined previously to build a heap structure based on cosine similarity between query and each document. The result will be the first 10 pages sorted by cosine similarity, displayed in display.html page.
def Run_SE2():
    """Prompt for a query, rank matching documents by cosine similarity
    between query and document tf-idf vectors, and open the top 10 in the
    browser (written to display.html).  Side effects only; returns None."""
    query = get_query()
    wordtf = execute_SE2(query)  # doc id -> tf-idf values of the query terms
    qtfs = querytf(query)
    list_query = list(qtfs.values())
    heap = []
    for doc in wordtf:
        # NOTE(review): assumes wordtf[doc] has the same length/order as
        # list_query -- confirm for repeated query terms
        heapq.heappush(heap, (cosine_similarity(list_query, wordtf[doc]), doc))
    heap_result = heapq.nlargest(10, heap)  # ten highest-similarity docs
    df = dict()
    for x,y in heap_result:
        df[y] = newdict[y][0:2]  # Title, Intro
        df[y].append(Movies[y])
        df[y].append(x)  # similarity score
    df = pd.DataFrame.from_dict(df, orient = 'index', columns=['Title', 'Intro', 'Wikipedia Url', 'Score'])
    f = open(path + '\\display.html','w', encoding = 'utf-8')
    message = df.style.format({'Wikipedia Url': Linked_URL}).format({'Intro': replacer}).render()
    f.write(message)
    f.close()
    filename = path + '\\display.html'
    webbrowser.open_new_tab(filename)
# ## Defining a new score
# In this step we should define some variables and calculate the new scores based on them. The variables that we decided to use are: the release year, the length of the movie (runtime), the budget, and the number of stars, as these variables seem to be the most important to most users. First we get some queries from the user, and based on the maximum and minimum values of these variables among the documents returned by the first search engine, we define a scoring function for each variable that gives a score between 0 and 1. Finally we calculate the mean of these scores and put them in a heap structure to find the 10 documents with the highest scores.
#getting query from user
def get_query_SE3():
    """Interactively collect a free-text query plus optional filters.

    Returns (tokens, q) where tokens is the cleaned query and q maps
    'year', 'Runtime', 'starring', 'Budget' to the user's answers
    ('NA' when skipped).
    NOTE(review): on an invalid runtime this returns an error *string*
    instead of the usual tuple -- callers must handle both shapes.
    """
    query = input("insert your query : ")
    query = clean(query)
    q = dict()
    year = input("Do you want to specify the release year ? [Y/N] : ").lower()
    if year == "y" :
        year = input("Please, specify the release date : ")
        q["year"] = year
    else:
        q["year"] = 'NA'
    Runtime = input("Do you want to specify the length of the movie? [Y/N] : ").lower()
    if Runtime == "y" :
        Runtime = input("Please, specify the length of the movie : ")
        # runtime must contain at least one digit to be usable for scoring
        if re.search('\d', Runtime):
            q['Runtime'] = Runtime
        else:
            return 'Please, enter a valid runtime.'
    else :
        q["Runtime"] = 'NA'
    starring = input("Is number of stars an important factor for you? [Y/N] : ").lower()
    if starring == "y" :
        starring = input("Please, specify if you're looking for a big or small cast [B/S]: ")
        q["starring"] = starring
    else :
        q["starring"] = 'NA'
    budget = input("Is movie budget an important factor for you? [Y/N] : ").lower()
    if budget == "y" :
        q['Budget'] = input("Please, specify the budget of the movie you're looking for : ")
    else :
        q['Budget'] = 'NA'
    return query,q
# Now we should execute our search engine with the query
# +
def search_engine3(query=None, q=None):
    """Search engine with a custom ranking score.

    Ranks the conjunctive-query results by the mean of four sub-scores
    (release year, runtime, budget, cast size), writes the top 10 to
    display.html, opens it in the browser, and returns the DataFrame.

    query / q are optional for backward compatibility: when omitted the
    user is prompted interactively via get_query_SE3() (the original
    no-argument behaviour).  Returning the DataFrame also supports the
    ``result = search_engine3(query, q)`` usage later in this notebook.
    """
    if query is None or q is None:
        (query, q) = get_query_SE3()
    results = execute_query(query)  # documents matching every query token
    d = defaultdict(dict)
    result_variables = dict()  # document id -> dict of extracted ranking variables
    for i in results:
        docId = i.split("_")[1]
        tsv = newdict[docId]
        d[i] = dict()
        # cast size; the sentinel '-10000' marks "unknown" so it never wins
        if tsv[6] == 'NA':
            d[i]['Starring'] = '-10000'
        else:
            d[i]['Starring'] = str(len(tsv[6].replace('\n', '').strip(',').split(',,')))
        try:
            d[i]['Release Year'] = re.search(r'\d{4}', tsv[8]).group(0)
        except:
            d[i]['Release Year'] = '-10000'
        try:
            d[i]['Runtime'] = re.search(r'\d+.*', tsv[9]).group(0)
        except:
            d[i]['Runtime'] = '-10000'
        # runtimes expressed in reels have no unique minute conversion,
        # so only keep values explicitly given in minutes
        if re.search(r'min', d[i]['Runtime']):
            d[i]['Runtime'] = re.search(r'\d+[\.|\,|:]*\d*', d[i]['Runtime']).group(0)
            d[i]['Runtime'] = re.search(r'\d+', d[i]['Runtime']).group(0)
        else:
            d[i]['Runtime'] = '-10000'
        try:
            d[i]['Budget'] = re.findall(r'\$.*', tsv[12])[0]
        except:
            d[i]['Budget'] = '-10000'
        if re.search(r'mil', d[i]['Budget']):
            d[i]['Budget'] = str(int(float(re.search(r'\d+[\.|\,]*\d*', d[i]['Budget']).group(0).replace(',', '.')) * 10**6))
        elif re.search(r'\,', d[i]['Budget']) or re.search(r'\.', d[i]['Budget']):
            d[i]['Budget'] = re.search(r'(\d+[\,!\.])+\d+', d[i]['Budget']).group(0).replace(',', '').replace('.', '')
        result_variables[docId] = d[i]
    # collect numeric values per variable across the result set
    Runtimes = []
    Release_year = []
    Starring = []
    Budgets = []
    for i in result_variables.keys():
        i = 'document_' + str(i)
        Runtimes.append(int(d[i]["Runtime"]))
        Release_year.append(int(d[i]["Release Year"]))
        Starring.append(int(d[i]["Starring"]))
        Budgets.append(int(d[i]["Budget"]))
    # Parse the budget asked for in the query ONCE, outside the loop.
    # (The original re-parsed it per document and crashed with a TypeError
    # when the query budget contained digits but neither 'mil' nor ','/'.')
    query_budget = None
    if re.search(r'\d', q['Budget']):
        if re.search(r'mil', q['Budget']):
            query_budget = int(float(re.search(r'\d+[\.|\,]*\d*', q['Budget']).group(0).replace(',', '.')) * 10**6)
        elif re.search(r'\,', q['Budget']) or re.search(r'\.', q['Budget']):
            query_budget = int(re.search(r'(\d+[\,!\.])+\d+', q['Budget']).group(0).replace(',', '').replace('.', ''))
        else:
            query_budget = int(re.search(r'\d+', q['Budget']).group(0))
    # loop-invariant extremes, hoisted out of the scoring loop
    maxstar = max(Starring)
    minstar = min(Starring)
    scores = dict()
    for i in result_variables:
        i = 'document_' + str(i)
        # runtime score: gaussian-like falloff around the requested length
        if re.search(r'\d', q['Runtime']):
            run_score = exp(-(int(re.search(r'\d+', q['Runtime']).group(0)) - int(d[i]['Runtime']))**2 / 100)
        else:
            run_score = 0
        # release-year score: exponential decay with distance in years
        if re.search(r'\d', q['year']):
            distance = abs(int(d[i]['Release Year']) - int(re.search(r'\d+', q["year"]).group(0)))
            year_score = exp(-distance / 10)
        else:
            year_score = 0
        # budget score: exponential decay with absolute budget distance
        if query_budget is not None:
            budget_score = exp(-abs(query_budget - int(d[i]['Budget'])) / 10**5)
        else:
            budget_score = 0
        # starring score: linear scaling between the min and max cast sizes.
        # NOTE(review): the 'B' branch rewards SMALLER casts -- possibly
        # inverted, but preserved as in the original.
        if q['starring'] == 'B':
            starring_score = (maxstar - int(d[i]['Starring'])) / (maxstar - minstar)
        elif q['starring'] == 'S':
            starring_score = (int(d[i]['Starring']) - minstar) / (maxstar - minstar)
        else:
            starring_score = 0
        mean_score = 1/4 * (run_score + year_score + budget_score + starring_score)
        scores[i] = (mean_score, i)
    # use a heap structure to find the 10 best scores
    heap = []
    for doc in scores:
        heapq.heappush(heap, scores[doc])
    heap_result = heapq.nlargest(10, heap)
    df = dict()
    for x, z in heap_result:
        y = z.split('_')[1]
        df[y] = newdict[y][0:2]
        df[y].append(Movies[y])
        df[y].append(x)
    # fixed typo: the column was named 'Wikipedia UrlL', which made the
    # df.style.format({'Wikipedia Url': ...}) call below fail
    df = pd.DataFrame.from_dict(df, orient='index', columns=['Title', 'Intro', 'Wikipedia Url', 'Score'])
    f = open(path + '\\display.html', 'w', encoding='utf-8')
    message = df.style.format({'Wikipedia Url': Linked_URL}).format({'Intro': replacer}).render()
    f.write(message)
    f.close()
    #Change path to reflect file location
    filename = path + '\\display.html'
    webbrowser.open_new_tab(filename)
    return df
# -
# # Bonus - Create a co-stardom network
import networkx as nx
import matplotlib.pyplot as plt
from Functions import *
import seaborn as sns
# First we should insert a query and get the results from the third search engine
# Interactive prompt (immediately overridden by the hard-coded example below).
(query, q) = get_query_SE3()
query, q = (['orc', 'elv'],
 {'year': '1995', 'Runtime': 'NA', 'starring': 'NA', 'Budget': '20 milions'})
# NOTE(review): search_engine3 as defined above takes no arguments and
# returns None; this call assumes a variant accepting (query, q) and
# returning the result DataFrame -- confirm which version is intended.
result = search_engine3(query, q)
result.index
# Now we need to find the nodes and edges. The nodes of this network are all star actors in the documents that we found in the previous step, and the edges are the pairs of these actors that appear together in at least 2 documents.
newdict['14018']  # from the Functions import: newdict maps doc id -> parsed tsv fields
stars = []  # ordered, duplicate-free list of actors across the top results
for ind in result.index :
    tsv = newdict[str(ind)]
    for i in tsv[6].replace('\n', '').strip(',').split(',,') :  # actors credited in this movie
        if i not in stars :
            stars.append(i)
stars
# Now we have the nodes of our network. In order to create the edges, first we should make a list of all possible pairs of actors
# Every unordered pair of distinct actors, in the same order the original
# nested index loops produced.
duel_stars = [(stars[a], stars[b])
              for a in range(len(stars))
              for b in range(a + 1, len(stars))]
# In this step we should check which duel combinations that we created in previous step, exist in more than 2 documents
# +
edge = []
nodes = set()
for x,y in duel_stars : #for each dual combination
    counter = 0
    for j in result.index: # search in the whole dataset
        tsv = newdict[str(j)]
        starring = tsv[6].replace('\n', '').strip(',').split(',,')
        # both actors credited in this movie?
        if len(set((x,y)).intersection(set(starring))) == 2:
            counter += 1
        # second co-appearance found: keep the edge, record both actors,
        # and stop scanning further movies for this pair
        if counter == 2:
            edge.append((x,y))
            nodes.add(x)
            nodes.add(y)
            break
# -
nodes
# Now we have Nodes and Edges. It's time to create a network
# # Creating co-stardom network
# +
# Build the co-stardom graph: one node per actor, one edge per pair that
# co-starred in at least two of the result movies.
G = nx.Graph()
G.add_nodes_from(stars)
G.add_edges_from(edge)
nx.draw(G, with_labels=True)  # fixed: with_labels expects a bool (5 was merely truthy)
plt.savefig("co-stardom.png", format = 'png')
plt.show()
# -
# To make a better visualization, we position unlinked nodes aside, and the linked nodes in the center, so that the names (most of them) are readable.
# +
G = nx.Graph()
G.add_nodes_from(stars)
G.add_edges_from(edge)
pos = dict()
c = 0
# place the unlinked actors (no edges) in two columns on the sides,
# alternating left/right so the labels stay readable
for i in set(stars)-nodes:
    c+=1
    if c%2 ==0:
        m = 0
    else:
        m = 1
    pos[i] = (m*250, c*15)
# scatter the linked actors randomly in the central band
for i in nodes:
    pos[i] = (random.choice(range(50,200)), random.choice(range(1,c*15)))
nx.draw(G, pos, with_labels=True, font_color = 'red', font_size = 13, font_weight = 'bold')
plt.xlim([-70, 320])
plt.savefig("co-stardom.png")
# -
# Since the movies resulted from the query were few, and most of them were from the Lord of the Rings trilogy, we have a network in which all of the actors from the trilogy are linked, and actors from the remaining movies that didn't act in Lord of the rings are unlinked, because they acted together only once.
# # Analysis
# Let's do some analysis on this network. We can make a density distribution histogram which shows the number of connections for each node
# Degree distribution: how many nodes have each degree value.
all_degrees = [deg for _, deg in G.degree()]
unique_degrees = list(set(all_degrees))
count = [all_degrees.count(deg) for deg in unique_degrees]
plt.plot(unique_degrees, count, "yo-")
plt.xlabel("Degree")
plt.ylabel("Number of nodes")
plt.show()
# We can calculate the degree centrality of each node to see which actors were most involved with other actors (most influential)
# Degree centrality per actor; compute the dict once instead of three times.
centrality = nx.degree_centrality(G)
centrality
# actors ordered from most to least central
sorted(centrality, key=centrality.get, reverse=True)
| final main.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# language: python
# name: python3
# ---
# # LASI2021 Machine Learning Workshop
# ## K Nearest Neighbor (KNN)
# ## 0. Import Libraries
# +
import numpy as np
import pandas as pd
import matplotlib.pyplot as plt
import seaborn as sns
from mpl_toolkits.mplot3d import Axes3D
from sklearn.neighbors import KNeighborsClassifier
from sklearn import metrics
# -
# ## 1. Import Dataset
# Load the demo dataset (three numeric feature columns + a 'label' column).
df = pd.read_csv("data/knndata.csv")
df.head(10)
df.describe()
# ## 2. Exploratory Data Analysis
df.corr()
X1 = df.iloc[:,0].values
X2 = df.iloc[:,1].values
X3 = df.iloc[:,2].values
y = df.iloc[:,3].values
# one colour per class; assumes labels are 0/1 so they can index this
# 2-entry array -- TODO confirm
rgb = np.array(['g','b'])
fig = plt.figure(figsize=(18,15))
ax = fig.add_subplot(111,projection='3d')
ax.scatter(X1,X2,X3,color=rgb[y])
sns.pairplot(df,markers="+",height=2)
# ## 3. Run KNN Model
X = df.drop(['label'],axis=1)
y = df['label']
knn = KNeighborsClassifier(n_neighbors=3)
knn.fit(X, y)
# ## 4. Evaluate Classifier
# NOTE(review): metrics are computed on the training data itself, so the
# scores are optimistic; a train/test split would give an honest estimate.
y_pred = knn.predict(X)
print(metrics.accuracy_score(y,y_pred))
print(metrics.confusion_matrix(y,y_pred))
print(metrics.classification_report(y,y_pred))
# ## 5. Predict New Points
new_points = [[99,9,3.9],[10,1,1.1],[22,2,2]]
predict = knn.predict(new_points)
predict
| MachineLearning/KNearestNeighbor/.ipynb_checkpoints/KNearestNeighbor-checkpoint.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .jl
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Julia 1.3.1
# language: julia
# name: julia-1.3
# ---
# # Is Julia fast?
# Julia isn't fast *per se*.
#
# One can write terribly slow code in any language, including Julia.
#
# So let's ask a different question.
# # *Can* Julia be fast?
# ### Microbenchmarks
# <img src="benchmarks.svg" alt="drawing" width="800"/>
# ### More realistic case: Vandermonde matrix
# (modified from [Steve's Julia intro](https://web.mit.edu/18.06/www/Fall17/1806/julia/Julia-intro.pdf))
# [Vandermonde matrix:](https://en.wikipedia.org/wiki/Vandermonde_matrix)
# \begin{align}V=\begin{bmatrix}1&\alpha _{1}&\alpha _{1}^{2}&\dots &\alpha _{1}^{n-1}\\1&\alpha _{2}&\alpha _{2}^{2}&\dots &\alpha _{2}^{n-1}\\1&\alpha _{3}&\alpha _{3}^{2}&\dots &\alpha _{3}^{n-1}\\\vdots &\vdots &\vdots &\ddots &\vdots \\1&\alpha _{m}&\alpha _{m}^{2}&\dots &\alpha _{m}^{n-1}\end{bmatrix}\end{align}
using PyCall
np = pyimport("numpy")
np.vander(1:5, increasing=true)
# The source code for this function is [here](https://github.com/numpy/numpy/blob/v1.16.1/numpy/lib/twodim_base.py#L475-L563). It calls `np.multiply.accumulate` which is implemented in C [here](https://github.com/numpy/numpy/blob/deea4983aedfa96905bbaee64e3d1de84144303f/numpy/core/src/umath/ufunc_object.c#L3678). However, this code doesn't actually perform the computation, it basically only checks types and stuff. The actual kernel that gets called is [here](https://github.com/numpy/numpy/blob/deea4983aedfa96905bbaee64e3d1de84144303f/numpy/core/src/umath/loops.c.src#L1742). This isn't even C code but a template for C code which is used to generate type specific kernels.
#
# Overall, this setup only supports a limited set of types, like `Float64`, `Float32`, and so forth.
# Here is a simple Julia implementation
# Vandermonde matrix of x (m x n): column 1 is ones, each later column is
# the previous one multiplied elementwise by x.  Generic over any element
# type T that defines one() and *.
function vander(x::AbstractVector{T}, n=length(x)) where T
    m = length(x)
    V = Matrix{T}(undef, m, n)
    # first column: multiplicative identity of the element type
    for j = 1:m
        V[j,1] = one(x[j])
    end
    # column i = column (i-1) .* x, filled column by column
    for i= 2:n
        for j = 1:m
            V[j,i] = x[j] * V[j,i-1]
        end
    end
    return V
end
vander(1:5)
# #### A quick speed comparison
# <details>
# <summary>Show Code</summary>
# <br>
#
# ```julia
# using BenchmarkTools, Plots
# ns = exp10.(range(1, 4, length=30));
#
# tnp = Float64[]
# tjl = Float64[]
# for n in ns
# x = 1:n |> collect
# push!(tnp, @belapsed np.vander(\$x) samples=3 evals=1)
# push!(tjl, @belapsed vander(\$x) samples=3 evals=1)
# end
# plot(ns, tnp./tjl, m=:circle, xscale=:log10, xlab="matrix size", ylab="NumPy time / Julia time", legend=:false)
# ```
# </details>
# <img src="vandermonde.svg" alt="drawing" width="600"/>
# Note that the clean and concise Julia implementation is **beating numpy's C implementation for small matrices** and is **on-par for large matrix sizes**.
#
# At the same time, the Julia code is *generic* and works for arbitrary types!
vander(Int32[4, 8, 16, 32])
# It even works for non-numerical types. The only requirement is that the type has a *one* (identity element) and a multiplication operation defined.
vander(["this", "is", "a", "test"])
# Here, `one(String) == ""` since the empty string is the identity under multiplication (string concatenation).
# # How can Julia be fast?
# <p><img src="from_source_to_native.png" alt="drawing" width="800"/></p>
#
# **AST = Abstract Syntax Tree**
#
# **SSA = Static Single Assignment**
#
# **[LLVM](https://de.wikipedia.org/wiki/LLVM) = Low Level Virtual Machine**
# ### Specialization and code inspection
# **Julia specializes on the types of function arguments.**
#
# When a function is called for the first time, Julia compiles efficient machine code for the given input types.
#
# If it is called again, the already existing machine code is reused, until we call the function with different input types.
# Toy function used below to show per-argument-type compilation.
func(x,y) = x^2 + y
@time func(1,2)
@time func(1,2)
# **First call:** compilation + running the code
#
# **Second call:** running the code
@time func(1,2)
# If the input types change, Julia compiles a new specialization of the function.
@time func(1.3,4.8)
@time func(1.3,4.8)
# We now have two efficient codes, one for all `Int64` inputs and another one for all `Float64` arguments, in the cache.
# ### *But I really want to see what happens!*
# We can inspect the code at all transformation stages with a bunch of macros:
#
# * The AST after parsing (**`@macroexpand`**)
# * The AST after lowering (**`@code_typed`**, **`@code_warntype`**)
# * The AST after type inference and optimization (**`@code_lowered`**)
# * The LLVM IR (**`@code_llvm`**)
# * The assembly machine code (**`@code_native`**)
@code_typed func(1,2)
@code_lowered func(1,2)
@code_llvm func(1,2)
# We can remove the comments (lines starting with `;` using `debuginfo=:none`).
@code_llvm debuginfo=:none func(1,2)
@code_native debuginfo=:none func(1,2)
# Let's compare this to `Float64` input.
@code_native debuginfo=:none func(1.2,2.9)
# ## How important is code specialization?
# Let's try to estimate the performance gain by specialization.
#
# We wrap our numbers into a custom type which internally stores them as `Any` to prevent specialization.
#
# (This is qualitatively comparable to what Python does.)
# +
# Wrapper that stores its value as `Any`, deliberately defeating Julia's
# type specialization (used below to measure the cost of dynamic typing).
struct Anything
    value::Any
end

# Same computation on a plain number (specialized) and on the Any-typed wrapper.
operation(x::Number) = x^2 + sqrt(x)
operation(x::Anything) = x.value^2 + sqrt(x.value)
# +
using BenchmarkTools
@btime operation(2);
@btime operation(2.0);
x = Anything(2.0)
@btime operation($x);
# -
# **That's about an 40 times slowdown!**
@code_native debuginfo=:none operation(2.0)
@code_native debuginfo=:none operation(x)
# # Make run-time the fun time.
# In scientific computations, we typically run a piece of code many times over and over again. Think of a Monte Carlo simulation, for example, where we perform the update and the Metropolis check millions of times.
#
# **Therefore, we want our run-time to be as short as possible.**
#
# On the other hand, for a given set of input arguments, Julia compiles the piece of code only once, as we have seen above. The time it takes to compile our code is almost always negligible compared to the duration of the full computation.
#
# A general strategy is therefore to move parts of the computation to compile-time.
# Since Julia specializes on types, at compile-time **only type information is available to the compiler.**
# +
f1(x::Int) = x + 1
f2(x::Int) = x + 2

# The branch depends on the run-time *value* of p, so it must be
# evaluated on every call.
function f_slow(x::Int, p::Bool)
    if p # check depends on the value of p
        return f1(x)
    else
        return f2(x)
    end
end
# -
@code_llvm debuginfo=:none f_slow(1, true)
# We can eliminate the if branch by moving the condition check to the type domain. This way, it **will only be evaluated once at compile-time.**
# +
abstract type Boolean end
struct True <: Boolean end # type domain true
struct False <: Boolean end # type domain false

# The check depends only on typeof(p), which is known at compile time, so
# the compiler can resolve the branch once per specialization.
function f_fast(x::Int, p::Boolean)
    if typeof(p) == True # check solely based on the type of p
        return f1(x)
    else
        return f2(x)
    end
end
# -
@code_llvm debuginfo=:none f_fast(1, True())
# # Are explicit type annotations necessary? (like in C or Fortran)
# Note that Julia's type inference is powerful. Specifying types **is not** necessary for best performance!
# +
# Untyped version: Julia's type inference handles everything; annotations
# are not needed for performance (see the benchmark below).
function my_function(x)
    y = rand()
    z = rand()
    x+y+z
end

# Fully annotated version: the annotations only add conversion/assertion
# semantics, not speed (benchmarked below against the untyped version).
function my_function_typed(x::Int)::Float64
    y::Float64 = rand()
    z::Float64 = rand()
    x+y+z
end
# -
@btime my_function(10);
@btime my_function_typed(10);
# However, annotating types explicitly can serve a purpose.
#
# * **Define a user interface/type filter** (will throw error if incompatible type is given)
# * Enforce conversions
# * Rarely, help the compiler infer types in tricky situations
# # Core messages of this Notebook
#
# * Julia **can be fast.**
# * **A function is compiled when called for the first time** with a given set of argument types.
# * The are **multiple compilation steps** all of which can be inspected through macros like `@code_warntype`.
# * **Code specialization** based on the types of all of the input arguments is important for speed.
# * Calculations can be moved to compile-time to make run-time faster.
# * In virtually all cases, **explicit type annotations are irrelevant for performance**.
# * Type annotations in function signatures define a **type filter/user interface**.
| Day1/3_specialization.ipynb |