text stringlengths 2.5k 6.39M | kind stringclasses 3
values |
|---|---|
# Simple Visualization
```
import json
import os
import pickle
from datetime import datetime

import matplotlib.dates as mdates
import matplotlib.pyplot as plt
import numpy as np
import pandas as pd
# import pygmt
# dir_name = "Ridgecrest_demo"
dir_name = "Ridgecrest_oneweek"
# dir_name = "SaltonSea"
# dir_name = "SanSimeon"
# dir_name = "Ridgecrest"
# dir_name = "Italy"
# dir_name = "Hawaii"
# dir_name = "PuertoRico"
# dir_name = "Tahoe"
if not os.path.exists(dir_name):
os.mkdir(dir_name)
root_dir = lambda x: os.path.join(dir_name, x)
result_label="QuakeFlow"
catalog_label="Standard"
# catalog_label="SCSN"
# catalog_label="IRIS"
catalog_file = "catalog.csv"
picks_file = "picks.csv"
with open(root_dir("config.json"), "r") as fp:
config = json.load(fp)
stations = pd.read_csv(root_dir("stations.csv"), delimiter="\t")
events = pd.read_csv(root_dir("events.csv"), delimiter="\t")
events["time"] = events["time"].apply(lambda x: datetime.strptime(x, "%Y-%m-%dT%H:%M:%S.%f"))
catalog = pd.read_csv(root_dir(catalog_file), delimiter="\t")
catalog["time"] = catalog["time"].apply(lambda x: datetime.strptime(x, "%Y-%m-%dT%H:%M:%S.%f"))
catalog["covariance"] = catalog["covariance"].apply(lambda x: [float(i) for i in x.split(",")])
marker_size = 0.1
bins = min(len(catalog["time"])//50 + 10, 50)
# region = config["xlim_degree"] + config["ylim_degree"]
# fig = pygmt.Figure()
# fig.basemap(region=region, projection="M8i", frame=True)
# fig.grdimage("@earth_relief_15s", cmap="topo", shading=True)
# # fig.grdimage("@earth_relief_15s", cmap="sealand", shading=True)
# fig.plot(x=stations["longitude"], y=stations["latitude"], style="t0.5", color="blue", pen="black", label="Station")
# fig.savefig(root_dir(f"stations-{dir_name.lower()}.png"))
# fig.savefig(root_dir(f"stations-{dir_name.lower()}.pdf"))
# fig.show()
plt.figure()
plt.hist(catalog["time"], range=(config["starttime"], config["endtime"]), bins=bins, edgecolor="k", alpha=1.0, linewidth=0.5, label=f"{result_label}: {len(catalog['time'])}")
plt.hist(events["time"], range=(config["starttime"], config["endtime"]), bins=bins, edgecolor="k", alpha=1.0, linewidth=0.5, label=f"{catalog_label}: {len(events['time'])}")
plt.ylabel("Frequency")
plt.xlabel("Date")
# plt.yscale("log")
plt.gca().autoscale(enable=True, axis='x', tight=True)
plt.gca().xaxis.set_major_formatter(mdates.DateFormatter('%m-%d:%H'))
plt.gcf().autofmt_xdate()
plt.legend()
plt.savefig(root_dir("earthquake_number.png"), bbox_inches="tight", dpi=300)
plt.savefig(root_dir("earthquake_number.pdf"), bbox_inches="tight")
plt.show()
fig = plt.figure(figsize=plt.rcParams["figure.figsize"]*np.array([1.5,1]))
box = dict(boxstyle='round', facecolor='white', alpha=1)
text_loc = [0.05, 0.92]
grd = fig.add_gridspec(ncols=2, nrows=2, width_ratios=[1.5, 1], height_ratios=[1,1])
fig.add_subplot(grd[:, 0])
plt.plot(stations["longitude"], stations["latitude"], 'k^', markersize=3, alpha=0.5, label="Stations")
plt.plot(catalog["longitude"], catalog["latitude"], '.',markersize=marker_size, alpha=1.0, rasterized=True)
plt.plot(events["longitude"], events["latitude"], '.', markersize=marker_size, alpha=0.6, rasterized=True)
plt.axis("scaled")
plt.xlim(np.array(config["xlim_degree"]))#+np.array([0.2,-0.27]))
plt.ylim(np.array(config["ylim_degree"]))#+np.array([0.2,-0.27]))
plt.ylabel("Latitude")
plt.xlabel("Longitude")
plt.gca().set_prop_cycle(None)
plt.plot(config["xlim_degree"][0]-10, config["ylim_degree"][0]-10, '.', markersize=10, label=f"{result_label}: {len(catalog['time'])}")
plt.plot(config["xlim_degree"][0]-10, config["ylim_degree"][0]-10, '.', markersize=10, label=f"{catalog_label}: {len(events['time'])}")
plt.legend(loc="lower right")
plt.text(text_loc[0], text_loc[1], '(i)', horizontalalignment='left', verticalalignment="top",
transform=plt.gca().transAxes, fontsize="large", fontweight="normal", bbox=box)
fig.add_subplot(grd[0, 1])
plt.plot(catalog["longitude"], catalog["depth(m)"]/1e3, '.', markersize=marker_size, alpha=1.0, rasterized=True)
plt.plot(events["longitude"], events["depth(m)"]/1e3, '.', markersize=marker_size, alpha=0.6, rasterized=True)
# plt.axis("scaled")
plt.xlim(np.array(config["xlim_degree"]))#+np.array([0.2,-0.27]))
# plt.ylim([0,21])
plt.ylim(bottom=0, top=41)
plt.gca().invert_yaxis()
plt.xlabel("Longitude")
plt.ylabel("Depth (km)")
plt.gca().set_prop_cycle(None)
plt.plot(config["xlim_degree"][0]-10, 31, '.', markersize=10, label=f"{result_label}")
plt.plot(31, 31, '.', markersize=10, label=f"{catalog_label}")
plt.legend(loc="lower right")
plt.text(text_loc[0], text_loc[1], '(ii)', horizontalalignment='left', verticalalignment="top",
transform=plt.gca().transAxes, fontsize="large", fontweight="normal", bbox=box)
fig.add_subplot(grd[1, 1])
plt.plot(catalog["latitude"], catalog["depth(m)"]/1e3, '.', markersize=marker_size, alpha=1.0, rasterized=True)
plt.plot(events["latitude"], events["depth(m)"]/1e3, '.', markersize=marker_size, alpha=0.6, rasterized=True)
# plt.axis("scaled")
plt.xlim(np.array(config["ylim_degree"]))#+np.array([0.2,-0.27]))
# plt.ylim([0,21])
plt.ylim(bottom=0, top=41)
plt.gca().invert_yaxis()
plt.xlabel("Latitude")
plt.ylabel("Depth (km)")
plt.gca().set_prop_cycle(None)
plt.plot(config["ylim_degree"][0]-10, 31, '.', markersize=10, label=f"{result_label}")
plt.plot(31, 31, '.', markersize=10, label=f"{catalog_label}")
plt.legend(loc="lower left")
plt.tight_layout()
plt.text(text_loc[0], text_loc[1], '(iii)', horizontalalignment='left', verticalalignment="top",
transform=plt.gca().transAxes, fontsize="large", fontweight="normal", bbox=box)
plt.savefig(root_dir("earthquake_location.png"), bbox_inches="tight", dpi=300)
plt.savefig(root_dir("earthquake_location.pdf"), bbox_inches="tight", dpi=300)
plt.show()
# catalog["magnitude"] += np.log(np.sqrt(3))
plt.figure()
xrange = (-1., max(events["magnitude"].max(), catalog["magnitude"].max()))
# xrange = (-1., events["magnitude"].max())
plt.hist(catalog["magnitude"], range=xrange, bins=bins, alpha=1.0, edgecolor="k", linewidth=0.5, label=f"{result_label}: {len(catalog['magnitude'])}")
plt.hist(events["magnitude"], range=xrange, bins=bins, alpha=0.6, edgecolor="k", linewidth=0.5, label=f"{catalog_label}: {len(events['magnitude'])}")
plt.legend()
plt.xlim(xrange)
plt.xlabel("Magnitude")
plt.ylabel("Frequency")
plt.gca().set_yscale('log')
plt.savefig(root_dir("earthquake_magnitude_frequency.png"), bbox_inches="tight", dpi=300)
plt.savefig(root_dir("earthquake_magnitude_frequency.pdf"), bbox_inches="tight")
plt.show()
plt.figure()
plt.plot(catalog["time"], catalog["magnitude"], '.', markersize=marker_size+1.5, alpha=1.0, rasterized=True)
plt.plot(events["time"], events["magnitude"], '.', markersize=marker_size+1.5, alpha=0.8, rasterized=True)
plt.xlim(config["starttime"], config["endtime"])
# plt.ylim(top=events["magnitude"].max())
ylim = plt.ylim(bottom=-1)
xlim = plt.xlim()
plt.ylabel("Magnitude")
plt.xlabel("Date")
plt.gca().xaxis.set_major_formatter(mdates.DateFormatter('%m-%d:%H'))
plt.gcf().autofmt_xdate()
plt.gca().set_prop_cycle(None)
plt.plot(datetime.fromisoformat(config["starttime"]), -10, '.', markersize=15, alpha=1.0, label=f"{result_label}: {len(catalog['magnitude'])}")
plt.plot(datetime.fromisoformat(config["starttime"]), -10, '.', markersize=15, alpha=1.0, label=f"{catalog_label}: {len(events['magnitude'])}")
plt.legend()
plt.xlim(xlim)
plt.ylim(ylim)
# plt.grid()
plt.savefig(root_dir("earthquake_magnitude_time.png"), bbox_inches="tight", dpi=300)
plt.savefig(root_dir("earthquake_magnitude_time.pdf"), bbox_inches="tight", dpi=300)
plt.show();
covariance = np.array(catalog["covariance"].to_list())
fig = plt.figure(figsize=plt.rcParams["figure.figsize"]*np.array([0.8,1.1]))
box = dict(boxstyle='round', facecolor='white', alpha=1)
text_loc = [0.05, 0.90]
plt.subplot(311)
plt.plot(catalog["time"], covariance[:,0], '.', markersize=marker_size, label="Travel-time", rasterized=True)
plt.ylim([0, 3])
plt.ylabel(r"$\Sigma_{11}$ (s)$^2$")
plt.legend(loc="upper right")
plt.text(text_loc[0], text_loc[1], '(i)', horizontalalignment='left', verticalalignment="top",
transform=plt.gca().transAxes, fontsize="large", fontweight="normal", bbox=box)
plt.subplot(312)
plt.plot(catalog["time"], covariance[:,1], '.', markersize=marker_size, label="Amplitude", rasterized=True)
plt.ylim([0, 1])
plt.ylabel(r"$\Sigma_{22}$ ($\log10$ m/s)$^2$")
plt.legend(loc="upper right")
plt.text(text_loc[0], text_loc[1], '(ii)', horizontalalignment='left', verticalalignment="top",
transform=plt.gca().transAxes, fontsize="large", fontweight="normal", bbox=box)
plt.subplot(313)
plt.plot(catalog["time"], covariance[:,2], '.', markersize=marker_size, label="Travel-time vs. Amplitude", rasterized=True)
plt.ylabel(r"$\Sigma_{12}$")
plt.ylim([-0.5, 0.7])
plt.legend(loc="upper right")
plt.text(text_loc[0], text_loc[1], '(iii)', horizontalalignment='left', verticalalignment="top",
transform=plt.gca().transAxes, fontsize="large", fontweight="normal", bbox=box)
plt.gca().xaxis.set_major_formatter(mdates.DateFormatter('%m-%d:%H'))
plt.gcf().autofmt_xdate()
# plt.suptitle(r"Covariance Matrix ($\Sigma$) Coefficients")
plt.tight_layout()
plt.gcf().align_labels()
plt.savefig(root_dir("covariance.png"), bbox_inches="tight", dpi=300)
plt.savefig(root_dir("covariance.pdf"), bbox_inches="tight")
plt.show();
```
| github_jupyter |
# Comparison with other approaches
The aim of this experiment is to compare the Deep Learning model with other Conventional ML approaches to the same problem of clickbait detection.
```
import sys
import string
import re
import numpy as np
from sklearn import metrics
from sklearn.feature_extraction.text import TfidfTransformer, CountVectorizer
from sklearn.svm import SVC
from sklearn.pipeline import Pipeline
from keras.models import load_model
from keras.preprocessing import sequence
genuine = open("../data/genuine.preprocessed.txt").read().split("\n")
clickbait = open("../data/clickbait.preprocessed.txt").read().split("\n")
# Preview a few examples from each class.
# FIX: use print() calls so this cell runs under both Python 2 and 3
# (the original mixed `print x` statements with print() calls further down).
print("Clickbait: ")
for each in clickbait[:5]:
    print(each)
print("-" * 50)
print("Genuine: ")
for each in genuine[:5]:
    print(each)
data = clickbait + genuine
# FIX: labels must follow the same order as `data` (clickbait first, label 1),
# mirroring how `valid_labels` is built below. The original put
# len(genuine) zeros first, mislabeling the training examples.
labels = len(clickbait) * [1] + len(genuine) * [0]
clickbait_valid = open("../data/clickbait.valid.txt").read().split("\n")
genuine_valid = open("../data/genuine.valid.txt").read().split("\n")
print("Clickbait: ")
for each in clickbait_valid[:5]:
    print(each)
print("-" * 50)
print("Genuine: ")
for each in genuine_valid[:5]:
    print(each)
valid_data = clickbait_valid + genuine_valid
vocabulary = open("../data/vocabulary.txt").read().split("\n")
inverse_vocabulary = dict((word, i) for i, word in enumerate(vocabulary))
# Use a set for O(1) membership tests instead of scanning the vocabulary list
# once per word; result is identical.
vocab_set = set(vocabulary)
valid_data = [" ".join([w if w in vocab_set else "<UNK>" for w in sent.split()]) for sent in valid_data]
valid_labels = len(clickbait_valid) * [1] + len(genuine_valid) * [0]
# Conventional-ML baseline: bag-of-words counts -> TF-IDF weighting -> SVM
# (default kernel). Fit on the raw training sentences and their labels.
svm_clf = Pipeline([("vect", CountVectorizer()),
("tfidf", TfidfTransformer()),
("clf", SVC())])
svm_clf.fit(data, labels);
UNK = "<UNK>"
PAD = "<PAD>"
MATCH_MULTIPLE_SPACES = re.compile("\ {2,}")
SEQUENCE_LENGTH = 20

def words_to_indices(words):
    """Map each word to its vocabulary index, falling back to the <UNK> index."""
    unk_index = inverse_vocabulary[UNK]
    return [inverse_vocabulary.get(word, unk_index) for word in words]

def clean(text):
    """Pad every punctuation character and digit with spaces, then collapse
    runs of multiple spaces into a single space."""
    # Punctuation first, then the digits 0-9 (same order as sequential replaces).
    for token in list(string.punctuation) + [str(d) for d in range(10)]:
        text = text.replace(token, " " + token + " ")
    return MATCH_MULTIPLE_SPACES.sub(" ", text)
model = load_model("../models/detector.h5")
# Convert the validation sentences into padded word-index sequences for the CNN.
inputs = sequence.pad_sequences([words_to_indices(clean(sent.lower()).split()) for sent in valid_data], maxlen=SEQUENCE_LENGTH)
predictions = model.predict(inputs)
# Threshold the model outputs at 0.5 to obtain binary class predictions.
predictions = predictions.flatten() > .5
print ("SVM")
print (metrics.classification_report(valid_labels, svm_clf.predict(valid_data)))
# FIX: `print "-" * 50` was a Python 2 print statement; use a call like the
# surrounding print() lines so the cell runs on Python 3.
print ("-" * 50)
print ("Convolutional Neural Network")
print (metrics.classification_report(valid_labels, predictions))
```
| github_jupyter |
```
import pandas as pd
import numpy as np
import matplotlib.pyplot as plt
#from matplotlib.colors import ListedColormap
#from sklearn import neighbors, datasets
#from sklearn.cluster import DBSCAN
#from sklearn.preprocessing import StandardScaler
#import geopandas as gf
#from shapely.geometry import Point
train_all_first = pd.read_pickle("../data/processed_all/train_all_first.pickle")
train_all_first
beijing_districts = pd.read_csv("../data/external/districts/beijing_districts.csv")
beijing_districts
# Plot Beijing Districts and Origin
fig, ax = plt.subplots(figsize=(10,10))
ax.scatter(train_all_first.o_lat, train_all_first.o_long, color="green", alpha=0.5)
ax.scatter(train_all_first.d_lat, train_all_first.d_long, color="yellow", alpha=0.5)
ax.scatter(beijing_districts.o_lat, beijing_districts.o_long, color="red", alpha=0.5)
# Euclidean Distance Calculator
def dist(a, b, ax=1):
    """Return the Euclidean norm of (a - b) taken along axis `ax`.

    With `a` a single 2-D point and `b` an (n, 2) array of centroids, this
    yields the n distances from the point to each centroid.
    """
    difference = a - b
    return np.linalg.norm(difference, axis=ax)
# Getting coordinates of Starting Points
f1 = train_all_first['o_lat'].values
f2 = train_all_first['o_long'].values
P = np.array(list(zip(f1, f2)))
len(P)
# Getting coordinates of Destination Points
g1 = train_all_first['d_lat'].values
g2 = train_all_first['d_long'].values
Q = np.array(list(zip(g1, g2)))
len(Q)
# Number of clusters
k = 16
# Setting Centroids
x1 = beijing_districts['o_lat'].values
y1 = beijing_districts['o_long'].values
C = np.array(list(zip(x1, y1)))
len(C)
C
x = []
for each in C:
x = each
print(f'x = {x}')
train_all_first_districts = train_all_first
index_o_lat = train_all_first.columns.get_loc("o_lat")
index_o_lat
k = index_o_lat + 1
train_all_first_districts.insert(k, 'o_district', np.nan, True)
train_all_first_districts
index_d_lat = train_all_first_districts.columns.get_loc("d_lat")
index_d_lat
m = index_d_lat + 1
train_all_first_districts.insert(m, 'd_district', np.nan, True)
list(train_all_first_districts.columns)
train_all_first_districts
train_all_first_districts.loc[train_all_first_districts.index == 0, 'o_district'] = 1.0
train_all_first_districts
train_all_first_districts.loc[train_all_first_districts.index == 0, 'o_district'] = 'district_' + str(1.0)
train_all_first_districts
# Cluster array filled with 0
o_clusters = np.zeros(len(P))
# Assigning each value to its closest cluster
for p in range(len(P)):
if p % 100 == 0:
print("Processing row {}".format(str(p)), end="\r")
distances = dist(P[p], C)
o_cluster = np.argmin(distances)
o_clusters[p] = o_cluster
train_all_first_districts.loc[train_all_first_districts.index == p, 'o_district'] = 'o_district_' + str(o_cluster)
print('Assign origins to districts successful')
# Cluster array filled with 0
d_clusters = np.zeros(len(Q))
# Assigning each value to its closest cluster
for q in range(len(Q)):
if q % 100 == 0:
print("Processing row {}".format(str(q)), end="\r")
distances = dist(Q[q], C)
d_cluster = np.argmin(distances)
d_clusters[q] = d_cluster
train_all_first_districts.loc[train_all_first_districts.index == q, 'd_district'] = 'd_district_' + str(d_cluster)
print('Assign destinations to districts successful')
train_all_first_districts
train_all_first_districts.to_pickle("../data/external/districts/assign_districts.pickle")
train_all_first_districts[['o_district', 'd_district']]
df_1 = train_all_first_districts.join(pd.get_dummies(train_all_first_districts["o_district"]))
list(df_1.columns)
df_1
train_all_first_districts = df_1.join(pd.get_dummies(df_1["d_district"]))
list(train_all_first_districts.columns)
train_all_first_districts
train_all_first_districts.to_pickle("../data/external/districts/train_all_first_districts.pickle")
# NOTE(review): `df_2` is never defined in this notebook (only `df_1` and
# `train_all_first_districts` exist above), so this cell raises NameError as
# written — presumably `train_all_first_districts` was intended; verify.
x = df_2[df_2.d_district_9 == 1]
x
```
| github_jupyter |
# Day 9, 2d & 3d movies!
```
# import our usual things
import pandas as pd
import matplotlib.pyplot as plt
import ipywidgets
import numpy as np
planets = pd.read_csv('https://jnaiman.github.io/csci-p-14110_su2020/lesson08/planets_2020.06.22_10.10.17.csv',
sep=",", comment="#")
planets
plt.hist(planets['pl_orbeccen'])
plt.show()
from hermite_library import read_hermite_solution_from_file
planet_file = 'data/Kepler-11-savedSim.txt'
#planet_file = 'Kepler-11-savedSim.txt' # if all .txt files are in the same directory as my notebook file
t_h, E_h, r_h, v_h = read_hermite_solution_from_file(planet_file)
#time, energy, postion, velocity
r_h.shape
```
The shape of r_h (position) array is: `r_h[# of planets+host star, x/y/z - 3D position, time steps]`
```
r_h[0,:,:].shape # all positions, all times of the first planet
# x position, all times for the 2nd body in this system
r_h[1,0,:].shape
```
First thing we are going to do is plot the orbits statically (not a movie).
2D plot of x/y positions over ALL timesteps for ALL of the planets.
```
r_h.shape[0]
fig, ax = plt.subplots(1,1,figsize=(4,4))
# want to loop over all bodies in this simulation and plot their TOTAL orbits -- all times
# x/y positions only
# r_h[# bodies, x/y/z 3D positions, all time steps]
for i in range(r_h.shape[0]): # looping over all bodies
# now we want to plot (for given body) the x/y positions
# over ALL times
# r_h[ith planet, x position, all times], r_h[ith planet, y position, all times]
ax.plot(r_h[i,0,:], r_h[i,1,:])
ax.set_xlabel('x in AU')
ax.set_ylabel('y in AU')
ax.set_xlim(-0.4, 0.4)
ax.set_ylim(-0.4, 0.4)
plt.show()
# X VS. Z plot
fig, ax = plt.subplots(1,1,figsize=(4,4))
# want to loop over all bodies in this simulation and plot their TOTAL orbits -- all times
# x/y positions only
# r_h[# bodies, x/y/z 3D positions, all time steps]
for i in range(r_h.shape[0]): # looping over all bodies
# now we want to plot (for given body) the x/z positions
# over ALL times
# r_h[ith planet, x position, all times], r_h[ith planet, z position, all times]
ax.plot(r_h[i,0,:], r_h[i,2,:])
ax.set_xlabel('x in AU')
ax.set_ylabel('z in AU')
plt.show()
# Y vs. Z.
fig, ax = plt.subplots(1,1,figsize=(4,4))
# want to loop over all bodies in this simulation and plot their TOTAL orbits -- all times
# x/y positions only
# r_h[# bodies, x/y/z 3D positions, all time steps]
for i in range(r_h.shape[0]): # looping over all bodies
# now we want to plot (for given body) the x/z positions
# over ALL times
# r_h[ith planet, y position, all times], r_h[ith planet, z position, all times]
ax.plot(r_h[i,1,:], r_h[i,2,:])
ax.set_xlabel('y in AU')
ax.set_ylabel('z in AU')
plt.show()
```
## Attempting! to make and save a 2D movie
```
from animations_library import plot_animations
```
Note: to use this animation library we have to use the `matplotlib.animation` sub-library.
```
from matplotlib import animation
animation
# create a figure object (but don't plot anything on it)
#fig, ax = plt.subplots(1,1, figsize=(5,5))
```
Before creating our animation on this figure object, we might want to make it a little more efficient by downsampling in time.
```
stepSize = 50 # I'm going to create a image every 50th timestep out of the 8800
# downsampled position vector
r = r_h[:,:,0:-1:stepSize] # technically this goes up until the 2nd to last timestep
# can also do: r_h[:,:, 0::stepSize]
r_h.shape, r.shape
t_h.shape
t_h
# downsample in time the time variable
t = t_h[0:-1:stepSize]
t_h.shape, t.shape
# use the downsampled versions!!
fig, ax = plt.subplots(1,1,figsize=(4,4))
# want to loop over all bodies in this simulation and plot their TOTAL orbits -- all times
# x/y positions only
# r_h[# bodies, x/y/z 3D positions, all time steps]
for i in range(r.shape[0]): # looping over all bodies
# now we want to plot (for given body) the x/y positions
# over ALL times
# r_h[ith planet, x position, all times], r_h[ith planet, y position, all times]
ax.plot(r[i,0,:], r[i,1,:])
ax.set_xlabel('x in AU')
ax.set_ylabel('y in AU')
ax.set_xlim(-0.4, 0.4)
ax.set_ylim(-0.4, 0.4)
plt.show()
```
After that bit of downsampling tangent, let's actually make that animation!
```
# create a figure object (but don't plot anything on it)
fig, ax = plt.subplots(1,1, figsize=(5,5))
# 2nd step is to use plot_animations to setup the animation stuff we need
init, animate, nFrames = plot_animations(fig, ax, t, r) # t is need here because Jill wrote the library funny!
# 3rd step is to use matplotlib to put animation files together
anim = animation.FuncAnimation(fig, animate, init_func=init,
frames=nFrames, interval=20, blit=True)
anim.save?
anim.save('anim_trial.mp4')
# if the above didn't work, you can try this:
Writer = animation.writers['ffmpeg']
writer = Writer(fps=15, metadata=dict(artist='Me'), bitrate=1800)
anim.save('anim_trial2.mp4', writer=writer)
from IPython.display import Video
Video("anim_trial.mp4", width=400)
```
## Stuff in 3D!
```
from hermite_library import read_hermite_solution_from_file
planet_file = 'data/Kepler-11-savedSim.txt'
#planet_file = 'Kepler-11-savedSim.txt' # if all .txt files are in the same directory as my notebook file
t_h, E_h, r_h, v_h = read_hermite_solution_from_file(planet_file)
# import numpy as np
# import ipyvolume as ipv
# V = np.zeros((128,128,128)) # our 3d array
# # outer box
# V[30:-30,30:-30,30:-30] = 0.75
# V[35:-35,35:-35,35:-35] = 0.0
# # inner box
# V[50:-50,50:-50,50:-50] = 0.25
# V[55:-55,55:-55,55:-55] = 0.0
# ipv.quickvolshow(V, level=[0.25, 0.75], opacity=0.03, level_width=0.1, data_min=0, data_max=1)
#!conda install -c conda-forge ipyvolume --yes
import ipyvolume
import numpy as np
x, y, z = np.random.random((3,10))
ipyvolume.quickscatter(x,y,z, size=1, marker="sphere")
x.shape
r_h[:,0,:].shape # [all the planets, x-values, all times]
r_h[:,0,:].ravel() # take our x array for all of the planets and making it into a 1D array (ipyvolume)
r_h[:,0,:].ravel().shape
# if we wanna downsample
stepSize = 50
r = r_h[:,:,0:-1:stepSize] # technically this goes up until the 2nd to last timestep
# do this for all of the positions x/y/z
x = r[:,0,:].ravel()
y = r[:,1,:].ravel()
z = r[:,2,:].ravel()
ipyvolume.quickscatter(x,y,z, size=1, marker="sphere")
```
Going to make a more complex figure and loop and plot each orbit!
```
r[0,0,:].shape
ipyvolume.figure()
colors = ['red', 'blue', 'green', 'orange', 'gray', 'yellow', 'magenta']
#colors = [(1,0,0),(0,0,1), (0,1,0), (1,0.5,0), (0.5,0.5,0.5), (0.5,1,0), (1,0,1)]
for i in range(r.shape[0]): # looping over planets
ipyvolume.scatter(r[i,0,:], r[i,1,:], r[i,2,:],
color=colors[i], marker='sphere')
ipyvolume.show()
from flip_colors import flip_colors
color = [(1,0,0), (0,0,1), (0,1,0), (0,1,1), (1,1,0), (1,0,1), (0.5, 0.5, 0.5)]
colors = flip_colors(color,r)
colors.shape, r.T.shape # so this is a way to do more data formatting specific to animations
# all of this weird data manipulation is so we can do animations in 3D
ipyvolume.figure()
s = ipyvolume.scatter(r[:,0,:].T+0.5, r[:,1,:].T+0.5, r[:,2,:].T+0.5, marker='sphere', color=colors)
ani = ipyvolume.animation_control(s, interval=200)
ipyvolume.show()
```
export to HTML
```
ipyvolume.figure()
s = ipyvolume.scatter(r[:,0,:].T+0.5, r[:,1,:].T+0.5, r[:,2,:].T+0.5, marker='sphere', color=colors)
ani = ipyvolume.animation_control(s, interval=200)
import ipywidgets
myVBox = ipywidgets.VBox([ipyvolume.gcc()]) # grabbing the last plotted thing
ipyvolume.embed.layout = myVBox.children[0].layout
ipyvolume.embed.layout.min_width='400px'
ipyvolume.embed.embed_html("myNewPage.html", myVBox, offline=False, devmode=False)
```
| github_jupyter |
<i>Copyright (c) Microsoft Corporation. All rights reserved.</i>
<i>Licensed under the MIT License.</i>
# User2Item recommendations with LightGCN
We offer an example to help readers to run an ID-based collaborative filtering baseline with LightGCN. <br>
LightGCN is a simple and neat Graph Convolution Network (GCN) model for recommender systems. It uses a GCN to learn the embeddings of users/items, with the goal that low-order and high-order user-item interactions are explicitly incorporated into the embedding function.
<img src="https://recodatasets.z20.web.core.windows.net/kdd2020/images%2FLightGCN-graphexample.JPG" width="600">
The model architecture is illustrated as follows:
<img src="https://recodatasets.z20.web.core.windows.net/images/lightGCN-model.jpg" width="600">
For more details and instructions, please refer to [lightgcn_deep_dive.ipynb](../../02_model_collaborative_filtering/lightgcn_deep_dive.ipynb).
```
import os
import pandas as pd
import numpy as np
import tensorflow as tf
from reco_utils.common.timer import Timer
from reco_utils.recommender.deeprec.models.graphrec.lightgcn import LightGCN
from reco_utils.recommender.deeprec.DataModel.ImplicitCF import ImplicitCF
from reco_utils.dataset import movielens
from reco_utils.dataset.python_splitters import python_stratified_split
from reco_utils.evaluation.python_evaluation import map_at_k, ndcg_at_k, precision_at_k, recall_at_k
from reco_utils.common.constants import SEED as DEFAULT_SEED
from reco_utils.recommender.deeprec.deeprec_utils import prepare_hparams
from reco_utils.recommender.deeprec.deeprec_utils import cal_metric
from utils.general import *
from utils.data_helper import *
from utils.task_helper import *
tf.logging.set_verbosity(tf.logging.ERROR)
tag = 'small'
lightgcn_dir = 'data_folder/my/LightGCN-training-folder'
rawdata_dir = 'data_folder/my/DKN-training-folder'
create_dir(lightgcn_dir)
```
First, we need to transform the raw dataset into LightGCN's input data format:
```
prepare_dataset(lightgcn_dir, rawdata_dir, tag)
df_train = pd.read_csv(
os.path.join(lightgcn_dir, 'lightgcn_train_{0}.txt'.format(tag)),
sep=' ',
engine="python",
names=['userID', 'itemID', 'rating'],
header=0
)
df_train.head()
```
LightGCN only takes positive user-item interactions for model training. Pairs with rating < 1 will be ignored by the model.
```
df_valid = pd.read_csv(
os.path.join(lightgcn_dir, 'lightgcn_valid_{0}.txt'.format(tag)),
sep=' ',
engine="python",
names=['userID', 'itemID', 'rating'],
header=0
)
data = ImplicitCF(
train=df_train, test=df_valid, seed=0,
col_user='userID',
col_item='itemID',
col_rating='rating'
)
yaml_file = './lightgcn.yaml'
hparams = prepare_hparams(yaml_file,
learning_rate=0.005,
eval_epoch=1,
top_k=10,
save_model=True,
epochs=15,
save_epoch=1
)
hparams.MODEL_DIR = os.path.join(lightgcn_dir, 'saved_models')
hparams.values
model = LightGCN(hparams, data, seed=0)
with Timer() as train_time:
model.fit()
print("Took {} seconds for training.".format(train_time.interval))
user_emb_file = os.path.join(lightgcn_dir, 'user.emb.txt')
item_emb_file = os.path.join(lightgcn_dir, 'item.emb.txt')
model.infer_embedding(
user_emb_file,
item_emb_file
)
```
To compare LightGCN's performance with DKN, we need to make predictions on the same test set. So we infer the users/items embedding, then compute the similarity scores between each pairs of user-item in the test set.
```
def infer_scores_via_embeddings(test_filename, user_emb_file, item_emb_file):
    """Score each user-item pair in the test file as the dot product of the
    corresponding user and item embedding vectors.

    Returns (labels, preds, groupids): the ground-truth labels, the predicted
    scores, and the user id of each pair (used for group-wise metrics).
    """
    print('loading embedding file...', end=' ')
    user2vec = load_emb_file(user_emb_file)
    item2vec = load_emb_file(item_emb_file)
    preds, labels, groupids = [], [], []
    with open(test_filename, 'r') as rd:
        # Each line looks like "<label> <?> <itemid> ...%<userid>".
        for line in rd:
            words = line.strip().split('%')
            tokens = words[0].split(' ')
            userid = words[1]
            itemid = tokens[2]
            preds.append(user2vec[userid].dot(item2vec[itemid]))
            labels.append(int(tokens[0]))
            groupids.append(userid)
    print('done')
    return labels, preds, groupids
test_filename = os.path.join(rawdata_dir, 'test_{}.txt'.format(tag))
labels, preds, group_keys = infer_scores_via_embeddings(test_filename, user_emb_file, item_emb_file)
group_labels, group_preds = group_labels(labels, preds, group_keys)
res_pairwise = cal_metric(
group_labels, group_preds, ['ndcg@2;4;6', "group_auc"]
)
print(res_pairwise)
res_pointwise = cal_metric(labels, preds, ['auc'])
print(res_pointwise)
```
### Reference:
1. Xiangnan He, Kuan Deng, Xiang Wang, Yan Li, Yongdong Zhang & Meng Wang, LightGCN: Simplifying and Powering Graph Convolution Network for Recommendation, 2020, https://arxiv.org/abs/2002.02126
| github_jupyter |
This notebook is part of the $\omega radlib$ documentation: https://docs.wradlib.org.
Copyright (c) $\omega radlib$ developers.
Distributed under the MIT License. See LICENSE.txt for more info.
# Dealing with time series
Dealing with radar data typically implies dealing with time series (of radar records or rain gauge observations). This article gives a brief intro on how to deal with time series and datetimes in Python.
## The datetime module
The datetime module provides a number of types to deal with dates, times, and time intervals.
```
import datetime as dt
```
There are different ways to create datetime objects.
```
# This is now (system time)
now = dt.datetime.now()
# Just using the date
birth_van_rossum = dt.datetime(1956, 1, 31)
# Providing both date and time
first_wradlib_commit = dt.datetime(2011, 10, 26, 11, 54, 58)
# Or initialising from a string
erad_2016_begins = dt.datetime.strptime("2016-10-09 09:00:00", "%Y-%m-%d %H:%M:%S")
```
You can compute the difference between two datetime objects.
```
# Age of Guido van Rossum
age_van_rossum = now - birth_van_rossum
print("This is a %r object.\n" % type(age_van_rossum) )
print("It looks like this: %r" % age_van_rossum )
print("and consists of\n\t%d days,\n\t%d seconds,\n\tand %d microseconds.\n"
% (age_van_rossum.days, age_van_rossum.seconds, age_van_rossum.microseconds) )
# Age of wradlib
age_wradlib = now - first_wradlib_commit
# Time until (or since) beginning of ERAD 2016 OSS Short course
from_to_erad2016 = now - erad_2016_begins
print("Guido van Rossum is %d seconds old." % age_van_rossum.total_seconds())
print("wradlib's first commit was %d days ago." % age_wradlib.days)
if from_to_erad2016.total_seconds() < 0:
print("The ERAD 2016 OSS Short course will start in %d days." % -from_to_erad2016.days )
else:
print("The ERAD 2016 OSS Short course took place %d days ago." % from_to_erad2016.days)
```
Or you can create a `datetime.timedelta` object yourself
and add/subtract a time interval from/to a `datetime` object.
You can use any of these keywords: `days, seconds, microseconds, milliseconds, minutes, hours, weeks`,
but `datetime.timedelta` will always represent the result in `days, seconds, microseconds`.
```
# This is an interval of two minutes
print(dt.timedelta(minutes=1, seconds=60))
# And this is, too
print(dt.timedelta(minutes=2))
now = dt.datetime.now()
print("This is now: %s" % now)
print("This is two minutes before: %s" % (now - dt.timedelta(minutes=2)) )
```
The default string format of a `datetime` object corresponds to the [isoformat](https://en.wikipedia.org/wiki/ISO_8601). Using the `strftime` function, however, you can control string formatting yourself. The following example shows this feature together with other features we have learned before. The idea is to loop over time and generate corresponding string representations. We also store the `datetime` objects in a list.
```
start = dt.datetime(2016, 10, 9)
end = dt.datetime(2016, 10, 14)
interval = dt.timedelta(days=1)
dtimes = []
print("These are the ERAD 2016 conference days (incl. short courses):")
while start <= end:
print(start.strftime("\t%A, %d. %B %Y"))
dtimes.append(start)
start += interval
```
[matplotlib](../python/mplintro.ipynb) generally understands `datetime` objects and tries to make sense of them in plots.
```
# Instead of %matplotlib inline
import matplotlib.pyplot as pl
try:
get_ipython().magic("matplotlib inline")
except:
pl.ion()
import numpy as np
# Create some dummy data
level = np.linspace(100,0,len(dtimes))
# And add a time series plot
fig = pl.figure(figsize=(10,5))
ax = fig.add_subplot(111)
pl.plot(dtimes, level, "bo", linestyle="dashed")
pl.xlabel("Day of the conference", fontsize=15)
pl.ylabel("Relative attentiveness (%)", fontsize=15)
pl.title("Development of participants' attentiveness during the conference", fontsize=15)
pl.tick_params(labelsize=12)
```
| github_jupyter |
# Regression with Amazon SageMaker XGBoost algorithm
_**Distributed training for regression with Amazon SageMaker XGBoost script mode**_
---
## Contents
1. [Introduction](#Introduction)
2. [Setup](#Setup)
1. [Fetching the dataset](#Fetching-the-dataset)
2. [Data Ingestion](#Data-ingestion)
3. [Training the XGBoost model](#Training-the-XGBoost-model)
3. [Deploying the XGBoost model](#Deploying-the-XGBoost-model)
---
## Introduction
This notebook demonstrates the use of Amazon SageMaker XGBoost to train and host a regression model. [XGBoost (eXtreme Gradient Boosting)](https://xgboost.readthedocs.io) is a popular and efficient machine learning algorithm used for regression and classification tasks on tabular datasets. It implements a technique known as gradient boosting on trees, performs remarkably well in machine learning competitions, and gets a lot of attention from customers.
We use the [Abalone data](https://www.csie.ntu.edu.tw/~cjlin/libsvmtools/datasets/regression.html), originally from the [UCI data repository](https://archive.ics.uci.edu/ml/datasets/abalone). More details about the original dataset can be found [here](https://archive.ics.uci.edu/ml/machine-learning-databases/abalone/abalone.names). In this libsvm converted version, the nominal feature (Male/Female/Infant) has been converted into a real valued feature as required by XGBoost. Age of abalone is to be predicted from eight physical measurements.
---
## Setup
This notebook was created and tested on an ml.m5.2xlarge notebook instance.
Let's start by specifying:
1. The S3 bucket and prefix that you want to use for training and model data. This should be within the same region as the Notebook Instance, training, and hosting.
1. The IAM role arn used to give training and hosting access to your data. See the documentation for how to create these. Note, if more than one role is required for notebook instances, training, and/or hosting, please replace the boto regexp with the appropriate full IAM role arn string(s).
```
import sys
!{sys.executable} -m pip install -qU awscli boto3 "sagemaker>=1.71.0,<2.0.0"
%%time
import os
import boto3
import re
import sagemaker
# Get a SageMaker-compatible role used by this Notebook Instance.
role = sagemaker.get_execution_role()
# Region the notebook instance runs in; reused for every boto3 session below.
region = boto3.Session().region_name
### update below values appropriately ###
# Default SageMaker bucket (sagemaker-<region>-<account-id>); replace if needed.
bucket = sagemaker.Session().default_bucket()
# Key prefix under which training data and model artifacts are stored.
prefix = 'sagemaker/DEMO-xgboost-dist-script'
####
print(region)
```
### Fetching the dataset
Following methods split the data into train/test/validation datasets and upload files to S3.
```
%%time
import io
import boto3
import random
def data_split(FILE_DATA, DATA_DIR, FILE_TRAIN_0, FILE_TRAIN_1, FILE_VALIDATION, FILE_TEST,
               PERCENT_TRAIN_0, PERCENT_TRAIN_1, PERCENT_VALIDATION, PERCENT_TEST):
    """Randomly split FILE_DATA into two train shards, a validation file and a
    test file under DATA_DIR.

    Bug fix: the third parameter was named FILE_TRAIN_BASE while the body
    referenced FILE_TRAIN_0, so every call raised NameError. The parameter is
    renamed to match the body (the notebook calls it positionally).

    :param FILE_DATA: Path of the source file, one record per line.
    :param DATA_DIR: Directory the four output files are written into.
    :param FILE_TRAIN_0/FILE_TRAIN_1/FILE_VALIDATION/FILE_TEST: Output file names.
    :param PERCENT_*: Percentage of lines per split (integer truncation means a
        few lines may be left unassigned when the percentages do not divide evenly).
    """
    with open(FILE_DATA, 'r') as f:
        data = f.readlines()

    num_of_data = len(data)
    data_fractions = [
        int((PERCENT_TRAIN_0 / 100.0) * num_of_data),
        int((PERCENT_TRAIN_1 / 100.0) * num_of_data),
        int((PERCENT_VALIDATION / 100.0) * num_of_data),
        int((PERCENT_TEST / 100.0) * num_of_data),
    ]

    # Sample without replacement by popping lines at random indices.
    split_data = [[], [], [], []]
    for split_ind, fraction in enumerate(data_fractions):
        for _ in range(fraction):
            rand_data_ind = random.randint(0, len(data) - 1)
            split_data[split_ind].append(data.pop(rand_data_ind))

    out_names = [FILE_TRAIN_0, FILE_TRAIN_1, FILE_VALIDATION, FILE_TEST]
    for lines, name in zip(split_data, out_names):
        # Context manager guarantees the file is flushed and closed
        # (the original leaked all four handles on any exception).
        with open(DATA_DIR + "/" + name, 'w') as out:
            out.writelines(lines)
def write_to_s3(fobj, bucket, key):
    """Stream the open binary file *fobj* to the S3 object bucket/key.

    Uses the module-level ``region`` so the upload goes through the same
    region as the rest of the notebook.
    """
    s3 = boto3.Session(region_name=region).resource('s3')
    target = s3.Bucket(bucket).Object(key)
    return target.upload_fileobj(fobj)
def upload_to_s3(bucket, channel, filename):
    """Upload the local file *filename* to s3://<bucket>/<prefix>/<channel>.

    The object key is prefix + '/' + channel; the channel string already
    carries the target object name (e.g. 'train/train_0.libsvm').

    :param bucket: Destination S3 bucket name.
    :param channel: Key suffix appended to the module-level ``prefix``.
    :param filename: Local path of the file to upload.
    """
    key = prefix + '/' + channel
    # Print the key actually written. The original message appended the local
    # filename to the URL even though it is not part of the object key,
    # so the logged location did not match the real upload destination.
    url = 's3://{}/{}'.format(bucket, key)
    print('Writing to {}'.format(url))
    # Close the file handle deterministically instead of leaking it.
    with open(filename, 'rb') as fobj:
        write_to_s3(fobj, bucket, key)
```
### Data ingestion
Next, we read the dataset from the existing repository into memory, for preprocessing prior to training. This processing could be done *in situ* by Amazon Athena, Apache Spark in Amazon EMR, Amazon Redshift, etc., assuming the dataset is present in the appropriate location. Then, the next step would be to transfer the data to S3 for use in training. For small datasets, such as this one, reading into memory isn't onerous, though it would be for larger datasets.
```
%%time
import urllib.request
# Load the dataset
# Download the libsvm-formatted abalone data into the working directory.
FILE_DATA = 'abalone'
urllib.request.urlretrieve("https://www.csie.ntu.edu.tw/~cjlin/libsvmtools/datasets/regression/abalone", FILE_DATA)
#split the downloaded data into train/test/validation files
FILE_TRAIN_0 = 'abalone.train_0'
FILE_TRAIN_1 = 'abalone.train_1'
FILE_VALIDATION = 'abalone.validation'
FILE_TEST = 'abalone.test'
# Two 35% train shards (one per training host) plus 15% validation and 15% test.
PERCENT_TRAIN_0 = 35
PERCENT_TRAIN_1 = 35
PERCENT_VALIDATION = 15
PERCENT_TEST = 15
DATA_DIR = 'data'
if not os.path.exists(DATA_DIR):
    os.mkdir(DATA_DIR)
data_split(FILE_DATA, DATA_DIR, FILE_TRAIN_0, FILE_TRAIN_1, FILE_VALIDATION, FILE_TEST,
           PERCENT_TRAIN_0, PERCENT_TRAIN_1, PERCENT_VALIDATION, PERCENT_TEST)
#upload the files to the S3 bucket
# The channel argument already contains the target object name, e.g.
# 'train/train_0.libsvm' becomes key <prefix>/train/train_0.libsvm.
upload_to_s3(bucket, 'train/train_0.libsvm', DATA_DIR + "/" + FILE_TRAIN_0)
upload_to_s3(bucket, 'train/train_1.libsvm', DATA_DIR + "/" + FILE_TRAIN_1)
upload_to_s3(bucket, 'validation/validation.libsvm', DATA_DIR + "/" + FILE_VALIDATION)
upload_to_s3(bucket, 'test/test.libsvm', DATA_DIR + "/" + FILE_TEST)
```
## Create a XGBoost script to train with
SageMaker can now run an XGboost script using the XGBoost estimator. When executed on SageMaker a number of helpful environment variables are available to access properties of the training environment, such as:
- `SM_MODEL_DIR`: A string representing the path to the directory to write model artifacts to. Any artifacts saved in this folder are uploaded to S3 for model hosting after the training job completes.
- `SM_OUTPUT_DIR`: A string representing the filesystem path to write output artifacts to. Output artifacts may include checkpoints, graphs, and other files to save, not including model artifacts. These artifacts are compressed and uploaded to S3 to the same S3 prefix as the model artifacts.
Supposing two input channels, 'train' and 'validation', were used in the call to the XGBoost estimator's fit() method, the following environment variables will be set, following the format `SM_CHANNEL_[channel_name]`:
`SM_CHANNEL_TRAIN`: A string representing the path to the directory containing data in the 'train' channel
`SM_CHANNEL_VALIDATION`: Same as above, but for the 'validation' channel.
A typical training script loads data from the input channels, configures training with hyperparameters, trains a model, and saves a model to model_dir so that it can be hosted later. Hyperparameters are passed to your script as arguments and can be retrieved with an argparse.ArgumentParser instance. For example, the script that we will run in this notebook is provided as the accompanying file (`abalone.py`) and also shown below:
```python
import argparse
import json
import logging
import os
import pandas as pd
import pickle as pkl
from sagemaker_containers import entry_point
from sagemaker_xgboost_container.data_utils import get_dmatrix
from sagemaker_xgboost_container import distributed
import xgboost as xgb
def _xgb_train(params, dtrain, evals, num_boost_round, model_dir, is_master):
    """Run xgb.train on the given arguments with rabit already initialized.

    This is the rabit execution function: every host runs the boosting loop,
    but only the master serializes the resulting booster.

    :param params: Hyperparameter dict forwarded to xgb.train().
    :param dtrain: Training DMatrix.
    :param evals: Watchlist of (DMatrix, name) pairs to evaluate during training.
    :param num_boost_round: Number of boosting rounds.
    :param model_dir: Directory the master writes the model artifact into.
    :param is_master: True if current node is master host in distributed
        training, or is running single node training job. Note that rabit_run
        will include this argument.
    """
    booster = xgb.train(params=params, dtrain=dtrain, evals=evals, num_boost_round=num_boost_round)

    if is_master:
        model_location = os.path.join(model_dir, 'xgboost-model')
        # Close the file handle deterministically (the original passed an
        # anonymous open() to pkl.dump and leaked it).
        with open(model_location, 'wb') as f:
            pkl.dump(booster, f)
        logging.info("Stored trained model at {}".format(model_location))
if __name__ == '__main__':
    parser = argparse.ArgumentParser()

    # Hyperparameters are described here. In this simple example we are just including one hyperparameter.
    parser.add_argument('--max_depth', type=int,)
    parser.add_argument('--eta', type=float)
    parser.add_argument('--gamma', type=int)
    parser.add_argument('--min_child_weight', type=int)
    parser.add_argument('--subsample', type=float)
    parser.add_argument('--verbose', type=int)
    parser.add_argument('--objective', type=str)
    parser.add_argument('--num_round', type=int)

    # Sagemaker specific arguments. Defaults are set in the environment variables.
    parser.add_argument('--output_data_dir', type=str, default=os.environ['SM_OUTPUT_DATA_DIR'])
    parser.add_argument('--model_dir', type=str, default=os.environ['SM_MODEL_DIR'])
    parser.add_argument('--train', type=str, default=os.environ['SM_CHANNEL_TRAIN'])
    parser.add_argument('--validation', type=str, default=os.environ['SM_CHANNEL_VALIDATION'])
    parser.add_argument('--sm_hosts', type=str, default=os.environ['SM_HOSTS'])
    parser.add_argument('--sm_current_host', type=str, default=os.environ['SM_CURRENT_HOST'])

    args, _ = parser.parse_known_args()

    # Get SageMaker host information from runtime environment variables
    # NOTE(review): SM_HOSTS is re-read from the environment here instead of
    # being parsed from args.sm_hosts, so --sm_hosts is effectively unused.
    sm_hosts = json.loads(os.environ['SM_HOSTS'])
    sm_current_host = args.sm_current_host

    # Build DMatrix objects from the channel directories (libsvm format).
    dtrain = get_dmatrix(args.train, 'libsvm')
    dval = get_dmatrix(args.validation, 'libsvm')
    # Validation is optional: only evaluate on it when the channel has data.
    watchlist = [(dtrain, 'train'), (dval, 'validation')] if dval is not None else [(dtrain, 'train')]

    train_hp = {
        'max_depth': args.max_depth,
        'eta': args.eta,
        'gamma': args.gamma,
        'min_child_weight': args.min_child_weight,
        'subsample': args.subsample,
        'verbose': args.verbose,
        'objective': args.objective}

    xgb_train_args = dict(
        params=train_hp,
        dtrain=dtrain,
        evals=watchlist,
        num_boost_round=args.num_round,
        model_dir=args.model_dir)

    if len(sm_hosts) > 1:
        # Wait until all hosts are able to find each other
        entry_point._wait_hostname_resolution()

        # Execute training function after initializing rabit.
        # rabit_run injects the is_master flag into xgb_train_args.
        distributed.rabit_run(
            exec_fun=_xgb_train,
            args=xgb_train_args,
            include_in_training=(dtrain is not None),
            hosts=sm_hosts,
            current_host=sm_current_host,
            update_rabit_args=True
        )
    else:
        # If single node training, call training method directly.
        if dtrain:
            xgb_train_args['is_master'] = True
            _xgb_train(**xgb_train_args)
        else:
            raise ValueError("Training channel must have data to train model.")
def model_fn(model_dir):
    """Deserialize and return the fitted model.

    Note that this should have the same name as the serialized model in the
    _xgb_train method ('xgboost-model').

    :param model_dir: Directory containing the serialized model artifact.
    :return: The unpickled booster object.
    """
    model_file = 'xgboost-model'
    # Use a context manager so the file handle is closed
    # (the original passed an anonymous open() to pkl.load and leaked it).
    with open(os.path.join(model_dir, model_file), 'rb') as f:
        booster = pkl.load(f)
    return booster
```
Because the container imports your training script, always put your training code in a main guard `(if __name__=='__main__':)` so that the container does not inadvertently run your training code at the wrong point in execution.
For more information about training environment variables, please visit https://github.com/aws/sagemaker-containers.
## Training the XGBoost model
After setting training parameters, we kick off training, and poll for status until training is completed, which in this example takes a few minutes.
To run our training script on SageMaker, we construct a sagemaker.xgboost.estimator.XGBoost estimator, which accepts several constructor arguments:
* __entry_point__: The path to the Python script SageMaker runs for training and prediction.
* __role__: Role ARN
* __train_instance_type__ *(optional)*: The type of SageMaker instances for training. __Note__: Because Scikit-learn does not natively support GPU training, Sagemaker Scikit-learn does not currently support training on GPU instance types.
* __sagemaker_session__ *(optional)*: The session used to train on Sagemaker.
* __hyperparameters__ *(optional)*: A dictionary passed to the train function as hyperparameters.
```
# Hyperparameters forwarded to the training script as command-line arguments
# (values are strings here; abalone.py parses them back to int/float).
hyperparams = {
    "max_depth":"5",
    "eta":"0.2",
    "gamma":"4",
    "min_child_weight":"6",
    "subsample":"0.7",
    "verbose":"1",
    "objective":"reg:linear",
    "num_round":"50"}

instance_type = "ml.m5.2xlarge"
# S3 location where SageMaker uploads the trained model artifact.
output_path = 's3://{}/{}/{}/output'.format(bucket, prefix, 'abalone-dist-xgb')
content_type = "libsvm"

# Open Source distributed script mode
from sagemaker.session import s3_input, Session
from sagemaker.xgboost.estimator import XGBoost

boto_session = boto3.Session(region_name=region)
session = Session(boto_session=boto_session)
script_path = 'abalone.py'

xgb_script_mode_estimator = XGBoost(
    entry_point=script_path,
    framework_version='1.0-1', # Note: framework_version is mandatory
    hyperparameters=hyperparams,
    role=role,
    # Two instances so the script exercises the distributed (rabit) code path.
    train_instance_count=2,
    train_instance_type=instance_type,
    output_path=output_path)

# Channel definitions: each maps an S3 prefix to a named input channel.
train_input = s3_input("s3://{}/{}/{}/".format(bucket, prefix, 'train'), content_type=content_type)
validation_input = s3_input("s3://{}/{}/{}/".format(bucket, prefix, 'validation'), content_type=content_type)
```
### Train XGBoost Estimator on abalone data
Training is as simple as calling `fit` on the Estimator. This will start a SageMaker Training job that will download the data, invoke the entry point code (in the provided script file), and save any model artifacts that the script creates.
```
# Launch the SageMaker training job; blocks until training completes.
xgb_script_mode_estimator.fit({'train': train_input, 'validation': validation_input})
```
## Deploying the XGBoost model
After training, we can use the estimator to create an Amazon SageMaker endpoint – a hosted and managed prediction service that we can use to perform inference.
You can also optionally specify other functions to customize the behavior of deserialization of the input request (`input_fn()`), serialization of the predictions (`output_fn()`), and how predictions are made (`predict_fn()`). The defaults work for our current use-case so we don’t need to define them.
```
# Create a real-time inference endpoint backed by one ml.m5.2xlarge instance.
predictor = xgb_script_mode_estimator.deploy(initial_instance_count=1,
                                             instance_type="ml.m5.2xlarge")
# Send the payload as plain text rather than the SDK's default serialization.
predictor.serializer = str

test_file = DATA_DIR + "/" + FILE_TEST
with open(test_file, 'r') as f:
    payload = f.read()

# Invoke the endpoint through the low-level runtime client, sending the
# whole libsvm test file in one request.
runtime_client = boto3.client('runtime.sagemaker', region_name=region)
response = runtime_client.invoke_endpoint(EndpointName=predictor.endpoint,
                                          ContentType='text/x-libsvm',
                                          Body=payload)
result = response['Body'].read().decode('ascii')
print('Predicted values are {}.'.format(result))
```
### (Optional) Delete the Endpoint
If you're done with this exercise, please run the delete_endpoint line in the cell below. This will remove the hosted endpoint and avoid any charges from a stray instance being left on.
```
# Tear down the hosted endpoint so it stops accruing charges.
xgb_script_mode_estimator.delete_endpoint()
```
| github_jupyter |
# Deploying a MedNIST Classifier with BentoML
This notebook demos the process of packaging up a trained model using BentoML into an artifact which can be run as a local program performing inference, a web service doing the same, and a Docker containerized web service. BentoML provides various ways of deploying models with existing platforms like AWS or Azure but we'll focus on local deployment here since researchers are more likely to do this. This tutorial will train a MedNIST classifier like the [MONAI tutorial here](https://github.com/Project-MONAI/tutorials/blob/master/2d_classification/mednist_tutorial.ipynb) and then do the packaging as described in this [BentoML tutorial](https://github.com/bentoml/gallery/blob/master/pytorch/fashion-mnist/pytorch-fashion-mnist.ipynb).
## Setup environment
```
!python -c "import monai" || pip install -q "monai[pillow, tqdm]"
!python -c "import bentoml" || pip install -q bentoml
```
## Setup imports
```
# Copyright 2020 MONAI Consortium
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
# http://www.apache.org/licenses/LICENSE-2.0
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import os
import shutil
import tempfile
import glob
import PIL.Image
import torch
import numpy as np
from ignite.engine import Events
from monai.apps import download_and_extract
from monai.config import print_config
from monai.networks.nets import DenseNet121
from monai.engines import SupervisedTrainer
from monai.transforms import (
AddChannel,
Compose,
LoadImage,
RandFlip,
RandRotate,
RandZoom,
ScaleIntensity,
EnsureType,
)
from monai.utils import set_determinism
set_determinism(seed=0)
print_config()
```
## Download dataset
The MedNIST dataset was gathered from several sets from [TCIA](https://wiki.cancerimagingarchive.net/display/Public/Data+Usage+Policies+and+Restrictions),
[the RSNA Bone Age Challenge](http://rsnachallenges.cloudapp.net/competitions/4),
and [the NIH Chest X-ray dataset](https://cloud.google.com/healthcare/docs/resources/public-datasets/nih-chest).
The dataset is kindly made available by [Dr. Bradley J. Erickson M.D., Ph.D.](https://www.mayo.edu/research/labs/radiology-informatics/overview) (Department of Radiology, Mayo Clinic)
under the Creative Commons [CC BY-SA 4.0 license](https://creativecommons.org/licenses/by-sa/4.0/).
If you use the MedNIST dataset, please acknowledge the source.
```
# Use MONAI_DATA_DIRECTORY when set; otherwise fall back to a temp directory.
directory = os.environ.get("MONAI_DATA_DIRECTORY")
root_dir = tempfile.mkdtemp() if directory is None else directory
print(root_dir)

resource = "https://drive.google.com/uc?id=1QsnnkvZyJPcbRoV_ArW8SnE1OTuoVbKE"
md5 = "0bc7306e7427e00ad1c5526a6677552d"

compressed_file = os.path.join(root_dir, "MedNIST.tar.gz")
data_dir = os.path.join(root_dir, "MedNIST")
# Download and unpack only once; the md5 guards against a corrupted archive.
if not os.path.exists(data_dir):
    download_and_extract(resource, compressed_file, root_dir, md5)

# One subdirectory per class; the directory name is the class label.
subdirs = sorted(glob.glob(f"{data_dir}/*/"))
class_names = [os.path.basename(sd[:-1]) for sd in subdirs]
image_files = [glob.glob(f"{sb}/*") for sb in subdirs]
image_files_list = sum(image_files, [])
# Integer class index for every image, aligned with image_files_list.
image_class = sum(([i] * len(f) for i, f in enumerate(image_files)), [])
image_width, image_height = PIL.Image.open(image_files_list[0]).size

print(f"Label names: {class_names}")
print(f"Label counts: {list(map(len, image_files))}")
print(f"Total image count: {len(image_class)}")
print(f"Image dimensions: {image_width} x {image_height}")
```
## Setup and Train
Here we'll create a transform sequence and train the network, omitting validation and testing since we know this does indeed work and it's not needed here:
```
# Training-time preprocessing/augmentation chain: load the image from its
# path, add a channel dimension, scale intensities, then apply random
# rotate/flip/zoom augmentation and convert to the default tensor type.
train_transforms = Compose(
    [
        LoadImage(image_only=True),
        AddChannel(),
        ScaleIntensity(),
        RandRotate(range_x=np.pi / 12, prob=0.5, keep_size=True),
        RandFlip(spatial_axis=0, prob=0.5),
        RandZoom(min_zoom=0.9, max_zoom=1.1, prob=0.5),
        EnsureType(),
    ]
)
class MedNISTDataset(torch.utils.data.Dataset):
    """Map-style dataset pairing image file paths with integer class labels.

    Items are produced lazily: the transform chain is applied to the stored
    path at lookup time, so no image data is kept in memory up front.
    """

    def __init__(self, image_files, labels, transforms):
        self.image_files = image_files
        self.labels = labels
        self.transforms = transforms

    def __len__(self):
        # Dataset size is the number of file paths.
        return len(self.image_files)

    def __getitem__(self, index):
        # Apply the transform chain on demand and pair with the label.
        image = self.transforms(self.image_files[index])
        label = self.labels[index]
        return image, label
# just one dataset and loader, we won't bother with validation or testing
train_ds = MedNISTDataset(image_files_list, image_class, train_transforms)
train_loader = torch.utils.data.DataLoader(train_ds, batch_size=300, shuffle=True, num_workers=10)

# NOTE(review): assumes a CUDA-capable GPU is available at index 0.
device = torch.device("cuda:0")
# One output channel per MedNIST class, single-channel grayscale input.
net = DenseNet121(spatial_dims=2, in_channels=1, out_channels=len(class_names)).to(device)
loss_function = torch.nn.CrossEntropyLoss()
opt = torch.optim.Adam(net.parameters(), 1e-5)
max_epochs = 5
def _prepare_batch(batch, device, non_blocking):
return tuple(b.to(device) for b in batch)
# Ignite-based trainer: runs the optimization loop for max_epochs over train_loader.
trainer = SupervisedTrainer(device, max_epochs, train_loader, net, opt, loss_function, prepare_batch=_prepare_batch)

# Report the loss of the final batch at the end of every epoch.
@trainer.on(Events.EPOCH_COMPLETED)
def _print_loss(engine):
    print(f"Epoch {engine.state.epoch}/{engine.state.max_epochs} Loss: {engine.state.output[0]['loss']}")

trainer.run()
```
The network will be saved out here as a Torchscript object but this isn't necessary as we'll see later.
```
# Serialize the trained network as TorchScript (reloadable without the Python class).
torch.jit.script(net).save("classifier.zip")
```
## BentoML Setup
BentoML provides its platform through an API to wrap service requests as method calls. This is obviously similar to how Flask works (which is one of the underlying technologies used here), but on top of this are provided various facilities for storing the network (artifacts), handling the IO component of requests, and caching data. What we need to provide is a script file to represent the services we want; BentoML will take this with the artifacts we provide and store it in a separate location which can be run locally as well as uploaded to a server (sort of like Docker registries).
The script below will create our API which includes MONAI code. The transform sequence needs a special read Transform to turn a data stream into an image, but otherwise the code is like what was used above for training. The network is stored as an artifact which in practice is the stored weights in the BentoML bundle. This is loaded at runtime automatically, but we could instead load the Torchscript model if we wanted to, in particular if we wanted an API that didn't rely on MONAI code.
The script needs to be written out to a file first:
```
%%writefile mednist_classifier_bentoml.py
from typing import BinaryIO, List
import numpy as np
from PIL import Image
import torch
from monai.transforms import (
AddChannel,
Compose,
Transform,
ScaleIntensity,
EnsureType,
)
import bentoml
from bentoml.frameworks.pytorch import PytorchModelArtifact
from bentoml.adapters import FileInput, JsonOutput
from bentoml.utils import cached_property
MEDNIST_CLASSES = ["AbdomenCT", "BreastMRI", "CXR", "ChestCT", "Hand", "HeadCT"]
class LoadStreamPIL(Transform):
    """Load an image file from a data stream using PIL."""

    def __init__(self, mode=None):
        # Optional PIL mode ("L", "RGB", ...) to convert the decoded image to.
        self.mode = mode

    def __call__(self, stream):
        image = Image.open(stream)
        if self.mode is not None:
            # Convert to the requested mode (e.g. grayscale) before array conversion.
            image = image.convert(mode=self.mode)
        return np.array(image)
# Pip dependencies baked into the BentoML bundle's environment.
@bentoml.env(pip_packages=["torch", "numpy", "monai", "pillow"])
# The trained network is stored in the bundle as the "classifier" artifact.
@bentoml.artifacts([PytorchModelArtifact("classifier")])
class MedNISTClassifier(bentoml.BentoService):
    @cached_property
    def transform(self):
        # Inference-time preprocessing, built once per service instance:
        # decode the stream as grayscale ("L"), add a channel dimension,
        # scale intensities, convert to tensor.
        return Compose([LoadStreamPIL("L"), AddChannel(), ScaleIntensity(), EnsureType()])

    @bentoml.api(input=FileInput(), output=JsonOutput(), batch=True)
    def predict(self, file_streams: List[BinaryIO]) -> List[str]:
        """Classify a batch of image file streams; return class-name strings."""
        img_tensors = list(map(self.transform, file_streams))
        batch = torch.stack(img_tensors).float()
        with torch.no_grad():
            outputs = self.artifacts.classifier(batch)
        # argmax over class logits -> predicted class index per image.
        _, output_classes = outputs.max(dim=1)
        return [MEDNIST_CLASSES[oc] for oc in output_classes]
```
Now the script is loaded and the classifier artifact is packed with the network's state. This is then saved to a repository directory on the local machine:
```
from mednist_classifier_bentoml import MedNISTClassifier # noqa: E402

bento_svc = MedNISTClassifier()
# Pack the CPU, eval-mode network as the "classifier" artifact declared on the service.
bento_svc.pack('classifier', net.cpu().eval())
# Save the bundle (code + artifact + environment spec) to the local BentoML repository.
saved_path = bento_svc.save()
print(saved_path)
```
We can look at the contents of this repository, which includes code and setup scripts:
```
!ls -l {saved_path}
```
This repository can be run like a stored program where we invoke it by name and the API name ("predict") we want to use and provide the inputs as a file:
```
!bentoml run MedNISTClassifier:latest predict --input-file {image_files[0][0]}
```
The service can also be run off of a Flask web server. The following script starts the service, waits for it to get going, uses curl to send the test file as a POST request to get a prediction, then kill the server:
```
%%bash -s {image_files[0][0]}
# filename passed in as an argument to the cell
test_file=$1

# start the Flask-based server, sending output to /dev/null for neatness
bentoml serve --port=8000 MedNISTClassifier:latest &> /dev/null &

# record the PID of the server and wait for it to start
lastpid=$!
sleep 5

# send the test file using curl and capture the returned string
result=$(curl -s -X POST "http://127.0.0.1:8000/predict" -F image=@$test_file)

# kill the server
kill $lastpid

echo "Prediction: $result"
```
The service can be packaged as a Docker container to be started elsewhere as a server:
```
# Build a Docker image from the saved bundle and list local images.
!bentoml containerize MedNISTClassifier:latest -t mednist-classifier:latest
!docker image ls

# Remove the temporary data directory only if we created one ourselves.
if directory is None:
    shutil.rmtree(root_dir)
```
| github_jupyter |
```
from statsmodels.graphics import utils
import missingno as msno
import fancyimpute
from statsmodels.graphics import utils
import missingno as msno
import fancyimpute
from fancyimpute import KNN
%matplotlib inline
# Column-rename map for the Zillow properties data. The prefix encodes how a
# column is treated downstream:
#   I_ = identifier, Z_ = regression target (logerror), T_ = time/year,
#   C_ = categorical, R_ = real-valued (numeric, MICE-imputed later),
#   G_ = geographic census code.
columns = {"parcelid": "I_parcelid" ,
           "logerror": "Z_logerror" ,
           "transactiondate": "T_transactiondate" ,
           "airconditioningtypeid": "C_airconditioningtypeid" ,
           "architecturalstyletypeid": "C_architecturalstyletypeid" ,
           "basementsqft": "R_basementsqft" ,
           "bathroomcnt": "R_bathroomcnt" ,
           "bedroomcnt": "R_bedroomcnt" ,
           "buildingclasstypeid": "C_buildingclasstypeid" ,
           "buildingqualitytypeid": "C_buildingqualitytypeid" ,
           "calculatedbathnbr": "R_calculatedbathnbr" ,
           "decktypeid": "C_decktypeid" ,
           "finishedfloor1squarefeet": "R_finishedfloor1squarefeet" ,
           "calculatedfinishedsquarefeet": "R_calculatedfinishedsquarefeet" ,
           "finishedsquarefeet12": "R_finishedsquarefeet12" ,
           "finishedsquarefeet13": "R_finishedsquarefeet13" ,
           "finishedsquarefeet15": "R_finishedsquarefeet15" ,
           "finishedsquarefeet50": "R_finishedsquarefeet50" ,
           "finishedsquarefeet6": "R_finishedsquarefeet6" ,
           "fips": "C_fips" ,
           "fireplacecnt": "R_fireplacecnt" ,
           "fullbathcnt": "R_fullbathcnt" ,
           "garagecarcnt": "R_garagecarcnt" ,
           "garagetotalsqft": "R_garagetotalsqft" ,
           "hashottuborspa": "C_hashottuborspa" ,
           "heatingorsystemtypeid": "C_heatingorsystemtypeid" ,
           "latitude": "R_latitude" ,
           "longitude": "R_longitude" ,
           "lotsizesquarefeet": "R_lotsizesquarefeet" ,
           "poolcnt": "R_poolcnt" ,
           "poolsizesum": "R_poolsizesum" ,
           "pooltypeid10": "C_pooltypeid10" ,
           "pooltypeid2": "C_pooltypeid2" ,
           "pooltypeid7": "C_pooltypeid7" ,
           "propertycountylandusecode": "C_propertycountylandusecode" ,
           "propertylandusetypeid": "C_propertylandusetypeid" ,
           "propertyzoningdesc": "C_propertyzoningdesc" ,
           "rawcensustractandblock": "G_rawcensustractandblock" ,
           "regionidcity": "C_regionidcity" ,
           "regionidcounty": "C_regionidcounty" ,
           "regionidneighborhood": "C_regionidneighborhood" ,
           "regionidzip": "C_regionidzip" ,
           "roomcnt": "R_roomcnt" ,
           "storytypeid": "C_storytypeid" ,
           "threequarterbathnbr": "R_threequarterbathnbr" ,
           "typeconstructiontypeid": "C_typeconstructiontypeid" ,
           "unitcnt": "R_unitcnt" ,
           "yardbuildingsqft17": "R_yardbuildingsqft17" ,
           "yardbuildingsqft26": "R_yardbuildingsqft26" ,
           "yearbuilt": "T_yearbuilt" ,
           "numberofstories": "R_numberofstories" ,
           "fireplaceflag": "C_fireplaceflag" ,
           "structuretaxvaluedollarcnt": "R_structuretaxvaluedollarcnt" ,
           "taxvaluedollarcnt": "R_taxvaluedollarcnt" ,
           "assessmentyear": "T_assessmentyear" ,
           "landtaxvaluedollarcnt": "R_landtaxvaluedollarcnt" ,
           "taxamount": "R_taxamount" ,
           "taxdelinquencyflag": "C_taxdelinquencyflag" ,
           "taxdelinquencyyear": "T_taxdelinquencyyear" ,
           "censustractandblock": "G_censustractandblock" ,
           }
# Initial data import
# NOTE(review): relies on pandas being imported as pd in an earlier cell — verify.
df_2016 = pd.read_csv("~/src/properties_2016.csv", index_col="parcelid")
train_2016 = pd.read_csv("~/src/train_2016.csv", index_col="parcelid")
# Left-join transactions with property attributes on the parcelid index.
train_2016_join = train_2016.join(df_2016)
train_2016_join.rename(columns=columns, inplace=True)
train_2016_join.sort_index(axis=1, inplace=True)
train_2016_join.to_csv("./datasets/train_2016_join.csv")

df_2017 = pd.read_csv("~/src/properties_2017.csv", index_col="parcelid")
train_2017 = pd.read_csv("~/src/train_2017.csv", index_col="parcelid")
train_2017_join = train_2017.join(df_2017)
train_2017_join.rename(columns=columns, inplace=True)
train_2017_join.sort_index(axis=1, inplace=True)
train_2017_join.to_csv("./datasets/train_2017_join.csv")

# Free the large raw property frames; only the joined frames are used below.
del df_2016
del df_2017

# Collect the real-valued (R_-prefixed) columns for numeric imputation.
df_rs = pd.DataFrame()
for x in train_2016_join.columns:
    if x[0] == "R":
        df_rs[x] = train_2016_join[x]

# MICE returns a bare array, so column names and the parcelid index are
# stashed first and restored after imputation.
names = dict(enumerate(list(df_rs.columns)))
index = dict(enumerate(list(df_rs.index)))
df_rs_impute = pd.DataFrame(fancyimpute.MICE(n_imputations=10).complete(df_rs))
df_rs_impute.rename(columns=names, index=index, inplace=True)
# Reattach the target so the imputed frame is self-contained.
df_rs_impute["Z_logerror"] = train_2016_join["Z_logerror"]
# Collect all non-real columns (categorical, time, geo, id, target).
df_cs = pd.DataFrame()
for x in train_2016_join.columns:
    if x[0] != "R":
        df_cs[x] = train_2016_join[x]
df_cs["Z_logerror"] = df_rs_impute["Z_logerror"]

# High-cardinality categorical columns to be collapsed: any level not listed
# in the matching dic_N below is replaced with the single level "Others".
name1 = "C_propertycountylandusecode"
name2 = "C_regionidzip"
name3 = "C_taxdelinquencyflag"
name4 = "C_regionidneighborhood"
name5 = "T_taxdelinquencyyear"
name6 = "C_fips"
name7 = "C_heatingorsystemtypeid"
name8 = "C_buildingqualitytypeid"
name9 = "C_regionidcounty"
name10 = "C_regionidcity"
name11 = "C_propertyzoningdesc"

# Levels retained per column (presumably chosen by frequency or significance
# in a prior analysis — TODO confirm the selection criterion).
dic_1 = [
    "0100","0109","010C","010D","010E","0200","0401","040A","0700","1","1110",
    "1111","1129","112","1720","1722",]
dic_2 = [
    95982,95983,96005,96019,96123,96207,96267,96323,96354,96364,96464,
    96480,96488,96507,96951,96989,97003,97089,97118,97344
]
dic_4 = [
    31383,36630,40548,47880,115657,118920,268496,275340,275795,416314,416963,760999,
    762178,
]
dic_5 = [
    8.0, 14.0, 15.0
]
dic_6 = [
    6111.0
]
dic_7 = [
    2.0,
]
dic_8 = [
    1.0, 8.0
]
dic_10 = [
    3491.0, 6822.0, 10241.0, 12447.0, 12520.0, 13150.0, 13693.0, 18875.0, 20008.0,
    22827.0, 24245.0, 25621.0, 25953.0, 26965.0, 27110.0, 30267.0, 31134.0, 33612.0,
    33837.0, 34278.0, 36502.0, 37015.0, 38032.0, 39306.0, 40227.0, 45457.0, 46098.0,
    46298.0, 47568.0, 50749.0, 51239.0, 52650.0, 53027.0, 54053.0, 54212.0, 54299.0,
]
dic_11 = [
    u"ARR11000D*", u"AVU*", u"BFM1*",
    u"BHR1*", u"BRA2*", u"BUR5*", u"CCC3", u"CEADP4", u"CLHC*",
    u"CUR3*", u"CV17500-RD", u"EMCO*", u"GLR-4*", u"HBR2B*", u"LAC2", u"LAC2(PV)", u"LAR3",
    u"LAR5", u"LARA", u"LARS", u"LBPD1", u"LBR2L", u"LCA11*", u"LCA11000*",
    u"LCA16000*", u"LCC1-RA100", u"LCC2YY", u"LCR110000*",
    u"LCR2YY", u"LCRA7S", u"LCRAL*", u"LCRPD13U-R", u"LPR2*",
    u"LRHI", u"LRP*", u"LRR 2 *", u"LUCR110000", u"MNRH", u"MNRM",
    u"PDM4", u"POC4-R3100", u"POR3*", u"PSR1", u"RMC3D*", u"RMR1YY", u"SGR3-R1*",
    u"SGR305", u"SLR1YY", u"SOR12L", u"SRRPCC*", u"SSR1*", u"TCR4YY", u"TORR-LO", u"WAR18500RU", u"WDC1B*"
]

# NOTE(review): there is no dic_3 (C_taxdelinquencyflag) or dic_9
# (C_regionidcounty); those columns keep all their original levels.
df_cs[name1].where(df_cs[name1].isin(dic_1), other="Others", inplace=True)
df_cs[name2].where(df_cs[name2].isin(dic_2), other="Others", inplace=True)
df_cs[name4].where(df_cs[name4].isin(dic_4), other="Others", inplace=True)
df_cs[name5].where(df_cs[name5].isin(dic_5), other="Others", inplace=True)
df_cs[name6].where(df_cs[name6].isin(dic_6), other="Others", inplace=True)
df_cs[name7].where(df_cs[name7].isin(dic_7), other="Others", inplace=True)
df_cs[name8].where(df_cs[name8].isin(dic_8), other="Others", inplace=True)
df_cs[name10].where(df_cs[name10].isin(dic_10), other="Others", inplace=True)
df_cs[name11].where(df_cs[name11].isin(dic_11), other="Others", inplace=True)

# Recombine the imputed reals with the (collapsed) categoricals.
df_join = df_rs_impute.join(df_cs.drop("Z_logerror", axis=1))
# Re-declare the categorical column names (duplicates the cell above).
name1 = "C_propertycountylandusecode"
name2 = "C_regionidzip"
name3 = "C_taxdelinquencyflag"
name4 = "C_regionidneighborhood"
name5 = "T_taxdelinquencyyear"
name6 = "C_fips"
name7 = "C_heatingorsystemtypeid"
name8 = "C_buildingqualitytypeid"
name9 = "C_regionidcounty"
name10 = "C_regionidcity"
name11 = "C_propertyzoningdesc"

# NOTE(review): "R_logerror" does not exist as a column (the target is
# "Z_logerror"); it is harmless here because only c_features[1:] is used.
c_features = [
    "R_logerror", name1, name2, name3, name4, name5, name6, name7, name8, name9,
    name10, name11
]

# Patsy formula terms: standardized reals, dummy-coded categoricals.
reals = ["scale({})".format(x) for x in df_join.columns if x[0] == "R"]
cates = ["C({})".format(x) for x in c_features[1:]]

features = []
for x in cates:
    features.append(x)
for x in reals:
    features.append(x)
    # Interaction terms between each real and every categorical.
    for y in cates:
        features.append("{}:{}".format(x, y))

formula = "Z_logerror~"+"+".join(features)
# Fill the remaining categorical NaNs with the "Others" level before fitting.
df_OLS = df_join.fillna("Others")
# NOTE(review): assumes statsmodels.api was imported as sm in an earlier cell.
model = sm.OLS.from_formula(formula, df_OLS)
result = model.fit()
result.summary()
```
| github_jupyter |
<div style='background-image: url("../../share/images/header.svg") ; padding: 0px ; background-size: cover ; border-radius: 5px ; height: 250px'>
<div style="float: right ; margin: 50px ; padding: 20px ; background: rgba(255 , 255 , 255 , 0.7) ; width: 50% ; height: 150px">
<div style="position: relative ; top: 50% ; transform: translatey(-50%)">
<div style="font-size: xx-large ; font-weight: 900 ; color: rgba(0 , 0 , 0 , 0.8) ; line-height: 100%">Computational Seismology</div>
<div style="font-size: large ; padding-top: 20px ; color: rgba(0 , 0 , 0 , 0.5)">Spectral Element Method - 1D Elastic Wave Equation, Heterogeneous case</div>
</div>
</div>
</div>
Seismo-Live: http://seismo-live.org
<p style="width:20%;float:right;padding-left:50px">
<img src=../../share/images/book.jpg>
<span style="font-size:smaller">
</span>
</p>
---
This notebook is part of the supplementary material
to [Computational Seismology: A Practical Introduction](https://global.oup.com/academic/product/computational-seismology-9780198717416?cc=de&lang=en&#),
Oxford University Press, 2016.
##### Authors:
* David Vargas ([@dvargas](https://github.com/davofis))
* Heiner Igel ([@heinerigel](https://github.com/heinerigel))
## Basic Equations
The numerical description of 1D-elastic waves propagating in a heterogeneous medium is a natural extension of the homogeneous case. From an algorithmic point of view, now we allow both mass, and stiffness matrices to be initialized separately for each element. In contrast with the homogeneous case, elastic parameters $\lambda$ and $\mu$ may vary at each collocation point.
From a theoretical point of view, we basically follow the same strategy developed in the homogeneous case. The numerical solution for the 1D elastic wave equation
\begin{equation}
\rho(x) \partial_t^2 u(x,t) = \partial_x (\mu(x) \partial_x u(x,t)) + f(x,t),
\end{equation}
using the spectral element method is done after a series of steps summarized as follows:
1) The wave equation is written into its Weak form
2) Apply stress Free Boundary Condition after integration by parts
3) Approximate the wave field as a linear combination of some basis
\begin{equation}
u(x,t) \ \approx \ \overline{u}(x,t) \ = \ \sum_{i=1}^{n} u_i(t) \ \varphi_i(x)
\end{equation}
4) Use the same basis functions in $u(x, t)$ as test functions in the weak form, the so-called Galerkin principle.
5) The continuous weak form is written as a system of linear equations by considering the approximated displacement field.
\begin{equation}
\mathbf{M}^T\partial_t^2 \mathbf{u} + \mathbf{K}^T\mathbf{u} = \mathbf{f}
\end{equation}
6) Time extrapolation with centered finite differences scheme
\begin{equation}
\mathbf{u}(t + dt) = dt^2 (\mathbf{M}^T)^{-1}[\mathbf{f} - \mathbf{K}^T\mathbf{u}] + 2\mathbf{u} - \mathbf{u}(t-dt).
\end{equation}
where $\mathbf{M}$ is known as the mass matrix, and $\mathbf{K}$ the stiffness matrix.
The above solution is exactly the same presented for the classic finite-element method. Now we introduce appropriated basis functions and integration scheme to efficiently solve the system of matrices.
#### Interpolation with Lagrange Polynomials
At the elemental level (see section 7.4), we introduce as interpolating functions the Lagrange polynomials and use $\xi$ as the space variable representing our elemental domain:
\begin{equation}
\varphi_i \ \rightarrow \ \ell_i^{(N)} (\xi) \ := \ \prod_{j \neq i}^{N+1} \frac{\xi - \xi_j}{\xi_i-\xi_j}, \qquad i,j = 1, 2, \dotsc , N + 1
\end{equation}
#### Numerical Integration
The integral of a continuous function $f(x)$ can be calculated after replacing $f(x)$ by a polynomial approximation that can be integrated analytically. As interpolating functions we use again the Lagrange polynomials and
obtain Gauss-Lobatto-Legendre quadrature. Here, the GLL points are used to perform the integral.
\begin{equation}
\int_{-1}^1 f(x) \ dx \approx \int _{-1}^1 P_N(x) dx = \sum_{i=1}^{N+1}
w_i f(x_i)
\end{equation}
```
# Import all necessary libraries, this is a configuration step for the exercise.
# Please run it before the simulation code!
import numpy as np
import matplotlib
# Show Plot in The Notebook
matplotlib.use("nbagg")
import matplotlib.pyplot as plt
from gll import gll
from lagrange1st import lagrange1st
from ricker import ricker
```
### 1. Initialization of setup
```
# Initialization of setup
# ---------------------------------------------------------------
# Physical and numerical parameters for the 1D spectral-element simulation.
nt = 10000 # number of time steps
xmax = 8000. # Length of domain [m]
N = 3 # Order of Lagrange polynomials
ne = 250 # Number of elements
Tdom = .2 # Dominant period of Ricker source wavelet
iplot = 20 # Plotting each iplot snapshot
vs = 2500. # S velocity [m/s]
rho = 2000 # Density [kg/m^3]
# variables for elemental matrices
Me = np.zeros(N+1, dtype = float)
Ke = np.zeros((N+1, N+1), dtype = float)
# ----------------------------------------------------------------
# Initialization of GLL points integration weights
[xi, w] = gll(N) # xi, N+1 coordinates [-1 1] of GLL points
                 # w Integration weights at GLL locations
# Space domain
le = xmax/ne # Length of elements
# Vector with GLL points
# Adjacent elements share their boundary node, hence N*ne + 1 global points.
k = 0
xg = np.zeros((N*ne)+1)
xg[k] = 0
for i in range(1,ne+1):
    for j in range(0,N):
        k = k+1
        xg[k] = (i-1)*le + .5*(xi[j+1]+1)*le
# ---------------------------------------------------------------
# Global time step from the Courant criterion, using the smallest grid spacing.
dxmin = min(np.diff(xg))
eps = 0.1 # Courant value
dt = eps*dxmin/vs # Global time step
# Mapping - Jacobian (constant per element since all elements have length le)
J = le/2
Ji = 1/J # Inverse Jacobian
# 1st derivative of Lagrange polynomials
l1d = lagrange1st(N) # Array with GLL as columns for each N+1 polynomial
```
### 2. Low velocity zone
The introduction of a specific velocity model is done after allowing space-dependent elastic parameters, i.e.
\begin{equation}
\mu(x) = \rho(x) v_s^2(x)
\end{equation}
#### Exercise 1
Introduce a low-velocity zone (-40%) at the center of the model spanning 50 elements. Then, visualize your model. Additionally, you can try different velocity and density models by defining new python methods.
```
#################################################################
# INTRODUCE A LOW VELOCITY ZONE HERE!
#################################################################
#################################################################
# PLOT YOUR VELOCITY MODEL HERE!
#################################################################
```
### 3. The Mass Matrix
Now we initialize the mass and stiffness matrices. In general, the mass matrix at the elemental level is given
\begin{equation}
M_{ji}^e \ = \ w_j \ \rho (\xi) \ \frac{\mathrm{d}x}{\mathrm{d}\xi} \delta_{ij} \vert_ {\xi = \xi_j}
\end{equation}
#### Exercise 2
Implement the mass matrix at each element using the integration weights at GLL locations $w$, the Jacobian $J$, and density $\rho$. Then, perform the global assembly of the mass matrix, compute its inverse, and display the inverse mass matrix to visually inspect what it looks like.
```
#################################################################
# IMPLEMENT THE GLOBAL ASSEMBLY OF M HERE!
#################################################################
#################################################################
# COMPUTE THE INVERSE MASS MATRIX HERE!
#################################################################
#################################################################
# DISPLAY THE INVERSE MASS MATRIX HERE!
#################################################################
```
### 4. The Stiffness matrix
On the other hand, the general form of the stiffness matrix at the elemental level is
\begin{equation}
K_{ji}^e \ = \ \sum_{k = 1}^{N+1} w_k \mu (\xi) \partial_\xi \ell_j (\xi) \partial_\xi \ell_i (\xi) \left(\frac{\mathrm{d}\xi}{\mathrm{d}x} \right)^2 \frac{\mathrm{d}x}{\mathrm{d}\xi} \vert_{\xi = \xi_k}
\end{equation}
#### Exercise 3
Implement the stiffness matrix at each element using the integration weights at GLL locations $w$, the Jacobian $J$, and shear modulus $\mu$. Then, perform the global assembly of the stiffness matrix and display the matrix to visually inspect what it looks like.
```
#################################################################
# IMPLEMENT THE GLOBAL ASSEMBLY OF K HERE!
#################################################################
#################################################################
# DISPLAY THE STIFFNESS MATRIX HERE!
#################################################################
```
### 5. Finite element solution
Finally we implement the spectral element solution using the computed mass $M$ and stiffness $K$ matrices together with a finite differences extrapolation scheme
\begin{equation}
\mathbf{u}(t + dt) = dt^2 (\mathbf{M}^T)^{-1}[\mathbf{f} - \mathbf{K}^T\mathbf{u}] + 2\mathbf{u} - \mathbf{u}(t-dt).
\end{equation}
```
# SE Solution, Time extrapolation
# ---------------------------------------------------------------
# initialize source time function and force vector f
# NOTE(review): `ng`, `el_span`, `percent`, `Minv` and `K` must be defined by the
# exercise cells above (global point count, low-velocity-zone width in elements,
# velocity label, inverse mass matrix, stiffness matrix) — confirm before running.
src = ricker(dt,Tdom)
isrc = int(np.floor(ng/2)) # Source location
# Initialization of solution vectors
# NOTE(review): uold, unew and f all alias the same zero array as u here; this is
# harmless only because each is rebound to a fresh array inside the loop.
u = np.zeros(ng)
uold = u
unew = u
f = u
# Initialize animated plot
# ---------------------------------------------------------------
plt.figure(figsize=(10,6))
# ploting low velocity zone
half = np.floor(xmax/2)
plt.axvspan(half - el_span/2*le,
            half + el_span/2*le,
            alpha=0.2, facecolor='b')
plt.title('SEM 1D Animation')
plt.xlabel(' x (m)')
plt.ylabel(' Amplitude ')
font = {'family': 'serif',
        'color': 'black',
        'weight': 'normal',
        'size': 16,
        }
lines = plt.plot(xg, u, lw=1.5)
plt.title('SEM 1D Animation $v_{min} = %s v_{max}$' %percent, size=16)
plt.xlabel(' x (m)')
plt.ylabel(' Displacement ')
plt.ion() # set interactive mode
plt.show()
# ---------------------------------------------------------------
# Time extrapolation
# ---------------------------------------------------------------
x_t = []
for it in range(nt):
    # Source initialization: inject the wavelet at the source node while it lasts.
    # NOTE(review): src[it-1] reads src[-1] when it == 0 — likely intended src[it]; confirm.
    f = np.zeros(ng)
    if it < len(src):
        f[isrc-1] = src[it-1]
    # Time extrapolation: centered finite differences,
    # u(t+dt) = dt^2 * Minv @ (f - K u) + 2 u - u(t-dt)
    unew = dt**2 * Minv @ (f - K @ u) + 2 * u - uold
    uold, u = u, unew
    # Solution in space-time
    x_t.append(u)
    # --------------------------------------
    # Animation plot. Display solution every `iplot` steps
    if not it % iplot:
        for l in lines:
            l.remove()
            del l
        # --------------------------------------
        # Display lines
        lines = plt.plot(xg, u, color="black", lw = 1.5)
        plt.xlim((0,xmax))
        plt.gcf().canvas.draw()
```
### 6. Displaying the wave-field
```
# Solution in space-time
# Stack the per-step displacement snapshots into a (nt, npoints) array and
# render the full wavefield u(x, t) as an image (time increases downwards).
x_t = np.asanyarray(x_t)
# Initialize plot
plt.figure()
plt.subplot(1,1,1)
plt.imshow(x_t, cmap='hot', aspect='auto',
           extent =[0, xmax, nt*dt, 0])
plt.title('u(x,t) Wavefield')
plt.ylabel('Time [s]')
plt.xlabel('Space [m]')
plt.show()
```
| github_jupyter |
```
import roocs_utils
# List the public attributes of the roocs_utils package
dir(roocs_utils)
```
# Parameters
Parameters classes are used to parse inputs of collection, area, time and level used as arguments in the subsetting operation
The area values can be input as:
* A string of comma separated values: “0.,49.,10.,65”
* A sequence of strings: (“0”, “-10”, “120”, “40”)
* A sequence of numbers: [0, 49.5, 10, 65]
```
# Parse a comma-separated area string into an AreaParameter object
area = roocs_utils.AreaParameter("0.,49.,10.,65")
# the lat/lon bounds can be returned in a dictionary
print(area.asdict())
# the values can be returned as a tuple
print(area.tuple)
```
A collection can be input as
* A string of comma separated values: “cmip5.output1.INM.inmcm4.rcp45.mon.ocean.Omon.r1i1p1.latest.zostoga,cmip5.output1.MPI-M.MPI-ESM-LR.rcp45.mon.ocean.Omon.r1i1p1.latest.zostoga”
* A sequence of strings: e.g. (“cmip5.output1.INM.inmcm4.rcp45.mon.ocean.Omon.r1i1p1.latest.zostoga”,“cmip5.output1.MPI-M.MPI-ESM-LR.rcp45.mon.ocean.Omon.r1i1p1.latest.zostoga”)
```
# Parse a comma-separated string of dataset ids into a CollectionParameter object
collection = roocs_utils.CollectionParameter("cmip5.output1.INM.inmcm4.rcp45.mon.ocean.Omon.r1i1p1.latest.zostoga,cmip5.output1.MPI-M.MPI-ESM-LR.rcp45.mon.ocean.Omon.r1i1p1.latest.zostoga")
# the collection ids can be returned as a tuple
print(collection.tuple)
```
Level can be input as:
* A string of slash separated values: “1000/2000”
* A sequence of strings: e.g. (“1000.50”, “2000.60”) A sequence of numbers: e.g. (1000.50, 2000.60)
Level inputs should be a range of the levels you want to subset over
```
# Parse a (first, last) level range into a LevelParameter object
level = roocs_utils.LevelParameter((1000.50, 2000.60))
# the first and last level in the range provided can be returned in a dictionary
print(level.asdict())
# the values can be returned as a tuple
print(level.tuple)
```
Time can be input as:
* A string of slash separated values: “2085-01-01T12:00:00Z/2120-12-30T12:00:00Z”
* A sequence of strings: e.g. (“2085-01-01T12:00:00Z”, “2120-12-30T12:00:00Z”)
Time inputs should be the start and end of the time range you want to subset over
```
# Parse a slash-separated ISO 8601 start/end string into a TimeParameter object
time = roocs_utils.TimeParameter("2085-01-01T12:00:00Z/2120-12-30T12:00:00Z")
# the first and last time in the range provided can be returned in a dictionary
print(time.asdict())
# the values can be returned as a tuple
print(time.tuple)
```
Parameterise parameterises inputs to instances of parameter classes which allows them to be used throughout roocs.
```
roocs_utils.parameter.parameterise("cmip5.output1.INM.inmcm4.rcp45.mon.ocean.Omon.r1i1p1.latest.zostoga", "0.,49.,10.,65", (1000.50, 2000.60), "2085-01-01T12:00:00Z/2120-12-30T12:00:00Z")
```
# Xarray utils
Xarray utils can be used to identify the main variable in a dataset, as well as identifying the type of a coordinate or returning a coordinate based on an attribute or a type.
```
from roocs_utils.xarray_utils import xarray_utils as xu
import xarray as xr
# Open a multi-file test dataset; cftime avoids calendar issues in CMIP data.
ds = xr.open_mfdataset("../tests/mini-esgf-data/test_data/badc/cmip5/data/cmip5/output1/MOHC/HadGEM2-ES/rcp85/mon/atmos/Amon/r1i1p1/latest/tas/*.nc", use_cftime=True, combine="by_coords")
# find the main variable of the dataset
main_var = xu.get_main_variable(ds)
print("main var =", main_var)
ds[main_var]
# to get the coord types
for coord in ds.coords:
    print("\ncoord name =", coord, "\ncoord type =", xu.get_coord_type(ds[coord]))
print("\n There is a level, time, latitude and longitude coordinate in this dataset")
# to check the type of a coord
# NOTE(review): is_latitude is deliberately called on "lon" here, presumably to
# show it returns False for a non-latitude coordinate — confirm intent.
print(xu.is_level(ds["height"]))
print(xu.is_latitude(ds["lon"]))
# to find a coordinate of a specific type
print("time =", xu.get_coord_by_type(ds, "time"))
# to find the level coordinate, set ignore_aux_coords to False
print("\nlevel =", xu.get_coord_by_type(ds, "level", ignore_aux_coords=False))
# to find a coordinate based on an attribute you expect it to have
xu.get_coord_by_attr(ds, "standard_name", "latitude")
```
# Other utilities
Other utilities allow parsing a memory size of any unit into bytes and converting a time object into an ISO 8601 string
```
from roocs_utils.utils.common import parse_size
from roocs_utils.utils.time_utils import to_isoformat
from datetime import datetime
# to parse a human-readable size string into bytes
size = '50MiB'
size_in_b = parse_size(size)
size_in_b
# to convert a time object into an ISO 8601 time string
time = datetime(2005, 7, 14, 12, 30)
time_str = to_isoformat(time)
time_str
```
| github_jupyter |
```
#hide
#skip
! [ -e /content ] && pip install -Uqq fastai # upgrade fastai on colab
#export
from fastai.torch_basics import *
from fastai.data.all import *
#hide
from nbdev.showdoc import *
#default_exp text.core
#default_cls_lvl 3
```
# Text core
> Basic function to preprocess text before assembling it in a `DataLoaders`.
```
#export
import spacy,html
from spacy.symbols import ORTH
```
## Preprocessing rules
The following are rules applied to texts before or after it's tokenized.
```
#export
#special tokens
# Tokens used by the preprocessing rules: unknown, padding, begin/end of
# stream, field separator, char/word repetition markers, all-caps marker
# and capitalized-word marker.
UNK = "xxunk"
PAD = "xxpad"
BOS = "xxbos"
EOS = "xxeos"
FLD = "xxfld"
TK_REP = "xxrep"
TK_WREP = "xxwrep"
TK_UP = "xxup"
TK_MAJ = "xxmaj"
#export
# nbdev convention: names (as strings) to re-export from the generated module.
_all_ = ["UNK", "PAD", "BOS", "EOS", "FLD", "TK_REP", "TK_WREP", "TK_UP", "TK_MAJ"]
#export
# A single '/', '#' or '\' character, captured for reinsertion.
_re_spec = re.compile(r'([/#\\])')
def spec_add_spaces(t):
    "Add spaces around / and #"
    spaced = _re_spec.sub(r' \1 ', t)
    return spaced
# Unit checks (fastcore's test_eq) for spec_add_spaces
test_eq(spec_add_spaces('#fastai'), ' # fastai')
test_eq(spec_add_spaces('/fastai'), ' / fastai')
test_eq(spec_add_spaces('\\fastai'), ' \\ fastai')
#export
# Two or more consecutive space characters.
_re_space = re.compile(' {2,}')
def rm_useless_spaces(t):
    "Remove multiple spaces"
    collapsed = _re_space.sub(' ', t)
    return collapsed
test_eq(rm_useless_spaces('a b c'), 'a b c')
#export
# A non-space character followed by two or more copies of itself.
_re_rep = re.compile(r'(\S)(\1{2,})')
def replace_rep(t):
    "Replace repetitions at the character level: cccc -- TK_REP 4 c"
    def _sub(m):
        char, extra = m.group(1), m.group(2)
        total = len(extra) + 1
        return f' {TK_REP} {total} {char} '
    return _re_rep.sub(_sub, t)
```
It starts replacing at 3 repetitions of the same character or more.
```
# Unit checks: no change below 3 repetitions; 'aaaa' becomes the TK_REP form
test_eq(replace_rep('aa'), 'aa')
test_eq(replace_rep('aaaa'), f' {TK_REP} 4 a ')
#export
_re_wrep = re.compile(r'(?:\s|^)(\w+)\s+((?:\1\s+)+)\1(\s|\W|$)')
#hide
"""
Matches any word repeated at least four times with spaces between them
(?:\s|^) Non-Capture either a whitespace character or the beginning of text
(\w+) Capture any alphanumeric character
\s+ One or more whitespace
((?:\1\s+)+) Capture a repetition of one or more times \1 followed by one or more whitespace
\1 Occurrence of \1
(\s|\W|$) Capture last whitespace, non alphanumeric character or end of text
""";
#export
def replace_wrep(t):
"Replace word repetitions: word word word word -- TK_WREP 4 word"
def _replace_wrep(m):
c,cc,e = m.groups()
return f' {TK_WREP} {len(cc.split())+2} {c} {e}'
return _re_wrep.sub(_replace_wrep, t)
```
It starts replacing at 3 repetitions of the same word or more.
```
# Unit checks: replacement starts at 3 repeated words; trailing whitespace and
# punctuation are preserved; near-repeats ('ahi') are untouched
test_eq(replace_wrep('ah ah'), 'ah ah')
test_eq(replace_wrep('ah ah ah'), f' {TK_WREP} 3 ah ')
test_eq(replace_wrep('ah ah ah ah'), f' {TK_WREP} 4 ah ')
test_eq(replace_wrep('ah ah ah ah '), f' {TK_WREP} 4 ah ')
test_eq(replace_wrep('ah ah ah ah.'), f' {TK_WREP} 4 ah .')
test_eq(replace_wrep('ah ah ahi'), f'ah ah ahi')
#export
def fix_html(x):
    "Various messy things we've seen in documents"
    # The replacements are applied sequentially, in the same order as the
    # original chained calls (order matters, e.g. '\\"' before unescaping).
    _pairs = (('#39;', "'"), ('amp;', '&'), ('#146;', "'"), ('nbsp;', ' '),
              ('#36;', '$'), ('\\n', "\n"), ('quot;', "'"), ('<br />', "\n"),
              ('\\"', '"'), ('<unk>', UNK), (' @.@ ', '.'), (' @-@ ', '-'),
              ('...', ' …'))
    for old, new in _pairs:
        x = x.replace(old, new)
    return html.unescape(x)
# Unit checks for the HTML/artifact clean-up rules
test_eq(fix_html('#39;bli#146;'), "'bli'")
test_eq(fix_html('Sarah amp; Duck...'), 'Sarah & Duck …')
test_eq(fix_html('a nbsp; #36;'), 'a $')
test_eq(fix_html('\\" <unk>'), f'" {UNK}')
test_eq(fix_html('quot; @.@ @-@ '), "' .-")
test_eq(fix_html('<br />text\\n'), '\ntext\n')
#export
_re_all_caps = re.compile(r'(\s|^)([A-Z]+[^a-z\s]*)(?=(\s|$))')
#hide
"""
Catches any word in all caps, even with ' or - inside
(\s|^) Capture either a whitespace or the beginning of text
([A-Z]+ Capture one capitalized letter or more...
[^a-z\s]*) ...followed by anything that's non lowercase or whitespace
(?=(\s|$)) Look ahead for a space or end of text
""";
#export
def replace_all_caps(t):
"Replace tokens in ALL CAPS by their lower version and add `TK_UP` before."
def _replace_all_caps(m):
tok = f'{TK_UP} ' if len(m.groups()[1]) > 1 else ''
return f"{m.groups()[0]}{tok}{m.groups()[1].lower()}"
return _re_all_caps.sub(_replace_all_caps, t)
# Unit checks: all-caps words are lowered with a TK_UP marker; single capitals
# (like "I") are lowered without one; mixed-case words are untouched
test_eq(replace_all_caps("I'M SHOUTING"), f"{TK_UP} i'm {TK_UP} shouting")
test_eq(replace_all_caps("I'm speaking normally"), "I'm speaking normally")
test_eq(replace_all_caps("I am speaking normally"), "i am speaking normally")
#export
_re_maj = re.compile(r'(\s|^)([A-Z][^A-Z\s]*)(?=(\s|$))')
#hide
"""
Catches any capitalized word
(\s|^) Capture either a whitespace or the beginning of text
([A-Z] Capture exactly one capitalized letter...
[^A-Z\s]*) ...followed by anything that's not uppercase or whitespace
(?=(\s|$)) Look ahead for a space of end of text
""";
#export
def replace_maj(t):
"Replace tokens in Sentence Case by their lower version and add `TK_MAJ` before."
def _replace_maj(m):
tok = f'{TK_MAJ} ' if len(m.groups()[1]) > 1 else ''
return f"{m.groups()[0]}{tok}{m.groups()[1].lower()}"
return _re_maj.sub(_replace_maj, t)
# Unit checks for replace_maj
test_eq(replace_maj("Jeremy Howard"), f'{TK_MAJ} jeremy {TK_MAJ} howard')
test_eq(replace_maj("I don't think there is any maj here"), ("i don't think there is any maj here"),)
#export
def lowercase(t, add_bos=True, add_eos=False):
    "Converts `t` to lowercase"
    # Optionally wrap the stripped, lowered text with begin/end-of-stream tokens.
    prefix = f'{BOS} ' if add_bos else ''
    suffix = f' {EOS}' if add_eos else ''
    return prefix + t.lower().strip() + suffix
#export
def replace_space(t):
    "Replace embedded spaces in a token with unicode line char to allow for split/join"
    return '▁'.join(t.split(' '))
#export
# Default special tokens and rule pipelines used by the tokenizers below:
# `text_proc_rules` run on raw text before tokenization, `text_postproc_rules`
# run on each token afterwards.
defaults.text_spec_tok = [UNK, PAD, BOS, EOS, FLD, TK_REP, TK_WREP, TK_UP, TK_MAJ]
defaults.text_proc_rules = [fix_html, replace_rep, replace_wrep, spec_add_spaces, rm_useless_spaces,
                            replace_all_caps, replace_maj, lowercase]
defaults.text_postproc_rules = [replace_space]
```
## Tokenizing
A tokenizer is a class that must implement `__call__`. This method receives a iterator of texts and must return a generator with their tokenized versions. Here is the most basic example:
```
#export
class BaseTokenizer():
    "Basic tokenizer that just splits on spaces"
    def __init__(self, split_char=' ', **kwargs):
        # Extra keyword arguments are accepted (and ignored) so this class can
        # be constructed with the same arguments as other tokenizers.
        self.split_char = split_char
    def __call__(self, items):
        # Lazily yield the token list for each text.
        for text in items:
            yield text.split(self.split_char)
# Unit checks: default splits on spaces; a custom split_char is honored
tok = BaseTokenizer()
test_eq(tok(["This is a text"]), [["This", "is", "a", "text"]])
tok = BaseTokenizer('x')
test_eq(tok(["This is a text"]), [["This is a te", "t"]])
#export
class SpacyTokenizer():
    "Spacy tokenizer for `lang`"
    def __init__(self, lang='en', special_toks=None, buf_sz=5000):
        # Special tokens default to `defaults.text_spec_tok`; each is registered
        # with spaCy as a special case so it stays a single token.
        self.special_toks = ifnone(special_toks, defaults.text_spec_tok)
        nlp = spacy.blank(lang, disable=["parser", "tagger", "ner"])
        for w in self.special_toks: nlp.tokenizer.add_special_case(w, [{ORTH: w}])
        self.pipe,self.buf_sz = nlp.pipe,buf_sz
    def __call__(self, items):
        # Stream items through spaCy's pipe in batches of `buf_sz`, yielding
        # the token texts of each document as an `L`.
        return (L(doc).attrgot('text') for doc in self.pipe(map(str,items), batch_size=self.buf_sz))
#export
# Default word tokenizer used throughout this module.
WordTokenizer = SpacyTokenizer
tok = SpacyTokenizer()
inp,exp = "This isn't the easiest text.",["This", "is", "n't", "the", "easiest", "text", "."]
test_eq(L(tok([inp,inp])), [exp,exp])
#export
class TokenizeWithRules:
    "A wrapper around `tok` which applies `rules`, then tokenizes, then applies `post_rules`"
    def __init__(self, tok, rules=None, post_rules=None):
        # Both rule lists default to the module-level `defaults` pipelines;
        # post-rules are composed into a single callable applied per token.
        self.rules = L(ifnone(rules, defaults.text_proc_rules))
        self.post_f = compose(*L(ifnone(post_rules, defaults.text_postproc_rules)))
        self.tok = tok
    def __call__(self, batch):
        # `maps` applies each rule in turn over the whole batch before tokenizing.
        return (L(o).map(self.post_f) for o in self.tok(maps(*self.rules, batch)))
# Unit checks: custom rules, default rules with spaCy, and no rules at all
f = TokenizeWithRules(BaseTokenizer(),rules=[replace_all_caps])
test_eq(f(["THIS isn't a problem"]), [[TK_UP, 'this', "isn't", 'a', 'problem']])
f = TokenizeWithRules(SpacyTokenizer())
test_eq(f(["This isn't a problem"]), [[BOS, TK_MAJ, 'this', 'is', "n't", 'a', 'problem']])
f = TokenizeWithRules(BaseTokenizer(split_char="'"), rules=[])
test_eq(f(["This isn't a problem"]), [['This▁isn', 't▁a▁problem']])
```
The main function that will be called during one of the processes handling tokenization. It will iterate through the `batch` of texts, apply the `rules` to them and tokenize them.
```
texts = ["this is a text", "this is another text"]
# Here `texts.__getitem__` is passed as the (single) rule, so integer indices
# are mapped to their texts before tokenization.
tok = TokenizeWithRules(BaseTokenizer(), texts.__getitem__)
test_eq(tok([0,1]), [['this', 'is', 'a', 'text'],['this', 'is', 'another', 'text']])
#export
@delegates(TokenizeWithRules)
def tokenize1(text, tok, **kwargs):
    "Call `TokenizeWithRules` with a single text"
    # Wrap the text in a one-element batch and return the first (only) result.
    return first(TokenizeWithRules(tok=tok, **kwargs)([text]))
# Unit checks for tokenize1 with default and empty rule sets
test_eq(tokenize1("This isn't a problem", SpacyTokenizer()),
        [BOS, TK_MAJ, 'this', 'is', "n't", 'a', 'problem'])
test_eq(tokenize1("This isn't a problem", tok=BaseTokenizer(), rules=[]),
        ['This',"isn't",'a','problem'])
#export
def parallel_tokenize(items, tok=None, rules=None, n_workers=defaults.cpus, **kwargs):
    "Calls optional `setup` on `tok` before launching `TokenizeWithRules` using `parallel_gen`"
    if tok is None: tok = WordTokenizer()
    # Some tokenizers (e.g. subword models) need to see the corpus first.
    if hasattr(tok, 'setup'): tok.setup(items, rules)
    return parallel_gen(TokenizeWithRules, items, tok=tok, rules=rules, n_workers=n_workers, **kwargs)
```
Note that since this uses `parallel_gen` behind the scenes, the generator returned contains tuples of indices and results. There is no guarantee that the results are returned in order, so you should sort by the first item of the tuples (the indices) if you need them ordered.
```
res = parallel_tokenize(['0 1', '1 2'], rules=[], n_workers=2)
idxs,toks = zip(*L(res).sorted(itemgetter(0)))
test_eq(toks, [['0','1'],['1','2']])
#hide
res1 = parallel_tokenize(['0 1', '1 2'], tok=BaseTokenizer(), rules=[], n_workers=0)
idxs1,toks1 = zip(*L(res1).sorted(itemgetter(0)))
test_eq(toks, toks1)
```
### Tokenize texts in files
Preprocessing function for texts in filenames. Tokenized texts will be saved in a similar fashion in a directory suffixed with `_tok` in the parent folder of `path` (override with `output_dir`). This directory is the return value.
```
#export
# File names used inside the tokenized output directory for the word counter
# and the per-file token lengths.
fn_counter_pkl = 'counter.pkl'
fn_lengths_pkl = 'lengths.pkl'
#export
def _tokenize_files(func, files, path, output_dir=None, output_names=None, n_workers=defaults.cpus, rules=None, tok=None,
                    encoding='utf8', skip_if_exists=False):
    "Tokenize text `files` in parallel using `n_workers`"
    # `func(i, output_dir)` maps a file index to its output path.
    if tok is None: tok = WordTokenizer()
    output_dir = Path(ifnone(output_dir, path.parent/f'{path.name}_tok'))
    if skip_if_exists and output_dir.exists(): return output_dir
    output_dir.mkdir(exist_ok=True)
    if output_names is None: output_names = L(output_dir/f.relative_to(path) for f in files)
    # Reading the file is prepended as the first "rule" so the items passed to
    # the workers can be paths rather than texts.
    rules = partial(Path.read_text, encoding=encoding) + L(ifnone(rules, defaults.text_proc_rules.copy()))
    lengths,counter = {},Counter()
    # NOTE(review): the loop variable `tok` shadows the tokenizer argument; it
    # holds the token list of each file from here on.
    for i,tok in parallel_tokenize(files, tok, rules, n_workers=n_workers):
        out = func(i,output_dir)
        out.mk_write(' '.join(tok), encoding=encoding)
        lengths[str(files[i].relative_to(path))] = len(tok)
        counter.update(tok)
    # Persist lengths and token counts for later vocab building / sorting.
    save_pickle(output_dir/fn_lengths_pkl, lengths)
    save_pickle(output_dir/fn_counter_pkl, counter)
    return output_dir
#export
@delegates(_tokenize_files)
def tokenize_folder(path, extensions=None, folders=None, output_dir=None, skip_if_exists=True, **kwargs):
    "Tokenize text files in `path` in parallel using `n_workers`"
    path,extensions = Path(path),ifnone(extensions, ['.txt'])
    files = get_files(path, extensions=extensions, recurse=True, folders=folders)
    # Mirror the source folder structure inside the output directory.
    def _f(i,output_dir): return output_dir/files[i].relative_to(path)
    return _tokenize_files(_f, files, path, skip_if_exists=skip_if_exists, **kwargs)
```
The result will be in `output_dir` (defaults to a folder in the same parent directory as `path`, with `_tok` added to `path.name`) with the same structure as in `path`. Tokenized texts for a given file will be in the file having the same name in `output_dir`. Additionally, a file with a .len suffix contains the number of tokens and the count of all words is stored in `output_dir/counter.pkl`.
`extensions` will default to `['.txt']` and all text files in `path` are treated unless you specify a list of folders in `include`. `rules` (that defaults to `defaults.text_proc_rules`) are applied to each text before going in the tokenizer.
```
#export
@delegates(_tokenize_files)
def tokenize_files(files, path, output_dir, output_names=None, **kwargs):
"Tokenize text `files` in parallel using `n_workers`"
if output_names is None: output_names = L(output_dir/f.relative_to(path) for f in files)
def _f(i,output_dir): return output_dir/output_names[i]
return _tokenize_files(_f, files, path, output_dir=output_dir, **kwargs)
```
### Tokenize texts in a dataframe
```
#export
def _join_texts(df, mark_fields=False):
    "Join the text columns of `df` row-wise, marking each field with `FLD` if `mark_fields=True`"
    # Fields are numbered from 1; columns are concatenated with spaces otherwise.
    text_col = (f'{FLD} {1} ' if mark_fields else '' ) + df.iloc[:,0].astype(str)
    for i in range(1,len(df.columns)):
        text_col += (f' {FLD} {i+1} ' if mark_fields else ' ') + df.iloc[:,i].astype(str)
    return text_col.values
#hide
# Unit check: two identical text columns joined with FLD markers
texts = [f"This is an example of text {i}" for i in range(10)]
df = pd.DataFrame({'text': texts, 'text1': texts}, columns=['text', 'text1'])
col = _join_texts(df, mark_fields=True)
for i in range(len(df)):
    test_eq(col[i], f'{FLD} 1 This is an example of text {i} {FLD} 2 This is an example of text {i}')
#export
def tokenize_texts(texts, n_workers=defaults.cpus, rules=None, tok=None):
    "Tokenize `texts` in parallel using `n_workers`"
    rules = L(ifnone(rules, defaults.text_proc_rules.copy()))
    # Sort the (index, tokens) pairs back into input order, keep only tokens.
    outputs = L(parallel_tokenize(texts, tok=tok, rules=rules, n_workers=n_workers)
               ).sorted().itemgot(1)
    return outputs
#export
def tokenize_df(df, text_cols, n_workers=defaults.cpus, rules=None, mark_fields=None,
                tok=None, tok_text_col="text"):
    "Tokenize texts in `df[text_cols]` in parallel using `n_workers` and stores them in `df[tok_text_col]`"
    # Integer column specifiers are resolved to column names.
    text_cols = [df.columns[c] if isinstance(c, int) else c for c in L(text_cols)]
    #mark_fields defaults to False if there is one column of texts, True if there are multiple
    if mark_fields is None: mark_fields = len(text_cols)>1
    rules = L(ifnone(rules, defaults.text_proc_rules.copy()))
    texts = _join_texts(df[text_cols], mark_fields=mark_fields)
    outputs = L(parallel_tokenize(texts, tok, rules, n_workers=n_workers)
               ).sorted().itemgot(1)
    # Returned frame keeps the non-text columns, the token lists, and a
    # '{tok_text_col}_length' column with each token count.
    other_cols = df.columns[~df.columns.isin(text_cols)]
    res = df[other_cols].copy()
    res[tok_text_col] = outputs
    res[f'{tok_text_col}_length'] = [len(o) for o in outputs]
    return res,Counter(outputs.concat())
```
This function returns a new dataframe with the same non-text columns, a column named text that contains the tokenized texts and a column named text_lengths that contains their respective length. It also returns a counter of all seen words to quickly build a vocabulary afterward.
`rules` (that defaults to `defaults.text_proc_rules`) are applied to each text before going in the tokenizer. If `mark_fields` isn't specified, it defaults to `False` when there is a single text column, `True` when there are several. In that case, the texts in each of those columns are joined with `FLD` markers followed by the number of the field.
```
#export
def tokenize_csv(fname, text_cols, outname=None, n_workers=4, rules=None, mark_fields=None,
                 tok=None, header='infer', chunksize=50000):
    "Tokenize texts in the `text_cols` of the csv `fname` in parallel using `n_workers`"
    # Accept plain string paths too: the default `outname` below relies on
    # `Path.parent`/`Path.stem`, which a str does not provide.
    fname = Path(fname)
    df = pd.read_csv(fname, header=header, chunksize=chunksize)
    outname = Path(ifnone(outname, fname.parent/f'{fname.stem}_tok.csv'))
    cnt = Counter()
    # Process the csv in chunks so arbitrarily large files fit in memory; the
    # first chunk writes the header ('w' mode), later chunks append ('a').
    for i,dfp in enumerate(df):
        out,c = tokenize_df(dfp, text_cols, n_workers=n_workers, rules=rules,
                            mark_fields=mark_fields, tok=tok)
        out.text = out.text.str.join(' ')
        out.to_csv(outname, header=(None,header)[i==0], index=False, mode=('a','w')[i==0])
        cnt.update(c)
    # Persist the accumulated word counter next to the output csv.
    save_pickle(outname.with_suffix('.pkl'), cnt)
#export
def load_tokenized_csv(fname):
    "Utility function to quickly load a tokenized csv and the corresponding counter"
    fname = Path(fname)
    out = pd.read_csv(fname)
    # Token columns were saved space-joined; split them back into tuples of tokens.
    # NOTE(review): assumes the token columns are all columns except the first
    # and last (label and length) — confirm against the writer, `tokenize_csv`.
    for txt_col in out.columns[1:-1]:
        out[txt_col] = tuple(out[txt_col].str.split(' '))
    return out,load_pickle(fname.with_suffix('.pkl'))
```
The result will be written in a new csv file in `outname` (defaults to the same as `fname` with the suffix `_tok.csv`) and will have the same header as the original file, the same non-text columns, a text and a text_lengths column as described in `tokenize_df`.
`rules` (that defaults to `defaults.text_proc_rules`) are applied to each text before going in the tokenizer. If `mark_fields` isn't specified, it defaults to `False` when there is a single text column, `True` when there are several. In that case, the texts in each of those columns are joined with `FLD` markers followed by the number of the field.
The csv file is opened with `header` and optionally with blocks of `chunksize` at a time. If this argument is passed, each chunk is processed independently and saved in the output file to save memory usage.
```
def _prepare_texts(tmp_d):
"Prepare texts in a folder struct in tmp_d, a csv file and returns a dataframe"
path = Path(tmp_d)/'tmp'
path.mkdir()
for d in ['a', 'b', 'c']:
(path/d).mkdir()
for i in range(5):
with open(path/d/f'text{i}.txt', 'w') as f: f.write(f"This is an example of text {d} {i}")
texts = [f"This is an example of text {d} {i}" for i in range(5) for d in ['a', 'b', 'c']]
df = pd.DataFrame({'text': texts, 'label': list(range(15))}, columns=['text', 'label'])
csv_fname = tmp_d/'input.csv'
df.to_csv(csv_fname, index=False)
return path,df,csv_fname
#hide
# integration test
# Exercises all four tokenization entry points (folder, files, texts, df, csv)
# against the same fixture and checks they agree.
with tempfile.TemporaryDirectory() as tmp_d:
    path,df,csv_fname = _prepare_texts(Path(tmp_d))
    #Tokenize as folders
    tokenize_folder(path)
    outp = Path(tmp_d)/'tmp_tok'
    for d in ['a', 'b', 'c']:
        p = outp/d
        for i in range(5):
            test_eq((p/f'text{i}.txt').read_text(), ' '.join([
                BOS, TK_MAJ, 'this', 'is', 'an', 'example', 'of', 'text', d, str(i) ]))
    cnt_a = load_pickle(outp/fn_counter_pkl)
    test_eq(cnt_a['this'], 15)
    test_eq(cnt_a['a'], 5)
    test_eq(cnt_a['0'], 3)
    #Tokenize as files
    files = get_text_files(path)
    tokenize_files(files, path, output_dir=path/'d')
    for f in files:
        test_eq((path/'d'/f.relative_to(path)).read_text(), ' '.join([
            BOS, TK_MAJ, 'this', 'is', 'an', 'example', 'of', 'text', f.parent.name, f.name[4]]))
    #Tokenize as individual texts
    out = tokenize_texts(df['text'].values)
    test_eq(out, [(outp/d/f'text{i}.txt').read_text().split(' ') for i in range(5) for d in ['a', 'b', 'c']])
    #Tokenize as a dataframe
    out,cnt_b = tokenize_df(df, text_cols='text')
    test_eq(list(out.columns), ['label', 'text', 'text_length'])
    test_eq(out['label'].values, df['label'].values)
    test_eq(list(out['text']), [(outp/d/f'text{i}.txt').read_text().split(' ') for i in range(5) for d in ['a', 'b', 'c']])
    test_eq(cnt_a, cnt_b)
    #Tokenize as a csv
    out_fname = Path(tmp_d)/'output.csv'
    tokenize_csv(csv_fname, text_cols='text', outname=out_fname)
    a,b = load_tokenized_csv(out_fname)
    test_eq((out,cnt_b), load_tokenized_csv(out_fname))
```
## `Tokenizer`
```
#export
class Tokenizer(Transform):
    "Provides a consistent `Transform` interface to tokenizers operating on `DataFrame`s and folders"
    input_types = (str, list, L, tuple, Path)
    def __init__(self, tok, rules=None, counter=None, lengths=None, mode=None, sep=' '):
        # Allow passing a tokenizer class instead of an instance.
        if isinstance(tok,type): tok=tok()
        store_attr('tok,counter,lengths,mode,sep')
        self.rules = defaults.text_proc_rules if rules is None else rules
    @classmethod
    @delegates(tokenize_df, keep=True)
    def from_df(cls, text_cols, tok=None, rules=None, sep=' ', **kwargs):
        "Alternate constructor for `DataFrame` inputs; tokenization is deferred to `setups`."
        if tok is None: tok = WordTokenizer()
        res = cls(tok, rules=rules, mode='df')
        res.kwargs,res.train_setup = merge({'tok': tok}, kwargs),False
        res.text_cols,res.sep = text_cols,sep
        return res
    @classmethod
    @delegates(tokenize_folder, keep=True)
    def from_folder(cls, path, tok=None, rules=None, **kwargs):
        "Alternate constructor that tokenizes every text file under `path` up front."
        path = Path(path)
        if tok is None: tok = WordTokenizer()
        output_dir = tokenize_folder(path, tok=tok, rules=rules, **kwargs)
        res = cls(tok, counter=load_pickle(output_dir/fn_counter_pkl),
                  lengths=load_pickle(output_dir/fn_lengths_pkl), rules=rules, mode='folder')
        res.path,res.output_dir = path,output_dir
        return res
    def setups(self, dsets):
        # In 'df' mode, tokenize the dataframe in place and keep the counter.
        if not self.mode == 'df' or not isinstance(dsets.items, pd.DataFrame): return
        dsets.items,count = tokenize_df(dsets.items, self.text_cols, rules=self.rules, **self.kwargs)
        if self.counter is None: self.counter = count
        return dsets
    def encodes(self, o:Path):
        if self.mode=='folder' and str(o).startswith(str(self.path)):
            # Pre-tokenized file: read the cached tokens instead of re-tokenizing.
            tok = self.output_dir/o.relative_to(self.path)
            return L(tok.read_text(encoding='UTF-8').split(' '))
        else: return self._tokenize1(o.read_text())
    def encodes(self, o:str): return self._tokenize1(o)
    def _tokenize1(self, o): return first(self.tok([compose(*self.rules)(o)]))
    def get_lengths(self, items):
        "Return the token length of each item in `items`, or `None` when unknown."
        if self.lengths is None: return None
        if self.mode == 'df':
            # FIX: the column created by `tokenize_df` is 'text_length'
            # (f'{tok_text_col}_length'); the previous check looked for
            # 'text_lengths' and therefore never matched.
            if isinstance(items, pd.DataFrame) and 'text_length' in items.columns: return items['text_length'].values
        if self.mode == 'folder':
            try:
                res = [self.lengths[str(Path(i).relative_to(self.path))] for i in items]
                if len(res) == len(items): return res
            except: return None
    def decodes(self, o): return TitledStr(self.sep.join(o))
# Smoke test: the same `Tokenizer` transform works both from a folder of text
# files and from a DataFrame column inside `Datasets`.
with tempfile.TemporaryDirectory() as tmp_d:
    path,df,csv_fname = _prepare_texts(Path(tmp_d))
    items = get_text_files(path)
    splits = RandomSplitter()(items)
    dsets = Datasets(items, [Tokenizer.from_folder(path)], splits=splits)
    print(dsets.train[0])
    dsets = Datasets(df, [Tokenizer.from_df('text')], splits=splits)
    print(dsets.train[0][0].text)
    tst = test_set(dsets, ['This is a test', 'this is another test'])
    # `test_set` must apply the same processing rules (xxbos/xxmaj specials) as training
    test_eq(tst, [(['xxbos', 'xxmaj', 'this','is','a','test'],),
                  (['xxbos','this','is','another','test'],)])
```
## Sentencepiece
```
#export
# ISO 639-1 codes; used below to pick a higher SentencePiece character coverage
eu_langs = ["bg", "cs", "da", "de", "el", "en", "es", "et", "fi", "fr", "ga", "hr", "hu",
            "it","lt","lv","mt","nl","pl","pt","ro","sk","sl","sv"] # all European langs
#export
class SentencePieceTokenizer():#TODO: pass the special tokens symbol to sp
    "SentencePiece tokenizer for `lang`"
    def __init__(self, lang='en', special_toks=None, sp_model=None, vocab_sz=None, max_vocab_sz=30000,
                 model_type='unigram', char_coverage=None, cache_dir='tmp'):
        try: from sentencepiece import SentencePieceTrainer,SentencePieceProcessor
        except ImportError:
            raise Exception('sentencepiece module is missing: run `pip install sentencepiece!=0.1.90,!=0.1.91`')
        self.sp_model,self.cache_dir = sp_model,Path(cache_dir)
        self.vocab_sz,self.max_vocab_sz,self.model_type = vocab_sz,max_vocab_sz,model_type
        # European languages get a slightly higher default character coverage.
        self.char_coverage = ifnone(char_coverage, 0.99999 if lang in eu_langs else 0.9998)
        self.special_toks = ifnone(special_toks, defaults.text_spec_tok)
        if sp_model is None: self.tok = None  # trained lazily in `setup`
        else:
            self.tok = SentencePieceProcessor()
            self.tok.Load(str(sp_model))
        os.makedirs(self.cache_dir, exist_ok=True)

    def _get_vocab_sz(self, raw_text_path):
        "Heuristic vocab size: distinct tokens // 4, rounded up to a multiple of 8, at least 29, capped at `max_vocab_sz`"
        cnt = Counter()
        # Stream the file line by line (instead of `readlines()`) to keep memory flat,
        # and read explicitly as UTF-8 for cross-platform consistency.
        with open(raw_text_path, 'r', encoding='utf-8') as f:
            for line in f:
                cnt.update(line.split())
        res = len(cnt)//4
        if res > self.max_vocab_sz: return self.max_vocab_sz
        res = ((res + 7)//8)*8  # round up to the next multiple of 8 (replaces the increment loop)
        return max(res,29)

    def train(self, raw_text_path):
        "Train a sentencepiece tokenizer on `texts` and save it in `path/tmp_dir`"
        from sentencepiece import SentencePieceTrainer
        vocab_sz = self._get_vocab_sz(raw_text_path) if self.vocab_sz is None else self.vocab_sz
        spec_tokens = ['\u2581'+s for s in self.special_toks]  # U+2581 is SentencePiece's word-boundary mark
        SentencePieceTrainer.Train(" ".join([
            f"--input={raw_text_path} --vocab_size={vocab_sz} --model_prefix={self.cache_dir/'spm'}",
            f"--character_coverage={self.char_coverage} --model_type={self.model_type}",
            f"--unk_id={len(spec_tokens)} --pad_id=-1 --bos_id=-1 --eos_id=-1 --minloglevel=2",
            f"--user_defined_symbols={','.join(spec_tokens)} --hard_vocab_limit=false"]))
        raw_text_path.unlink()  # the dumped text file is only needed during training
        return self.cache_dir/'spm.model'

    def setup(self, items, rules=None):
        "Dump `items` (with `rules` applied) to a text file and train a model on it, unless one is already loaded"
        from sentencepiece import SentencePieceProcessor
        if rules is None: rules = []
        if self.tok is not None: return {'sp_model': self.sp_model}
        raw_text_path = self.cache_dir/'texts.out'
        # Write as UTF-8 explicitly so training input matches the UTF-8 read above.
        with open(raw_text_path, 'w', encoding='utf-8') as f:
            for t in progress_bar(maps(*rules, items), total=len(items), leave=False):
                f.write(f'{t}\n')
        sp_model = self.train(raw_text_path)
        self.tok = SentencePieceProcessor()
        self.tok.Load(str(sp_model))
        return {'sp_model': sp_model}

    def __call__(self, items):
        "Yield subword pieces for each text in `items`, training a model first if needed"
        if self.tok is None: self.setup(items)
        for t in items: yield self.tok.EncodeAsPieces(t)
#export
# Alias: a generic name for the SentencePiece-based subword tokenizer
SubwordTokenizer = SentencePieceTokenizer
# Quick check: train a tiny SentencePiece model on 10 short texts
texts = [f"This is an example of text {i}" for i in range(10)]
df = pd.DataFrame({'text': texts, 'label': list(range(10))}, columns=['text', 'label'])
out,cnt = tokenize_df(df, text_cols='text', tok=SentencePieceTokenizer(vocab_sz=34), n_workers=1)
# End-to-end through `Tokenizer`/`Datasets`, both from a folder and from a DataFrame
with tempfile.TemporaryDirectory() as tmp_d:
    path,df,csv_fname = _prepare_texts(Path(tmp_d))
    items = get_text_files(path)
    splits = RandomSplitter()(items)
    tok = SentencePieceTokenizer(special_toks=[])
    dsets = Datasets(items, [Tokenizer.from_folder(path, tok=tok)], splits=splits)
    print(dsets.train[0][0])
    with warnings.catch_warnings():  # silence warnings raised during df-mode setup
        dsets = Datasets(df, [Tokenizer.from_df('text', tok=tok)], splits=splits)
        print(dsets.train[0][0].text)
```
## Export -
```
#hide
from nbdev.export import notebook2script
notebook2script()
```
| github_jupyter |
Kennitala strings can be converted to the following formats via the `output_format` parameter:
* `compact`: digit strings only, without any separators or whitespace, like "1201743399"
* `standard`: Kennitala strings with proper whitespace in the proper places, like "120174-3399"
Invalid parsing is handled with the `errors` parameter:
* `coerce` (default): invalid parsing will be set to NaN
* `ignore`: invalid parsing will return the input
* `raise`: invalid parsing will raise an exception
The following sections demonstrate the functionality of `clean_is_kennitala()` and `validate_is_kennitala()`.
### An example dataset containing Kennitala strings
```
import pandas as pd
import numpy as np
df = pd.DataFrame(
{
"kennitala": [
"1201743399",
"320174-3399",
"51824753556",
"51 824 753 556",
"hello",
np.nan,
"NULL"
],
"address": [
"123 Pine Ave.",
"main st",
"1234 west main heights 57033",
"apt 1 789 s maple rd manhattan",
"robie house, 789 north main street",
"(staples center) 1111 S Figueroa St, Los Angeles",
"hello",
]
}
)
df
```
## 1. Default `clean_is_kennitala`
By default, `clean_is_kennitala` will clean kennitala strings and output them in the standard format with proper separators.
```
from dataprep.clean import clean_is_kennitala
clean_is_kennitala(df, column = "kennitala")
```
## 2. Output formats
This section demonstrates the output parameter.
### `standard` (default)
```
clean_is_kennitala(df, column = "kennitala", output_format="standard")
```
### `compact`
```
clean_is_kennitala(df, column = "kennitala", output_format="compact")
```
## 3. `inplace` parameter
This deletes the given column from the returned DataFrame.
A new column containing cleaned Kennitala strings is added with a title in the format `"{original title}_clean"`.
```
clean_is_kennitala(df, column="kennitala", inplace=True)
```
## 4. `errors` parameter
### `coerce` (default)
```
clean_is_kennitala(df, "kennitala", errors="coerce")
```
### `ignore`
```
clean_is_kennitala(df, "kennitala", errors="ignore")
```
## 5. `validate_is_kennitala()`
`validate_is_kennitala()` returns `True` when the input is a valid Kennitala. Otherwise it returns `False`.
The input of `validate_is_kennitala()` can be a string, a pandas Series, a Dask Series, a pandas DataFrame, or a Dask DataFrame.
When the input is a string, a pandas Series, or a Dask Series, no column name needs to be specified.
When the input is a pandas DataFrame or a Dask DataFrame, the user may optionally specify a column name. If a column name is given, `validate_is_kennitala()` returns the validation result for that column only; otherwise it returns the validation result for the whole DataFrame.
```
from dataprep.clean import validate_is_kennitala
print(validate_is_kennitala("1201743399"))
print(validate_is_kennitala("320174-3399"))
print(validate_is_kennitala("51824753556"))
print(validate_is_kennitala("51 824 753 556"))
print(validate_is_kennitala("hello"))
print(validate_is_kennitala(np.nan))
print(validate_is_kennitala("NULL"))
```
### Series
```
validate_is_kennitala(df["kennitala"])
```
### DataFrame + Specify Column
```
validate_is_kennitala(df, column="kennitala")
```
### Only DataFrame
```
validate_is_kennitala(df)
```
| github_jupyter |
# ibm_db.active()
## Purpose:
Determine whether the Db2 server or database connection specified is active.
## Syntax:
`bool ibm_db.active( IBM_DBConnection `*`connection`*` )`
## Parameters:
* __*connection*__ : A valid Db2 server or database connection.
## Return values:
* `True` : The Db2 server or database connection specified is active.
* `False` : The Db2 server or database connection specified is not active.
## Description:
The __ibm_db.active()__ API is used to determine if a Db2 server or database connection that was established using the __ibm_db.connect()__ API or __ibm_db.pconnect()__ is active.<p>
## Example:
```
#----------------------------------------------------------------------------------------------#
# NAME: ibm_db-active.py #
# #
# PURPOSE: This program is designed to illustrate how to use the ibm_db.active() API. #
# #
#----------------------------------------------------------------------------------------------#
# DISCLAIMER OF WARRANTIES AND LIMITATION OF LIABILITY #
# #
# (C) COPYRIGHT International Business Machines Corp. 2018, 2019 All Rights Reserved #
# Licensed Materials - Property of IBM #
# #
# US Government Users Restricted Rights - Use, duplication or disclosure restricted by GSA #
# ADP Schedule Contract with IBM Corp. #
# #
# The following source code ("Sample") is owned by International Business Machines #
# Corporation ("IBM") or one of its subsidiaries and is copyrighted and licensed, not sold. #
# You may use, copy, modify, and distribute the Sample in any form without payment to IBM, #
# for the purpose of assisting you in the creation of Python applications using the ibm_db #
# library. #
# #
# The Sample code is provided to you on an "AS IS" basis, without warranty of any kind. IBM #
# HEREBY EXPRESSLY DISCLAIMS ALL WARRANTIES, EITHER EXPRESS OR IMPLIED, INCLUDING, BUT NOT #
# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE. #
# Some jurisdictions do not allow for the exclusion or limitation of implied warranties, so #
# the above limitations or exclusions may not apply to you. IBM shall not be liable for any #
# damages you suffer as a result of using, copying, modifying or distributing the Sample, #
# even if IBM has been advised of the possibility of such damages. #
#----------------------------------------------------------------------------------------------#
# Load The Appropriate Python Modules
import sys # Provides Information About Python Interpreter Constants And Functions
import ibm_db # Contains The APIs Needed To Work With Db2 Databases
#----------------------------------------------------------------------------------------------#
# Import The Db2ConnectionMgr Class Definition, Attributes, And Methods That Have Been Defined #
# In The File Named "ibm_db_tools.py"; This Class Contains The Programming Logic Needed To #
# Establish And Terminate A Connection To A Db2 Server Or Database #
#----------------------------------------------------------------------------------------------#
from ibm_db_tools import Db2ConnectionMgr
#----------------------------------------------------------------------------------------------#
# Import The ipynb_exit Class Definition, Attributes, And Methods That Have Been Defined In #
# The File Named "ipynb_exit.py"; This Class Contains The Programming Logic Needed To Allow #
# "exit()" Functionality To Work Without Raising An Error Or Stopping The Kernel If The #
# Application Is Invoked In A Jupyter Notebook #
#----------------------------------------------------------------------------------------------#
from ipynb_exit import exit
# Define And Initialize The Appropriate Variables
dbName = "SAMPLE"
userID = "db2inst1"
passWord = "Passw0rd"
dbConnection = None

# Create An Instance Of The Db2ConnectionMgr Class And Use It To Connect To A Db2 Database
conn = Db2ConnectionMgr('DB', dbName, '', '', userID, passWord)
conn.openConnection()
if conn.returnCode is True:
    dbConnection = conn.connectionID
else:
    conn.closeConnection()
    exit(-1)

# Determine Whether The Current Database Connection Is Active Or Inactive
# Bug fix: initialize connState so the name exists even if ibm_db.active() raises;
# previously a raised exception left connState undefined and the check below
# failed with a NameError.
connState = None
try:
    connState = ibm_db.active(dbConnection)
except Exception:
    pass  # Fall through with connState = None ("unknown")

# Display A Status Message That Shows The Current Connection State
print("The connection to the " + dbName + " database is ", end="")
if connState is True:
    print("ACTIVE.\n")
elif connState is False:
    print("NOT ACTIVE.\n")
else:
    print("IN AN UNKNOWN STATE.\n")  # ibm_db.active() raised an exception

# Close The Database Connection That Was Opened Earlier
conn.closeConnection()

# Return Control To The Operating System
exit()
```
| github_jupyter |
```
import numpy as np
import matplotlib.pyplot as plt
from sympy import Symbol, integrate
%matplotlib inline
```
### Smooth local paths
We will use cubic spirals to generate smooth local paths. Without loss of generality, as $\theta$ smoothly changes from 0 to 1, we impose a condition on the curvature as follows
$\kappa = f'(x) = K(x(1-x))^n $
This ensures curvature vanishes at the beginning and end of the path. Integrating, the yaw changes as
$\theta = \int_0^x f'(x')dx'$
With $n = 1$ we get a cubic spiral, $n=2$ we get a quintic spiral and so on. Let us use the sympy package to find the family of spirals
1. Declare $x$ a Symbol
2. You want to find Integral of $f'(x)$
3. You can choose $K$ so that all coefficients are integers
Verify if $\theta(0) = 0$ and $\theta(1) = 1$
```
K = 30 #choose for cubic/quintic
n = 2 #choose for cubic (n=1)/ quintic (n=2)
x = Symbol('x') #declare as Symbol
print(integrate(K*(x*(1-x))**n, x)) # complete the expression

def _spiral_equation(gain, power):
    "Symbolically integrate gain * (x (1 - x))^power; power=1 yields a cubic, power=2 a quintic."
    sym = Symbol('x')
    return integrate(gain * (sym * (1 - sym)) ** power, sym)

# Return a cubic curve equation
def cubic_equation(K=6):
    return _spiral_equation(K, 1)

# Return a quintic curve equation
def quintic_equation(K=30):
    return _spiral_equation(K, 2)

print(f"Cubic equation: {cubic_equation(6)}")
print(f"Quintic equation: {quintic_equation(30)}")
```
Another way of doing this is through matrix multiplication
**Cubic Equations**
$f(x) = a x^3 +b x^2 + c x + d$
$f'(x) = 3ax^2 + 2bx + c$
Subjected to constraints $f(0) = 0$, $f(1) = 1$, $f'(0) = 0$ and $f'(1) = 0$
**Quintic Equations**
$f(x) = a x^5 +b x^4 + c x^3 + d x^2 + e x + f$
$f'(x) = 5ax^4 + 4bx^3 + 3cx^2 + 2dx + e$
$f''(x) = 20ax^3 + 12bx^2 + 6cx + 2d$
Subjected to constraints $f(0) = 0$, $f(1) = 1$, $f'(0) = 0$, $f'(1) = 0$, $f''(0) = 0$ and $f''(1) = 0$
```
# Cubic coefficients
# f(x) = a x^3 + b x^2 + c x + d subject to f(0)=0, f(1)=1, f'(0)=0, f'(1)=0.
# Each row of the constraint matrix applies one condition to [a, b, c, d].
cubic_constraints = np.array([
    [0, 0, 0, 1],   # f(0)  = d
    [1, 1, 1, 1],   # f(1)  = a + b + c + d
    [0, 0, 1, 0],   # f'(0) = c
    [3, 2, 1, 0],   # f'(1) = 3a + 2b + c
])
cubic_targets = np.array([0, 1, 0, 0]).reshape((-1, 1))
cubic_coeffs = np.linalg.inv(cubic_constraints) @ cubic_targets
print(f"Cubic coefficients [a,b,c,d]: {cubic_coeffs.reshape((-1))}")

# Quintic coefficients
# f(x) = a x^5 + ... + f with the additional conditions f''(0)=0 and f''(1)=0.
quintic_constraints = np.array([
    [0, 0, 0, 0, 0, 1],    # f(0)
    [1, 1, 1, 1, 1, 1],    # f(1)
    [0, 0, 0, 0, 1, 0],    # f'(0)
    [5, 4, 3, 2, 1, 0],    # f'(1)
    [0, 0, 0, 2, 0, 0],    # f''(0)
    [20, 12, 6, 2, 0, 0],  # f''(1)
])
quintic_targets = np.array([0, 1, 0, 0, 0, 0]).reshape((-1, 1))
quintic_coeffs = np.linalg.inv(quintic_constraints) @ quintic_targets
print(f"Quintic coefficients [a,b,c,d,e,f]: {quintic_coeffs.reshape((-1))}")
```
Now plot these equations
```
# Evaluate both heading polynomials on [0, 1] and plot them for comparison
x = np.linspace(0, 1, num=100)
# Note: `num` controls how many points
thetas = -2*x**3 + 3*x**2  # cubic spiral heading profile
plt.figure()
plt.plot(x, thetas,'.', label="Cubic")
thetas = 6*x**5 - 15*x**4 + 10*x**3  # quintic spiral heading profile
plt.plot(x, thetas,'.', label="Quintic")
plt.legend()
plt.show()
#input can be any theta_i and theta_f (not just 0 and 1)
def cubic_spiral(theta_i, theta_f, n=10):
    "Interpolate heading from `theta_i` to `theta_f` over `n` samples with a cubic polynomial (zero slope at both ends)."
    s = np.linspace(0.0, 1.0, num=n)
    blend = s * s * (3.0 - 2.0 * s)  # same curve as -2 s^3 + 3 s^2, scaled to [0, 1]
    return theta_i + (theta_f - theta_i) * blend
def quintic_spiral(theta_i, theta_f, n=10):
    "Interpolate heading from `theta_i` to `theta_f` over `n` samples with a quintic polynomial (zero slope and curvature at both ends)."
    s = np.linspace(0.0, 1.0, num=n)
    blend = s**3 * (10.0 + s * (6.0 * s - 15.0))  # same curve as 6 s^5 - 15 s^4 + 10 s^3
    return theta_i + (theta_f - theta_i) * blend
```
### Plotting
Plot cubic, quintic spirals along with how $\theta$ will change from $\pi/2$ to $0$ when moving in a circular arc. Remember circular arc is when $\omega $ is constant
```
num_pts = 100
theta_i = np.pi/2
theta_f = 0
# Get the points: linear interpolation (constant-omega circular arc) vs smooth spirals
theta_circle = (theta_f - theta_i) * np.linspace(0, 1, num=num_pts) + theta_i
theta_cubic = cubic_spiral(theta_i, theta_f, n=num_pts)
theta_quintic = quintic_spiral(theta_i, theta_f, n=num_pts)
# Make the plots (data inline): heading value against sample index
plt.figure()
plt.plot(theta_circle, label='Circular') # Theta -> Linear
plt.plot(theta_cubic, label='Cubic')
plt.plot(theta_quintic,label='Quintic')
plt.grid()
plt.legend()
```
## Trajectory
Using the spirals, convert them to trajectories $\{(x_i,y_i,\theta_i)\}$. Remember the unicycle model
$dx = v\cos \theta dt$
$dy = v\sin \theta dt$
$\theta$ is given by the spiral functions you just wrote. Use cumsum() in numpy to calculate {(x_i, y_i)}
What happens when you change $v$?
```
v = 1
dt = 0.02
# Build (x, y) trajectories for the three heading profiles
def ret_spirals(theta_i, theta_f, num_pts):
    "Return (cubic, quintic, circular) dicts of {'x', 'y'} points for paths turning from `theta_i` to `theta_f`."
    def _integrate(headings):
        # Unicycle model: dx = v cos(theta) dt, dy = v sin(theta) dt, accumulated with cumsum
        return {"x": np.cumsum(v * np.cos(headings) * dt),
                "y": np.cumsum(v * np.sin(headings) * dt)}
    ramp = np.linspace(0, 1, num=num_pts)
    circle_headings = theta_i + (theta_f - theta_i) * ramp  # constant angular rate
    cubic_pts = _integrate(cubic_spiral(theta_i, theta_f, n=num_pts))
    quintic_pts = _integrate(quintic_spiral(theta_i, theta_f, n=num_pts))
    circular_pts = _integrate(circle_headings)
    return cubic_pts, quintic_pts, circular_pts
num_pts = int(v/dt)  # presumably ~1 s of motion at speed v — TODO confirm intent
# plot trajectories for circular/ cubic/ quintic for left and right turns
plt.figure()
plt.subplot(1,2,1) # Left turn -> np.pi/2 to np.pi
plt.axis('equal')
plt.title("Left turn")
cubic_pts, quintic_pts, circular_pts = ret_spirals(np.pi/2, np.pi, num_pts)
plt.plot(circular_pts["x"], circular_pts["y"], label='Circular')
plt.plot(cubic_pts["x"], cubic_pts["y"], label='Cubic')
plt.plot(quintic_pts["x"], quintic_pts["y"], label='Quintic')
plt.legend()
plt.grid()
plt.subplot(1,2,2) # Right turn -> np.pi/2 to 0
plt.axis('equal')
plt.title("Right turn")
cubic_pts, quintic_pts, circular_pts = ret_spirals(np.pi/2, 0, num_pts)
plt.plot(circular_pts["x"], circular_pts["y"], label='Circular')
plt.plot(cubic_pts["x"], cubic_pts["y"], label='Cubic')
plt.plot(quintic_pts["x"], quintic_pts["y"], label='Quintic')
plt.legend()
plt.grid()
```
## Symmetric poses
We have been doing only examples with $|\theta_i - \theta_f| = \pi/2$.
What about other orientation changes? Given below is an array of terminal angles (they are in degrees!). Start from 0 deg and plot the family of trajectories
```
dt = 0.1
thetas = np.deg2rad([15, 30, 45, 60, 90, 120, 150, 180]) #convert to radians
plt.figure()
# Family of cubic-spiral paths starting from heading 0
for tf in thetas:
    t = cubic_spiral(0, tf,50)
    # Unicycle integration (unit speed): accumulate cos/sin of the heading
    x = np.cumsum(np.cos(t)*dt)
    y = np.cumsum(np.sin(t)*dt)
    plt.plot(x, y, label=f"{np.rad2deg(tf):.0f}")
# On the same plot, move from 180 to 180 - theta
thetas = np.pi - np.deg2rad([15, 30, 45, 60, 90, 120, 150, 180])
for tf in thetas:
    t = cubic_spiral(np.pi, tf, 50)
    x = np.cumsum(np.cos(t)*dt)
    y = np.cumsum(np.sin(t)*dt)
    plt.plot(x, y, label=f"{np.rad2deg(tf):.0f}")
plt.grid()
plt.legend(bbox_to_anchor=(1.05, 1), loc='upper left')
plt.show()
```
Modify your code to print the following for the positive terminal angles $\{\theta_f\}$
1. Final x, y position in corresponding trajectory: $x_f, y_f$
2. $\frac{y_f}{x_f}$ and $\tan \frac{\theta_f}{2}$
What do you notice?
What happens when $v$ is doubled?
```
dt = 0.05
v = 2.0
thetas = np.deg2rad([15, 30, 45, 60, 90, 120, 150, 180]) #convert to radians
for tf in thetas:
    t = cubic_spiral(0, tf,100)
    # Unicycle integration of the heading profile at speed v
    x = np.cumsum(v*np.cos(t)*dt)
    y = np.cumsum(v*np.sin(t)*dt)
    # Compare yf/xf against tan(theta_f/2): they should match for symmetric poses
    print(f"tf:{np.rad2deg(tf):0.1f} xf:{x[-1]:0.3f} yf:{y[-1]:0.3f} yf/xf:{y[-1]/x[-1]:0.3f} tan(theta/2):{np.tan(tf/2):0.3f}")
```
These are called *symmetric poses*. With this spiral-fitting approach, only symmetric poses can be reached.
In order to move between any 2 arbitrary poses, you will have to find an intermediate pose that is pair-wise symmetric to the start and the end pose.
What should be the intermediate pose? There are infinite possibilities. We would have to formulate it as an optimization problem. As they say, that has to be left for another time!
```
```
| github_jupyter |
# Instructions:
## Please save a copy of this notebook to your google drive and answer the questions below.
## Once completed please submit your notebook to the following [GitHub Repo](https://github.com/7-gate-academy-ml-program/Synopsis)
### Name: Kohei Suzuki
# 1.) What is the difference between Classification and Regression?
### Classification tasks
The basic idea of classification tasks is to predict labels of given data. So basically, the outputs of classification tasks are discrete values which are corresponding to each class.
As an example of common loss functions for classification tasks: **Cross Entropy Loss**
<br>
<br>
$$
CrossEntropyLoss = -\sum_{i=1}^{n}(y_i log(\hat y_i) + (1-y_i)log(1-\hat y_i))
$$
Where
- $n$ is the number of data points
- $y_i$ is the actual value of i th data point
- $\hat y_i$ is the predicted value for i th data point
### Regression tasks
The main focus of regression tasks is function approximation. So basically, the outputs of regression tasks are continuous values.
As an example of common loss functions for regression tasks: **Mean Square Error**
<br>
<br>
$$
MSE = \frac{\sum_{i=1}^{n}(y_i - \hat y_i)^2}{n}
$$
Where
- $n$ is the number of data points
- $y_i$ is the actual value of i th data point
- $\hat y_i$ is the predicted value for i th data point
# 2.) What is the Curse of Dimensionality?
To explain what the Curse of Dimensionality is, suppose we have 3 features in our training set, and each feature has 5 kinds of categories.
The table below shows how the feature space grows as the number of features increases.
|The number of features| Total feature space |
|-|-|
|1| 5 |
|2|25|
|3|125|
**As the number of features increases, the more total feature space grows exponentially.**
### How do we tackle the Curse of Dimensionality then?
1. Feature selection
The idea of feature selection is to take a look at each feature we have in our training set and get rid of some features that we think less necessity than others.
<br><br>
2. Feature creation
The idea of feature creation is to combine some features in our training set which are similar or sort of related to each other into one feature.
# 3.) What is Cross Validation?
Using K-Folds Cross Validation generally results in a less biased model compared to other methods, because across the folds every observation in our training set appears in both the training and the validation sets.
As an example of K-Folds Cross Validation, I created a table which shows how the cross validation works.
I split our training set into 5 chunks; `x` means that the chunk is being used as training data, and `o` means that the chunk is being used as validation data.
So let's take a look at the table now.
|$\quad$|chunk1|chunk2|chunk3|chunk4|chunk5|
|-|-|-|-|-|-|
|Iteration1|o|x|x|x|x|
|Iteration2|x|o|x|x|x|
|Iteration3|x|x|o|x|x|
|Iteration4|x|x|x|o|x|
|Iteration5|x|x|x|x|o|
At the first iteration, we use `chunk2`, `chunk3`, `chunk4`, and `chunk5` as the training data, which means we train a model on them, and at the end of epoch (iteration1), we validate the model by using `chunk1` as validation data.
Then iterate the process until every chunk is used as the validation data.
# 4.) On a high level how do Decision Trees work?
Decision trees build classification or regression models in the form of a tree structure. And the idea of the Decision trees is that we want to narrow down possibilities.
The deeper the tree, the more complex the decision rules, and the fitter the model.
At the first, A decision tree tries to find the best attribute in our dataset to split the dataset and to create the top if-else node, which is called the root node.
After creating the node, it continues creating if-else nodes until it creates leaf nodes, which are the decisions that we should take.
# 5.) In regards to SVMs what is the Kernel Trick?
The kernel trick is used when the given $N$-dimensional data points are not linearly separable.
In that case, we map the data points into a higher-dimensional space where they may become linearly separable.
We need not compute the exact transformation of our data; we just need the inner product of our data in that higher-dimensional space.
| github_jupyter |
## Dependencies
```
import json, glob
from tweet_utility_scripts import *
from tweet_utility_preprocess_roberta_scripts_aux import *
from transformers import TFRobertaModel, RobertaConfig
from tokenizers import ByteLevelBPETokenizer
from tensorflow.keras import layers
from tensorflow.keras.models import Model
```
# Load data
```
# Load the Kaggle test split; `display` is a notebook builtin
test = pd.read_csv('/kaggle/input/tweet-sentiment-extraction/test.csv')
print('Test samples: %s' % len(test))
display(test.head())
```
# Model parameters
```
input_base_path = '/kaggle/input/113-robertabase/'
# Training-time configuration saved alongside the fine-tuned weights
with open(input_base_path + 'config.json') as json_file:
    config = json.load(json_file)
config
# vocab_path = input_base_path + 'vocab.json'
# merges_path = input_base_path + 'merges.txt'
# Tokenizer vocab/merges and the pretrained backbone come from a separate dataset
base_path = '/kaggle/input/qa-transformers/roberta/'
vocab_path = base_path + 'roberta-base-vocab.json'
merges_path = base_path + 'roberta-base-merges.txt'
config['base_model_path'] = base_path + 'roberta-base-tf_model.h5'
config['config_path'] = base_path + 'roberta-base-config.json'
# All .h5 checkpoints found here are ensembled — presumably one per fold; TODO confirm
model_path_list = glob.glob(input_base_path + '*.h5')
model_path_list.sort()
print('Models to predict:')
print(*model_path_list, sep = '\n')
```
# Tokenizer
```
# Byte-level BPE tokenizer built from the RoBERTa vocab/merges files
tokenizer = ByteLevelBPETokenizer(vocab_file=vocab_path, merges_file=merges_path,
                                  lowercase=True, add_prefix_space=True)
```
# Pre process
```
# Basic cleanup: replace NaNs with empty strings, lowercase, strip whitespace
test['text'].fillna('', inplace=True)
test['text'] = test['text'].apply(lambda x: x.lower())
test['text'] = test['text'].apply(lambda x: x.strip())
x_test, x_test_aux, x_test_aux_2 = get_data_test(test, tokenizer, config['MAX_LEN'], preprocess_fn=preprocess_roberta_test)
```
# Model
```
module_config = RobertaConfig.from_pretrained(config['config_path'], output_hidden_states=False)
def model_fn(MAX_LEN):
    "Build the RoBERTa span-extraction model: two token inputs, shared dropout, and softmax start/end heads."
    ids_in = layers.Input(shape=(MAX_LEN,), dtype=tf.int32, name='input_ids')
    mask_in = layers.Input(shape=(MAX_LEN,), dtype=tf.int32, name='attention_mask')

    backbone = TFRobertaModel.from_pretrained(config['base_model_path'], config=module_config, name="base_model")
    last_hidden_state, _ = backbone({'input_ids': ids_in, 'attention_mask': mask_in})
    shared = layers.Dropout(.1)(last_hidden_state)

    # One scalar logit per token, flattened to a sequence-length vector per head
    start_logits = layers.Flatten()(layers.Dense(1)(shared))
    y_start = layers.Activation('softmax', name='y_start')(start_logits)
    end_logits = layers.Flatten()(layers.Dense(1)(shared))
    y_end = layers.Activation('softmax', name='y_end')(end_logits)

    return Model(inputs=[ids_in, mask_in], outputs=[y_start, y_end])
```
# Make predictions
```
NUM_TEST_IMAGES = len(test)
# Accumulators for the summed (ensembled) start/end token scores
test_start_preds = np.zeros((NUM_TEST_IMAGES, config['MAX_LEN']))
test_end_preds = np.zeros((NUM_TEST_IMAGES, config['MAX_LEN']))
for model_path in model_path_list:
    print(model_path)
    model = model_fn(config['MAX_LEN'])
    model.load_weights(model_path)
    test_preds = model.predict(get_test_dataset(x_test, config['BATCH_SIZE']))
    # Sum over checkpoints; the later argmax is unaffected by skipping the mean
    test_start_preds += test_preds[0]
    test_end_preds += test_preds[1]
```
# Post process
```
test['start'] = test_start_preds.argmax(axis=-1)  # most likely start token index
test['end'] = test_end_preds.argmax(axis=-1)      # most likely end token index
test['selected_text'] = test.apply(lambda x: decode(x['start'], x['end'], x['text'], config['question_size'], tokenizer), axis=1)
# Post-process
test['text_wordCnt'] = test['text'].apply(lambda x : len(x.split(' ')))
# Heuristics: neutral tweets and very short tweets answer with the full text
test.loc[test['sentiment'] == 'neutral', 'selected_text'] = test["text"]
test.loc[test['text_wordCnt'] <= 3, 'selected_text'] = test["text"]
# Keep only words that actually occur in the original tweet
test["selected_text"] = test.apply(lambda x: ' '.join([word for word in x['selected_text'].split() if word in x['text'].split()]), axis=1)
# Fall back to the full text when the cleaned selection is empty or missing
test['selected_text'] = test.apply(lambda x: x['text'] if (x['selected_text'] == '') else x['selected_text'], axis=1)
test['selected_text'].fillna(test['text'], inplace=True)
```
# Visualize predictions
```
# Length, word-count, token-count and Jaccard statistics for sanity-checking predictions
test['text_len'] = test['text'].apply(lambda x : len(x))
test['label_len'] = test['selected_text'].apply(lambda x : len(x))
test['text_wordCnt'] = test['text'].apply(lambda x : len(x.split(' ')))
test['label_wordCnt'] = test['selected_text'].apply(lambda x : len(x.split(' ')))
test['text_tokenCnt'] = test['text'].apply(lambda x : len(tokenizer.encode(x).ids))
test['label_tokenCnt'] = test['selected_text'].apply(lambda x : len(tokenizer.encode(x).ids))
test['jaccard'] = test.apply(lambda x: jaccard(x['text'], x['selected_text']), axis=1)
display(test.head(10))
display(test.describe())
```
# Test set predictions
```
# Write the Kaggle submission file
# NOTE(review): assumes the sample submission rows align with `test` — confirm
submission = pd.read_csv('/kaggle/input/tweet-sentiment-extraction/sample_submission.csv')
submission['selected_text'] = test['selected_text']
submission.to_csv('submission.csv', index=False)
submission.head(10)
```
| github_jupyter |
<img src="http://xarray.pydata.org/en/stable/_static/dataset-diagram-logo.png" align="right" width="30%">
# Dask e Xarray para computação paralela
Este notebook demonstra um dos recursos mais poderosos do xarray: a capacidade
de trabalhar em sintonia com matrizes dask e facilmente permitir que os usuários executem o código de análise em paralelo.
Até o final deste notebook, veremos:
1. Que as estruturas de dados Xarray `DataArray` e `Dataset` são parte das coleções Dask, isso é, podemos executar as funções de alto nível Dask como `dask.visualize(xarray_object)`;
2. Que todas as operações integradas do xarray podem usar o dask de forma transparente;
3. Que o Xarray fornece ferramentas para paralelizar facilmente funções personalizadas em blocos de objetos xarray apoiados em dask.
## Conteúdo
1. [Lendo dados com Dask e Xarray](#Lendo-dados-com-Dask-e-Xarray)
2. [Computação paralela/streaming/lazy usando dask.array com Xarray](#Computação-paralela/streaming/lazy-usando-dask.array-com-Xarray)
3. [Paralelização automática com apply_ufunc e map_blocks](#Paralelização-automática-com-apply_ufunc-e-map_blocks)
Primeiro, vamos fazer as importações necessárias, iniciar um cluster dask e testar o painel
```
import expectexception
import numpy as np
import xarray as xr
```
Primeiro, vamos configurar um `LocalCluster` usando` dask.distributed`.
Você pode usar qualquer tipo de cluster dask. Esta etapa é completamente independente de
xarray.
```
from dask.distributed import Client
client = Client()
client
```
<p>👆</p> Clique no link Dashboard acima.
Vamos testar se o painel está funcionando.
```
import dask.array

# Trigger a tiny distributed computation so that task activity shows up
# on the dashboard.
dask.array.ones(
    (1000, 4), chunks=(2, 1)
).compute()  # we should see activity on the dashboard
```
<a id='readwrite'></a>
## Lendo dados com Dask e Xarray
O argumento `chunks` para `open_dataset` e `open_mfdataset` permite que você leia conjuntos de dados como matrizes dask. Veja https://xarray.pydata.org/en/stable/dask.html#reading-and-writing-data para mais
detalhes.
```
ds = xr.tutorial.open_dataset(
    "air_temperature",
    chunks={
        "lat": 25,
        "lon": 25,
        "time": -1,  # -1 means: keep the whole "time" dimension in one chunk
    },  # this tells xarray to open the dataset as a dask array
)
ds
```
A representação para o DataArray `air` inclui agora também a representação dask.
```
ds.air
ds.air.chunks
```
**Dica**: Todas as variáveis em um `Dataset` _não_ necessariamente precisam ter o mesmo tamanho de blocos ao longo dimensões comuns.
```
mean = ds.air.mean("time")  # no activity on the dashboard: the mean is lazy
mean  # contains a dask array
```
Isso é verdadeiro para todas as operações de xarray, incluindo *slicing*
```
ds.air.isel(lon=1, lat=20)
```
e operações mais complicadas...
<a id='compute'></a>
## Computação paralela/*streaming*/*lazy* usando dask.array com Xarray
O Xarray envolve o dask perfeitamente para que todos os cálculos sejam adiados até que explicitamente requeridos:
```
mean = ds.air.mean("time")  # no activity on the dashboard: the mean is lazy
mean  # contains a dask array
```
Isso é verdadeiro para todas as operações de xarray, incluindo seleção em fatias
```
timeseries = (
    ds.air.rolling(time=5).mean().isel(lon=1, lat=20)
)  # no activity on the dashboard: still lazy
timeseries  # contains a dask array

timeseries = ds.air.rolling(time=5).mean()  # no activity on the dashboard
timeseries  # contains a dask array
```
### Obtendo valores concretos de arrays dask
Em algum ponto, você desejará realmente obter valores concretos do dask.
Existem duas maneiras de calcular valores em matrizes dask. Esses valores concretos são
geralmente matrizes NumPy, mas podem ser uma matriz `pydata/sparse`, por exemplo.
1. `.compute()` retorna um objeto xarray;
2. `.load()` substitui a matriz dask no objeto xarray por uma matriz numpy. Isso é equivalente a `ds = ds.compute()`.
```
computed = mean.compute()  # activity on the dashboard
computed  # now contains concrete NumPy values
```
Observe que `mean` ainda contém uma matriz dask
```
mean
```
Mas se chamarmos `.load()`, `mean` agora conterá uma matriz numpy
```
mean.load()
```
Vamos verificar outra vez...
```
mean
```
**Dica:** `.persist()` carrega os valores na RAM distribuída. Isso é útil se
você usará repetidamente um conjunto de dados para computação, mas é muito grande para
carregar na memória local. Você verá uma tarefa persistente no painel.
Veja https://docs.dask.org/en/latest/api.html#dask.persist para mais detalhes.
### Extraindo dados subjacentes: `.values` vs` .data`
Existem duas maneiras de extrair os dados subjacentes em um objeto xarray.
1. `.values` sempre retornará uma matriz NumPy. Para objetos xarray apoiados em dask,
isso significa que compute sempre será chamado;
2. `.data` retornará uma matriz Dask.
#### Exercício
Tente extrair um array dask de `ds.air`.
```
# Seu código aqui
```
Agora extraia um array NumPy de `ds.air`. Você vê atividade de computação em seu
painel de controle?
## Estruturas de dados Xarray são coleções dask de primeira classe.
Isso significa que você pode fazer coisas como `dask.compute(xarray_object)`,
`dask.visualize(xarray_object)`, `dask.persist(xarray_object)`. Isso funciona para
DataArrays e Datasets.
#### Exercício
Visualize o gráfico de tarefas para `média`.
```
# Seu código aqui
```
Visualize o gráfico de tarefas para `mean.data`. É igual ao gráfico ao acima?
```
# Seu código aqui
```
## Paralelização automática com apply_ufunc e map_blocks
Quase todas as operações integradas do xarray funcionam em arrays Dask.
Às vezes, a análise exige funções que não estão na API do xarray (por exemplo, scipy).
Existem três maneiras de aplicar essas funções em paralelo em cada bloco de seu
objeto xarray:
1. Extraia arrays Dask de objetos xarray (`.data`) e use Dask diretamente, por exemplo,
(`Apply_gufunc`, `map_blocks`,` map_overlap` ou `blockwise`);
2. Use `xarray.apply_ufunc()` para aplicar funções que consomem e retornam matrizes NumPy;
3. Use `xarray.map_blocks()`, `Dataset.map_blocks()` ou `DataArray.map_blocks()` para aplicar funções que consomem e retornam objetos xarray.
O método que você usa depende basicamente do tipo de objetos de entrada esperados pela função que você está envolvendo e o nível de desempenho ou conveniência que você deseja.
### `map_blocks`
`map_blocks` é inspirado na função `dask.array` de mesmo nome e permite você mapear uma função em blocos do objeto xarray (incluindo Datasets).
No tempo de _computação_, sua função receberá um objeto Xarray com valores concretos
(calculados) junto com os metadados apropriados. Esta função deve retornar um objeto xarray.
Aqui está um exemplo:
```
def time_mean(obj):
    """Reduce one block of an xarray object over the ``lat`` dimension.

    This runs at compute time with concrete (loaded) values, so the full
    convenient xarray API is available here — e.g. convert to a pandas
    dataframe and use the extensive pandas API, or call .plot() and
    plt.savefig to save figures to disk in parallel.
    """
    return obj.mean("lat")


# Lazy: only builds the task graph, nothing is computed yet.
ds.map_blocks(time_mean)

# This computes the values and returns True if the blocked computation
# matches the plain reduction.
ds.map_blocks(time_mean).identical(ds.mean("lat"))
```
#### Exercício
Tente aplicar a seguinte função com `map_blocks`. Especifique `scale` como um
argumento e `offset` como um kwarg.
A docstring pode ajudar:
https://xarray.pydata.org/en/stable/generated/xarray.map_blocks.html
```python
def time_mean_scaled(obj, scale, offset):
    """Mean over ``lat`` followed by a linear transform: mean * scale + offset."""
    return obj.mean("lat") * scale + offset
```
#### Funções mais avançadas
`map_blocks` precisa saber _exatamente_ como o objeto retornado se parece.
A função faz isso passando um objeto xarray de formato "0" para a função e examinando o
resultado. Essa abordagem pode não funcionar em todos os casos. Para esses casos de uso avançados, `map_blocks` permite um kwarg` template`.
Veja
https://xarray.pydata.org/en/latest/dask.html#map-blocks para mais detalhes.
### apply_ufunc
`Apply_ufunc` é um wrapper mais avançado que é projetado para aplicar funções
que esperam e retornam NumPy (ou outras matrizes). Por exemplo, isso incluiria
toda a API do SciPy. Uma vez que `apply_ufunc` opera em NumPy ou objetos Dask, ele ignora a sobrecarga de usar objetos Xarray, tornando-o uma boa escolha para funções de desempenho crítico.
`Apply_ufunc` pode ser um pouco complicado de acertar, pois opera em um nível mais baixo
do que `map_blocks`. Por outro lado, o Xarray usa `apply_ufunc` internamente
para implementar muito de sua API, o que significa que é bastante poderoso!
### Um exemplo simples
Funções simples que atuam independentemente em cada valor devem funcionar sem qualquer
argumentos adicionais. No entanto, o manuseio do `dask` precisa ser explicitamente habilitado
```
%%expect_exception
# Element-wise function applied to a dask-backed DataArray: this raises
# because dask handling must be enabled explicitly via the `dask=` kwarg.
squared_error = lambda x, y: (x - y) ** 2

xr.apply_ufunc(squared_error, ds.air, 1)
```
Existem duas opções para o kwarg `dask`:
1. `dask = "allowed"` (permitido): Arrays Dask são passados para a função do usuário. Essa é uma boa escolha se sua função pode lidar com arrays dask e não chamará compute explicitamente.
2. `dask = "parallelized"` (paralelizado). Isso aplica a função do usuário sobre os blocos do dask array usando `dask.array.blockwise`. Isso é útil quando sua função não pode lidar com matrizes dask nativamente (por exemplo, API scipy).
Uma vez que `squared_error` pode lidar com arrays dask sem computá-los, especificamos
`dask = "allowed"` (permitido).
```
sqer = xr.apply_ufunc(
    squared_error,
    ds.air,
    1,
    dask="allowed",  # squared_error handles dask arrays without computing them
)
sqer  # dask-backed DataArray! with nice metadata!
```
### Um exemplo mais complicado com uma função compatível com dask
Para usar operações mais complexas que consideram alguns valores de matriz coletivamente,
é importante entender a ideia de **dimensões centrais** do NumPy ao generalizar ufuncs. As dimensões principais são definidas como dimensões que não devem ser
propagadas. Normalmente, eles correspondem às dimensões fundamentais sobre
as quais uma operação é definida, por exemplo, o eixo somado em `np.sum`. Uma boa pista sobre a necessidade de dimensões centrais é a presença de um argumento do `axis` na
função NumPy correspondente.
Com `apply_ufunc`, as dimensões principais são reconhecidas pelo nome e, em seguida, movidas para a última dimensão de quaisquer argumentos de entrada antes de aplicar a função fornecida.
Isso significa que para funções que aceitam um argumento de `axis`, você geralmente precisa para definir `axis = -1`.
Vamos usar `dask.array.mean` como um exemplo de uma função que pode lidar com o dask
arrays e usa um kwarg `axis`:
```
def time_mean(da):
    """Mean over the ``time`` dimension via apply_ufunc + dask.array.mean."""
    return xr.apply_ufunc(
        dask.array.mean,
        da,
        input_core_dims=[["time"]],  # "time" is the core (reduced) dimension
        dask="allowed",  # dask.array.mean handles dask arrays natively
        kwargs={"axis": -1},  # core dimensions are moved to the end
    )


time_mean(ds.air)

# Check against xarray's built-in reduction; should be True.
ds.air.mean("time").identical(time_mean(ds.air))
```
### Paralelizando funções que desconhecem dask
Um recurso muito útil do `apply_ufunc` é a capacidade de aplicar funções arbitrárias
em paralelo a cada bloco. Esta habilidade pode ser ativada usando `dask = "parallelized"`. Novamente, o Xarray precisa de muitos metadados extras, dependendo da função, argumentos extras como `output_dtypes` e `output_sizes` podem ser necessários.
Usaremos `scipy.integrate.trapz` como um exemplo de uma função que não consegue
lidar com matrizes dask e requer uma dimensão central:
```
import scipy as sp
import scipy.integrate

# Does NOT return a dask array: scipy computes eagerly on the input.
sp.integrate.trapz(ds.air.data)
```
#### Exercício
Use `apply_ufunc` para aplicar `sp.integrate.trapz` ao longo do eixo do `tempo` para que
você obtenha o retorno de um array dask. Você precisará especificar `dask = "parallelized"` e `output_dtypes` (uma lista de `dtypes` por variável retornada).
```
# Seu código aqui
```
## Veja mais detalhes
1. https://xarray.pydata.org/en/stable/examples/apply_ufunc_vectorize_1d.html#
2. https://docs.dask.org/en/latest/array-best-practices.html
| github_jupyter |
# 湾区自行车共享分析
说明:[点此查看此文档的英文版本](https://github.com/udacity/data-analyst/tree/master/projects/bike_sharing)
。
## 简介
> **提示**:诸如此类的引用部分可以为如何导航和使用 iPython notebook 提供有用说明。
湾区自行车共享系统([Bay Area Bike Share](http://www.bayareabikeshare.com/))是一家为旧金山、雷德伍德城、帕罗奥多、芒廷维尤和圣荷西的客户提供按需自行车租赁的公司。使用者可在每个城市的各种车站解锁自行车,然后在同城内的任何一个车站还车。使用者可通过按年订购或购买 3 日或 24 小时通票来付费。使用者的出行次数无限制,三十分钟内的行程不收取额外费用;更长行程将产生超时费。
在此项目中,你将以一名数据分析师的身份执行数据的探索性分析。你将了解数据分析过程的两个重要部分:数据整理和探索性数据分析。但是在你开始查看数据前,先思考几个你需要理解的关于自行车共享数据的问题,例如,如果你在 Bay Area Bike Share 工作,你会想要获得什么类型的信息来做出更明智的业务决策?或者你可以思考你是否会成为自行车共享服务的使用者。哪些因素可能会影响你使用此服务的方式?
**问题 1**:至少写下两个你认为可以用数据来回答的问题。
**答案**:将此文本替换为你的回答!
> **提示**:如果你双击此单元格,你会看到文本发生变化,所有样式均被清除。这将允许你编辑此文本块。此文本块使用 [Markdown](http://daringfireball.net/projects/markdown/syntax) 编写,这是一种使用标题、链接、斜体和许多其他选项为文本添加样式的方式。你将在之后的纳米学位课程中了解关于 Markdown 的更多信息。按 shift + Enter 或 Shift + Return 预览此单元格。
## 使用可视化交流数据发现
作为一名数据分析师,有效交流发现结果的能力是这项工作的重要部分。毕竟,你的分析能力再高也得通过好的交流能力去传达。
在 2014 年,Bay Area Bike Share 举行了一项[开放数据挑战](http://www.bayareabikeshare.com/datachallenge-2014),以鼓励数据分析师基于他们的开放数据集创建可视化。你将在这个项目中创建自己的可视化,但在开始之前,请阅读来自“最佳分析奖项”得主 Tyler Field 的[分析报告(英文)](http://thfield.github.io/babs/index.html)。通读整个报告并回答以下问题:
**问题 2**:在你看来,哪种可视化可提供最有趣的见解?你是否能根据 Tyler 的分析回答你在之前提出的任何问题?能或不能的原因是什么?
**答案**:将此文本替换为你的回答!
## 数据整理
现在是时候由你自己来探索数据了。Bay Area Bike Share 的[开放数据](http://www.bayareabikeshare.com/open-data)页中第 1 年和第 2 年的数据已提供在项目资料中;你无需下载任何其他信息。此数据由三个部分组成:第 1 年上半年(从 `201402` 开始的文件),第 1 年下半年(从 `201408` 开始的文件),以及第 2 年全年(从 `201508` 开始的文件)。每个部分关联三个主要数据文件:行程数据(展示系统中每个行程的信息)(`*_trip_data.csv`),系统中车站的信息(`*_station_data.csv`),及系统中每个城市的每日天气数据(`*_weather_data.csv`)。
在处理大量数据时,最好先从数据样本开始。这样更容易检查我们的数据整理步骤是否有效,因为我们完成代码所需的时间将更少。如果我们对整个过程的进展较为满意,那就可以着手整理整个数据集啦。
因为大量的数据包含在行程信息中,我们的目标应该为取行程数据的子集来进行测试。首先我们仅看看第 1 个月的自行车行程数据,从 2013 年 8 月 29 日到 2013 年 9 月 30 日。下面的代码会取第一年上半年的数据,然后将第一个月的数据值写在输出文件上。此代码利用了数据按日期排序的事实(尽管需要指出的是,前两天是按行程时间而非按年月顺序排序)。
首先,运行下方第一个代码单元格来加载你将在分析中使用的所有数据包和函数。然后,运行第二个代码单元格以读取第一个行程数据文件的子集,然后编写一个新文件,其中仅包含我们初步感兴趣的子集。
> **提示**:你可以像格式化 Markdown 单元格那样点击单元格然后使用键盘快捷键 **Shift + Enter** 或 **Shift + Return**,来运行代码单元格。或者,也可以在选中代码单元格后点击工具栏上的 Play 按钮执行它。单元格运行时,你会在单元格左侧的消息中看到一个星号,即 `In [*]:`。在执行完成时,星号将变为一个数字,例如 `In [1]`。如果有输出,将显示 `Out [1]:`,用适当的数字来匹配“In”的数字。
```
# 导入所有需要的包盒函数
import csv
from datetime import datetime
import numpy as np
import pandas as pd
from babs_datacheck import question_3
from babs_visualizations import usage_stats, usage_plot
from IPython.display import display
%matplotlib inline
# 文档地址
file_in = '201402_trip_data.csv'
file_out = '201309_trip_data.csv'
with open(file_out, 'w') as f_out, open(file_in, 'r') as f_in:
# 设置 CSV 读写对象
in_reader = csv.reader(f_in)
out_writer = csv.writer(f_out)
# 从 in-file 向 out-file 写入行,直到遇到特定日期
while True:
datarow = next(in_reader)
# 行程开始日期在第三列,为 m/d/yyyy HH:MM 格式
if datarow[2][:9] == '10/1/2013':
break
out_writer.writerow(datarow)
```
### 精简行程数据
第一步是观察数据集的结构,看看我们是否需要执行任何数据整理。下面的单元格会读取你在之前单元格中创建的抽样数据文件,然后打印出表中的前几行。
```
# Read back the one-month sample created above and show its first rows.
sample_data = pd.read_csv('201309_trip_data.csv')
display(sample_data.head())
```
在这个探索环节,我们将精简出影响出行次数的行程数据中的因素。首先将注意力放在几个选定列:行程持续时间、开始时间、起始车站、终止车站及订购类型。开始时间将分为年、月和小时部分。我们将添加一列作为星期几,并将起始车站和终止车站转变为起始和终止城市。
现在我们来解决整理过程的最后部分。运行下面的代码单元格,看看车站信息的结构,然后观察代码将如何创建车站城市映射。注意车站映射设立为一个函数 `create_station_mapping()`。因为可随时间推移可添加更多车站或进行删除,在我们准备好开始探索时,此函数将允许我们在数据的所有三个部分结合车站信息。
```
# Display the first few rows of the station data file.
station_info = pd.read_csv('201402_station_data.csv')
display(station_info.head())

# This function is called later by another function to create the mapping.
def create_station_mapping(station_data):
    """Map station IDs to their cities.

    Each file in ``station_data`` is parsed with csv.DictReader (the first
    row supplies the column names); entries from later files overwrite
    earlier ones for a duplicated station_id.

    Returns a dict of station_id -> landmark (city name).
    """
    mapping = {}
    for filename in station_data:
        with open(filename, 'r') as handle:
            reader = csv.DictReader(handle)
            mapping.update(
                (record['station_id'], record['landmark']) for record in reader
            )
    return mapping
```
现在你可以使用映射来精简行程数据到上述选定列。这将在下面的 `summarise_data()` 函数中执行。作为此函数的部分,将使用 `datetime` 模块从原始数据文件解析作为 `datetime` 对象 (`strptime`) 的时间戳字符串,该字符串可随后输出为不同的字符串格式 (`strftime`)。解析的对象也有很多属性和方法来快速获取
要完成 `summarise_data()` 函数,你将需要先完成两个任务。首先,你需要执行一个运算将行程持续时间的单位从秒转化为分钟。(一分钟为 60 秒)。第二,你需要为年、月、小时和星期几创建列。你可参阅 [datetime 模块中的 datetime 对象文档](https://docs.python.org/2/library/datetime.html#datetime-objects)。**请找到合适的属性和方法来完成下面的代码**。
```
def summarise_data(trip_in, station_data, trip_out):
    """Condense raw trip files into a summary CSV.

    This function takes trip and station information and outputs a new
    data file with a condensed summary of major trip information.

    Parameters
    ----------
    trip_in : list of str
        Paths of the trip data files to read.
    station_data : list of str
        Paths of the station data files (used to map terminals to cities).
    trip_out : str
        Path to which the summarized data will be written.

    The summary keeps: duration (converted to minutes), start date, start
    year/month/hour, weekday name, start and end city, subscription type.
    """
    # generate dictionary of station - city mapping
    station_map = create_station_mapping(station_data)

    with open(trip_out, 'w') as f_out:
        # set up csv writer object
        out_colnames = ['duration', 'start_date', 'start_year',
                        'start_month', 'start_hour', 'weekday',
                        'start_city', 'end_city', 'subscription_type']
        trip_writer = csv.DictWriter(f_out, fieldnames=out_colnames)
        trip_writer.writeheader()

        for data_file in trip_in:
            with open(data_file, 'r') as f_in:
                # set up csv reader object
                trip_reader = csv.DictReader(f_in)

                # collect data from and process each row
                for row in trip_reader:
                    new_point = {}

                    # convert duration units from seconds to minutes
                    new_point['duration'] = float(row['Duration']) / 60

                    # reformat datestrings into multiple columns
                    trip_date = datetime.strptime(row['Start Date'], '%m/%d/%Y %H:%M')
                    new_point['start_date'] = trip_date.strftime('%Y-%m-%d')
                    new_point['start_year'] = trip_date.strftime('%Y')
                    new_point['start_month'] = trip_date.strftime('%m')
                    new_point['start_hour'] = trip_date.strftime('%H')
                    # full weekday name, e.g. 'Monday'
                    new_point['weekday'] = trip_date.strftime('%A')

                    # remap start and end terminal with start and end city
                    new_point['start_city'] = station_map[row['Start Terminal']]
                    new_point['end_city'] = station_map[row['End Terminal']]

                    # two different column names for subscribers depending on file
                    if 'Subscription Type' in row:
                        new_point['subscription_type'] = row['Subscription Type']
                    else:
                        new_point['subscription_type'] = row['Subscriber Type']

                    # write the processed information to the output file.
                    trip_writer.writerow(new_point)
```
**问题 3**:运行下面的代码块以调用你在上文单元格中完成的 `summarise_data()` 函数。它会提取 `trip_in` 和 `station_data` 变量中所列文件包含的数据,然后在 `trip_out` 变量中指定的位置编写新的文件。如果你正确执行了数据整理,下面的代码块会打印出 `dataframe` 的前几行,并显示一条消息确认数据点计数是正确的。
```
# Process the one-month sample by running the function we wrote above.
station_data = ['201402_station_data.csv']
trip_in = ['201309_trip_data.csv']
trip_out = '201309_trip_summary.csv'
summarise_data(trip_in, station_data, trip_out)

# Load in the data file and print out the first few rows.
sample_data = pd.read_csv(trip_out)
display(sample_data.head())

# Verify the dataframe by counting data points matching each of the time features.
question_3(sample_data)
```
> **提示**:如果你保存了 jupyter Notebook,运行数据块的输出也将被保存。但是,你的工作空间的状态会在每次开启新会话时重置。请确保你从之前的会话中运行了所有必要的代码块,以在继续上次中断的工作前重建变量和函数。
## 探索性数据分析
现在你已在一个文件中保存了一些数据,那么我们来看看数据的某些初步趋势。`babs_visualizations.py` 脚本中已编写了一些代码,用来帮助你汇总和可视化数据;它们已导出为函数 `usage_stats()` 和 `usage_plot()`。在此部分,我们将了解这些函数的一些用途,你将在项目的最后部分自行使用这些函数。首先,运行以下单元格来加载数据,然后使用 `usage_stats()` 函数查看该服务运营的第一个月的总行程数,以及关于行程持续时间的一些统计数据。
```
trip_data = pd.read_csv('201309_trip_summary.csv')
usage_stats(trip_data)
```
你会看到第一个月共有超过 27,000 次行程,且平均行程持续时间大于行程持续时间中值(即 50% 的行程短于它,而 50% 的行程长于它的点)。事实上,平均值大于 75% 的最短持续时间。这个现象非常有意思,我们稍后再看。
首先我们来看看这些行程如何按订购类型区分。要对数据进行直观的了解,一个简单的方式是将它绘制成图。为此我们将使用 `usage_plot()` 函数。这个函数的第二个参数允许我们算出选定变量的行程的总数,在一个图中显示信息。下面的表达式将展示共有多少客户和订购者行程。现在就来试试吧!
```
usage_plot(trip_data, 'subscription_type')
```
看起来在第一个月,订购者的行程比客户的行程多大约 50%。现在我们来尝试一个不同的变量。来看看行程的持续时间状况如何?
```
usage_plot(trip_data, 'duration')
```
看起来挺奇怪的,不是吗?看看 x 轴的持续时间值。大多数骑行时间都是 30 分钟或更少,因为单个行程的额外时间要收取超时费。第一个柱子跨度显示的持续时间达到了约 1000 分钟,或超过 16 个小时。根据我们从 `usage_stats()` 获得的统计数据,某些行程的持续时间非常长,导致平均值远远高于中值:这个图的效果非常夸张,对我们用处不大。
在探索数据时,你经常需要使用可视化函数参数来使数据更易于理解。这里就要用到 `usage_plot()` 函数的第三个参数。可为数据点设置过滤器,作为一系列条件。首先我们限制为不足 60 分钟的行程。
```
usage_plot(trip_data, 'duration', ['duration < 60'])
```
这样看起来就好多啦!你可以看到大多数行程实际上持续时间都不足 30 分钟,但你还可以通过其他方法来使展示效果更好。因为最短持续时间非 0,左侧的柱子稍高于 0。我们想要找到 30 分钟的明确边界,这样如果一些柱子尺寸和边界对应某些分钟点时,图上就看起来清晰多了。好消息是你可以使用可选的“boundary”和“bin_width”参数调整图。通过将“boundary”设置为 0,其中一个柱边界(这里为最左侧的柱子)将从 0 开始,而不是最短行程持续时间。以及通过将“bin_width”设为 5,每个柱子将以 5 分钟时间间隔总计时间点。
```
usage_plot(trip_data, 'duration', ['duration < 60'], boundary = 0, bin_width = 5)
```
**问题 4**:哪个 5 分钟行程持续时间显示了最多的出行次数?这个范围内大约有多少次出行?
**答案**:将此文本替换为你的回答!
像这样的视觉调整虽然较小,但是却对你理解数据和向他人传达你的发现大有帮助。
## 自己执行分析
现在你已使用数据集的小样本完成了一些探索,是时候更进一步,将所有数据整理到一个文件中并看看你能发现什么趋势。下面的代码将使用与之前一样的 `summarise_data()` 函数来处理数据。在运行下面的单元格后,你便将所有的数据处理到了一个数据文件中。注意该函数在运行时不会显示任何输出,而且要花费较长的时间才能完成,因为你现在使用的数据比之前的样本数据多。
```
# All three chunks of the data: Year 1 first half, Year 1 second half, Year 2.
station_data = ['201402_station_data.csv',
                '201408_station_data.csv',
                '201508_station_data.csv']
trip_in = ['201402_trip_data.csv',
           '201408_trip_data.csv',
           '201508_trip_data.csv']
trip_out = 'babs_y1_y2_summary.csv'

# This function will take in the station data and trip data and
# write out a new data file to the name listed above in trip_out.
# NOTE: this processes the full dataset, so it takes a while to complete.
summarise_data(trip_in, station_data, trip_out)
```
由于 `summarise_data()` 函数已创建了一个独立文件,因此无需再次运行上面的单元格,即使你关掉 notebook 并开启一个新会话。你可以直接在数据集中加载,然后从那里进行探索。
```
trip_data = pd.read_csv('babs_y1_y2_summary.csv')
display(trip_data.head())
```
#### 现在轮到你自己使用 `usage_stats()` 和 `usage_plot()` 探索新数据集,并报告你的发现了!下面是如何使用 `usage_plot()` 函数的一些提示:
- 第一个参数(必须):加载的 dataframe,将从这里分析数据。
- 第二个参数(必须):区分出行次数的变量。
- 第三个参数(可选):数据过滤器,限制将计数的数据点。过滤器应作为一系列条件提供,每个元素应该为采用以下格式的一个字符串:`'<field> <op> <value>'`,使用以下任意一个运算符:>、<、>=、<=、==、!=。数据点必须满足所有条件才能计算在内或可视化。例如,`["duration < 15", "start_city == 'San Francisco'"]` 仅保留起始点为旧金山,且持续时间不足 15 分钟的行程。
如果数据在数值变量上进行拆分(从而创建一个直方图),可使用关键字设置一些附加参数。
- "n_bins" 指定成果图中柱子的数量(默认为 10 条)。
- "bin_width" 指定每个柱子的宽(默认为用数据范围除以柱子的数量)。"n_bins" 和 "bin_width" 不可同时使用。
- "boundary" 指定一个柱边界的位置;另一个柱边界将放在那个值的附近(这可能导致绘制多余的柱子)。此参数可以与 "n_bins" 和 "bin_width" 参数一起使用。
你也可以对 `usage_stats()` 函数添加一些自定义。该函数的第二个参数可用于设置过滤器条件,如同用 `usage_plot()` 设置一样。
```
usage_stats(trip_data)
usage_plot(trip_data)
```
使用上面的函数探索一些不同的变量,并记录你发现的一些趋势。如果你想用其他方式或多个方式探索数据集,可自行创建更多的单元格。
> **提示**: 要向 notebook 添加更多单元格,你可以使用上面的菜单栏中的“在上方插入单元格”和“在下方插入单元格”选项。工具栏中也有添加新单元格的图标,以及用于在文档中上下移动单元格的附加图标。默认情况下,新单元格为代码式;你也可以从单元格菜单或工具栏中的下拉菜单中指定单元格类型(代码式或 Markdown)。
完成探索后,将你认为最有趣的两个可视化复制到下方的单元格中,然后用几句话回答以下问题,说明你的发现及你选择这些数字的原因。确保调整柱子的数量或限制,使它们有效传达数据发现。可自行用从 `usage_stats()` 中生成的任何额外数字进行补充,或放置多个可视化来支持你的观察。
```
# Final Plot 1
usage_plot(trip_data)
```
**问题 5a**:上述可视化有何有趣之处?你为什么选择它
**答案**:将此文本替换为你的回答
```
# Final Plot 2
usage_plot(trip_data)
```
**问题 5b**:上述可视化有何有趣之处?你为什么选择它
**答案**:将此文本替换为你的回答
## 结论
恭喜你完成了此项目!这只是数据分析过程的一个样本:从生成问题、整理数据到探索数据。通常,在数据分析过程的这个点,你可能想要通过执行统计检验或将数据拟合到一个模型进行预测,来对我们的数据得出结论。还有很多可以对数据执行的潜在分析,但仅靠这里所给的有限代码无法完成。除了结果轴上的出行次数,你还可以观察具有影响的其他特征,如行程持续时间。我们还未研究过天气数据与自行车使用之间的联系。
**问题 6**:思考你可以应用数据科学技术的一个话题或兴趣领域。你希望从所选主题中学到什么?
**答案**: 将此文本替换为你的回答!
> **提示**:若想与他人分享我们的分析结果,除了向他们提供 jupyter Notebook (.ipynb) 文件的副本外,我们还可以将 Notebook 输出导出为一种甚至那些未安装 Python 的人都能打开的形式。从左上方的“文件”菜单,前往“下载为”子菜单。然后你可以选择一个可以更普遍查看的格式,例如 HTML (.html) 或 PDF (.pdf)。你可能需要额外软件包或软件来执行这些导出。
| github_jupyter |
In this post we study the Regularized Bayesian Regression model to explore and compare the weight and function space and views of Gaussian Process Regression as described in the book [Gaussian Processes for Machine Learning, Ch 2](http://www.gaussianprocess.org/gpml/chapters/RW2.pdf). We follow this reference very closely (and encourage to read it!). Our main objective is to illustrate the concepts and results through a concrete example. We use [PyMC3](https://docs.pymc.io) to run bayesian sampling.
**References:**
- [Gaussian Processes for Machine Learning](http://www.gaussianprocess.org/gpml/chapters/RW2.pdf), Carl Edward Rasmussen and Christopher K. I. Williams, MIT Press, 2006.
- See [this](https://juanitorduz.github.io/intro_pymc3/) post for an introduction to bayesian methods and PyMC3.
- [Documentation](https://docs.pymc.io/notebooks/GLM-linear.html) of linear regression in PyMC3.
- [Documentation](https://docs.pymc.io/api/distributions/multivariate.html#pymc3.distributions.multivariate.MvNormal) for the multivariate normal distribution in PyMC3.
- [Here](https://stackoverflow.com/questions/52509602/cant-compile-c-program-on-a-mac-after-upgrade-to-mojave) is a Stack Overflow post which can help Mac OS users who might have problems with Theano after upgrading to Mojave.
Let us consider the model:
$$
f(x) = x^T b \quad \text{and} \quad y = f(x) + \varepsilon, \quad \text{with} \quad \varepsilon \sim N(0, \sigma_n^2)
$$
where $x \in \mathbb{R}^d$ is a vector of data and $b \in \mathbb{R}^d$ is the vector of weights (parameters). We assume a bias weight (i.e. intercept) is included in $b$.
## Prepare Notebook
```
import numpy as np
import pymc3 as pm
import seaborn as sns; sns.set()
import matplotlib.pyplot as plt
%matplotlib inline
```
## Generate Sample Data
Let us begin by generating sample data.
```
# Problem dimension (bias + slope) and number of samples.
d = 2
n = 100

# Independent variable as a 1 x n row vector over [0, 1].
x = np.linspace(0, 1, num=n)[np.newaxis, :]

# Design matrix: a row of ones (bias term) stacked on top of x.
X = np.vstack((np.ones((1, n)), x))
```
Now we generate the response variable.
```
# True parameters.
b = np.zeros(d)
## Intercept.
b[0] = 1
## Slope.
b[1] = 3
b = b.reshape(d, 1)

# Error standard deviation.
sigma_n = 0.5
# Errors. NOTE(review): no random seed is set, so every run generates a
# different sample — set one for reproducibility if needed.
epsilon = np.random.normal(loc=0, scale=sigma_n, size=n).reshape([n, 1])

# Noise-free linear signal.
f = np.dot(X.T, b)
# Observed target variable.
y = f + epsilon
```
We visualize the data set.
```
plt.rcParams['figure.figsize'] = (10,7)
fig, ax = plt.subplots()
sns.distplot(epsilon, ax=ax)
ax.set_title('Error Distribution');
fig, ax = plt.subplots()
# Plot raw data.
sns.scatterplot(x=x.T.flatten(), y=y.flatten())
# Plot "true" linear fit.
sns.lineplot(x=x.T.flatten(), y=f.flatten(), color='red', label='true')
ax.set(title='Raw Data')
ax.legend(loc='lower right');
```
## Likelihood
A straightforward calculation shows that the likelihood function is given by
$$
p(y|X, b) =
\prod_{i=1}^{n} p(y_i|x_i, b)
=
\frac{1}{(2\pi \sigma_n^2)^{n/2}} \exp\left(-\frac{1}{2\sigma_n^2}||y - X^T b||^2\right) =
N(X^T b, \sigma_n^2 I)
$$
where $X\in M_{d\times n}(\mathbb{R})$ is the design matrix which has the observations as rows.
## Prior Distribution
We set a multivariate normal distribution with mean zero for the prior of the vector of weights \\(b \sim N(0, \Sigma_p)\\). Here \\(\Sigma_p \in M_{d}(\mathbb{R})\\) denotes the covariance matrix.
```
# Prior mean vector.
mu_0 = np.zeros(d)
# Prior covariance matrix.
# Add small perturbation for numerical stability (keeps the matrix
# strictly positive definite; 1e-12 is negligible next to the entries).
sigma_p = np.array([[2, 1], [1, 2]]) + 1e-12*np.eye(d)
sigma_p
```
Let us sample from the prior distribution to see the level curves (see [this](https://juanitorduz.github.io/multivariate_normal/) post).
```
# Set number of samples.
m = 10000
# Generate samples from the prior N(mu_0, sigma_p).
z = np.random.multivariate_normal(mean=mu_0, cov=sigma_p, size=m)
z = z.T
# Plot the joint density of the two weight components.
sns.jointplot(x=z[0], y=z[1], kind="kde", space=0, color='purple');
```
Note that the ellipse-like level curves are rotated (with respect the natural axis) due the fact that $\Sigma_p$ is not diagonal.
Let us begin by sampling lines from the prior distribution.
```
# Number of samples to select.
m = 200
# Sample weight vectors from the prior distribution.
z_prior = np.random.multivariate_normal(mean=mu_0, cov=sigma_p, size=m)
# Each sampled weight vector defines a line: evaluate all of them on the
# design matrix at once (m x n matrix of line values).
lines_prior = np.dot(z_prior, X)
```
We visualize the sample lines.
```
fig, ax = plt.subplots()
# Loop over the line samples from the prior distribution.
for i in range(0, m):
sns.lineplot(
x=x.T.flatten(),
y=lines_prior[i],
color='purple',
alpha=0.2
)
# Plot raw data.
sns.scatterplot(x=x.T.flatten(), y=y.flatten())
# Plot "true" linear fit.
sns.lineplot(x=x.T.flatten(), y=f.flatten(), color='red', label='true')
ax.set(title='Curves drawn from the prior distribution of the weight vector')
ax.legend(loc='lower right');
```
## Posterior Distribution
Now we want to use the data to find the posterior distribution of the vector of weights. Recall that the posterior is obtained (from Bayes rule) by computing
$$
\text{posterior} =
\frac{\text{likelihood × prior}}{\text{marginal likelihood}}
$$
Concretely,
$$
p(b|y, X) =
\frac{p(y|X, b)p(b)}{p(y|X)}
$$
The marginal likelihood $p(y|X)$, which is independent of $b$, is calculated as
$$
p(y|X) = \int p(y|X, b)p(b) db
$$
### MCMC Sampling with PyMC3
Recall that we do not need to compute \\(p(y|X)\\) directly since we can sample from the posterior distribution using [MCMC](https://en.wikipedia.org/wiki/Markov_chain_Monte_Carlo) sampling. Again, see [this](https://juanitorduz.github.io/intro_pymc3/) post for more details.
```
import theano.tensor as tt

model = pm.Model()

with model:
    # Define prior: multivariate normal over the weight vector.
    beta = pm.MvNormal('beta', mu=mu_0, cov=sigma_p, shape=d)
    # Define likelihood: y ~ N(X^T beta, sigma_n^2 I).
    likelihood = pm.Normal('y', mu=tt.dot(X.T, beta), sd=sigma_n, observed=y.squeeze())
    # Draw 7500 samples per chain, using 3 cores (one chain per core).
    trace = pm.sample(draws=7500, cores=3)
```
Let us visualize the posterior distributions.
```
pm.traceplot(trace, figsize=(12, 5));
pm.plot_posterior(trace, figsize=(12, 5));
pm.summary(trace)
```
Let us see the join posterior distribution.
```
sns.jointplot(x=trace['beta'].T[0],y=trace['beta'].T[1], kind='kde', color='green', space=0);
```
Now let us sample from the posterior distribution.
```
# Get a sub-sample of indices of length m (without replacement).
sample_posterior_indices = np.random.choice(trace["beta"].shape[0], m, replace=False)
# Select the corresponding weight samples from the posterior trace.
z_posterior = trace["beta"][sample_posterior_indices, ]
# Evaluate each sampled weight vector on the design matrix (m x n lines).
lines_posterior = np.dot(z_posterior, X)
```
Similarly, let us plot the posterior samples.
```
fig, ax = plt.subplots()
# Loop over the line samples from the posterior distribution.
for i in range(0, m):
sns.lineplot(
x=x.T.flatten(),
y=lines_posterior[i],
color='green',
alpha=0.2
)
# Plot raw data.
sns.scatterplot(x=x.T.flatten(), y=y.flatten())
# Plot "true" linear fit.
sns.lineplot(x=x.T.flatten(), y=f.flatten(), color='red', label='true')
ax.set(title='Curves drawn from the posterior distribution of the weight vector')
ax.legend(loc='lower right');
```
We see how the data makes the posterior distribution much more localized around the mean.
## Predictions
Next, we use the posterior distribution of the weights vector to generate predictions.
### Parameter Mean
Let us begin by using the mean of the posterior distribution of each parameter to find the linear fit.
```
# Posterior mean of the weight vector, reshaped to a d x 1 column.
beta_hat = np.apply_over_axes(func=np.mean, a=trace['beta'], axes=0).reshape(d,1)
# Linear fit evaluated at every point of the design matrix.
y_hat = np.dot(X.T, beta_hat)
```
Let us plot the result.
```
fig, ax = plt.subplots()
# Plot raw data.
sns.scatterplot(x=x.T.flatten(), y=y.flatten())
# Plot "true" linear fit.
sns.lineplot(x=x.T.flatten(), y=f.flatten(), color='red', label='true')
# Plot line corresponding to the posterior mean of the weight vector.
sns.lineplot(x=x.T.flatten(), y=y_hat.flatten(), color='green', label='posterior mean')
ax.set(title='Linear fit for parameter posterior mean')
ax.legend(loc='lower right');
```
### Credible Interval
Next, let us compute the credible interval for the fit.
```
# Evaluate every posterior weight sample on the design matrix
# (n x num_samples matrix of fitted values).
y_hat_samples = np.dot(X.T, trace['beta'].T)
# Pointwise standard deviation across the posterior samples.
y_hat_sd = np.apply_over_axes(func=np.std, a=y_hat_samples, axes=1).squeeze()
```
Let us plot the credible interval corresponding to a corridor corresponding to two standard deviations.
```
fig, ax = plt.subplots()
# Plot credible interval.
plt.fill_between(
x=x.T.reshape(n,),
y1=(y_hat.reshape(n,) - 2*y_hat_sd),
y2=(y_hat.reshape(n,) + 2*y_hat_sd),
color = 'green',
alpha = 0.3,
label='credible interval: mean $\pm 2 \sigma$'
)
# Plot "true" linear fit.
sns.lineplot(x=x.T.flatten(), y=f.flatten(), color='red', label='true')
# Plot raw data.
sns.scatterplot(x=x.T.flatten(), y=y.flatten())
# Plot line corresponding to the posterior mean of the weight vector.
sns.lineplot(x=x.T.flatten(), y=y_hat.flatten(), color='green', label='posterior mean')
ax.set(title='Linear fit for parameter posterior mean')
ax.legend(loc='lower right');
```
### Test Set
Now, we write a function to generate predictions for a new data point.
```
def generate_prediction(x_star, trace):
    """Sample one prediction for a new input from the posterior draws.

    x_star : column vector of shape (d, 1) for the new point
             (first entry is the intercept term).
    trace  : MCMC trace; trace['beta'] holds posterior samples of the
             weight vector with shape (n_samples, d).

    Draws uniformly from the n_samples predictive means x_star^T beta_i,
    i.e. it samples the predictive distribution induced by the trace.
    """
    # Predictive mean under every posterior draw of beta: shape (1, n_samples).
    predictive_means = np.dot(x_star.T, trace['beta'].T)
    # Pick one of the candidate predictions uniformly at random.
    return np.random.choice(a=predictive_means.squeeze())
```
Let us generate a prediction for the value $z_* = 0.85$
```
z_star = np.array([[1], [0.85]])
y_hat_star = generate_prediction(z_star, trace)
y_hat_star
fig, ax = plt.subplots()
# Plot credible interval.
plt.fill_between(
x=x.T.reshape(n,),
y1=(y_hat.reshape(n,) - 2*y_hat_sd),
y2=(y_hat.reshape(n,) + 2*y_hat_sd),
color = 'green',
alpha = 0.3,
label='credible interval: mean $\pm 2 \sigma$'
)
# Plot "true" linear fit.
sns.lineplot(x=x.T.flatten(), y=f.flatten(), color='red', label='true')
# Plot raw data.
sns.scatterplot(x=x.T.flatten(), y=y.flatten())
# Plot line corresponding to the posterior mean of the weight vector.
sns.lineplot(x=x.T.flatten(), y=y_hat.flatten(), color='green', label='posterior mean')
# Point prediction
sns.scatterplot(x=z_star[1], y=y_hat_star, color='black', label='point prediction $z_*$')
ax.set(title='Linear fit for parameter posterior mean & point prediction')
ax.legend(loc='lower right');
```
## Analytical Solution
For this concrete example, we can find the analytical solution of the posterior distribution. Recall that this one is proportional to the product:
$$
p(b|y, X) \propto
\exp\left(
-\frac{1}{2\sigma_n^2}||y - X^T b||^2
\right)
\exp\left(
-\frac{1}{2} b^T \Sigma_p b
\right)
$$
The main idea to find the functional form of the posterior distribution is to "complete the square" of the exponents in the right hand side of the equation above. Specifically, let us define:
$$
A:= \sigma_n^{-2}XX^T + \Sigma_p^{-1} \in M_{d}(\mathbb{R})
\quad
\text{and}
\quad
\bar{b}:= \sigma_n^{-2}A^{-1}Xy \in \mathbb{R}^d
$$
Then,
$$
\sigma_n^{-2} ||y - X^T b||^2 + b^T \Sigma_p b
=
b^T A b - \sigma_n^{-2}(b^T Xy + (b^T Xy)^T) + \sigma_n^{-2}y^Ty.
$$
The last term does not depend on $b$ so we can ignore it for the calculation. Observe that $\sigma_n^{-2} b^T Xy = \sigma_n^{-2} b^TAA^{-1}Xy = b^TA\bar{b}$, hence
$$
b^T A b - \sigma_n^{-2}(b^T Xy + (b^T Xy)^T) =
b^T A b - b^TA\bar{b} - \bar{b}^TAb =
(b - \bar{b})^TA(b - \bar{b}) - \bar{b}^TA\bar{b}
$$
Again, the term $\bar{b}^TA\bar{b}$ does not depend on $b$, so it is not relevant for the computation. We then recognize the form of the posterior distribution as *gaussian with mean $\bar{b}$ and covariance matrix $A^{-1}$*.
$$
p(b|y, X) \sim N
\left(
\bar{b},
A^{-1}
\right)
$$
Let us compute the analytic solution for this example:
```
# Compute A.
A = (sigma_n)**(-2)*np.dot(X, X.T) + np.linalg.inv(sigma_p)
# Compute its inverse.
A_inv = np.linalg.inv(A)
# Compute b_bar.
b_bar = (sigma_n)**(-2)*np.dot(A_inv, np.dot(X, y))
b_bar
```
Note that these values coincide with the values above obtained from the MCMC sampling. Let us sample from the analytical solution of the posterior distribution.
```
# Set number of samples.
m = 10000
# Sample from the posterior distribution.
z = np.random.multivariate_normal(mean=b_bar.squeeze(), cov=A_inv, size=m)
z = z.T
sns.jointplot(x=z[0], y=z[1], kind='kde', color='green', space=0);
```
These level curves coincide with the ones obtained above.
A straightforward computation shows that the predictive posterior distribution is given by:
$$
p(y_*|z_*, X, y) = N\left(\frac{1}{\sigma_n^2}z_*^T A^{-1}Xy, z_*^TA^{-1}z_*\right)
$$
## Regularized Bayesian Linear Regression as a Gaussian Process
A **gaussian process** is a collection of random variables, any finite number of which have a joint gaussian distribution (See [Gaussian Processes for Machine Learning, Ch2 - Section 2.2](http://www.gaussianprocess.org/gpml/chapters/RW2.pdf)).
A Gaussian process $f(x)$ is completely specified by its mean function $m(x)$ and covariance function $k(x, x')$. Here $x \in \mathcal{X}$ denotes a point on the index set $\mathcal{X}$. These functions are defined by
$$
m(x) = E[f(x)]
\quad
\text{and}
\quad
k(x, x') = E[(f(x) - m(x))(f(x') - m(x'))]
$$
**Claim:** The map \\(f(x) = x^T b \\) defines a Gaussian process.
*Proof:*
1. Let $x_1, \cdots, x_N \in \mathcal{X}=\mathbb{R}^d$. As $b$ has a multivariate normal distribution, then every linear combination of its components is normally distributed (see [here](https://juanitorduz.github.io/multivariate_normal/)). In particular, for any $a_1, \cdots, a_N \in \mathbb{R}$, we see that
$$
\sum_{i=1}^N a_i f(x_i)
=
\sum_{i=1}^N a_i x_i^Tb
=
\left(\sum_{i=1}^N a_i x_i\right)^Tb
$$
is a linear combination of the components of \\(b\\), thus is normally distributed. This shows that \\(f(x)\\) is a gaussian process.
Let us now compute the mean and covariance functions:
2. $m (x) = E[f(x)] = x^T E[b] = 0$.
3. $k(x, x') = E[f(x)f(x')] = E[x^T b (x')^T b] = E[x^T b b^T x'] = x^T E[bb^T]x' = x^T \Sigma_px'$.
Note that the posterior predictive distribution can be written in terms of this kernel function (more generally, even for non-linear regressions, this statement remains valid in terms of the so-called "kernel trick").
## Function-Space View
We can understand a gaussian process, and in particular a regularized bayesian regression, as an inference directly in function space (see details in [this](https://juanitorduz.github.io/gaussian_process_reg/) post). The main difference in this change of perspective is that we do not sample from the parameters' joint distribution but rather from the space of functions themselves (to be precise, this function space is infinite-dimensional, so the function is characterized by evaluating it on a finite sample of points which we call the "test" set).
```
# Number of test points
n_star = 80
# Create test set.
x_star = np.linspace(start=0, stop=1, num=n_star).reshape([1, n_star])
# Add columns of ones.
X_star = np.append(np.ones(n_star).reshape([1, n_star]), x_star, axis=0)
```
From the calculation above, let us define the kernel function:
```
def kernel(x_1, x_2, k):
    """Evaluate the linear kernel x_1^T k x_2 induced by the prior covariance k."""
    # Chain the two matrix products into a single expression.
    return np.dot(np.dot(x_1.T, k), x_2)
```
Let us compute the kernel image between $X$ and $X_*$.
```
K = kernel(X, X, sigma_p)
K_star = kernel(X_star, X, sigma_p)
K_star_star = kernel(X_star, X_star, sigma_p)
```
In the function view of regularized linear regression, the kernel on the test set defines the prior distribution on the function space.
```
cov_prior = K_star_star
```
Let us sample from the prior distribution:
```
# Number of samples to select.
m = 200
fig, ax = plt.subplots()
for i in range(0, m):
points_sample = np.random.multivariate_normal(
mean=np.zeros(n_star),
cov=cov_prior
)
sns.lineplot(
x=x_star.flatten(),
y=points_sample,
color='purple',
alpha=0.2,
ax=ax
)
# Plot "true" linear fit.
sns.lineplot(x=x.T.flatten(), y=f.flatten(), color='red', label='true')
# Plot raw data.
sns.scatterplot(x=x.T.flatten(), y=y.flatten())
ax.set(title='Curves drawn from the GP prior distribution')
ax.legend(loc='lower right');
```
As pointed out above, we can express the posterior predictive distribution in terms of the kernel function (refer to [Gaussian Processes for Machine Learning, Ch 2.2](http://www.gaussianprocess.org/gpml/chapters/RW2.pdf)):
```
mean_posterior = np.dot(np.dot(K_star, np.linalg.inv(K + sigma_n**2*np.eye(n))), y)
cov_posterior = K_star_star - np.dot(np.dot(K_star, np.linalg.inv(K + sigma_n**2*np.eye(n))), K_star.T)
```
Let us sample from the posterior distribution:
```
# Number of samples to select.
m = 200
fig, ax = plt.subplots()
for i in range(0, m):
points_sample = np.random.multivariate_normal(
mean=mean_posterior.flatten(),
cov=cov_posterior
)
sns.lineplot(
x=x_star.flatten(),
y=points_sample,
color='green',
alpha=0.2,
ax=ax
)
# Plot "true" linear fit.
sns.lineplot(x=x.T.flatten(), y=f.flatten(), color='red', label='true')
# Plot raw data.
sns.scatterplot(x=x.T.flatten(), y=y.flatten())
ax.set(title='Curves drawn from the GP posterior distribution')
ax.legend(loc='lower right');
```
This analysis should give a better intuition about the definition of a gaussian process, which at first glance might appear somewhat mysterious. I will continue exploring this topic in future posts.
| github_jupyter |
# Germany: LK Rendsburg-Eckernförde (Schleswig-Holstein)
* Homepage of project: https://oscovida.github.io
* Plots are explained at http://oscovida.github.io/plots.html
* [Execute this Jupyter Notebook using myBinder](https://mybinder.org/v2/gh/oscovida/binder/master?filepath=ipynb/Germany-Schleswig-Holstein-LK-Rendsburg-Eckernförde.ipynb)
```
import datetime
import time
# Record wall-clock start so total runtime can be reported at the end.
start = datetime.datetime.now()
print(f"Notebook executed on: {start.strftime('%d/%m/%Y %H:%M:%S%Z')} {time.tzname[time.daylight]}")
%config InlineBackend.figure_formats = ['svg']
from oscovida import *
# Overview plots: last 5 weeks first, then the full reporting period.
overview(country="Germany", subregion="LK Rendsburg-Eckernförde", weeks=5);
overview(country="Germany", subregion="LK Rendsburg-Eckernförde");
# Cross-region comparison starting mid-March 2020.
compare_plot(country="Germany", subregion="LK Rendsburg-Eckernförde", dates="2020-03-15:");
# load the data
cases, deaths = germany_get_region(landkreis="LK Rendsburg-Eckernförde")
# get population of the region for future normalisation:
inhabitants = population(country="Germany", subregion="LK Rendsburg-Eckernförde")
print(f'Population of country="Germany", subregion="LK Rendsburg-Eckernförde": {inhabitants} people')
# compose into one table
table = compose_dataframe_summary(cases, deaths)
# show tables with up to 1000 rows
pd.set_option("max_rows", 1000)
# display the table
table
```
# Explore the data in your web browser
- If you want to execute this notebook, [click here to use myBinder](https://mybinder.org/v2/gh/oscovida/binder/master?filepath=ipynb/Germany-Schleswig-Holstein-LK-Rendsburg-Eckernförde.ipynb)
- and wait (~1 to 2 minutes)
- Then press SHIFT+RETURN to advance code cell to code cell
- See http://jupyter.org for more details on how to use Jupyter Notebook
# Acknowledgements:
- Johns Hopkins University provides data for countries
- Robert Koch Institute provides data for within Germany
- Atlo Team for gathering and providing data from Hungary (https://atlo.team/koronamonitor/)
- Open source and scientific computing community for the data tools
- Github for hosting repository and html files
- Project Jupyter for the Notebook and binder service
- The H2020 project Photon and Neutron Open Science Cloud ([PaNOSC](https://www.panosc.eu/))
--------------------
```
print(f"Download of data from Johns Hopkins university: cases at {fetch_cases_last_execution()} and "
f"deaths at {fetch_deaths_last_execution()}.")
# to force a fresh download of data, run "clear_cache()"
print(f"Notebook execution took: {datetime.datetime.now()-start}")
```
| github_jupyter |
```
import gensim
import pandas as pd
import numpy as np
import csv
import re
import nltk
from nltk.corpus import stopwords
nltk.download('stopwords')
import operator
import gensim.models
# Bracket/separator characters that get replaced by spaces during cleaning.
REPLACE_BY_SPACE_RE = re.compile('[/(){}\[\]\|@,;%]')
# NOTE(review): BAD_SYMBOLS_RE and STOPWORDS are defined but unused below.
BAD_SYMBOLS_RE = re.compile('[^0-9a-z #+_]')
# NOTE: '...?'';%]' is two adjacent string literals joined by Python, so the
# effective character class is '[/(){}\[\]\|@.:,?;%]' (no quote character).
REPLACE_RE = re.compile('[/(){}\[\]\|@.:,?'';%]')
STOPWORDS = set(stopwords.words('english'))
# Stop-word set actually used by remove_stopwords; 'unk' covers the
# unknown-word placeholder present in the corpus.
stop_words = set(stopwords.words('english'))
stop_words.add('unk')
def remove_stopwords(sentence, stopwords=None):
    """Drop stop words from *sentence* and cap the output at 501 kept words.

    sentence : whitespace-separated text (assumed already lower-cased by
        the caller -- TODO confirm; the .lower() call was commented out).
    stopwords: optional set of words to remove.  Defaults to the
        module-level NLTK English stop list (plus 'unk'), preserving the
        original call signature.

    Returns the kept words joined by single spaces.  Once more than 500
    words have been kept the function returns early, truncating very long
    articles to keep downstream Word2Vec training cheap (same behaviour
    as the original early-return).
    """
    if stopwords is None:
        # Fall back to the module-level stop list built at import time.
        stopwords = stop_words
    kept = []
    for word in sentence.split():
        if word not in stopwords:
            kept.append(word)
            # Truncate after the 501st kept word, matching the original
            # `if j>500: return` check that ran after the increment.
            if len(kept) > 500:
                break
    return " ".join(kept)
def text_prepare(text):
    """Normalise a raw article string into a token list for gensim.

    text: a raw article string.
    return: list of tokens -- punctuation replaced by spaces, stop words
        removed, then split on single spaces.
    """
    #print("initial text= "+text)
    #text = text.lower()
    # Replace punctuation ( / ( ) { } [ ] | @ . : , ? ; % ) with spaces.
    text = re.sub(REPLACE_RE," ",text,)
    # Replace the remaining bracket/separator characters with spaces too.
    text= re.sub(REPLACE_BY_SPACE_RE," ", text)
    #print("repace by space= " +text)
    # print("bad symbols= "+text)
    # Drop English stop words (and 'unk'); also truncates very long texts.
    text = remove_stopwords(text)
    #text=text[0:200]
    # print("remove stopwords "+text)
    # remove_stopwords joins with single spaces, so this yields clean tokens.
    text=text.split(' ')
    return text
#this is the csv file containing all of your financial articles, one article per row typically
#https://www.dropbox.com/s/vwcklshfuhvsdz8/articles_final.zip?dl=0
yourtextfile='/media/user/Data/wellai/models/articles_final.csv'
from csv import reader
from gensim.test.utils import datapath
from gensim import utils
class MyCorpus(object):
    """An iterator that yields sentences (lists of str).

    Streams the CSV at the module-level path `yourtextfile` one row at a
    time so the whole corpus never has to fit in memory; gensim's
    Word2Vec can iterate it multiple times (once per epoch).
    """
    def __iter__(self):
        with open(yourtextfile, 'r') as read_obj:
            # pass the file object to reader() to get the reader object
            csv_reader = reader(read_obj)
            # Iterate over each row in the csv using reader object
            for row in csv_reader:
                # Collapse all columns of the row into a single article string.
                row="".join(row)
                # Yield the cleaned, tokenised article.
                yield(text_prepare(row))
import logging
logging.basicConfig(format='%(asctime)s : %(levelname)s : %(message)s', level=logging.INFO)
sentences = MyCorpus()
model = gensim.models.Word2Vec(sentences=sentences,size=200,workers=24,window=15,min_count=10,iter=5)
model.save("/media/user/Data/wellai/models/word2vec_financial_new2.model")
```
| github_jupyter |
author: Diogo Silva
SKL = SciKit-Learn
```
%pylab inline
import seaborn as sns
home = %env HOME
cd $home/QCThesis/
from sklearn.cluster import KMeans as KMeans_skl
import MyML.cluster.eac as eac
reload(eac)
import MyML.cluster.K_Means3 as K_Means3
reload(K_Means3)
import MyML.metrics.accuracy as determine_ci
reload(determine_ci)
```
# Helper functions
```
def stat_my_kmeans(data,nclusters,gtruth,rounds=20):
    """Run the custom K-Means `rounds` times and report accuracy statistics.

    data     : (n_samples, n_features) array to cluster.
    nclusters: number of clusters to fit.
    gtruth   : ground-truth labels aligned with `data` rows.
    rounds   : number of independent runs (default 20).

    Returns (mean, variance, max, min) of the Hungarian-matched accuracy.
    """
    nsamples=data.shape[0]
    all_acc = list()
    for r in xrange(rounds):
        # Run the NumPy-backend K-Means to convergence each round.
        iters="converge"
        kmeans_mode="numpy"
        grouper = K_Means3.K_Means(n_clusters=nclusters, mode=kmeans_mode, cuda_mem='manual',tol=1e-4,max_iters=iters)
        grouper._centroid_mode = "iter"
        grouper.fit(data)
        # Score this run's labelling against ground truth (Hungarian match).
        myAcc = determine_ci.HungarianIndex(nsamples=nsamples)
        myAcc.score(gtruth,grouper.labels_,format='array')
        all_acc.append(myAcc.accuracy)
    return np.mean(all_acc),np.var(all_acc),np.max(all_acc),np.min(all_acc)
def stat_skl_kmeans(data,nclusters,gtruth,rounds=20,init='random'):
    """Run scikit-learn K-Means `rounds` times and report accuracy statistics.

    data     : (n_samples, n_features) array to cluster.
    nclusters: number of clusters to fit.
    gtruth   : ground-truth labels aligned with `data` rows.
    rounds   : number of independent runs (default 20).
    init     : scikit-learn centroid initialisation scheme.

    Returns (mean, variance, max, min) of the Hungarian-matched accuracy.
    """
    nsamples=data.shape[0]
    all_acc = list()
    for r in xrange(rounds):
        gSKL = KMeans_skl(n_clusters=nclusters,n_init=1,init=init)
        gSKL.fit(data)
        myAcc = determine_ci.HungarianIndex(nsamples=nsamples)
        # BUG FIX: score the freshly fitted gSKL labels; the original
        # referenced `grouper.labels_`, an undefined local that would either
        # raise NameError or silently score a stale global from another cell.
        myAcc.score(gtruth,gSKL.labels_,format='array')
        all_acc.append(myAcc.accuracy)
    return np.mean(all_acc),np.var(all_acc),np.max(all_acc),np.min(all_acc)
print "b MyML/cluster/K_Means3.py:"
def k_analysis(partition_files,ground_truth,nprots,iters="converge",rounds=20,files=True):
    """EAC co-association + custom K-Means, repeated `rounds` times.

    partition_files: ensemble of partitions (file paths or in-memory
        partitions, selected by `files`).
    ground_truth   : true labels; also fixes the final number of clusters.
    nprots         : number of prototypes for the co-association matrix.
    iters          : iteration budget for the final K-Means.

    Returns (mean, variance, max, min) of the Hungarian-matched accuracy.
    NOTE(review): reads the notebook-global `data` for nsamples -- assumes
    it matches the ensemble's sample count; verify against the caller.
    """
    nsamples=data.shape[0]
    all_acc = list()
    for r in xrange(rounds):
        # Build a prototype-based co-association matrix from the ensemble.
        prot_mode="random"
        estimator=eac.EAC(nsamples)
        estimator.fit(partition_files,files=files,assoc_mode='prot', prot_mode=prot_mode, nprot=nprots,build_only=True)
        # Final clustering of the co-association with the true cluster count.
        kmeans_mode = "numpy"
        nclusters = np.unique(ground_truth).shape[0]
        grouper = K_Means3.K_Means(n_clusters=nclusters,mode=kmeans_mode, cuda_mem='manual',tol=1e-4,max_iters=iters)
        grouper._centroid_mode = "iter"
        grouper.fit(estimator._coassoc)
        # Hungarian-matched accuracy for this round.
        myAcc = determine_ci.HungarianIndex(nsamples=nsamples)
        myAcc.score(ground_truth,grouper.labels_,format='array')
        all_acc.append(myAcc.accuracy)
    return np.mean(all_acc),np.var(all_acc),np.max(all_acc),np.min(all_acc)
def k_skl_analysis(partition_files,ground_truth,nprots,rounds=20,files=True):
    """EAC co-association + scikit-learn K-Means, repeated `rounds` times.

    Same protocol as k_analysis but the final clustering uses
    scikit-learn's KMeans with random init.

    Returns (mean, variance, max, min) of the Hungarian-matched accuracy.
    NOTE(review): reads the notebook-global `data` for nsamples -- assumes
    it matches the ensemble's sample count; verify against the caller.
    """
    nsamples=data.shape[0]
    all_acc = list()
    for r in xrange(rounds):
        # Build a prototype-based co-association matrix from the ensemble.
        prot_mode="random"
        estimator=eac.EAC(nsamples)
        estimator.fit(partition_files,files=files,assoc_mode='prot', prot_mode=prot_mode, nprot=nprots,build_only=True)
        # Final clustering of the co-association with the true cluster count.
        kmeans_mode = "numpy"
        nclusters = np.unique(ground_truth).shape[0]
        grouper = KMeans_skl(n_clusters=nclusters,n_init=1,init="random")
        grouper.fit(estimator._coassoc)
        # Hungarian-matched accuracy for this round.
        myAcc = determine_ci.HungarianIndex(nsamples=nsamples)
        myAcc.score(ground_truth,grouper.labels_,format='array')
        all_acc.append(myAcc.accuracy)
    return np.mean(all_acc),np.var(all_acc),np.max(all_acc),np.min(all_acc)
```
# Generate data
```
center1=(0,0)
center2=(10,10)
cov1=1
cov2=1
n1=500000
n2=500000
nsamples=n1+n2
dim=2
g1 = np.random.normal(loc=center1,scale=cov1,size=(n1,dim)).astype(np.float32)
g2 = np.random.normal(loc=center2,scale=cov2,size=(n2,dim)).astype(np.float32)
data = np.vstack((g1,g2))
gt=np.zeros(data.shape[0],dtype=np.int32)
gt[100:]=1
figData=plt.figure()
plt.plot(g1[:,0],g1[:,1],'.')
plt.plot(g2[:,0],g2[:,1],'.')
import MyML.helper.partition
reload(MyML.helper.partition)
py_estimator=K_Means3.K_Means(n_clusters=20,mode="numpy", cuda_mem='manual',tol=1e-4,max_iter=3)
cu_estimator=K_Means3.K_Means(n_clusters=20,mode="cuda", cuda_mem='manual',tol=1e-4,max_iter=3)
%timeit MyML.helper.partition.generateEnsemble(data,cu_estimator,n_clusters=[6,30],npartitions=30,iters=3)
%timeit MyML.helper.partition.generateEnsemble(data,py_estimator,n_clusters=[6,30],npartitions=30,iters=3)
cProfile.run("grouperSKL.fit(data)")
cProfile.run("grouper.fit(data)")
```
## Generate partitions, k=6,10,[4,25]
```
def formatPartition(partition):
    """Convert a flat label vector into a list of per-cluster index arrays.

    partition: 1-D array of cluster labels, one per sample.

    Returns a list with one entry per distinct label (in sorted label
    order); entry i holds the indices of the samples assigned to the
    i-th label -- the cluster format used by the custom estimators.
    """
    clusters=np.unique(partition)
    finalPartition=[None]*clusters.size
    # BUG FIX: the original did `for c,l in clusters` (unpacking scalar
    # labels -> TypeError) and searched `clusters==l` instead of the
    # actual label vector `partition==l`.
    for c,l in enumerate(clusters):
        finalPartition[c] = np.where(partition==l)[0]
    return finalPartition
def generatePartitions(data,npartitions,nclusters,iters=3):
    """Generate `npartitions` partitions of `data` with the custom K-Means.

    nclusters: either a fixed int k, or a [min, max] list -- then each
        partition draws k uniformly from [min, max) via np.random.randint.
    iters    : K-Means iteration budget per partition.

    Returns a list of partitions in the estimator's cluster format.
    """
    if type(nclusters) is list:
        clusterRange = True
        min_ncluster=nclusters[0]
        max_ncluster=nclusters[1]
    else:
        clusterRange = False
        k = nclusters
    partitions = list()
    mode = "numpy"
    for p in xrange(npartitions):
        if clusterRange:
            # Draw this partition's k from [min_ncluster, max_ncluster).
            k = np.random.randint(min_ncluster,max_ncluster)
        grouper = K_Means3.K_Means(n_clusters=k,mode=mode, cuda_mem='manual',tol=1e-4,max_iters=iters)
        grouper._centroid_mode = "index"
        grouper.fit(data)
        partitions.append(grouper.partition)
    return partitions
def generatePartitionsSKL(data,npartitions,nclusters,iters=3):
    """Generate `npartitions` partitions of `data` with scikit-learn K-Means.

    Same contract as generatePartitions, but labels come from sklearn and
    are converted to the per-cluster index format via formatPartition.
    """
    if type(nclusters) is list:
        clusterRange = True
        min_ncluster=nclusters[0]
        max_ncluster=nclusters[1]
    else:
        clusterRange = False
        k = nclusters
    partitions = list()
    # NOTE(review): `mode` is unused here (leftover from generatePartitions).
    mode = "numpy"
    for p in xrange(npartitions):
        if clusterRange:
            # Draw this partition's k from [min_ncluster, max_ncluster).
            k = np.random.randint(min_ncluster,max_ncluster)
        gSKL = KMeans_skl(n_clusters=k,n_init=1,init="random",max_iter=iters)
        gSKL.fit(data)
        # Convert sklearn's flat label vector to per-cluster index arrays.
        partitions.append(formatPartition(gSKL.labels_))
    return partitions
reload(K_Means3)
npartitions=30
iters=3
nclusters=10
partitions_my_10 = generatePartitions(data=data,npartitions=npartitions,nclusters=nclusters,iters=iters)
partitions_skl_10 = generatePartitions(data=data,npartitions=npartitions,nclusters=nclusters,iters=iters)
if type(nclusters) is not list:
allGood=True
for p in xrange(npartitions):
if len(partitions_my_10[p]) != nclusters:
print 'partition {} of partitions_my has different number of clusters:{}'.format(p,len(partitions_my_10[p]))
allGood=False
if len(partitions_skl_10[p]) != nclusters:
print 'partition {} of partitions_my has different number of clusters:{}'.format(p,len(partitions_skl_10[p]))
allGood=False
if allGood:
print 'All partitions have good number of clusters.'
nclusters=6
partitions_my_6 = generatePartitions(data=data,npartitions=npartitions,nclusters=nclusters,iters=iters)
partitions_skl_6 = generatePartitions(data=data,npartitions=npartitions,nclusters=nclusters,iters=iters)
if type(nclusters) is not list:
allGood=True
for p in xrange(npartitions):
if len(partitions_my_6[p]) != nclusters:
print 'partition {} of partitions_my has different number of clusters:{}'.format(p,len(partitions_my_6[p]))
allGood=False
if len(partitions_skl_6[p]) != nclusters:
print 'partition {} of partitions_my has different number of clusters:{}'.format(p,len(partitions_skl_6[p]))
allGood=False
if allGood:
print 'All partitions have good number of clusters.'
nclusters=[4,25]
partitions_my_rand = generatePartitions(data=data,npartitions=npartitions,nclusters=nclusters,iters=iters)
partitions_skl_rand = generatePartitions(data=data,npartitions=npartitions,nclusters=nclusters,iters=iters)
if type(nclusters) is not list:
allGood=True
for p in xrange(npartitions):
if len(partitions_my_rand[p]) != nclusters:
print 'partition {} of partitions_my has different number of clusters:{}'.format(p,len(partitions_my[p]))
allGood=False
if len(partitions_skl_rand[p]) != nclusters:
print 'partition {} of partitions_my has different number of clusters:{}'.format(p,len(partitions_skl[p]))
allGood=False
if allGood:
print 'All partitions have good number of clusters.'
```
### Visualizing some partitions
```
figEnsemble=plt.figure(figsize=(16,12))
ax1En=figEnsemble.add_subplot(2,2,1)
ax2En=figEnsemble.add_subplot(2,2,2)
ax3En=figEnsemble.add_subplot(2,2,3)
ax4En=figEnsemble.add_subplot(2,2,4)
for c in partitions_my_10[0]:
ax1En.plot(data[c,0],data[c,1],'.')
ax1En.set_title("Sample of one partition generated with my K-Means")
for c in partitions_my_10[1]:
ax2En.plot(data[c,0],data[c,1],'.')
ax2En.set_title("Sample of one partition generated with my K-Means")
for c in partitions_skl_10[0]:
ax3En.plot(data[c,0],data[c,1],'.')
ax3En.set_title("Sample of one partition generated with SKL's K-Means")
for c in partitions_skl_10[1]:
ax4En.plot(data[c,0],data[c,1],'.')
ax4En.set_title("Sample of one partition generated with SKL's K-Means")
```
# EAC K-Means
## 6 clusters per partition
```
# generate coassoc
prot_mode="random"
assoc_mode='prot' # prot or full
nprots=nsamples # number of prototypes
partitions_used = partitions_my_6
myEstimator=eac.EAC(nsamples)
myEstimator.fit(partitions_used,files=False,assoc_mode=assoc_mode, prot_mode=prot_mode, nprot=nprots,build_only=True)
# final clustering with the true number of clusters
true_nclusters = np.unique(gt).shape[0]
# cluster with my K-Means
kmeans_mode = "numpy"
grouper = K_Means3.K_Means(n_clusters=true_nclusters, mode=kmeans_mode, cuda_mem='manual',tol=1e-4,max_iters=iters)
grouper._centroid_mode = "index"
grouper.fit(myEstimator._coassoc)
# cluster with SKL K-Means
gSKL = KMeans_skl(n_clusters=true_nclusters,n_init=1,init="random")
gSKL.fit(myEstimator._coassoc)
# Hungarian accuracy
myAcc = determine_ci.HungarianIndex(nsamples=nsamples)
myAcc.score(gt,grouper.labels_,format='array')
sklAcc = determine_ci.HungarianIndex(nsamples=nsamples)
sklAcc.score(gt,gSKL.labels_,format='array')
print 'My Accuracy:\t',myAcc.accuracy
print 'SKL Accuracy:\t',sklAcc.accuracy
figEAC=plt.figure(figsize=(16,6))
ax1EAC=figEAC.add_subplot(1,2,1)
ax2EAC=figEAC.add_subplot(1,2,2)
for c in np.unique(grouper.labels_):
clusterData=grouper.labels_==c
ax1EAC.plot(data[clusterData,0],data[clusterData,1],'.')
ax1EAC.set_title("Final EAC partition with my K-Means")
for c in np.unique(gSKL.labels_):
clusterData=gSKL.labels_==c
ax2EAC.plot(data[clusterData,0],data[clusterData,1],'.')
ax2EAC.set_title("Final EAC partition with SKL's K-Means")
```
Accuracy is usually 100% in both cases (clustering from my K-Means and SciKit-Learn's). This depends on the ensemble. For some ensembles the accuracy on both is always one, for others it sometimes is not in one or both of the K-Means used (mine vs SKL).
The number of prototypes is equal to the number of samples and since there are not repeated prototypes, all the samples are being used. Above are the visualizations of the solutions.
### Statistic analysis
```
stat_nprots=nsamples
print "{}\t{}\t{}\t{}\t{}".format("type","mean","var","max","min")
print "skl \t",
for metric in k_skl_analysis(partitions_used,files=False,ground_truth=gt,nprots=stat_nprots,rounds=100):
print "{}\t".format(metric),
print "\nmy \t",
for metric in k_analysis(partitions_used,files=False,ground_truth=gt,nprots=stat_nprots,iters="converge",rounds=100):
print "{}\t".format(metric),
nprots=[5,20,40,60,80,100,120,140,160,180,200]
results_k10=list()
for n in nprots:
print '.',
r=k_analysis(partitions_used,files=False,ground_truth=gt,nprots=n,rounds=100)
results_k10.append(r)
mean_k10=[res[0] for res in results_k10]
var_k10=[res[1] for res in results_k10]
best_k10=[res[2] for res in results_k10]
worst_k10=[res[3] for res in results_k10]
plt.plot(mean_k10,label='mean')
plt.plot(best_k10,label='best')
plt.plot(worst_k10,label='worst')
plt.plot([0, 10], [0.5, 0.5], 'k-', lw=1)
plt.title("Analysis of the influence of the number of prototypes")
plt.legend(loc='best')
```
## 10 clusters per partition
```
partitions_used = partitions_my_10
# generate coassoc
prot_mode="random"
assoc_mode='prot' # prot or full
nprots=nsamples # number of prototypes
myEstimator=eac.EAC(nsamples)
myEstimator.fit(partitions_used,files=False,assoc_mode=assoc_mode, prot_mode=prot_mode, nprot=nprots,build_only=True)
# final clustering with the true number of clusters
true_nclusters = np.unique(gt).shape[0]
# cluster with my K-Means
kmeans_mode = "numpy"
grouper = K_Means3.K_Means(n_clusters=true_nclusters, mode=kmeans_mode, cuda_mem='manual',tol=1e-4,max_iters=iters)
grouper._centroid_mode = "iter"
grouper.fit(myEstimator._coassoc)
# cluster with SKL K-Means
gSKL = KMeans_skl(n_clusters=true_nclusters,n_init=1,init="random")
gSKL.fit(myEstimator._coassoc)
# Hungarian accuracy
myAcc = determine_ci.HungarianIndex(nsamples=nsamples)
myAcc.score(gt,grouper.labels_,format='array')
sklAcc = determine_ci.HungarianIndex(nsamples=nsamples)
sklAcc.score(gt,gSKL.labels_,format='array')
print 'My Accuracy:\t',myAcc.accuracy
print 'SKL Accuracy:\t',sklAcc.accuracy
figEAC2=plt.figure(figsize=(16,12))
ax1EAC2=figEAC2.add_subplot(2,2,1)
ax2EAC2=figEAC2.add_subplot(2,2,2)
ax3EAC2=figEAC2.add_subplot(2,2,3)
for c in np.unique(grouper.labels_):
clusterData=grouper.labels_==c
ax1EAC2.plot(data[clusterData,0],data[clusterData,1],'.')
ax1EAC2.set_title("Final EAC partition with my K-Means")
for c in np.unique(gSKL.labels_):
clusterData=gSKL.labels_==c
ax2EAC2.plot(data[clusterData,0],data[clusterData,1],'.')
ax2EAC2.set_title("Final EAC partition with SKL's K-Means")
nprots=[5,20,40,60,80,100,120,140,160,180,200]
results_k6=list()
for n in nprots:
r=k_skl_analysis(partitions_used,files=False,ground_truth=gt,nprots=stat_nprots,rounds=100)
results_k6.append(r)
mean_k6=[res[0] for res in results_k6]
var_k6=[res[1] for res in results_k6]
best_k6=[res[2] for res in results_k6]
worst_k6=[res[3] for res in results_k6]
ax3EAC2.plot(mean_k6)
ax3EAC2.plot(best_k6)
ax3EAC2.plot(worst_k6)
ax3EAC2.plot([0, 10], [0.5, 0.5], 'k-', lw=1)
ax3EAC2.set_title("Analysis of the influence of the number of prototypes (SKL)")
print "\nStatistical analysis"
stat_nprots=nsamples
print "{}\t{}\t{}\t{}\t{}".format("type","mean","var","max","min")
print "skl \t",
for metric in k_skl_analysis(partitions_used,files=False,ground_truth=gt,nprots=stat_nprots,rounds=100):
print "{}\t".format(metric),
print "\nmy \t",
for metric in k_analysis(partitions_used,files=False,ground_truth=gt,nprots=stat_nprots,iters="converge",rounds=100):
print "{}\t".format(metric),
```
## Random number of clusters per partition
```
partitions_used = partitions_my_rand
# generate coassoc
prot_mode="random"
assoc_mode='prot' # prot or full
nprots=nsamples # number of prototypes
myEstimator=eac.EAC(nsamples)
myEstimator.fit(partitions_used,files=False,assoc_mode=assoc_mode, prot_mode=prot_mode, nprot=nprots,build_only=True)
# final clustering with the true number of clusters
true_nclusters = np.unique(gt).shape[0]
# cluster with my K-Means
kmeans_mode = "numpy"
grouper = K_Means3.K_Means(n_clusters=true_nclusters, mode=kmeans_mode, cuda_mem='manual',tol=1e-4,max_iters=iters)
grouper._centroid_mode = "iter"
grouper.fit(myEstimator._coassoc)
# cluster with SKL K-Means
gSKL = KMeans_skl(n_clusters=true_nclusters,n_init=1,init="random")
gSKL.fit(myEstimator._coassoc)
# Hungarian accuracy
myAcc = determine_ci.HungarianIndex(nsamples=nsamples)
myAcc.score(gt,grouper.labels_,format='array')
sklAcc = determine_ci.HungarianIndex(nsamples=nsamples)
sklAcc.score(gt,gSKL.labels_,format='array')
print 'My Accuracy:\t',myAcc.accuracy
print 'SKL Accuracy:\t',sklAcc.accuracy
figEAC2=plt.figure(figsize=(16,12))
ax1EAC2=figEAC2.add_subplot(2,2,1)
ax2EAC2=figEAC2.add_subplot(2,2,2)
ax3EAC2=figEAC2.add_subplot(2,2,3)
for c in np.unique(grouper.labels_):
clusterData=grouper.labels_==c
ax1EAC2.plot(data[clusterData,0],data[clusterData,1],'.')
ax1EAC2.set_title("Final EAC partition with my K-Means")
for c in np.unique(gSKL.labels_):
clusterData=gSKL.labels_==c
ax2EAC2.plot(data[clusterData,0],data[clusterData,1],'.')
ax2EAC2.set_title("Final EAC partition with SKL's K-Means")
nprots=[5,20,40,60,80,100,120,140,160,180,200]
results_k6=list()
for n in nprots:
r=k_skl_analysis(partitions_used,files=False,ground_truth=gt,nprots=stat_nprots,rounds=100)
results_k6.append(r)
mean_k6=[res[0] for res in results_k6]
var_k6=[res[1] for res in results_k6]
best_k6=[res[2] for res in results_k6]
worst_k6=[res[3] for res in results_k6]
ax3EAC2.plot(mean_k6)
ax3EAC2.plot(best_k6)
ax3EAC2.plot(worst_k6)
ax3EAC2.plot([0, 10], [0.5, 0.5], 'k-', lw=1)
ax3EAC2.set_title("Analysis of the influence of the number of prototypes (SKL)")
print "\nStatistical analysis"
stat_nprots=nsamples
print "{}\t{}\t{}\t{}\t{}".format("type","mean","var","max","min")
print "skl \t",
for metric in k_skl_analysis(partitions_used,files=False,ground_truth=gt,nprots=stat_nprots,rounds=100):
print "{}\t".format(metric),
print "\nmy \t",
for metric in k_analysis(partitions_used,files=False,ground_truth=gt,nprots=stat_nprots,iters="converge",rounds=100):
print "{}\t".format(metric),
plt.pcolor(myEstimator._coassoc)
```
# K-Means only
```
stat_nprots=nsamples
print "{}\t{}\t{}\t{}\t{}".format("type","mean","var","max","min")
print "my \t",
for metric in stat_my_kmeans(data,true_nclusters,gt,rounds=100):
print "{}\t".format(metric),
print "\nskl \t",
for metric in stat_skl_kmeans(data,true_nclusters,gt,rounds=100):
print "{}\t".format(metric),
```
# EAC K-Medoids
```
import MyML.cluster.KMedoids as KMedoids
```
## 6 clusters per partition
```
#%%debug
partitions_used = partitions_my_6
# generate coassoc
prot_mode="random"
assoc_mode='full' # prot or full
nprots=50 # number of prototypes
myEstimator=eac.EAC(nsamples)
myEstimator.fit(partitions_used,files=False,assoc_mode=assoc_mode, prot_mode=prot_mode, nprot=nprots,build_only=True)
# final clustering with the true number of clusters
true_nclusters = np.unique(gt).shape[0]
# compute diassociation from co-assoc
diassoc=myEstimator._coassoc.max()-myEstimator._coassoc
#k-medoids
labels,medoids=KMedoids.cluster(diassoc,k=true_nclusters)
# Hungarian accuracy
acc = determine_ci.HungarianIndex(nsamples=nsamples)
acc.score(gt,labels,format='array')
print 'K-Medoids Accuracy:\t',acc.accuracy
```
### Statistical analysis
```
class acc_medoids():
    """K-Medoids accuracy helper: clusters `data`, scores against `gt`."""
    def __init__(self,data,nclusters,gt):
        # data is expected to be a (dis)similarity matrix for KMedoids.
        self.data=data
        self.nsamples=data.shape[0]
        self.nclusters=nclusters
        self.gt=gt
    def run(self):
        """Cluster once with K-Medoids; return Hungarian-matched accuracy."""
        labels,medoids=KMedoids.cluster(self.data,k=self.nclusters)
        # Hungarian accuracy
        acc = determine_ci.HungarianIndex(nsamples=self.nsamples)
        acc.score(self.gt,labels,format='array')
        return acc.accuracy
class acc_my_kmeans():
    """Custom-K-Means accuracy helper: clusters `data`, scores against `gt`."""
    def __init__(self,data,nclusters,gt):
        self.data=data
        self.nclusters=nclusters
        self.nsamples=data.shape[0]
        self.gt=gt
    def run(self):
        """Cluster once with the custom K-Means; return Hungarian accuracy."""
        # BUG FIX: the original read the notebook globals `true_nclusters`
        # and `kmeans_mode` instead of the values stored on the instance.
        # NOTE(review): still reads the module-global `iters` (=3 where this
        # class is used) -- consider making it a constructor argument.
        grouper = K_Means3.K_Means(n_clusters=self.nclusters,mode="numpy", cuda_mem='manual',tol=1e-4,max_iters=iters)
        grouper._centroid_mode = "iter"
        grouper.fit(self.data)
        # Hungarian accuracy against the stored ground truth.
        myAcc = determine_ci.HungarianIndex(nsamples=self.nsamples)
        myAcc.score(self.gt,grouper.labels_,format='array')
        return myAcc.accuracy
class acc_skl_kmeans():
    """Sklearn-K-Means accuracy helper: clusters `data`, scores against `gt`."""
    def __init__(self,data,nclusters,gt):
        self.data=data
        self.nclusters=nclusters
        self.nsamples=data.shape[0]
        self.gt=gt
    def run(self):
        """Cluster once with sklearn K-Means; return Hungarian accuracy."""
        # cluster with SKL K-Means
        gSKL = KMeans_skl(n_clusters=self.nclusters,n_init=1,init="random")
        gSKL.fit(self.data)
        # Hungarian accuracy
        sklAcc = determine_ci.HungarianIndex(nsamples=self.nsamples)
        sklAcc.score(self.gt,gSKL.labels_,format='array')
        return sklAcc.accuracy
def stat_analysis(method, rounds=20):
    """Repeat ``method.run()`` *rounds* times and summarise the scores.

    Parameters
    ----------
    method : object with a ``run()`` method returning a numeric score
    rounds : int, number of repetitions

    Returns
    -------
    (mean, variance, max, min) of the per-round scores.
    """
    scores = np.zeros(rounds)
    # range (not xrange) works under both Python 2 and Python 3
    for r in range(rounds):
        scores[r] = method.run()
    return scores.mean(), scores.var(), scores.max(), scores.min()
# Repeat each clustering `rounds` times and report (mean, var, max, min)
# of the Hungarian accuracy, on both the dissociation and association matrices.
rounds=100
diassoc=myEstimator._coassoc.max()-myEstimator._coassoc
x=acc_medoids(diassoc,nclusters=true_nclusters,gt=gt)
print 'diassoc kmedoids\t',stat_analysis(x,rounds=rounds)
x2=acc_my_kmeans(diassoc,nclusters=true_nclusters,gt=gt)
print 'diassoc kmeans  \t',stat_analysis(x2,rounds=rounds)
x3=acc_medoids(myEstimator._coassoc,nclusters=true_nclusters,gt=gt)
print 'assoc kmedoids  \t',stat_analysis(x3,rounds=rounds)
x4=acc_my_kmeans(myEstimator._coassoc,nclusters=true_nclusters,gt=gt)
print 'assoc kmeans    \t',stat_analysis(x4,rounds=rounds)
```
## 10 clusters per partition
```
#%%debug
# Same EAC pipeline as the 6-cluster cell, using the 10-clusters-per-partition ensemble.
partitions_used = partitions_my_10
# generate coassoc
prot_mode="random"
assoc_mode='full' # prot or full
nprots=50 # number of prototypes
myEstimator=eac.EAC(nsamples)
myEstimator.fit(partitions_used,files=False,assoc_mode=assoc_mode, prot_mode=prot_mode, nprot=nprots,build_only=True)
# final clustering with the true number of clusters
true_nclusters = np.unique(gt).shape[0]
# compute diassociation from co-assoc
diassoc=myEstimator._coassoc.max()-myEstimator._coassoc
#k-medoids
labels,medoids=KMedoids.cluster(diassoc,k=true_nclusters)
# Hungarian accuracy
acc = determine_ci.HungarianIndex(nsamples=nsamples)
acc.score(gt,labels,format='array')
print 'K-Medoids Accuracy:\t',acc.accuracy
```
### Statistical analysis
```
# Statistical analysis of the 10-cluster ensemble; here the sklearn K-Means
# variant (acc_skl_kmeans) is used instead of the custom one.
rounds=20
diassoc=myEstimator._coassoc.max()-myEstimator._coassoc
x=acc_medoids(diassoc,nclusters=true_nclusters,gt=gt)
print 'diassoc kmedoids\t',stat_analysis(x,rounds=rounds)
x2=acc_skl_kmeans(diassoc,nclusters=true_nclusters,gt=gt)
print 'diassoc kmeans  \t',stat_analysis(x2,rounds=rounds)
x3=acc_medoids(myEstimator._coassoc,nclusters=true_nclusters,gt=gt)
print 'assoc kmedoids  \t',stat_analysis(x3,rounds=rounds)
x4=acc_skl_kmeans(myEstimator._coassoc,nclusters=true_nclusters,gt=gt)
print 'assoc kmeans    \t',stat_analysis(x4,rounds=rounds)
```
## Random clusters per partition
```
#%%debug
# EAC pipeline on the random-k ensemble.
# NOTE(review): npartitions/nclusters/iters are set here but not used in this
# cell — presumably they parameterised the generation of partitions_my_rand
# in an earlier cell; confirm before relying on them.
npartitions=30
nclusters=[4,25]
iters=3
partitions_used = partitions_my_rand
# generate coassoc
prot_mode="random"
assoc_mode='full' # prot or full
nprots=50 # number of prototypes
myEstimator=eac.EAC(nsamples)
myEstimator.fit(partitions_used,files=False,assoc_mode=assoc_mode, prot_mode=prot_mode, nprot=nprots,build_only=True)
# final clustering with the true number of clusters
true_nclusters = np.unique(gt).shape[0]
# compute diassociation from co-assoc
diassoc=myEstimator._coassoc.max()-myEstimator._coassoc
#k-medoids
labels,medoids=KMedoids.cluster(diassoc,k=true_nclusters)
# Hungarian accuracy
acc = determine_ci.HungarianIndex(nsamples=nsamples)
acc.score(gt,labels,format='array')
print 'K-Medoids Accuracy:\t',acc.accuracy
```
### Statistical analysis
```
# Statistical analysis of the random-k ensemble (sklearn K-Means variant).
rounds=20
diassoc=myEstimator._coassoc.max()-myEstimator._coassoc
x=acc_medoids(diassoc,nclusters=true_nclusters,gt=gt)
print 'diassoc kmedoids\t',stat_analysis(x,rounds=rounds)
x2=acc_skl_kmeans(diassoc,nclusters=true_nclusters,gt=gt)
print 'diassoc kmeans  \t',stat_analysis(x2,rounds=rounds)
x3=acc_medoids(myEstimator._coassoc,nclusters=true_nclusters,gt=gt)
print 'assoc kmedoids  \t',stat_analysis(x3,rounds=rounds)
x4=acc_skl_kmeans(myEstimator._coassoc,nclusters=true_nclusters,gt=gt)
print 'assoc kmeans    \t',stat_analysis(x4,rounds=rounds)
```
## K-Medoids only
```
# Baseline: K-Medoids directly on the Euclidean pairwise-distance matrix
# (no ensemble), with k=2.
from sklearn.metrics.pairwise import pairwise_distances
pairwise=pairwise_distances(data)
y=acc_medoids(pairwise,2,gt=gt)
stat_analysis(y,rounds=20)
```
# EAC Single link
```
# EAC with single-link hierarchical clustering of the co-association matrix
# (nprots = nsamples, i.e. the full matrix is used).
partitions_used = partitions_my_rand
# generate coassoc
prot_mode="random"
assoc_mode='full' # prot or full
nprots=nsamples # number of prototypes
myEstimator=eac.EAC(nsamples)
myEstimator.fit(partitions_used,files=False,assoc_mode=assoc_mode, prot_mode=prot_mode, nprot=nprots,build_only=True)
# final clustering with the true number of clusters
true_nclusters = np.unique(gt).shape[0]
#k-medoids
# (comment above is stale: this path uses hierarchical linkage, not k-medoids)
myEstimator._apply_linkage()
labels = myEstimator._clusterFromLinkage()
# Hungarian accuracy
acc = determine_ci.HungarianIndex(nsamples=nsamples)
acc.score(gt,labels,format='array')
print 'EAC SL Accuracy:\t',acc.accuracy
```
## Single-Link only
```
# Baseline: plain single-link agglomerative clustering on Euclidean distances.
from scipy.cluster import hierarchy as hie
from scipy.spatial.distance import squareform
# pairwise distances
dists = np.zeros((nsamples,nsamples))
for i,dp in enumerate(data):
dist = (data - dp)**2
dist = np.sqrt(dist.sum(axis=1))
dists[i]=dist
#pairwise=pairwise_distances(data)
# squareform converts the full symmetric matrix to the condensed vector linkage expects
condensed_dists = squareform(dists)
Z = hie.linkage(condensed_dists,method='single')
# last linkage row merges the two top-level clusters; its children seed the labels
parents=Z[-1,:2]
labels=myEstimator._buildLabels(Z=Z,parents=parents)
# NOTE(review): reuses the `acc` scorer created in the previous cell.
acc.score(gt,labels,format='array')
print "Single-Link accuracy:\t",acc.accuracy
#generated from: http://tools.medialab.sciences-po.fr/iwanthue/
my_colors=["#D37E30",
"#6F6FD8",
"#3AA579",
"#D5337B",
"#4595B8",
"#3EA729",
"#D150D7",
"#4E6E23",
"#8F4D79",
"#D64430",
"#A1952B",
"#C15257",
"#AA5BB3",
"#6A76B0",
"#8E5723",
"#2A7464",
"#D66C9F",
"#60994E",
"#73A32D",
"#33A74F"]
my_pallete=sns.color_palette(my_colors,len(my_colors))
sns.palplot(my_pallete)
sns.set_palette(my_pallete,len(my_colors))
#marker_types=['.','^','*','h','x']
marker_types=matplotlib.markers.MarkerStyle.filled_markers
sns.set_style("whitegrid")
# NOTE(review): `sns.plt` was removed from seaborn long ago — modern code
# should call matplotlib.pyplot directly.
figX=sns.plt.figure(figsize=(12,90))
# One subplot per partition; marker changes every 6 clusters, colour cycles.
for i,p in enumerate(partitions_my_rand):
ax=figX.add_subplot(15,2,i+1)
for j,c in enumerate(p):
# NOTE: `j/6` is Python-2 integer division; under Python 3 it yields a float
# and would fail as an index (use j // 6 there).
ax.plot(data[c,0],data[c,1],ls=u'None',marker=marker_types[j/6],markersize=8)
#ax.scatter(data[c,0],data[c,1],marker=marker_types[j/6],linewidths=5)
ax.set_title("partition {}, {} clusters".format(i+1,j+1))
```
| github_jupyter |
# Определение эмоциональной окраски твитов с помощью BERT
```
# Если Вы запускаете ноутбук на colab или kaggle,
# выполните следующие строчки, чтобы подгрузить библиотеку dlnlputils:
# !git clone https://github.com/arturburiev/stepik-dl-nlp.git && pip install -r stepik-dl-nlp/requirements.txt
# import sys; sys.path.append('./stepik-dl-nlp')
```
## Установка библиотек
```
# !pip install pytorch-transformers
# Imports and device selection for the BERT sentiment-classification notebook.
import torch
from torch.utils.data import TensorDataset, DataLoader, RandomSampler, SequentialSampler
from keras.preprocessing.sequence import pad_sequences
from sklearn.model_selection import train_test_split
from pytorch_transformers import BertTokenizer, BertConfig
from pytorch_transformers import AdamW, BertForSequenceClassification
from tqdm import tqdm, trange
import pandas as pd
import io
import numpy as np
from sklearn.metrics import accuracy_score
import matplotlib.pyplot as plt
device = torch.device("cuda" if torch.cuda.is_available() else "cpu")
# NOTE(review): comparing a torch.device to the string 'cpu' relies on
# torch.device.__eq__ accepting strings — verify on the installed torch version.
if device == 'cpu':
print('cpu')
else:
n_gpu = torch.cuda.device_count()
print(torch.cuda.get_device_name(0))
```
## Загрузка данных
Мы выбрали необычный датасет с разметкой сентимента русскоязычных твитов (подробнее про него в [статье](http://www.swsys.ru/index.php?page=article&id=3962&lang=)). В корпусе, который мы использовали 114,911 положительных и 111,923 отрицательных записей. Загрузить его можно [тут](https://study.mokoron.com/).
```
import pandas as pd
# If you are running this notebook on colab or kaggle, prepend ./stepik-dl-nlp to the paths
# Column 3 of each CSV holds the tweet text; files have no header row.
pos_texts = pd.read_csv('datasets/bert_sentiment_analysis/positive.csv', encoding='utf8', sep=';', header=None)
neg_texts = pd.read_csv('datasets/bert_sentiment_analysis/negative.csv', encoding='utf8', sep=';', header=None)
pos_texts.sample(5)
sentences = np.concatenate([pos_texts[3].values, neg_texts[3].values])
# BERT expects explicit [CLS]/[SEP] wrapper tokens around each sentence.
sentences = ["[CLS] " + sentence + " [SEP]" for sentence in sentences]
# label 1 = positive, 0 = negative; each label wrapped in a list
labels = [[1] for _ in range(pos_texts.shape[0])] + [[0] for _ in range(neg_texts.shape[0])]
assert len(sentences) == len(labels) == pos_texts.shape[0] + neg_texts.shape[0]
print(sentences[1000])
from sklearn.model_selection import train_test_split
train_sentences, test_sentences, train_gt, test_gt = train_test_split(sentences, labels, test_size=0.3)
print(len(train_gt), len(test_gt))
```
## Inputs
```
from pytorch_transformers import BertTokenizer, BertConfig
# NOTE(review): 'bert-base-uncased' ships an English vocabulary, while the
# tweets are Russian — the tokenizer will fragment them into subwords/UNKs;
# a multilingual checkpoint would presumably fit better. Verify intent.
tokenizer = BertTokenizer.from_pretrained('bert-base-uncased', do_lower_case=True)
tokenized_texts = [tokenizer.tokenize(sent) for sent in train_sentences]
print (tokenized_texts[0])
```
BERTу нужно предоставить специальный формат входных данных.
- **input ids**: последовательность чисел, отождествляющих каждый токен с его номером в словаре.
- **labels**: вектор из нулей и единиц. В нашем случае нули обозначают негативную эмоциональную окраску, единицы - положительную.
- **segment mask**: (необязательно) последовательность нулей и единиц, которая показывает, состоит ли входной текст из одного или двух предложений. Для случая одного предложения получится вектор из одних нулей. Для двух: <length_of_sent_1> нулей и <length_of_sent_2> единиц.
- **attention mask**: (необязательно) последовательность нулей и единиц, где единицы обозначают токены предложения, нули - паддинг.
```
# Convert tokens to vocabulary ids, then pad/truncate every sequence to 100 ids.
input_ids = [tokenizer.convert_tokens_to_ids(x) for x in tokenized_texts]
input_ids = pad_sequences(
input_ids,
maxlen=100,
dtype="long",
truncating="post",
padding="post"
)
# attention mask: 1.0 for real tokens, 0.0 for padding
attention_masks = [[float(i>0) for i in seq] for seq in input_ids]
# Both splits use random_state=42 so inputs and masks stay row-aligned.
train_inputs, validation_inputs, train_labels, validation_labels = train_test_split(
input_ids, train_gt,
random_state=42,
test_size=0.1
)
train_masks, validation_masks, _, _ = train_test_split(
attention_masks,
input_ids,
random_state=42,
test_size=0.1
)
train_inputs = torch.tensor(train_inputs)
train_labels = torch.tensor(train_labels)
train_masks = torch.tensor(train_masks)
validation_inputs = torch.tensor(validation_inputs)
validation_labels = torch.tensor(validation_labels)
validation_masks = torch.tensor(validation_masks)
train_labels
# Batched loaders: random order for training, sequential for validation.
train_data = TensorDataset(train_inputs, train_masks, train_labels)
train_dataloader = DataLoader(
train_data,
sampler=RandomSampler(train_data),
batch_size=32
)
validation_data = TensorDataset(validation_inputs, validation_masks, validation_labels)
validation_dataloader = DataLoader(
validation_data,
sampler=SequentialSampler(validation_data),
batch_size=32
)
```
## Обучение модели
Загружаем [BertForSequenceClassification](https://github.com/huggingface/pytorch-pretrained-BERT/blob/master/pytorch_pretrained_bert/modeling.py#L1129):
```
from pytorch_transformers import AdamW, BertForSequenceClassification
```
Аналогичные модели есть и для других задач:
```
from pytorch_transformers import BertForQuestionAnswering, BertForTokenClassification
model = BertForSequenceClassification.from_pretrained("bert-base-uncased", num_labels=2)
# NOTE(review): .cuda() assumes a GPU even though `device` may be "cpu".
model.cuda()
param_optimizer = list(model.named_parameters())
# weight decay disabled for biases and LayerNorm (gamma/beta) parameters
no_decay = ['bias', 'gamma', 'beta']
optimizer_grouped_parameters = [
{'params': [p for n, p in param_optimizer if not any(nd in n for nd in no_decay)],
'weight_decay_rate': 0.01},
{'params': [p for n, p in param_optimizer if any(nd in n for nd in no_decay)],
'weight_decay_rate': 0.0}
]
optimizer = AdamW(optimizer_grouped_parameters, lr=2e-5)
from IPython.display import clear_output
# We record the loss during training
# and redraw the plot in real time
train_loss_set = []
train_loss = 0
# Training
# Put the model into training mode
model.train()
for step, batch in enumerate(train_dataloader):
# move the batch to the compute device
batch = tuple(t.to(device) for t in batch)
# Unpack the data from the dataloader
b_input_ids, b_input_mask, b_labels = batch
# without .zero_grad() the gradients would accumulate
optimizer.zero_grad()
# Forward pass
# NOTE: with labels supplied the model returns a tuple; element 0 is the loss.
loss = model(b_input_ids, token_type_ids=None, attention_mask=b_input_mask, labels=b_labels)
train_loss_set.append(loss[0].item())
# Backward pass
loss[0].backward()
# Update the parameters, taking a step with the computed gradients
optimizer.step()
# Update the running loss
train_loss += loss[0].item()
# Redraw the live loss plot
clear_output(True)
plt.plot(train_loss_set)
plt.title("Training loss")
plt.xlabel("Batch")
plt.ylabel("Loss")
plt.show()
print("Loss на обучающей выборке: {0:.5f}".format(train_loss / len(train_dataloader)))
# Validation
# Put the model into evaluation mode
model.eval()
valid_preds, valid_labels = [], []
for batch in validation_dataloader:
# move the batch to the compute device
batch = tuple(t.to(device) for t in batch)
# Unpack the data from the dataloader
b_input_ids, b_input_mask, b_labels = batch
# Under .no_grad() the model neither computes nor stores gradients,
# which speeds up prediction on the validation data.
with torch.no_grad():
logits = model(b_input_ids, token_type_ids=None, attention_mask=b_input_mask)
# Move logits and labels to the CPU for further processing
logits = logits[0].detach().cpu().numpy()
label_ids = b_labels.to('cpu').numpy()
batch_preds = np.argmax(logits, axis=1)
batch_labels = np.concatenate(label_ids)
valid_preds.extend(batch_preds)
valid_labels.extend(batch_labels)
print("Процент правильных предсказаний на валидационной выборке: {0:.2f}%".format(
accuracy_score(valid_labels, valid_preds) * 100
))
# NOTE(review): the print below duplicates the one above — likely a copy-paste leftover.
print("Процент правильных предсказаний на валидационной выборке: {0:.2f}%".format(
accuracy_score(valid_labels, valid_preds) * 100
))
```
# Оценка качества на отложенной выборке
```
# Held-out evaluation: tokenize, pad, batch and score the fine-tuned model.
tokenized_texts = [tokenizer.tokenize(sent) for sent in test_sentences]
input_ids = [tokenizer.convert_tokens_to_ids(x) for x in tokenized_texts]
# pad/truncate every sequence to exactly 100 token ids
input_ids = pad_sequences(
    input_ids,
    maxlen=100,
    dtype="long",
    truncating="post",
    padding="post"
)
# attention mask: 1.0 for real tokens, 0.0 for padding
attention_masks = [[float(i > 0) for i in seq] for seq in input_ids]
prediction_inputs = torch.tensor(input_ids)
prediction_masks = torch.tensor(attention_masks)
prediction_labels = torch.tensor(test_gt)
prediction_data = TensorDataset(
    prediction_inputs,
    prediction_masks,
    prediction_labels
)
prediction_dataloader = DataLoader(
    prediction_data,
    sampler=SequentialSampler(prediction_data),
    batch_size=32
)
model.eval()
test_preds, test_labels = [], []
for batch in prediction_dataloader:
    # move the batch to the compute device
    batch = tuple(t.to(device) for t in batch)
    # Unpack the data from the dataloader
    b_input_ids, b_input_mask, b_labels = batch
    # Under .no_grad() no gradients are computed or stored — faster inference.
    with torch.no_grad():
        logits = model(b_input_ids, token_type_ids=None, attention_mask=b_input_mask)
    # Move logits and labels to the CPU for numpy post-processing
    logits = logits[0].detach().cpu().numpy()
    label_ids = b_labels.to('cpu').numpy()
    # Collect predicted classes and ground truth
    batch_preds = np.argmax(logits, axis=1)
    batch_labels = np.concatenate(label_ids)
    test_preds.extend(batch_preds)
    test_labels.extend(batch_labels)
acc_score = accuracy_score(test_labels, test_preds)
print('Процент правильных предсказаний на отложенной выборке составил: {0:.2f}%'.format(
    acc_score * 100
))
# BUG FIX: `test_labels != test_preds` compared the two *lists* as wholes,
# producing a single bool, and sum(bool) raises TypeError. Count element-wise.
print('Неправильных предсказаний: {0}/{1}'.format(
    sum(1 for p, t in zip(test_preds, test_labels) if p != t),
    len(test_labels)
))
```
### Домашнее задание
Скачайте датасет с отзывами на фильмы. Например, используйте датасет [IMDB Dataset of 50K Movie Reviews](https://www.kaggle.com/lakshmi25npathi/imdb-dataset-of-50k-movie-reviews).
```
import pandas as pd
# Homework dataset: IMDB 50K movie reviews (review text + sentiment column).
dataset = pd.read_csv('datasets/bert_sentiment_analysis/homework/IMDB_Dataset.csv')
dataset.head()
```
Используйте для дообучения BERT датасет IMDB.
Ответьте на вопросы:
1. удалось ли достичь такого же accuracy (98\%) при использовании IMDB датасета?
2. удалось ли получить хорошее качество классификации всего за одну эпоху?
3. подумайте, в чем может быть причина различий в дообучении одной и той же модели на разных датасетах
- Внимательно изучите датасет с русскими твитами. В чем его особенности? Нет ли явных паттернов или ключевых слов, которые однозначно определяют сентимент твита?
- Попробуйте удалить пунктуацию из датасета с русскими твитами и перезапустите дообучение модели. Изменилось ли итоговое качество работы модели? Почему?
| github_jupyter |
```
import gym
# Sanity check: 1000 random steps on CartPole, printing (obs, reward, done, info).
env = gym.make('CartPole-v0')
env.reset()
for _ in range(1000):
env.render()
print(env.step(env.action_space.sample()))
from collections import namedtuple
import random
# One replay-buffer entry: a full (s, a, s', r, done) transition.
Transition = namedtuple('Transition',
('state', 'action', 'next_state', 'reward', 'done'))
class ReplayMemory(object):
    """Fixed-capacity ring buffer of Transition tuples for experience replay."""

    def __init__(self, capacity):
        self.capacity = capacity
        self.memory = []
        self.position = 0

    def push(self, *args):
        """Saves a transition."""
        # every field is wrapped in a tensor before storage
        entry = Transition(*(torch.tensor(field) for field in args))
        if len(self.memory) < self.capacity:
            # still growing: append until capacity is reached
            self.memory.append(entry)
        else:
            # full: overwrite the oldest slot
            self.memory[self.position] = entry
        self.position = (self.position + 1) % self.capacity

    def sample(self, batch_size):
        """Draw `batch_size` stored transitions uniformly, without replacement."""
        return random.sample(self.memory, batch_size)

    def __len__(self):
        return len(self.memory)
import torch.nn as nn
import torch.nn.functional as F
import torch
class DQN(nn.Module):
    """Tiny two-layer MLP mapping a 4-dim observation to 2 action values."""

    def __init__(self):
        super().__init__()
        self.fc1 = nn.Linear(4, 4)
        self.fc2 = nn.Linear(4, 2)

    def forward(self, x):
        hidden = F.relu(self.fc1(x))
        return self.fc2(hidden)
from tqdm import tqdm
import numpy as np
import torch.optim as optim
# NOTE(review): memory_length is set but not used below (ReplayMemory gets 10000).
memory_length = 1000
# Online network (trained) and target network (periodically synced copy).
policy_net = DQN()
target_net = DQN()
optimizer = optim.Adam(policy_net.parameters(), lr=0.01)
# Copy the network
target_net.load_state_dict(policy_net.state_dict())
memory = ReplayMemory(10000)
n_episode = 1000
render_step = 1
# DQN hyper-parameters
BATCH_SIZE = 128      # transitions per optimisation step
GAMMA = 0.99          # discount factor
EPS_START = 0.9       # initial exploration rate
EPS_END = 0.1         # final exploration rate
EPS_DECAY = 20000     # steps over which epsilon anneals from start to end
TARGET_UPDATE = 10    # steps between target-network syncs

def get_eps(step):
    """Linearly anneal epsilon from EPS_START down to EPS_END over EPS_DECAY steps.

    BUG FIX: the original returned ``max(EPS_END, EPS_END + step * (...))``,
    which *grows* from EPS_END with the step count instead of decaying from
    EPS_START as the constant names indicate.
    """
    return max(EPS_END, EPS_START - step * (EPS_START - EPS_END) / EPS_DECAY)
# DQN training loop: roll out episodes with epsilon-greedy actions, store
# transitions in replay memory, and optimise on random minibatches.
for _ in tqdm(range(n_episode)):
done = False
old_obs = env.reset()
old_obs = torch.from_numpy(old_obs).float()
step = 0
# NOTE(review): global_step is reset to 0 every episode, so the epsilon
# schedule and the TARGET_UPDATE counter restart each episode.
global_step = 0
while not done:
eps = get_eps(global_step)
# BUG: `True or ...` short-circuits — the policy branch below is never
# taken, so every action is random (looks like a debugging leftover).
if True or np.random.uniform() < eps:
action = env.action_space.sample()
else:
# BUG: policy_net(old_obs) returns a Q-value tensor, not an action
# index — this branch would need an argmax (currently unreachable).
action = policy_net(old_obs)
obs, reward, done, info = env.step(action)
obs = torch.from_numpy(obs).float()
# BUG: `obs_tensor` is not defined in this cell — presumably meant `obs`;
# as written this line raises NameError on the first step.
memory.push(old_obs, action, obs_tensor, reward, done)
old_obs = obs
step += 1
global_step += 1
if step % render_step == 0:
env.render()
# Training
if len(memory) >= BATCH_SIZE:
# transpose a list of Transitions into a Transition of lists
batch = Transition(*zip(*memory.sample(BATCH_SIZE)))
batch_state = torch.cat([x.unsqueeze(0) for x in batch.state])
batch_action = torch.cat([x.unsqueeze(0) for x in batch.action])
batch_reward = torch.cat([x.unsqueeze(0) for x in batch.reward])
batch_done = torch.cat([x.unsqueeze(0) for x in batch.done])
batch_next_state = torch.cat([x.unsqueeze(0) for x in batch.next_state])
# Build State Action Value
batch_output = policy_net(batch_state)
state_action_values = batch_output.gather(1, batch_action.unsqueeze(1)).squeeze(1)
# Build Target
batch_target = target_net(batch_next_state)
next_state_values = torch.zeros(BATCH_SIZE)
batch_max = batch_target.max(1)[0].detach()
# BUG(review): this keeps bootstrap values only where done is True and
# zeroes the rest — standard DQN does the opposite (zero the value of
# terminal next-states). Looks inverted; should index with ~batch_done.
next_state_values[batch_done] = batch_max[batch_done]
target = batch_reward + GAMMA * next_state_values
# Optimization
loss = F.smooth_l1_loss(state_action_values, target)
optimizer.zero_grad()
loss.backward()
# clip gradients element-wise to [-1, 1] for stability
for param in policy_net.parameters():
param.grad.data.clamp_(-1, 1)
optimizer.step()
if global_step % TARGET_UPDATE == 0:
target_net.load_state_dict(policy_net.state_dict())
# Scratch / debugging cell: interactive experiments with torch ops.
# NOTE(review): several names here are undefined in this notebook
# (`pytorch`, `obs_`), and `torch.eye(x)` expects an int, not a tensor —
# these lines error when run.
torch.cat([torch.ones(3).unsqueeze(0), torch.ones(3).unsqueeze(0)]).shape
help(F.smooth_l1_loss)
help(pytorch.detach)
torch.randn(3).max(0)[0]
obs_.float()
transtions = memory.sample(3)
x = Transition(*zip(*transtions))
for i in x:
print(i)
break
list(x)
x
x = torch.ones(3, dtype=torch.long).unsqueeze(1)
y = torch.randn((3, 5))
y.gather(1, x)
x.requires_grad
torch.eye(x)
help(torch.eye)
(1.1 ** 5 - 1) / 0.1 * 35746
1.1 ** 6
np.log1p
help(np.log1p)
np.exp(np.log1p(3))
```
| github_jupyter |
```
from scipy.stats import kruskal, f_oneway
import pandas as pd
import numpy as np
```
# qpe_amod15(2) data
```
# qpe_amod15(2): measured-phase samples for the three simulators.
# Each phase value v with count c contributes c identical samples.
# (If the p-value were below alpha=0.05 we would reject the null hypothesis.)
qiskit_data = np.repeat([0.75, 0.5, 0.25, 0.0], [245, 264, 243, 248])
cirq_data = np.repeat([0.75, 0.5, 0.25, 0.0], [259, 234, 249, 258])
qs_data = np.repeat([0.75, 0.5, 0.25, 0.0], [260, 234, 264, 242])
# Kruskal-Wallis H-test: do the three samples share a distribution?
kruskal(qiskit_data, cirq_data, qs_data)
```
### P-Value of 0.93.... > 0.05 significance level, we cannot refute the null hypothesis
# qpe_amod15(7) data
```
# qpe_amod15(7): measured-phase samples for the three simulators.
# (If the p-value were below alpha=0.05 we would reject the null hypothesis.)
qiskit_data = np.repeat([0.75, 0.5, 0.25, 0.0], [254, 254, 230, 262])
cirq_data = np.repeat([0.75, 0.5, 0.25, 0.0], [251, 258, 269, 222])
qs_data = np.repeat([0.75, 0.5, 0.25, 0.0], [254, 232, 268, 246])
kruskal(qiskit_data, cirq_data, qs_data)
```
### P-Value of 0.63.... > 0.05 significance level, we cannot refute the null hypothesis
# qpe_amod15(11) data
```
# qpe_amod15(11): only two phases (0.5 and 0) were observed here.
# (If the p-value were below alpha=0.05 we would reject the null hypothesis.)
qiskit_data = np.repeat([0.5, 0.0], [486, 514])
cirq_data = np.repeat([0.5, 0.0], [483, 517])
qs_data = np.repeat([0.5, 0.0], [491, 509])
kruskal(qiskit_data, cirq_data, qs_data)
```
### P-Value of 0.93.... > 0.05 significance level, we cannot refute the null hypothesis
# qpe_amod15(13) data
```
# qpe_amod15(13): measured-phase samples for the three simulators.
# (If the p-value were below alpha=0.05 we would reject the null hypothesis.)
qiskit_data = np.repeat([0.75, 0.5, 0.25, 0.0], [246, 226, 237, 291])
cirq_data = np.repeat([0.75, 0.5, 0.25, 0.0], [247, 272, 230, 251])
qs_data = np.repeat([0.75, 0.5, 0.25, 0.0], [262, 243, 251, 244])
kruskal(qiskit_data, cirq_data, qs_data)
```
### P-Value of 0.10.... > 0.05 significance level, we cannot refute the null hypothesis
# shors algorithm(7)
```
# shors_algorithm(7): outcomes are qubit counts, 3+5=8 on success and 3 otherwise.
# (If the p-value were below alpha=0.05 we would reject the null hypothesis.)
qiskit_data = np.repeat([3 + 5, 3], [651, 349])
cirq_data = np.repeat([3 + 5, 3], [685, 315])
qs_data = np.repeat([3 + 5, 3], [662, 338])
kruskal(qiskit_data, cirq_data, qs_data)
```
### P-Value of 0.25.... > 0.05 significance level, we cannot refute the null hypothesis
# shors algorithm(11)
```
#if p value less than alpha reject
# All three samples are 1000 copies of the same value (3+5=8), so there is
# no variance at all; scipy's kruskal raises ValueError("All numbers are
# identical...") here, which is what the markdown below refers to.
qiskit_data = np.concatenate((np.full(1000, 3+5), np.full(0, 3)))
cirq_data = np.concatenate((np.full(1000, 3+5), np.full(0, 3)))
qs_data = np.concatenate((np.full(1000, 3+5), np.full(0, 3)))
kruskal(qiskit_data, cirq_data, qs_data)
```
### Since all values are identical, we can't perform the test.
Though we know that we can't refute the null hypothesis (i.e., that the data sets come from the same population).
| github_jupyter |
<small><i>This notebook was put together by [Jake Vanderplas](http://www.vanderplas.com). Source and license info is on [GitHub](https://github.com/jakevdp/sklearn_tutorial/).</i></small>
# Supervised Learning In-Depth: Support Vector Machines
Previously we introduced supervised machine learning.
There are many supervised learning algorithms available; here we'll go into brief detail on one of the most powerful and interesting methods: **Support Vector Machines (SVMs)**.
```
%matplotlib inline
# Plotting/numeric setup for the SVM tutorial.
import numpy as np
import matplotlib.pyplot as plt
from scipy import stats
# use seaborn plotting defaults
import seaborn as sns; sns.set()
```
## Motivating Support Vector Machines
Support Vector Machines (SVMs) are a powerful supervised learning algorithm used for **classification** or for **regression**. SVMs are a **discriminative** classifier: that is, they draw a boundary between clusters of data.
Let's show a quick example of support vector classification. First we need to create a dataset:
```
# FIX: `sklearn.datasets.samples_generator` was deprecated and removed in
# scikit-learn 0.24; the public import path is `sklearn.datasets`.
from sklearn.datasets import make_blobs
# Two well-separated 2-D Gaussian blobs: 50 points with binary labels.
X, y = make_blobs(n_samples=50, centers=2,
                  random_state=0, cluster_std=0.60)
plt.scatter(X[:, 0], X[:, 1], c=y, s=50, cmap='spring');
```
A discriminative classifier attempts to draw a line between the two sets of data. Immediately we see a problem: such a line is ill-posed! For example, we could come up with several possibilities which perfectly discriminate between the classes in this example:
```
# Draw three different lines (slope m, intercept b) that all separate the blobs.
xfit = np.linspace(-1, 3.5)
plt.scatter(X[:, 0], X[:, 1], c=y, s=50, cmap='spring')
for m, b in [(1, 0.65), (0.5, 1.6), (-0.2, 2.9)]:
plt.plot(xfit, m * xfit + b, '-k')
plt.xlim(-1, 3.5);
```
These are three *very* different separators which perfectly discriminate between these samples. Depending on which you choose, a new data point will be classified almost entirely differently!
How can we improve on this?
### Support Vector Machines: Maximizing the *Margin*
Support vector machines are one way to address this.
What support vector machines do is not only draw a line, but consider a *region* about the line of some given width. Here's an example of what it might look like:
```
# Same three lines, now each with a shaded band of half-width d around it
# to visualise the margin.
xfit = np.linspace(-1, 3.5)
plt.scatter(X[:, 0], X[:, 1], c=y, s=50, cmap='spring')
for m, b, d in [(1, 0.65, 0.33), (0.5, 1.6, 0.55), (-0.2, 2.9, 0.2)]:
yfit = m * xfit + b
plt.plot(xfit, yfit, '-k')
plt.fill_between(xfit, yfit - d, yfit + d, edgecolor='none', color='#AAAAAA', alpha=0.4)
plt.xlim(-1, 3.5);
```
Notice here that if we want to maximize this width, the middle fit is clearly the best.
This is the intuition of **support vector machines**, which optimize a linear discriminant model in conjunction with a **margin** representing the perpendicular distance between the datasets.
#### Fitting a Support Vector Machine
Now we'll fit a Support Vector Machine Classifier to these points. While the mathematical details of the likelihood model are interesting, we'll let you read about those elsewhere. Instead, we'll just treat the scikit-learn algorithm as a black box which accomplishes the above task.
```
from sklearn.svm import SVC # "Support Vector Classifier"
# Fit a linear-kernel SVM on the blob data.
clf = SVC(kernel='linear')
clf.fit(X, y)
```
To better visualize what's happening here, let's create a quick convenience function that will plot SVM decision boundaries for us:
```
def plot_svc_decision_function(clf, ax=None):
    """Plot the decision function for a 2D SVC"""
    if ax is None:
        ax = plt.gca()
    # evaluate the decision function on a 30x30 grid spanning the current axes
    xs = np.linspace(plt.xlim()[0], plt.xlim()[1], 30)
    ys = np.linspace(plt.ylim()[0], plt.ylim()[1], 30)
    Y, X = np.meshgrid(ys, xs)
    P = np.zeros_like(X)
    for i, xval in enumerate(xs):
        for j, yval in enumerate(ys):
            P[i, j] = clf.decision_function([[xval, yval]])
    # draw the decision boundary (level 0) and the two margins (levels -1, +1)
    ax.contour(X, Y, P, colors='k',
               levels=[-1, 0, 1], alpha=0.5,
               linestyles=['--', '-', '--'])
# Scatter the data and overlay the fitted SVM boundary and margins.
plt.scatter(X[:, 0], X[:, 1], c=y, s=50, cmap='spring')
plot_svc_decision_function(clf);
```
Notice that the dashed lines touch a couple of the points: these points are the pivotal pieces of this fit, and are known as the *support vectors* (giving the algorithm its name).
In scikit-learn, these are stored in the ``support_vectors_`` attribute of the classifier:
```
# Highlight the support vectors with large hollow circles.
plt.scatter(X[:, 0], X[:, 1], c=y, s=50, cmap='spring')
plot_svc_decision_function(clf)
plt.scatter(clf.support_vectors_[:, 0], clf.support_vectors_[:, 1],
s=200, facecolors='none');
```
Let's use IPython's ``interact`` functionality to explore how the distribution of points affects the support vectors and the discriminative fit.
(This is only available in IPython 2.0+, and will not work in a static view)
```
from ipywidgets import interact
def plot_svm(N=10):
    """Fit a linear SVC to the first N blob points and plot its margin."""
    pts, lbls = make_blobs(n_samples=200, centers=2,
                           random_state=0, cluster_std=0.60)
    pts, lbls = pts[:N], lbls[:N]
    model = SVC(kernel='linear')
    model.fit(pts, lbls)
    plt.scatter(pts[:, 0], pts[:, 1], c=lbls, s=50, cmap='spring')
    plt.xlim(-1, 4)
    plt.ylim(-1, 6)
    plot_svc_decision_function(model, plt.gca())
    # mark the support vectors with large hollow circles
    plt.scatter(model.support_vectors_[:, 0], model.support_vectors_[:, 1],
                s=200, facecolors='none')
# FIX: the original also passed `kernel='linear'`, but plot_svm() takes no
# such parameter, so interact would error on the argument mismatch.
interact(plot_svm, N=[10, 200]);
```
Notice the unique thing about SVM is that only the support vectors matter: that is, if you moved any of the other points without letting them cross the decision boundaries, they would have no effect on the classification results!
#### Going further: Kernel Methods
Where SVM gets incredibly exciting is when it is used in conjunction with *kernels*.
To motivate the need for kernels, let's look at some data which is not linearly separable:
```
# FIX: `sklearn.datasets.samples_generator` was deprecated and removed in
# scikit-learn 0.24; the public import path is `sklearn.datasets`.
from sklearn.datasets import make_circles
# Concentric circles: not linearly separable in the original 2-D space.
X, y = make_circles(100, factor=.1, noise=.1)
clf = SVC(kernel='linear').fit(X, y)
plt.scatter(X[:, 0], X[:, 1], c=y, s=50, cmap='spring')
plot_svc_decision_function(clf);
```
Clearly, no linear discrimination will ever separate these data.
One way we can adjust this is to apply a **kernel**, which is some functional transformation of the input data.
For example, one simple model we could use is a **radial basis function**
```
# Radial basis function centred at the origin: r = exp(-||x||^2).
r = np.exp(-(X[:, 0] ** 2 + X[:, 1] ** 2))
```
If we plot this along with our data, we can see the effect of it:
```
from mpl_toolkits import mplot3d
def plot_3D(elev=30, azim=30):
    """Scatter the data in 3-D, using the RBF value r as the vertical axis."""
    ax = plt.subplot(projection='3d')
    ax.scatter3D(X[:, 0], X[:, 1], r, c=y, s=50, cmap='spring')
    # orient the camera from the (widget-controlled) angles
    ax.view_init(elev=elev, azim=azim)
    for label, setter in (('x', ax.set_xlabel), ('y', ax.set_ylabel), ('r', ax.set_zlabel)):
        setter(label)
# FIX: the original passed `azip=(-180, 180)` — a typo for plot_3D's `azim`
# parameter, so the azimuth slider never reached the plot function.
interact(plot_3D, elev=(-90, 90), azim=(-180, 180));
```
We can see that with this additional dimension, the data becomes trivially linearly separable!
This is a relatively simple kernel; SVM has a more sophisticated version of this kernel built-in to the process. This is accomplished by using ``kernel='rbf'``, short for *radial basis function*:
```
# RBF-kernel SVM handles the circular data that defeated the linear kernel.
clf = SVC(kernel='rbf')
clf.fit(X, y)
plt.scatter(X[:, 0], X[:, 1], c=y, s=50, cmap='spring')
plot_svc_decision_function(clf)
plt.scatter(clf.support_vectors_[:, 0], clf.support_vectors_[:, 1],
s=200, facecolors='none');
```
Here there are effectively $N$ basis functions: one centered at each point! Through a clever mathematical trick, this computation proceeds very efficiently using the "Kernel Trick", without actually constructing the matrix of kernel evaluations.
We'll leave SVMs for the time being and take a look at another classification algorithm: Random Forests.
| github_jupyter |
# Deploying Drift Detection
TorchDrift provides the tools you need to detect drift. But how do you actually get your model to monitor drift?
This short tutorial shows how to use model hooks on your feature extractor to capture data to feed into the drift detector.
First we need to set up a model and drift detector. Let us import some packages.
```
import sys
sys.path.insert(0, '../')
import torch
import torchvision
import torchdrift
import copy
%matplotlib inline
from matplotlib import pyplot
device = "cuda" if torch.cuda.is_available else "cpu"
```
We use a very simple ResNet as our example model. As we often do, we move the normalization out of the dataset transforms. We do this because we want to post-process the images to "fake" drifted inputs, so you would not need to do this for your own data and models (but I would advocate that moving the normalization into the models is indeed uncommon but best practice). We also split out the fully connected layer from the ResNet.
```
resnet = torchvision.models.resnet18(pretrained=True)
# The Sequential captures a reference to the *current* resnet.fc as its last
# stage BEFORE fc is replaced below. After the replacement, `resnet` alone
# outputs pooled features, so: model = normalize -> features -> classifier,
# and model[:2] can later serve as the feature extractor.
model = torch.nn.Sequential(
torchvision.transforms.Normalize(mean=[0.485, 0.456, 0.406], std=[0.229, 0.224, 0.225]),
resnet,
resnet.fc
)
resnet.fc = torch.nn.Identity()
model.eval().to(device)
# inference only: freeze all parameters
for p in model.parameters():
p.requires_grad_(False)
```
And we set up a dataset.
```
# Resize/crop to 224x224 and convert to tensors; normalization lives in the
# model itself (see the Sequential above), not in the dataset transform.
val_transform = torchvision.transforms.Compose([
torchvision.transforms.Resize(size=256),
torchvision.transforms.CenterCrop(size=(224, 224)),
torchvision.transforms.ToTensor()])
ds_train = torchvision.datasets.ImageFolder('./data/hymenoptera_data/train/',
transform=val_transform)
ds_val = torchvision.datasets.ImageFolder('./data/hymenoptera_data/val/',
transform=val_transform)
dl_val = torch.utils.data.DataLoader(ds_val, batch_size=64, shuffle=True)
```
We fit the detector. We use the p-value here for demonstration. Note that this is currently computationally more expensive than the score (but we'll work on pre-computing the score distribution under the null hypothesis).
```
def fit_detector(N_train):
    """Fit a Kernel-MMD drift detector on one batch of N_train training samples.

    `return_p_value=True` makes the detector report p-values, so the alarm
    threshold can be phrased as a significance level.
    """
    drift_det = torchdrift.detectors.KernelMMDDriftDetector(return_p_value=True)
    ref_loader = torch.utils.data.DataLoader(ds_train, batch_size=N_train, shuffle=True)
    backbone = model[:2]  # normalization + ResNet trunk, without the fc head
    torchdrift.utils.fit(ref_loader, backbone, drift_det, num_batches=1)
    return drift_det
detector = fit_detector(N_train = 100)
```
We build a model monitor: it hooks into the model to capture the output of `feature_layer` and caches the last `N` captured feature vectors in a ring buffer.
If we provide a `callback`, it will call the drift detector every `callback_interval` forward passes once it has seen enough samples.
Just to show off, we also throw in a little plot function.
```
class ModelMonitor:
    """Watch a model layer and periodically feed its outputs to a drift detector.

    A forward hook on `feature_layer` copies each batch's features into a
    fixed-size ring buffer holding the `N` most recent samples. Once the
    buffer has been filled at least once, the drift detector is run on it
    every `callback_interval` forward passes and the result is handed to
    `callback` (if one was given).
    """
    def __init__(self, drift_detector, feature_layer, N = 20, callback = None, callback_interval = 1):
        base_outputs = drift_detector.base_outputs
        assert base_outputs is not None, "fit drift detector first"
        self.drift_detector = drift_detector
        self.N = N
        # Size the ring buffer from the detector's reference features so that
        # device and dtype match what the detector was fitted on.
        feature_dim = base_outputs.size(1)
        self.feature_rb = torch.zeros(N, feature_dim, device=base_outputs.device, dtype=base_outputs.dtype)
        self.have_full_round = False
        self.next_idx = 0
        self.counter = 0
        self.callback = callback
        self.callback_interval = callback_interval
        self.hook = feature_layer.register_forward_hook(self.collect_hook)

    def unhook(self):
        """Detach the forward hook so the monitor stops collecting."""
        self.hook.remove()

    def collect_hook(self, module, input, output):
        """Forward hook: write the batch's features into the ring buffer."""
        self.counter += 1
        feats = output
        batch = feats.size(0)
        if batch > self.N:
            # only the most recent N samples can fit in the buffer
            feats = feats[-self.N:]
            batch = self.N
        feats = feats.reshape(batch, -1)
        head = min(self.N - self.next_idx, batch)
        tail = batch - head
        self.feature_rb[self.next_idx: self.next_idx + head] = feats[:head]
        if tail:
            # wrap around to the start of the buffer
            self.feature_rb[:tail] = feats[head:]
        if self.next_idx + batch >= self.N:
            self.have_full_round = True
        self.next_idx = (self.next_idx + batch) % self.N
        due = self.have_full_round and self.counter % self.callback_interval == 0
        if self.callback and due:
            self.callback(self.drift_detector(self.feature_rb))

    def plot(self):
        """Project reference and buffered features to 2D (Isomap) and scatter both."""
        import sklearn.manifold
        from matplotlib import pyplot
        mapping = sklearn.manifold.Isomap()
        ref_2d = mapping.fit_transform(self.drift_detector.base_outputs.to("cpu").numpy())
        cur_2d = mapping.transform(self.feature_rb.to("cpu").numpy())
        pyplot.scatter(ref_2d[:, 0], ref_2d[:, 1])
        pyplot.scatter(cur_2d[:, 0], cur_2d[:, 1])
```
To instantiate our monitor, we need an alarm function.
I just raise an exception, but you could also text the AI facility management or so.
```
def alarm(p_value):
    """Monitor callback: raise when the drift p-value is at or below 1%.

    Raises AssertionError (same type callers of the original saw), but via an
    explicit raise instead of a bare `assert`, which would be silently
    stripped when Python runs with the -O flag.
    """
    if not p_value > 0.01:
        raise AssertionError(f"Drift alarm! p-value: {p_value*100:.03f}%")
mm = ModelMonitor(detector, model[1], callback=alarm)
```
We grab a batch each of benign and drifted samples.
Fun fact: For this dataset, shuffling in the dataloader is important here. Otherwise the class balance of the test batch will be off enough to cause the alarm to be set off.
```
it = iter(dl_val)
batch = next(it)[0].to(device)
batch_drifted = torchdrift.data.functional.gaussian_blur(next(it)[0].to(device), 5)
```
Now we run our model. Imagenet class 309 is _bee_ and 310 is _ant_. Do not believe the model if it says aircraft carrier (it did this during testing). Note that we might be unlucky and get an exception here. This is at least in part a sampling artifact from computing the p-value.
```
res = model(batch).argmax(1)
res
detector.compute_p_value(mm.feature_rb)
```
We can also look at the latents to form an opinion if we think they might be from the same distribution. If you happen to have a heavily class-imbalanced sample (e.g. you disabled the shuffle in the dataloader - for testing, not because you forgot!) you might spot that imbalance here on the projected features.
```
mm.plot()
```
When we call the model with drifted inputs, we are relatively sure to set off the alarm.
```
# call it with drifted inputs...
model(batch_drifted)
```
With any luck, you can also see the drift in the datapoints.
```
mm.plot()
```
So in this notebook we saw how to use model hooks with the drift detector to automatically set off the alarm when something bad happens. Just remember that if you set the p-value to $x\%$ you expect to get a false alarm every $100\%/x\%$ batches to not spam your emergency contact.
| github_jupyter |
# Reinforcement Learning - Thompson Sampling & the Multi-Armed Bandit Problem
In this notebook, we'll build a reinforcement learning-based artificial intelligence system that implements **Thompson Sampling** in order to solve an instance of the famous **Multi-Armed Bandit Problem**.
### Scenario
Imagine that you are at a casino, and that you have \$1,000 to play the slot machines. There are six slot machines available, and each turn playing a machine costs \$1. The probability of winning on any given turn (which is called the ***conversion rate***) is unknown, and varies from machine to machine. In order to maximize your chances of winning, you would like to be able to identify the slot machine that has the highest conversion rate as quickly as possible. How can you achieve this goal?
### Import Required Libraries
```
#import libraries
import numpy as np
```
### Define Environment
```
#Define the total number of turns (i.e., the number of times we will play a slot machine).
#Remember, we have $1,000 available, and each turn costs $1. We thus have 1,000 turns.
number_of_turns = 1000
#define the total number of slot machines
number_of_slot_machines = 6
#Define arrays where we can keep track of our wins (positive rewards)
#and losses (negative rewards) for each slot machine.
number_of_positive_rewards = np.zeros(number_of_slot_machines)
number_of_negative_rewards = np.zeros(number_of_slot_machines)
#define a seed for the random number generator (to ensure that results are reproducible)
np.random.seed(33)
#create a random conversion rate between 1% and 15% for each slot machine
conversion_rates = np.random.uniform(0.01, 0.15, number_of_slot_machines)
#Show conversion rates for each slot machine. Remember that in a real-world scenario
#the decision-maker would not know this information!
#Loop over number_of_slot_machines rather than a hard-coded 6, so this cell
#still works if the number of machines is changed above.
for i in range(number_of_slot_machines):
    print('Conversion rate for slot machine {0}: {1:.2%}'.format(i, conversion_rates[i]))
```
### Create the Data Set
```
#define a seed for the random number generator (to ensure that results are reproducible)
np.random.seed(55)
#The data set is a matrix with one row for each turn, and one column for each slot machine.
#Each item in the matrix represents the outcome of what would happen if we were to play a
#particular slot machine on that particular turn. A value of "1" indicates that we would win,
#while a value of "0" indicates that we would lose. The number of "wins" for each slot machine
#is determined by its conversion rate.
outcomes = np.zeros((number_of_turns, number_of_slot_machines)) #create a two-dimensional numpy array, and fill it with zeros
for turn_index in range(number_of_turns): #for each turn
    for slot_machine_index in range(number_of_slot_machines): #for each slot machine
        #Get a random number between 0.0 and 1.0.
        #If the random number is less than or equal to this slot machine's conversion rate, then set the outcome to "1".
        #Otherwise, the outcome will be "0" because the entire matrix was initially filled with zeros.
        if np.random.rand() <= conversion_rates[slot_machine_index]:
            outcomes[turn_index][slot_machine_index] = 1
#display the first 15 rows of data
print(outcomes[0:15, 0:6]) #this sort of indexing means "rows 0 to 14" (i.e., the first 15 rows) and "columns 0 through 5" (i.e., the first six columns)
#show means (i.e., conversion rates) for each column (i.e., for each slot machine)
#Loop over number_of_slot_machines instead of a hard-coded 6 so the cell
#adapts if the number of machines is changed in the environment cell.
for i in range(number_of_slot_machines):
    print('Mean for column {0}: {1:.2%}'.format(i, np.mean(outcomes[:, i])))
```
### Run the Simulation
Let's simulate using Thompson Sampling to determine which slot machine to play for each turn...
```
#Thompson Sampling: on each turn, draw one sample from every machine's
#posterior Beta(wins+1, losses+1) and play the machine with the largest draw.
#This balances exploration (uncertain machines have wide posteriors) against
#exploitation (machines with many observed wins have high posterior means).
#NOTE: the per-machine draw order is deliberately kept as sequential
#np.random.beta calls so results stay reproducible under the seeds above.
#for each turn
for turn_index in range(number_of_turns):
    index_of_machine_to_play = -1
    max_beta = -1
    #determine which slot machine to play for this turn
    for slot_machine_index in range(number_of_slot_machines): #for each slot machine
        #Define the shape parameters for the beta distribution. The shape will depend on the number
        #of wins and losses that have thus far been observed for this particular slot machine.
        a = number_of_positive_rewards[slot_machine_index] + 1
        b = number_of_negative_rewards[slot_machine_index] + 1
        #Get a random value from the beta distribution whose shape is defined by the number of
        #wins and losses that have thus far been observed for this slot machine
        random_beta = np.random.beta(a, b)
        #if this is the largest beta value thus far observed for this iteration
        if random_beta > max_beta:
            max_beta = random_beta #update the maximum beta value thus far observed
            index_of_machine_to_play = slot_machine_index #set the machine to play to the current machine
    #play the selected slot machine, and record whether we win or lose
    if outcomes[turn_index][index_of_machine_to_play] == 1:
        number_of_positive_rewards[index_of_machine_to_play] += 1
    else:
        number_of_negative_rewards[index_of_machine_to_play] += 1
#compute and display the total number of times each slot machine was played
number_of_times_played = number_of_positive_rewards + number_of_negative_rewards
for slot_machine_index in range(number_of_slot_machines): #for each slot machine
    print('Slot machine {0} was played {1} times'.format(slot_machine_index, number_of_times_played[slot_machine_index]))
#identify and display the best slot machine to play
#(the machine played most often is the one Thompson Sampling judged best)
print('\nOverall Conclusion: The best slot machine to play is machine {}!'.format(np.argmax(number_of_times_played)))
```
### Compare the Performance of Thompson Sampling vs. a Random Sampling Strategy
```
#compute total number of wins using Thompson Sampling strategy
total_wins_thompson_sampling = np.sum(number_of_positive_rewards)
#determine how many times we would win if we randomly choose a slot machine to play for each turn
#(a fair comparison: the random strategy replays the exact same precomputed
#outcomes matrix that Thompson Sampling played against)
total_wins_random_sampling = 0
for turn_index in range(number_of_turns):
    index_of_machine_to_play = np.random.randint(0, number_of_slot_machines) #randomly choose a machine to play
    if outcomes[turn_index][index_of_machine_to_play] == 1:
        total_wins_random_sampling += 1
#display results
print('Total wins with Thompson Sampling: {0:.0f}'.format(total_wins_thompson_sampling))
print('Total wins with Random Sampling: {0:.0f}'.format(total_wins_random_sampling))
```
```
```
| github_jupyter |
# Module 1.2 - **Python Part 2**
## _Ashish Shroti_ and _Rajeshkumar K_
---
# Table of Contents
7. [File-Handling](#file-handling)
8. [Object Oriented Programming](#object-oriented-programming)
9. [Modules](#modules)
10. [NumPy](#numpy)
11. [Matplotlib](#matplotlib)
12. [Pandas](#pandas)
---
## File Handling <a name='file-handling'></a>
Function for opening files: `open()`
Syntax:
```python
open(filename, mode)
```
Returns a buffered stream, similar to a generator/iterable.
Available modes:
1. `r` - read only
2. `w` - write only
3. `a` - append
4. `r+` - read and write
In addition, the type of data can also be specified:
1. `t` - text mode
2. `b` - binary mode
```
tcm = open('./data/the-communist-manifesto.txt','r')
tcm
```
Opened file can be read using:
```python
file.read()
file.readline()
file.readlines()
```
Usage of each method:
1. `read()` - will return the entire file as a single string
2. `readline()` - will return the current line as a single string
3. `readlines()` - will return the entire file as a list of strings
```
print('word1\tword2')
tcm.readline(16)
tcm.readlines()
```
Opened file can be written using:
```python
file.write()
file.writelines()
```
Usage:
1. `write()` - writes the string to the file
2. `writelines()` - writes a list of lines to the file
Usage with `with()`:
```python
with open(file, mode) as f:
f.method(argument)
```
Opened files should be closed.
```python
file.close()
```
```
tcm.close()
with open('./data/alpha.txt','w') as newfile:
newfile.write('something else')
with open('./data/points.txt','a') as newfile:
newfile.write('1. point number one\n')
newfile.write('2. point number two\n')
newfile.write('3. point number three\n')
```
Reading binary files
```
hor = open('./data/horizon.png','rb')
print(hor.read(10))
```
Other file handling functions:
```python
remove(file) # for deleting files
rename(oldname, newname) # for renaming files
```
---
## Object Oriented Programming <a name='object-oriented-programming'></a>
Contrary to the practice of [procedural programming](https://en.wikipedia.org/wiki/Procedural_programming)
where programs are made of set of procedures or routines (or subroutines), or to that of
[functional programming](https://en.wikipedia.org/wiki/Functional_programming) - programs are made of set of
interacting **objects**. There is a notion of **self** to each object, with variables and functions that are
present in all objects. There exist **classes** of such objects.
Level of abstraction highly useful for code with versatile use cases, especially scientific programming and
computation libraries.
And irrespective of what paradigm you choose to program, behind-the-scenes,
> everything in python is an object, even the classes
---
## Modules <a name='modules'></a>
Functions and libraries organised into **Modules**. Such modules should be *imported* to use the commands and
functions that they provide. This is done using
```python
import modulename
```
Specific libraries from the modules can be imported by specifying their names
```python
from modulename import lib1, lib2
```
All libraries of a module can be imported using
```python
from modulename import *
# try not to do this
```
Imported stuff can be custom named using
```python
import module as m
```
Submodules can be imported by using **dot** notation
```python
import module.submodule as sm
```
```
import random
random.
```
Functions and methods from a module can be accessed using **dot** notation.
```
random.random()
```
---
## NumPy <a name='numpy'></a>
NumPy provides n-dimensional arrays to python, similar to the default `array` datatype, but with an overhaul of
features. The prime difference is vectorisation, which is absent in the default python arrays. Many data processing
libraries are built/based upon NumPy (pandas, OpenCV, Torch)
```
import numpy as np
```
### Basics
The main objects of NumPy are homogenous (same datatype) multi-dimensional arrays (`ndarray`). The indexes are tuples of integers.
An array can be initiated using
```python
np.array(arraylist)
```
where `arraylist` can be a flat list (in which case the ndarray will be 1 dimensional) or can be a nested list.
```
a1 = np.array([1,2,3])
print(a1.size)
a2 = np.array([
[1.,2,3],
[4,5,6],
[7,8,9]
])
print(a2.dtype)
```
Each numpy array has a set of *attributes* (or defining features). Some of them are:
1. `ndim` - number of axes of the array (axis = dimensions)
2. `shape` - tuple containing size of array in each dimension
3. `size` - total number of elements in the array
4. `dtype` - data type of the elements of the array
The attributes can be accessed using the dot notation:
```python
narray.attribute
```
```
print(f"{a1.ndim = }")
print(f"{a2.ndim = }")
print(a1.shape)
```
#### Vectorisation demonstration
```
import array
dearray = array.array('i', [1, 2, 3])
nparray = np.array([1, 2, 3])
array.array('i', [i**2 for i in dearray])
nparray**2
```
#### Array creation
Arrays can be created using the `array()` method.
```
np.array?
np.array([[1, 2], [3, 4]], dtype='complex')
np.array([1, 2, 3], dtype='float64')
```
Arrays can also be initialised in trivial forms:
1. `np.zeros` - creates arrays with all elements 0
2. `np.ones` - creates arrays with all elements 1
3. `np.empty` - creates arrays with random numbers generated based on current system state
```
np.zeros?
np.zeros((2, 3))
np.ones((4, 8))
np.empty((2, 2))
```
Arrays can be initialised as sequences using the methods:
1. `np.arange` - similar to `range` from default python
2. `np.linspace` - creates array with specified number of elements, rather than step size
The syntax is:
```python
np.arange(start, end, stepsize)
np.linspace(start, end, nelements)
```
```
np.arange(1, 10, 0.2)
np.linspace(1, 50, 8)
```
#### Operations
Since NumPy arrays are vectorised, operations happen on element-wide basis.
```
A = np.array([0,23,140])
B = np.array([33,33,33])
A * 3
B * 2
A + B
A < B
```
Three kinds of products are possible:
1. `*` - Elementwise product
2. `@` - Matrix product
3. `dot()` - Matrix product again
```
A = np.array([
[2, 4],
[6, 8]
])
B = np.array([
[0, 1],
[3, 7]
])
A * B
A @ B
A.dot(B)
```
#### Unary operators and Universal functions
+ Unary operators - operate on a single array and return some value
+ Universal functions - functions like Trigonometric, Logarithmic, and other standard functions, but vectorised.
```
np.sin(A)
np.exp(A)
A.min()
A.sum()
B.transpose()
```
#### Slicing
NumPy arrays can be sliced similar to python lists.
```
A = np.sin(np.linspace(1, np.pi, 8))
print(A)
A[1:8:2]
A[::-1]
B = np.random.randint(1, 10, size=(5,5))
print(B)
print(B[1:3,1:3])
C = np.random.randint(1, 100, size=(3, 10, 10))
C
C[::, 2, ::]
```
### Reshaping arrays
Three methods can be used:
1. `ndarray.ravel()` - returns flattened array (rightmost index changes the fastest)
2. `ndarray.reshape(shape)`- returns array according to shape
3. `ndarray.resize(shape)` - in-place reshape
```
A = np.random.randint(1, 5, size=(3,3,3))
A
A.ravel()
A.reshape((3,-1))
A.resize((9,3))
```
### Making copies
Three possible results:
1. `=` - this creates no copy at all, but rather a link
2. `ndarray.view()` and `slicing` - this creates a shallow copy; which means reshaping will not be transferred, but data changes will be transferred
3. `ndarray.copy()` - creates a complete copy of an array and retains no link
```
A = np.random.randint(1, 5, size=(3, 3))
A
B = A
B
A *= 2
B
B = A.view()
B
A = A.reshape((1,-1))
A
B
A *= 3
A
B
B = A.copy()
```
---
## Matplotlib <a name='matplotlib'></a>
Visualisation library for python providing static, animated and interactive visualisations. Similar to how commandline improves your workflow when things get complicated, Matplotlib can help you with complex visualisation tasks. From the official website:
> Matplotlib makes easy things easy and hard things possible.
## Pyplot
Pyplot is one of the API's for Matplotlib (the other being Object Oriented API) which provides a set of functions to make plots.
```
import matplotlib.pyplot as plt
import numpy as np
plt.rcParams['figure.figsize'] = [10, 7]
```
`matplotlib.pyplot.plot()` is one of the basic commands for making plots.
```
plt.plot([1, 4, 3, 7, 9])
plt.title('First Plot')
plt.show()
```
Various marker styles can be specified. Available styles can be [found here](https://matplotlib.org/stable/api/_as_gen/matplotlib.pyplot.plot.html#matplotlib.pyplot.plot).
```
A = np.linspace(1, 10, 100)
plt.title("Some functions")
plt.plot(A, np.sin(A), "r-.", label="sin")
plt.plot(A, np.cos(A), "b:", label="cos")
plt.plot(A, np.log(A), "g", label="log")
plt.plot(A, np.tan(A)*0.01, "k_", label="scaled tan")
plt.legend()
plt.show()
```
Scatter plot can be made with `matplotlib.pyplot.scatter()`
```
A = np.random.randint(1, 100, (100, 1))
plt.title("A scatter plot")
plt.scatter(A, np.sin(A), c='k')
plt.show()
```
---
## Pandas <a name='pandas'></a>
Pandas provides data-analysis tools in python, and is based upon the vectorisation power of NumPy. The data structures in Pandas, irrespective of their dimension, are labelled.
The one-dimensional labelled arrays are provided by `pandas.Series` and two-dimensional labelled arrays are provided by `pandas.DataFrame`. In the case of `DataFrame`, the columns can be multiple types, and essentially analogous to spreadsheets.
```
import numpy as np
import pandas as pd
pd.Series?
pd.Series([1, 2, None, 3, 4*np.pi])
pd.DataFrame?
pd.DataFrame(np.random.random((4, 6)))
indices = ['eenie', 'meenie', 'miny', 'mo']
A = pd.DataFrame(np.random.random((4, 6)), index=indices, columns=list('ABCDEF'))
A
```
#### Viewing data
Similar to `head` and `tail` commands in linux commandline, there are `head` and `tail` methods to view the first
few and last few rows of a DataFrame
```
A.head(2)
A.tail(3)
A.columns
A.index
A.describe()
# quick statistics
```
#### Slicing
```
A['A']
# accessing columns
A.loc['mo']
# accessing rows
A.iloc[0]
# accessing rows using index
```
#### Boolean Filtering
```
A[A > 0.5]
A[A < 0.5]
```
#### File Handling
Data from a CSV (.csv) file can be loaded using
```python
pandas.read_csv(file)
```
DataFrame object can be dumped to a CSV file using
```python
DataFrame.to_csv(file)
```
| github_jupyter |
# NLP with NLTK
Today's talk will address various concepts in the Natural Language Processing pipeline through the use of NLTK. A fundmental understanding of Python is necessary. We will cover:
1. Pre-processing
2. Preparing and declaring your own corpus
3. POS-Tagging
4. Dependency Parsing
5. NER
6. Sentiment Analysis
You will need:
* NLTK ( \$ pip install nltk)
* the parser wrapper requires the [Stanford Parser](http://nlp.stanford.edu/software/lex-parser.shtml#Download) (in Java)
* the NER wrapper requires the [Stanford NER](http://nlp.stanford.edu/software/CRF-NER.shtml#Download) (in Java)
# 1) Pre-processing
This won't be covered much today, but regex and basic python string methods are most important in preprocessing tasks. NLTK does, however, offer an array of tokenizers and stemmers for various languages.
### Tokenizing
```
text = '''Hello, my name is Chris.
I'll be talking about the python library NLTK today.
NLTK is a popular tool to conduct text processing tasks in NLP.'''
from nltk.tokenize import word_tokenize
print("Notice the difference!")
print()
print(word_tokenize(text))
print()
print("vs.")
print()
print(text.split())
```
You can also tokenize sentences.
```
from nltk.tokenize import sent_tokenize
print(sent_tokenize(text))
tokenized_text = [word_tokenize(sent) for sent in sent_tokenize(text)]
print(tokenized_text)
```
A list of sentences with a list of tokenized words is generally the accepted format for most libraries for analysis.
### Stemming/Lemmatizing
```
from nltk import SnowballStemmer
snowball = SnowballStemmer('english')
print(snowball.stem('running'))
print(snowball.stem('eats'))
print(snowball.stem('embarassed'))
```
But watch out for errors:
```
print(snowball.stem('cylinder'))
print(snowball.stem('cylindrical'))
```
Or collision:
```
print(snowball.stem('vacation'))
print(snowball.stem('vacate'))
```
This is why lemmatizing, if the computing power and time is sufficient, is always preferable:
```
from nltk import WordNetLemmatizer
wordnet = WordNetLemmatizer()
print(wordnet.lemmatize('vacation'))
print(wordnet.lemmatize('vacate'))
```
So why is this important?
```
from sklearn.datasets import fetch_20newsgroups
from sklearn.feature_extraction.text import TfidfVectorizer
categories = ['talk.politics.mideast', 'rec.autos', 'sci.med']
twenty = fetch_20newsgroups(subset='train', categories=categories, shuffle=True, random_state=42)
data_no_stems = [[word_tokenize(sent) for sent in sent_tokenize(text.lower())] for text in twenty.data]
data_stems = [[[snowball.stem(word) for word in word_tokenize(sent)]
for sent in sent_tokenize(text)] for text in twenty.data]
print(data_no_stems[400][5])
print(data_stems[400][5])
data_no_stems = [' '.join([item for sublist in l for item in sublist]) for l in data_no_stems]
data_stems = [' '.join([item for sublist in l for item in sublist]) for l in data_stems]
vectorizer = TfidfVectorizer()
X_data_no_stems = vectorizer.fit_transform(data_no_stems)
vectorizer2 = TfidfVectorizer()
X_data_stems = vectorizer2.fit_transform(data_stems)
# sklearn.cross_validation was deprecated in 0.18 and removed in 0.20;
# train_test_split now lives in sklearn.model_selection.
from sklearn.model_selection import train_test_split
from sklearn import ensemble
X_train, X_test, y_train, y_test = train_test_split(X_data_no_stems, twenty.target,
train_size=0.75, test_size=0.25)
rf_classifier = ensemble.RandomForestClassifier(n_estimators=10, # number of trees
criterion='gini', # or 'entropy' for information gain
max_depth=None, # how deep tree nodes can go
min_samples_split=2, # samples needed to split node
min_samples_leaf=1, # samples needed for a leaf
min_weight_fraction_leaf=0.0, # weight of samples needed for a node
max_features='auto', # number of features for best split
max_leaf_nodes=None, # max nodes
min_impurity_split=1e-07, # early stopping
n_jobs=1, # CPUs to use
random_state = 1, # set seed
class_weight="balanced") # adjusts weights inverse of freq, also "balanced_subsample" or None
model = rf_classifier.fit(X_train, y_train)
print(model.score(X_test, y_test))
X_train, X_test, y_train, y_test = train_test_split(X_data_stems, twenty.target,
train_size=0.75, test_size=0.25)
rf_classifier = ensemble.RandomForestClassifier(n_estimators=10, # number of trees
criterion='gini', # or 'entropy' for information gain
max_depth=None, # how deep tree nodes can go
min_samples_split=2, # samples needed to split node
min_samples_leaf=1, # samples needed for a leaf
min_weight_fraction_leaf=0.0, # weight of samples needed for a node
max_features='auto', # number of features for best split
max_leaf_nodes=None, # max nodes
min_impurity_split=1e-07, # early stopping
n_jobs=1, # CPUs to use,
random_state= 1, # set seed
class_weight="balanced") # adjusts weights inverse of freq, also "balanced_subsample" or None
model = rf_classifier.fit(X_train, y_train)
print(model.score(X_test, y_test))
```
# 2) Declaring a corpus in NLTK
While you can use NLTK on strings and lists of sentences, it's better to formally declare your corpus, as this will take care of the above for you and provide methods to access them. For our purposes today, we'll use a corpus of [book summaries](http://www.cs.cmu.edu/~dbamman/booksummaries.html). I've changed them into a folder of .txt files for demonstration. The file below will convert the .tsv file.
```
! ls texts
from nltk.corpus import PlaintextCorpusReader
corpus_root = "texts/" # relative path to texts.
my_texts = PlaintextCorpusReader(corpus_root, '.*txt')
```
We now have a text corpus, on which we can run all the basic preprocessing methods. To list all the files in our corpus:
```
my_texts.fileids()[:10]
my_texts.words('To Kill A Mockingbird.txt') # uses punkt tokenizer like above
my_texts.sents('To Kill A Mockingbird.txt')
```
It also add as paragraph method:
```
my_texts.paras('To Kill A Mockingbird.txt')[0]
```
Let's save these to a variable to look at the next step on a low level:
```
m_sents = my_texts.sents('To Kill A Mockingbird.txt')
print (m_sents)
```
We now have a corpus, or text, from which we can get any of the statistics you learned in Day 3 of the Python workshop. We will review some of these functions once we get some more information
# 3) POS-Tagging
There are many situations, in which "tagging" words (or really anything) may be useful in order to determine or calculate trends, or for further text analysis to extract meaning. NLTK contains several methods to achieve this, from simple regex to more advanced machine learning models models.
It is important to note that in Natural Language Processing (NLP), POS (Part of Speech) tagging is the most common use for tagging, but the actual tag can be anything. Other applications include sentiment analysis and NER (Named Entity Recognition). Tagging is simply labeling a word to a specific category via a tuple.
Nevertheless, for training more advanced tagging models, POS tagging is nearly essential. If you are defining a machine learning model to predict patterns in your text, these patterns will most likley rely on, among other things, POS features. You will therefore first tag POS and then use the POS as a feature in your model.
## On a low-level
Tagging is creating a tuple of (word, tag) for every word in a text or corpus. For example: "My name is Chris" may be tagged for POS as:
My/PossessivePronoun name/Noun is/Verb Chris/ProperNoun ./Period
*NB: type 'nltk.data.path' to find the path on your computer to your downloaded nltk corpora. You can explore these files to see how large corpora are formatted.*
You'll notice how the text is annotated, using a forward slash to match the word to its tag. So how can we get this to a useful form for Python?
```
from nltk.tag import str2tuple
line = "My/Possessive_Pronoun name/Noun is/Verb Chris/Proper_Noun ./Period"
tagged_sent = [str2tuple(t) for t in line.split()]
print (tagged_sent)
```
Further analysis of tags with NLTK requires a *list* of sentences, otherwise you will get an index error on higher level methods.
Naturally, these tags are a bit verbose, the standard tagging conventions follow the Penn Treebank (more in a second): https://www.ling.upenn.edu/courses/Fall_2003/ling001/penn_treebank_pos.html
## Automatic Tagging
NLTK's stock English `pos_tag` tagger is a perceptron tagger:
```
from nltk import pos_tag
m_tagged_sent = pos_tag(m_sents[0])
print (m_tagged_sent)
```
What do these tags mean?
```
from nltk import help
help.upenn_tagset()
m_tagged_all = [pos_tag(sent) for sent in m_sents]
print(m_tagged_all[:3])
```
We can find and aggregate certain parts of speech too:
```
from nltk import ConditionalFreqDist
def find_tags(tag_prefix, tagged_text):
    """Map each tag starting with tag_prefix to its five most frequent words."""
    matching = ((tag, word) for (word, tag) in tagged_text if tag.startswith(tag_prefix))
    cfd = ConditionalFreqDist(matching)
    # cfd.conditions() yields every tag that matched the prefix
    return {tag: cfd[tag].most_common(5) for tag in cfd.conditions()}
m_tagged_words = [item for sublist in m_tagged_all for item in sublist]
tagdict = find_tags('JJ', m_tagged_words)
for tag in sorted(tagdict):
print(tag, tagdict[tag])
```
We can begin to quantify syntax by look at environments of words, so what commonly follows a verb?
```
import nltk
tags = [b[1] for (a, b) in nltk.bigrams(m_tagged_words) if a[1].startswith('VB')]
fd1 = nltk.FreqDist(tags)
print ("To Kill A Mockingbird")
fd1.tabulate(10)
```
## Creating a tagged corpus
Now that we know how tagging works, we can quickly tag all of our documents, but we'll only do a few hundred from the much larger corpus.
```
tagged_sents = {}
for fid in my_texts.fileids()[::10]:
tagged_sents[fid.split(".")[0]] = [pos_tag(sent) for sent in my_texts.sents(fid)]
tagged_sents.keys()
tagged_sents["Harry Potter and the Prisoner of Azkaban"]
```
Absolute frequencies are available through NLTK's `FreqDist` method:
```
all_tags = []
all_tups = []
for k in tagged_sents.keys():
for s in tagged_sents[k]:
for t in s:
all_tags.append(t[1])
all_tups.append(t)
nltk.FreqDist(all_tags).tabulate(10)
tags = ['NN', 'VB', 'JJ']
for t in tags:
tagdict = find_tags(t, all_tups)
for tag in sorted(tagdict):
print(tag, tagdict[tag])
```
We can compare this to other genres:
```
from nltk.corpus import brown
for c in brown.categories():
tagged_words = brown.tagged_words(categories=c) #not universal tagset
tag_fd = nltk.FreqDist(tag for (word, tag) in tagged_words)
print(c.upper())
tag_fd.tabulate(10)
print()
tags = ['NN', 'VB', 'JJ']
for t in tags:
tagdict = find_tags(t, tagged_words)
for tag in sorted(tagdict):
print(tag, tagdict[tag])
print()
print()
```
We can also look at what linguistic environment words are in on a low level, below lists all the words preceding "love" in the romance category:
```
brown_news_text = brown.words(categories='romance')
sorted(set(a for (a, b) in nltk.bigrams(brown_news_text) if b == 'love'))
```
# 4) Dependency Parsing
While tagging parts of speech can be helpful for certain NLP tasks, dependency parsing is better at extracting real relationships within a sentence.
```
from nltk.parse.stanford import StanfordDependencyParser
# NOTE(review): jar paths are hard-coded to a local machine — adjust before reuse.
dependency_parser = StanfordDependencyParser(path_to_jar = "/Users/chench/Documents/stanford-parser-full-2015-12-09/stanford-parser.jar",
                                             path_to_models_jar = "/Users/chench/Documents/stanford-parser-full-2015-12-09/stanford-parser-3.6.0-models.jar")
result = dependency_parser.raw_parse_sents(['I hit a gray Steve in my sleep.', 'He tried to run away, but I caught it.'])
```
As the program takes longer to run, I will not run it on the entire corpus, but an example is below:
```
# Print every dependency triple for each parsed sentence.
for r in result:
    for o in r:
        trips = list(o.triples()) # ((head word, head tag), rel, (dep word, dep tag))
        for t in trips:
            print(t)
#             if "elephant" in t[0] or "elephant" in t[-1]:
#             if t[1] != "det":
#                 if "elephant" in t[0]:
#                     print(t[-1][0])
#                 else:
#                     print(t[0][0])
#     print()
```
# 5) Named Entity Recognition
After tokenizing, tagging, and parsing, one of the last steps in the pipeline is NER. Identifying named entities can be useful in determining many different relationships, and often serves as a prerequisite to mapping textual relationships within a set of documents.
```
from nltk.tag.stanford import StanfordNERTagger
# NOTE(review): classifier/jar paths are hard-coded to a local machine.
ner_tag = StanfordNERTagger(
    '/Users/chench/Documents/stanford-ner-2015-12-09/classifiers/english.all.3class.distsim.crf.ser.gz',
    '/Users/chench/Documents/stanford-ner-2015-12-09/stanford-ner.jar')
import pyprind
# NER-tag each sentence of the two selected books, with a progress bar per book.
ner_sents = {}
books = ["To Kill A Mockingbird.txt", "Harry Potter and the Prisoner of Azkaban.txt"]
for fid in books:
    bar = pyprind.ProgBar(len(my_texts.sents(fid)), monitor=True, bar_char="#")
    tagged_sents = []
    for sent in my_texts.sents(fid):
        tagged_sents.append(ner_tag.tag(sent))
        bar.update()
    ner_sents[fid.split(".")[0]] = tagged_sents
    print()
```
We can look on the low level at a single summary:
```
print(ner_sents["To Kill A Mockingbird"])
print(ner_sents["Harry Potter and the Prisoner of Azkaban"])
from itertools import groupby
from nltk import FreqDist
# Merge consecutive tokens that share an NER tag into multi-word entities
# ("O" marks untagged tokens), then tabulate frequencies per category.
NER = {"LOCATION": [],
       "PERSON": [],
       "ORGANIZATION": [],
      }
for sentence in ner_sents["To Kill A Mockingbird"]:
    for tag, chunk in groupby(sentence, lambda x: x[1]):
        if tag != "O":
            NER[tag].append(" ".join(w for w, t in chunk))
if NER["LOCATION"]:
    print("Locations:")
    FreqDist(NER["LOCATION"]).tabulate()
    print()
if NER["PERSON"]:
    print("Persons:")
    FreqDist(NER["PERSON"]).tabulate()
    print()
if NER["ORGANIZATION"]:
    print("Organizations")
    FreqDist(NER["ORGANIZATION"]).tabulate()
```
Or between the two:
```
# Same entity summary as the single-book cell above, but aggregated over
# every book in ner_sents.
NER = {"LOCATION": [],
       "PERSON": [],
       "ORGANIZATION": [],
      }
for k in ner_sents.keys():
    for sentence in ner_sents[k]:
        # Merge consecutive tokens sharing an NER tag into one entity string.
        for tag, chunk in groupby(sentence, lambda x: x[1]):
            if tag != "O":
                NER[tag].append(" ".join(w for w, t in chunk))
if NER["LOCATION"]:
    print("Locations:")
    FreqDist(NER["LOCATION"]).tabulate()
    print()
if NER["PERSON"]:
    print("Persons:")
    FreqDist(NER["PERSON"]).tabulate()
    print()
if NER["ORGANIZATION"]:
    # Header added for consistency with the single-book cell above.
    print("Organizations")
    FreqDist(NER["ORGANIZATION"]).tabulate()
```
# 6) Sentiment Analysis
While earlier sentiment analysis was based on simple dictionary look-up methods denoting words as positive or negative, or assigning numerical values to words, newer methods are better able to take a word's or sentence's environment into account. VADER (Valence Aware Dictionary and sEntiment Reasoner) is one such example.
```
from nltk.sentiment.vader import SentimentIntensityAnalyzer
import numpy as np
# VADER compound score ranges from -1 (most negative) to +1 (most positive).
sid = SentimentIntensityAnalyzer()
print(sid.polarity_scores("I really don't like that book.")["compound"])
for fid in books:
    print(fid.upper())
    # Score every sentence of the raw book text.
    sent_pols = [sid.polarity_scores(s)["compound"] for s in sent_tokenize(my_texts.raw(fid))]
    # NOTE(review): indexes sent_pols by position of sentences from
    # my_texts.sents(fid) — assumes both tokenizations yield the same
    # sentence count; confirm, otherwise this can raise IndexError.
    for i, s in enumerate(my_texts.sents(fid)):
        print(s, sent_pols[i])
        print()
    print()
    print("Mean: ", np.mean(sent_pols))
    print()
    print("="*100)
    print()
```
| github_jupyter |
```
import matplotlib.pyplot as plt
import tensorflow as tf
import numpy as np
from scipy.spatial.distance import cdist
# from tf.keras.models import Sequential # This does not work!
from tensorflow.python.keras.models import Sequential
from tensorflow.python.keras.layers import Dense, GRU, Embedding
from tensorflow.python.keras.optimizers import Adam
from tensorflow.python.keras.preprocessing.text import Tokenizer
from tensorflow.python.keras.preprocessing.sequence import pad_sequences
from tensorflow.python.keras.models import save_model
import csv
import pandas as pd
from IPython.display import display, HTML
import h5py
from random import shuffle
```
### Loading the dataset
```
def load_dataset(file = 'sentiment.txt'):
    """Read a tab-separated sentiment file and return (texts, labels).

    Each line is expected to look like ``<label>\\t<text>``; lines that do
    not split into exactly two fields are silently skipped.  Lines are
    shuffled first, so the returned order is randomized.
    """
    with open(file, 'r') as handle:
        rows = handle.readlines()
    shuffle(rows)
    labels, text = [], []
    for row in rows:
        parts = row.split('\t')
        if len(parts) != 2:
            continue
        labels.append(parts[0])
        text.append(parts[1].rstrip())
    return text, labels
# Load the corpus and peek at one example (text and its label).
x_train_text , y_train = load_dataset()
data_text = x_train_text
idx = 5
print(x_train_text[idx],'\n',y_train[idx])
```
### Preparing the input
```
import re
def process(txt):
    """Strip non-alphanumeric characters and return lowercase tokens."""
    cleaned = re.sub(r'[^a-zA-Z0-9\s]', '', txt)
    return [token.lower() for token in cleaned.split()]
def tokenize(thresh = 5):
    """Build a word -> index vocabulary from the module-level ``data_text``.

    Only words occurring at least ``thresh`` times are kept.  Indices start
    at 1 so that 0 can serve as the padding / unknown-word value.  Insertion
    order (first occurrence in the corpus) is preserved, matching the
    original hand-rolled counting loop.
    """
    from collections import Counter
    count = Counter()
    for txt in data_text:
        count.update(process(txt))
    word_index = {}
    idx = 1
    for word, freq in count.items():
        if freq >= thresh:
            word_index[word] = idx
            idx += 1
    return word_index
num_words = None  # placeholder; set to the vocabulary size just below
word_index = tokenize()
num_words = len(word_index)
print('length of the dictionary ',len(word_index))
def getMax(data):
    """Return the largest whitespace-token count over all texts in ``data``.

    Returns 0 for an empty corpus (same as the original loop's initial
    value).  Each text is split only once, unlike the original which
    called ``split()`` twice per candidate.
    """
    return max((len(txt.split()) for txt in data), default=0)
max_tokens = getMax(x_train_text)
def create_sequences(data):
    """Convert each text into a fixed-length, left-padded sequence of word
    indices (length ``max_tokens``; 0 = padding / unknown word).

    Relies on the module-level ``max_tokens`` and ``word_index``.
    """
    tokens = []
    for txt in data:
        words = process(txt)
        seq = [0] * max_tokens
        i = 0
        for word in words:
            # Offset so known words start at max_tokens - len(words)
            # (loop-invariant; could be hoisted out of the loop).
            start = max_tokens-len(words)
            # NOTE(review): membership test uses word.lower() but the lookup
            # uses word — safe here because process() already lowercases.
            if word.lower() in word_index.keys():
                seq[i+start] = word_index[word]
                i+=1
        tokens.append(seq)
    return np.array(tokens)
print(create_sequences(['awesome movie']))  # sanity check: one padded row
x_train_tokens = create_sequences(x_train_text)
```
### Creating the model
```
# Embedding -> three stacked GRUs -> sigmoid output for binary sentiment.
model = Sequential()
embedding_size = 8
model.add(Embedding(input_dim=num_words,
                    output_dim=embedding_size,
                    input_length=max_tokens,
                    name='layer_embedding'))
model.add(GRU(units=16, name = "gru_1",return_sequences=True))
model.add(GRU(units=8, name = "gru_2" ,return_sequences=True))
model.add(GRU(units=4, name= "gru_3"))  # final GRU returns only its last state
model.add(Dense(1, activation='sigmoid',name="dense_1"))
# 'lr' is the legacy Keras argument name; newer releases use 'learning_rate'.
optimizer = Adam(lr=1e-3)
model.compile(loss='binary_crossentropy',
              optimizer=optimizer,
              metrics=['accuracy'])
```
### Training the model
```
# NOTE(review): y_train holds string labels straight from load_dataset();
# presumably '0'/'1' — confirm Keras accepts them, or cast to ints first.
model.fit(x_train_tokens, y_train,
          validation_split=0.05, epochs=5, batch_size=32)
```
### Testing on new input
```
# Quick qualitative check on four unseen sentences, then persist the model.
txt = ["awesome movie","Terrible movie","that movie really sucks","I like that movie"]
print(create_sequences(txt)[0])
pred = model.predict(create_sequences(txt))
print('\n prediction for \n',pred[:,0])
save_model(
    model,
    "keras.h5",
    overwrite=True,
    include_optimizer=True
)
model.summary()
def create_csv(file):
    """Dump the module-level ``word_index`` mapping to ``file`` as CSV rows.

    ``newline=''`` is required by the csv module to avoid blank lines
    between rows on platforms with \\r\\n line endings.
    """
    with open(file, 'w', newline='') as csvfile:
        writer = csv.writer(csvfile)
        for key, idx in word_index.items():
            writer.writerow([key, idx])
create_csv('dict.csv')
```
| github_jupyter |
# Constant-Coefficient Homogeneous Second Order ODE - Theory
A constant-coefficient homogeneous linear second-order ODE has the following format:
$$\frac{d^2y}{dx^2}+p\frac{dy}{dx}+qy(x) = 0$$
where $p,q \in \mathbb{R}$.
## The Equation's Solution
The general solution $y(x)$ for this type of equation is a linear combination of two fundamental solutions $y_1(x)$ and $y_2(x)$:
$$y(x) = c_1y_1(x) + c_2y_2(x)$$
where $c_1$ and $c_2$ are constants that must be found by solving the initial value problem.
In general, for this type of ODE, the fundamental solutions have the following format:
$$y(x) = e^{\lambda x}$$
where $\lambda \in \mathbb{C}$.
### Characteristic Equation
To find the $\lambda$ for each of the two fundamental solutions, it is necessary to solve the following characteristic quadratic equation:
$$\lambda^2 e^{\lambda x} + p \lambda e^{\lambda x} + q e^{\lambda x} = 0$$
This equation is obtained by replacing $y(x)$ with the assumed solution $e^{\lambda x}$, and consequently its first derivative $y'(x)$ with $\lambda e^{\lambda x}$ and its second derivative $y''(x)$ with $\lambda^2 e^{\lambda x}$.
It is equivalent to
$$e^{\lambda x} (\lambda^2 + p \lambda + q ) = 0$$
It is a fact that there exists no $t \in \mathbb{C}$ satisfying $e^t = 0$, therefore:
$$\lambda^2 + p \lambda + q = 0$$
Applying the quadratic formula, we have $\lambda_1 = \frac{-p + \sqrt{\Delta}}{2}$ and $\lambda_2 = \frac{-p - \sqrt{\Delta}}{2}$, where $\Delta = p^2 - 4q$.
### Fundamental and General Solutions
The fundamental solutions $y_1(x)$ and $y_2(x)$ need to be linearly independent. This linear independence can be verified by the Wronskian. The characteristic equation can have two distinct real roots, one repeated real root or two complex roots, and there is a pair of linearly independent fundamental solutions for each of these cases.
#### Case $\Delta > 0$
In this case, $\lambda_1, \lambda_2 \in \mathbb{R}$ and $\lambda_1 \neq \lambda_2$.
The fundamental solutions are $y_1(x)=e^{\lambda_1 x}$ and, $y_2(x)=e^{\lambda_2 x}$.
The general solution is:
$$y(x) = c_1 e^{\lambda_1 x} + c_2 e^{\lambda_2 x}$$
where $c_1, c_2 \in \mathbb{R}$.
#### Case $\Delta = 0$
In this case, $\lambda = \lambda_1 = \lambda_2$ and $\lambda \in \mathbb{R}$.
The fundamental solutions are $y_1(x)= e^{\lambda x}$ and $y_2(x)= x e^{\lambda x}$.
The general solution is:
$$y(x) = c_1 e^{\lambda x} + c_2 x e^{\lambda x}$$
where $c_1, c_2 \in \mathbb{R}$.
#### Case $\Delta < 0$
In this case, $\lambda_1, \lambda_2 \in \mathbb{C}$. These roots are $\lambda_1 = \alpha + \beta i$ and $\lambda_2 = \alpha - \beta i$.
The fundamental solutions are:
$$y_1(x) = e^{\lambda_1 x} = e^{x(\alpha + \beta i)} = e^{x \alpha } e^{x \beta i}$$
and
$$y_2(x) = e^{\lambda_2 x} = e^{x(\alpha - \beta i)} = e^{x \alpha } e^{-x \beta i}$$
By applying Euler's identity ($e^{i\theta} = \cos\theta + i\sin\theta$), we find:
$$y_1(x) = e^{x \alpha } e^{x \beta i} = e^{x \alpha } [\cos(\beta x) + i\sin(\beta x)]$$
and
$$y_2(x) = e^{x \alpha } e^{-x \beta i} = e^{x \alpha } [\cos(\beta x) - i\sin(\beta x)] $$
The general solution is therefore:
$$y(x) = c_1 e^{\lambda_1 x} + c_2 e^{\lambda_2 x} = c_1 e^{x \alpha } [\cos(\beta x) + i\sin(\beta x)] + c_2 e^{x \alpha } [\cos(\beta x) - i\sin(\beta x)] = e^{x \alpha } [\cos(\beta x)(c_1 + c_2) + i\sin(\beta x)(c_1 - c_2)] $$
where $c_1, c_2 \in \mathbb{R}$.
Considering $k_1 = (c_1 + c_2)$ and $k_2 = i(c_1 - c_2)$, we have:
$$y(x) = e^{x \alpha } [k_1\cos(\beta x) + k_2\sin(\beta x)]$$
# Practice with Python
## Solving the Const-Coeff Homogeneous Second-Order ODE
In this practice, we will use the algebraic manipulation of the **SymPy** symbolic math package, numeric features of the **NumPy** package and the plotting from the **MatPlotLib** package. First, we will import these packages.
```
from sympy import *
from matplotlib import pyplot as plt
import numpy as np
```
Now, we are going to define a function that returns the roots of the characteristic equation.
```
def get_characteristic_eq_roots(p, q):
    """Solve the characteristic equation l**2 + p*l + q = 0 symbolically.

    Returns SymPy's list of roots: two distinct real roots, a single
    repeated root, or a complex-conjugate pair.
    """
    unknown = Symbol('l')
    characteristic = Eq(unknown**2 + p*unknown + q, 0)
    return solve(characteristic, unknown)
```
This function returns a list of symbolic roots, that can be two real roots, a single real root or two complex roots. Look that at the following example:
```
# Examples covering all three root cases of the characteristic equation.
print('Real roots (p=-3, q=2): ', get_characteristic_eq_roots(-3, 2))
print('Complex roots (p=1, q=1): ', get_characteristic_eq_roots(1, 1))
print('Single root (p=2, q=1): ', get_characteristic_eq_roots(2, 1))
```
Now, we are going to define a function that returns the fundamental solutions of the ODE.
```
def get_ODE_fundamental_solutions(x, p, q):
    """Return the two fundamental solutions [y1, y2] of y'' + p*y' + q*y = 0.

    Branches on the roots of the characteristic equation: one repeated
    real root, two distinct real roots, or a complex-conjugate pair.
    """
    c_eq_roots = get_characteristic_eq_roots(p, q)
    if len(c_eq_roots) == 1:# Single real root
        l = c_eq_roots[0] #Root lambda
        y1 = exp(l*x) #fundamental solution y1(x)
        y2 = x*exp(l*x) #fundamental solution y2(x)
        return [y1, y2]
    elif len(c_eq_roots) == 2: #Two roots
        if c_eq_roots[0].is_real == True and c_eq_roots[1].is_real == True: # Real roots
            # NOTE: roots are deliberately taken in reverse order (y1 uses roots[1]).
            l1 = c_eq_roots[1] #Root lambda1
            l2 = c_eq_roots[0] #Root lambda2
            y1 = exp(l1*x) #fundamental solution y1(x)
            y2 = exp(l2*x) #fundamental solution y2(x)
            return [y1, y2]
        elif c_eq_roots[0].is_real == False and c_eq_roots[1].is_real == False: # Complex roots
            l1 = c_eq_roots[1] #Root lambda1
            l2 = c_eq_roots[0] #Root lambda2
            alpha = re(l1) #Real part of the roots
            beta = abs(im(l1)) #Absolute imaginary part of the roots
            y1 = exp(alpha*x)*( cos(beta*x) + I*sin(beta*x)) #fundamental solution y1(x)
            y2 = exp(alpha*x)*( cos(beta*x) - I*sin(beta*x)) #fundamental solution y2(x)
            return [y1, y2]
        else: #Error
            raise Exception('Unexpected answer from the get_characteristic_eq_roots function.')
    else: #Error
        raise Exception('Unexpected answer from the get_characteristic_eq_roots function.')
```
This function returns a list of symbolic fundamental solutions. Look that at the following examples:
```
# Show the fundamental-solution pair for each of the three root cases.
x = Symbol('x')
print('p=-3, q=2: ', get_ODE_fundamental_solutions(x, -3, 2))
print('p=1, q=1: ', get_ODE_fundamental_solutions(x, 1, 1))
print('p=2, q=1: ', get_ODE_fundamental_solutions(x, 2, 1))
```
Now, we are going to define a function that returns the symbolic general solution.
```
def get_ODE_solution(x, p, q, c1, c2):
    """Return the symbolic general solution y(x) = c1*y1(x) + c2*y2(x)."""
    y1, y2 = get_ODE_fundamental_solutions(x, p, q)
    return c1*y1 + c2*y2
```
Testing...
```
# General solutions with symbolic constants c1, c2, one per root case.
x, c1, c2 = Symbol('x'), Symbol('c1'), Symbol('c2')
print('p=-3, q=2: ', get_ODE_solution(x, -3, 2, c1, c2))
print('p=1, q=1: ', get_ODE_solution(x, 1, 1, c1, c2))
print('p=2, q=1: ', get_ODE_solution(x, 2, 1, c1, c2))
```
Finally, we will define a function to give the $y(x)$ values for any $x$ input in the ODE solution.
```
def get_ODE_function(p, q, c1, c2):
    """Build a numeric callable y(x) from the symbolic ODE solution."""
    sym_x = Symbol('x')
    solution = get_ODE_solution(sym_x, p, q, c1, c2)
    return lambdify(sym_x, solution)
```
## Plotting $y(x)$
Plotting $y(x)$ from $y''(x) + y'(x) + 10y(x) = 0$ with $c_1 = c_2 = 1$:
```
# p=1, q=10: discriminant p^2 - 4q < 0, complex roots -> oscillatory decay.
p, q, c1, c2 = (1, 10, 1, 1)
y = get_ODE_function(p, q, c1, c2) #y(x) function
x = np.linspace(0,20,500) #500 dots in [0, 20]
plt.plot(x,y(x).real) #Plot the real part of y(x)
plt.show()
```
Plotting $y(x)$ from $y''(x) + 10y'(x) + 10y(x) = 0$ with $c_1 = c_2 = 1$:
```
# p=10, q=10: discriminant p^2 - 4q > 0, two real roots -> non-oscillatory decay.
p, q, c1, c2 = (10, 10, 1, 1)
y = get_ODE_function(p, q, c1, c2) #y(x) function
x = np.linspace(0,10,500) #500 dots in [0, 10]
plt.plot(x,y(x).real) #Plot the real part of y(x)
plt.show()
```
Plotting $y(x)$ from $y''(x) + \frac{y'(x)}{2} + y(x) = 0$ with $c_1=c_2=1$:
```
# p=1, q=0.5: discriminant p^2 - 4q < 0, complex roots -> oscillatory decay.
p, q, c1, c2 = (1, 0.5, 1, 1)
y = get_ODE_function(p, q, c1, c2) #y(x) function
x = np.linspace(0,20,500) #500 dots in [0, 20]
plt.plot(x,y(x).real) #Plot the real part of y(x)
plt.show()
```
| github_jupyter |
## A more detailed overview of this notebook
This notebook began as a comparison between profiler chlorophyll measurements near the
surface nine times per day to surface chlorophyll observations by the
MODIS satellite once every eight days. Its scope expanded from there, considerably.
One such expansion is considering other sources of data. We have for example
a snapshot of the global ocean called GLODAP. After inspecting that on a global
scale we turn to a comparison of vertical profiles through the water column,
specifically salinity and temperature. We want to compare GLODAP profiles as somewhat
*static* snapshots with ongoing active profile measurements from ARGO drifters.
The Regional Cabled Array (RCA)
is an observatory stretching across the sea floor from the coast of Oregon 500 km out to
Axial Seamount. This observatory includes two types of profilers that rise and fall through
the water column: Both deep profilers that ascend from the sea floor and shallow profilers
that rest on platforms at 200 meters depth and ascend to within a few meters of the surface.
We begin the RCA work focused on the shallow profiler as this is where the highest
concentration of chlorophyll is found.
* Regional Cabled Array (RCA): A cabled observatory on the sea floor off the coast of Oregon
* Site: A location in the RCA
* Platform: A mechanical structure -- static or mobile -- that resides at a site.
* Instrument: An electronic device carrying one or more sensors
* Sensor: A device that measures some aspect of the ocean like pH or temperature
* Stream: A stream of data produced by a sensor as part of an instrument located on a platform at a site in the RCA
This notebook describes a Python package called **yodapy** used to obtain stream data.
Here we use the traditional data path model
* search for data
* order data
* download data
* analyze data
We prefer a newer approach where data are already in place on the public cloud and the model is
* analyze data
Since that is our end-goal some of the data for this project will be (not done yet 3-20) set
in place in advance.
Back to our process here: Once the data are in place we say that **yodapy** has finished its task.
We then turn to analysis using Python and particularly **XArray**.
## Purpose of the **yodapy** Python package
`yodapy` is a contraction of **Y**our **O**cean **DA**ta **PY**thon library. It was written
by Don Setiawan to facilitate working with **OOI** data in general (not just profiler data).
Before `yodapy` was written the process of finding and ordering data for OOI
was a bit *involved*. `yodapy` was developed to make this process more
*programmable* and to provide search capability
without having to know precise terms like
`RS01SBPS-SF01A-3D-SPKIRA101-streamed-spkir_data_record`.
This notebook uses `yodapy` to search for, identify, order and download data, all in Python.
A future extension of `yodapy` will make this process even simpler, referencing data that
are already in place on the public cloud. Rather than order and download data you simply
start working with it using the `XArray` idiom.
<BR>
> **Takeaway 1: This notebook reviews `yodapy` specific to Regional Cabled Array (RCA)
data but the pattern of use is relevant to data from other OOI segments.**
<BR>
> **Takeaway 2: A heads-up on authenticating: The OOI system requires you to *authenticate* your identity.
You do this by registering your email at their website. This is unrestricted, there is
no cost and it only takes a couple of minutes. `yodapy` helps you manage your resulting
credentials so once this is set up you are authenticated automatically.**
## Notebook features
Come back and re-write this (and never index your own book)
### Section on GLODAP and ARGO
### Regional Cabled Array and MODIS
- OOI / RCA data orders with `yodapy`
- working with `xarray` `DataArrays` and `Datasets`
- plotting with `matplotlib`
- line and scatter plots, multiple y-axes, labels, marker type and size
- profiler curtain plots: time - depth - chlorophyll (as color)
- animation of time series data
- interactivity
- color bars
- intrinsic plotting from DataArrays (with modifiers)
### Ordering and pulling ARGO data from the Coriolis system
## Data management
This Jupyter notebook resides in
a sub-directory of the User's home directory `~`. It is bundled
as an open source
[github repository](https://github.com/robfatland/chlorophyll).
(abbreviated 'repo') on GitHub using the
Linux `git` utility.
The repo is not intended for large data volumes.
The data must reside *elsewhere* in the
working environment, i.e. not within the repo directory.
I use `~/data/` with sub-directories to organize data content outside
of the `~/chlorophyll` directory.
Each data source (MODIS, GLODAP, ARGO, RCA, ...) gets a dedicated sub-directory in `~/data`.
`xarray` has a wildcard multi-file open utility: `xr.open_mfdataset("Identifier*.nc")`.
This maps multiple NetCDF files to a single Dataset.
The RCA data are ordered using a less convenient dimension, namely
observation number `obs`. This is just an ordinal integer 1, 2, 3, ...
The code in this notebook modifies this to use dimension `time`.
## Obtain Regional Cabled Array data using `yodapy`
As noted above the `yodapy` library enables Python-based access to OOI data. In this case we will focus
on the Regional Cabled Array (RCA) and particularly on the shallow profiler found at the site
**Oregon Slope Base**. This site is at the base of the continental shelf in about 3000 meters of water.
The shallow profiler rises and falls nine times per day through the upper 200 meters of the water column.
### OOI data access back-story
To order data from **OOI** requires you to pre-register (free, using your email address). This provides you
credentials when placing a data order. Orders typically take a few minutes for the OOI
servers to assemble; after which you receive an email with a download link. You download the data to local storage
and read files into memory and proceed from there, a very labor-intensive process.
### How `yodapy` helps
[`yodapy`](http://github.com/cormorack/yodapy) helps you automate OOI data access at each step.
It sets up a credentials directory within your home directory;
and in so doing helps you avoid accidentally pushing your credentials to `github` where they would be public. `yodapy`
allows you to create a Python object called an `OOI()` that includes methods for finding sensor data of interest;
for ordering time-bounded datasets for those sensors; for downloading this data; and for attaching it to a data
structure (an `xarray Dataset`) for further analysis. It is at this point when you have your data present as a
`Dataset` that `yodapy` has completed its job.
The next cell installs `yodapy`. Run this each time you start up this notebook server unless your installation
of the `yodapy` library persists.
### Getting OOI credentials
To get data from OOI you first create a User account as follows:
- Visit the [OOI website](https://ooinet.oceanobservatories.org/#)
- On the login menu (upper right) select **Register**
- Fill out the New User Registration Form
- Once you have your login credentials: Log in
- The 'Login' menu should be replaced with your User name at the upper right: Also a dropdown menu
- Use this menu to select User Profile
- At the bottom of your User Profile page you should find **API Username** and **API Token**
- These two strings comprise your authentication
- Keep them somewhere safe
- Notice that the **Refresh API Token** button permits you to regenerate them whenever you like
Use your OOI API Token with `yodapy` as described further down to automate your authentication process.
If this works as intended you can safely use OOI and not have to worry about cutting and pasting these
token strings every time you want to get data access.
## install yodapy if needed
```
# Environment setup: paths, plotting, numpy/pandas/xarray, small datetime
# helpers; then install yodapy from GitHub and verify the import works.
# mini-source control: Last copied 29-SEP-2020: to tilt*, chlorophyll*, rca*, argo*
# last revised 09-OCT-2020
import os, sys, time, glob
from IPython.display import clear_output # use inside loop with clear_output(wait = True) followed by print(i)
import warnings # use with warnings.filterwarnings('ignore') or 'once'
home_dir = os.getenv("HOME")
this_dir = home_dir + '/chlorophyll/'
data_dir = '/data/'
data1_dir = '/data1'
from matplotlib import pyplot as plt
from matplotlib import colors as mplcolors
import numpy as np, pandas as pd, xarray as xr
from numpy import datetime64 as dt64, timedelta64 as td64
# doy() maps a datetime64 to its day-of-year (1..366); dt64_from_doy() inverts it;
# day_of_month_to_string() zero-pads single-digit days.
def doy(theDatetime): return 1 + int((theDatetime - dt64(str(theDatetime)[0:4] + '-01-01')) / td64(1, 'D')) # 1, 2, .... , 365, [366]
def dt64_from_doy(year, doy): return dt64(str(year) + '-01-01') + td64(doy-1, 'D')
def day_of_month_to_string(d): return str(d) if d > 9 else '0' + str(d)
print('\nJupyter Notebook running Python {}'.format(sys.version_info[0]))
# Ensure that the latest build of yodapy is installed directly from github using
!pip install git+https://github.com/cormorack/yodapy.git -q # -q cuts the stdout clutter
# this line of code verifies yodapy is installed
from yodapy.utils.creds import set_credentials_file
```
## One time only: Configure OOI credentials using `yodapy`
Only the first time through here: Carefully follow the instructions in the Python cell below.
You are (temporarily) telling `yodapy` what your `OOI username` and `token` are.
`yodapy` creates a hard-to-notice sub-directory of your home directory
that contains these credentials in a text file. As long as you are not publishing
your home directory someplace public your credentials will be hidden away.
#### 'Why am I doing this *credentials* business?'
When you use `yodapy` to order data from OOI it will use this 'hidden away' copy
of your credentials to convince OOI your order is legitimate.
```
# One-time credential setup: guarded by `if False` so it never runs by accident.
# Run the next line of code to create authentication credentials for the OOI data system. Do this
# by ***carefully**** substituting your actual credentials in the username and token strings
# in this line of code:
if False:
    set_credentials_file(data_source='ooi', username='OOIAPI-XXXXXXXXXXXXXX', token='XXXXXXXXXXXX')
# Un-comment the code and run the cell, just the one line above.
# Once it runs: Comment it out again and delete your credentials. You can obscure them with XXXXX as they are seen now.
# After you obscure your credentials: Be sure not to run this code again as it will break your authentication info.
#
# You can verify this worked by examining the .credentials file in ~/.yodapy. The credentials should match. Notice that
# this (slightly hidden) directory is directly connected to your home directory; whereas this IPython notebook
# is presumably in a distinct directory; so there should be no chance of a GitHub push sending your
# credentials to GitHub.
```
## Regional Cabled Array data for 2019
* 3 sites: OSB, AXB, OOE for Oregon Slope Base, Axial Base, Oregon Offshore Endurance
* 3 Platforms: Shallow and Deep profilers plus shallow platform (fixed at 200m depth)
* Large collection of instruments, each with one or more sensors
* CTD + Dissolved Oxygen
* PAR, Spectral Irradiance, Spectrophotometer (attenuation / absorbance), Fluorometers
* Nitrate, pH, pCO2
* Ocean velocity measurement
## Initialize the `OOI()` object
```
from yodapy.datasources import OOI
# Exploratory cell: most calls are commented out; only the two 'cava_'
# attribute lookups at the bottom actually run (and require `ooi = OOI()`
# to have been un-commented first).
# uncomment this...
# ooi = OOI()
# use this "no-underscore" version of directory to see the primary methods for the OOI() object
# dirnou(ooi)
# uncomment and run this to see all the components or segments of OOI available
# ooi.sites
# yodapy
# We can explore these methods and attributes further. Note that yodapy has a series of
# attributes that begin with 'cava_'. 'cava' is shorthand for "cabled array value add",
# a project at the University of Washington School of Oceanography supporting cabled array
# data validation and use in ocean research.
# help(ooi.cava_sites)
ooi.cava_sites
# yodapy
print('\n\n\n')
ooi.cava_parameters
```
## `ooi.search()` first example
We will begin using `yodapy` proper to narrow down a data search.
### What resources are available?
Specifically what are the names of sites served by the Regional Cabled Array?
We begin with a broad search giving only the keyword `region`.
Then we narrow the search by adding including keywords `site`, `node`, and `instrument`
to arrive at individual *instruments* or *sensors*. These search results are used to order
datasets with a specified time range.
This first example is the broad search.
```
# Broad search: narrow the OOI() view to the Regional Cabled Array,
# then list sites (all of OOI) and instruments (cabled array only).
# ooi.search(region='endurance')
ooi.search(region='cabled')
# Attribute 'sites' gives broad results as a table of arrays, sites, descriptions, lat/lon: Across all of OOI (62 rows)
ooi.sites
# Narrow result: Within the Cabled Array region only (118 rows, 6 named columns)
print(ooi.instruments)
```
## Using `yodapy` to refine OOI searches
The `OOI()` object provided by `yodapy` starts out in a very *broad view* search state.
It sees the entire OOI project at the level of the observatory sites, by name
Endurance, Pioneer, Argentine Basin, Cabled Array, Irminger Sea, Station Papa and possibly
others I'm forgetting.
The `ooi.search()` method uses keywords (`keyword='string-value'`) to narrow this view.
In this way when the view is narrowed to a single instrument we can use the `ooi.request_data()`
method to order data from that instrument.
## `ooi.search()` second example and notes on search configuration
We narrow the search using keywords `site`, `node` and `instrument`.
The `ooi.instruments` result from above provides the vocabulary to use for keyword arguments:
- `site` keyword is taken from the `site_name` column
- for example `Oregon Slope Base Seafloor` suggests using `oregon slope base` as the keyword value
- `node` keyword is taken from the `infrastructure_name` column
- for example 'Shallow Profiler (SF01A)` suggests keyword `shallow profiler` (notice these are not case-sensitive)
- `instrument` keyword is taken from the `instrument_name` column
- for example `3-Wavelength Fluorometer` suggests keyword `fluorometer`
Once the narrow search runs we look at the `ooi.instruments` attribute to see how narrow the results are.
This prints as a table where -- as in example one -- the results are sorted into *one instrument per row*.
This can confirm whether the objective of narrowing the search down to a single instrument was met.
We run the `.data_availability()` method. This gives two outputs: A **table** and below that a
**time series graphic**. The table lists each instrument as a separate column. These columns are
then transposed for the time series graphic: One row of boxes for each instrument.
***Detail: The green `.data_availability()` chart may fail to render in some cases. Re-running the cell might help.***
```
# region='cabled' or 'endurance'
# site='slope' or 'slope base deep' or 'oregon offshore' or ...
# node='platform' or 'shallow profiler' or ...
# instrument='2-Wavelength' or 'ctd' or 'fluorometer' or ...
# ooi.search(region='endurance', site='oregon offshore', node='shallow profiler', instrument='fluorometer')
# ooi.data_availability()
# Taking out the instrument keyword we have:
# ooi.search(region='cabled', site='slope', node='shallow profiler')
# This produces (with a simple ooi.instruments attribute call) a list of the following:
# - 3-Wavelength fluorometer (flort: got it for OSB SP 2019)
# - CTD (ctdpf: got it for OSB SP 2019)
# - Photosynthetically Available Radiation (parad: got it for OSB SP 2019)
# - pH (phsen: got it for OSB SP 2019)
# - Spectral Irradiance (spkir: got)
# - Spectrophotometer (optaa: got)
# - NOT YET: Single Point Velocity Meter (velpt: )
# - Nitrate (nutnr: Got both nutnr_a_sample and nutnr_a_dark_sample)
# - pCO2 water (two streams: pco2w_a_sami_data_record and pco2w_b (no data past 2018; placed 2018 data
# instrument 2014 2015 2016 2017 2018 2019 2020
#
# Oregon Slope Base
# SP flort 3-wavelength !
# SP ctdpf !
# SP parad !
# SP phsen !
# SP spkir !
# SP optaa !
# SP velpt !
# SP nutnr_a, nutnr_a_dark !
# SP pco2w_a_sami !
# SP pco2w_b_sami ! NA
# 200m ctdpf !
# 200m flort !
# 200m phsen !
# 200m do_stable !
# DP ctdpf wfp ! NA NA NA
# DP ctdpf inst !
# DP acm (VEL3D) inst !
# DP flcdrdt inst fluorometer !
#
# Axial Base
# SP flort !
# SP ctdpf !
# SP parad !
# SP phsen !
# SP spkir ?
# SP optaa !
# SP velpt ?
# SP nutnr_a, nutnr_a_dark ?
# SP pco2w_a_sami !
# SP pco2w_b_sami ?
# 200m ctdpf !
# 200m flort !
# 200m phsen !
# 200m do_stable !
# DP ctdpf wfp !
# DP ctdpf inst !
# DP acm (VEL3D) inst ?
# DP flcdrdt inst CDOM fluorometer !
# DP fl????? inst 2-Wav fluorometer ?
# DP dissolved oxygen !
#
# filename anatomy
# deployment0005 or 0006 etc
# _RS03AXPS site: AX is Axial, SB is slope base
# -SF03A platform: SF is shallow profiler, DP is deep profiler, PC is 200m platform
# -3B number + letter: unknown
# -OPTAAD301 6-letter instrument + 'A'/'D' + 30X/10X
# -streamed 'streamed' or 'recovered_inst' or 'recovered_wfp'
# -optaa_sample instrument designator, sometimes 'dpc_xxxxx_instrument_recovered'
# _20191004T073957.414490 datetime start
# -20191014T220233.907019 datetime end
# .nc NetCDF file
#
# run this to see fluorometers available at Oregon Offshore (without using the 'node' keyword)
#
# filters endurance + oregon offshore + fluorometer turn up 7 hits...
# 2 are Oregon Offshore Surface Mooring: 3 wavelength... of future interest in expanding the MODIS connection
# 2 are Oregon Offshore deep profiler CDOM fluorometer
# 2 are Oregon Offshore deep profiler 2 wavelength... of future interest also (not sure if this is on the RCA)
# 1 is Oregon Offshore shallow profiler 3 wavelength *** Current interest: RCA MODIS connect ***
#
# ooi.search(region='endurance', site='oregon offshore', instrument='fluorometer')
# ooi.instruments
# ooi.data_availability()
# This ooi.search() call:
#
# ooi.search(region='cabled', instrument='fluorometer')
#
# produces 12 hits. Here is the breakdown; where results suggest site and node search keywords.
# Note that Deep Profiler sites have degeneracy in 'recovered_inst' versus 'recovered_wfp' (appear twice)
#
# - (4) Axial Base Deep Profiler Mooring (CDOM Fluorometer, 2-Wavelength Fluorometer)
# - (4) Oregon Slope Base Deep Profiler Mooring (CDOM Fluorometer, 2-Wavelength Fluorometer)
# - (1) Oregon Slope Base Shallow Profiler Mooring (200m Platform; 2-Wavelength Fluorometer)
# - (1) Oregon Slope Base Shallow Profiler Mooring (Shallow Profiler; 3-Wavelength Fluorometer)
# - (1) Axial Base Shallow Profiler Mooring (200m Platform; 2-Wavelength Fluorometer)
# - (1) Axial Base Shallow Profiler Mooring (Shallow Profiler; 3-Wavelength Fluorometer)
# Resulting searches: Choose one of these...
# ooi.search(region='cabled', site='oregon slope base', node='shallow', instrument='fluorometer')
# ooi.search(region='cabled', site='oregon slope base', node='200m', instrument='fluorometer')
# ooi.search(region='cabled', site='axial base', node='shallow', instrument='fluorometer')
# ooi.search(region='cabled', site='axial base', node='200m', instrument='fluorometer')
# ...and run...
# ooi.data_availability()
## Final `yodapy` section: Obtain data
Three useful Python/yodapy cells follow. The first is run iteratively to refine a
search. The second and third are run consecutively to place an order and download it
when it is ready.
> ***Strongly suggest: After data arrives move it to an out-of-repo data directory***
###################################
#
# Set up a data order
#
# Use this cell (possibly multiple times) to narrow your search. Then use the following two cells to
# order and retrieve this data. Remember that the last thing you used for .search() is stored inside
# the ooi object as its 'state'. This makes the ordering of the data in the next cell much simpler
# because you only need to specify the time range.
#
# What keywords does a Python method take?
#
# Below the ooi.search() method is provided with keywords; but how to discover what these are?
# Answer: Enter ?ooi.search to read the document string.
#
# Instructions for this cell
#
# Notice below there is an ooi.search() method call. This uses a sequence of keywords with values
# to narrow the focus of your search. The next line of code 'ooi.instrument' will print the
# results of this search as a table. In a simplest case if your resulting table is exactly one
# row then you will be ordering data from just that instrument stream.
#
# A more advanced approach is to order multiple data streams at once; but this is not described here.
#
# First step: Run the search with only 'region', 'site', and 'node' keywords. Do not include
# 'instrument' or 'stream' keywords. This gives a results table with multiple instruments (rows).
# Example: ooi.search(region='cabled', site='slope', node='shallow profiler')
#
# Second step: Refine this search by including 'instrument' or 'stream' keywords. Give corresponding
# values from the results table to get a new results table with just one row for the instrument
# you are interested in.
# Example: ooi.search(region='cabled', site='slope', node='shallow profiler', stream='velpt')
#
# At this point the search parameters in the OOI() object can be used for a focused data order.
# Place this order using the subsequent two cells.
# run `?ooi.search` to see available keywords
# Search the Endurance array, Oregon Offshore site, 200 m platform for the
# oxygen / pH / pCO2 instrument streams listed in the comma-separated string.
ooi.search(region='endurance', site='offshore', node='200', stream = 'optode, do_stable, phsen,pco2')
ooi.instruments  # print the search-result table; ideally narrowed to one row before ordering
# ooi.data_availability()
%%time
# 2019 CTD (9 months are available ) required ~4 mins. Other sensors closer to 20 minutes
# Assume the above cell narrowed the search results to a single instrument. Assume also that we
# are interested in June 1 -- September 15 of 2019. We now use the ooi object to generate a
# data request.
#
# .request_data() generates a data request
# .to_xarray() polls the OOI system until the order completes; this will take a couple of minutes
#
begin_date = '2019-01-01'  # request the full 2019 calendar year
end_date = '2020-01-01'
ooi.request_data(begin_date=begin_date, end_date=end_date)  # place the order using the stored search state
ds = ooi.to_xarray()  # blocks (polling) until the server-side order completes
len(ds)  # number of datasets returned by the order
%%time
# run this to download the data (possibly multiple files) from a completed data request from above
# one year can take between 4 and 10 minutes
#
filenamelist = ooi.download_netcdfs()  # fetch the finished NetCDF files to the local directory
len(filenamelist)  # number of files downloaded
ooi.raw()  # NOTE(review): presumably the raw server response for the last request -- confirm in yodapy docs
type(ooi.raw())
```
## Two problems with this data
- The data order tends to yield multiple files that are contiguous in time. For example,
the first might run June 1 to June 27 and the second might run June 28 to July 10. We
would like to consider them as a single Dataset and fortunately this is built into the
XArray package as a method: `.open_mfdataset()`. Here the `mf` abbreviates *multiple files*.
```
ds=xr.open_mfdataset(...filename description string including wildcard...)
```
- The data are ordered by a dimension called `obs` for *observation number*. This runs
`1, 2, 3, ...` for each data *file*. The coordinate `time` is available as a dependent
coordinate; but to combine multiple files into a single dataset we do not want to have
`obs = 1, 2, 3, ..., 7010, 7011, 1, 2, 3, ...` with redundant observations. We simply
want everything related to a `time` dimension that increases monotonically as all the
data are combined. For this we use the XArray Dataset `.swap_dims()` method which is
passed a small dictionary that articulates how the swap will happen.
```
ds = ds.swap_dims({'obs':'time'})
```
These two commands are orchestrated together by means of a *preprocessor* function.
## Save streamlined datasets
The following cell opens "multi-file" datasets. It uses short 'good stuff' lists to preserve
important information and dump everything else: Across dimensions, coordinates, data variables
and attributes. It then writes these simplified Datasets as NetCDF files.
```
%%time
def load_and_save_streamlined(source, output, keep_dims, keep_coords, keep_data_vars, keep_attrs):
    """Open a multi-file dataset, keep only the listed pieces, and save it.

    Opens every NetCDF file matching ``data_dir + source`` as one combined
    xarray Dataset (per-file 'obs' dimension pre-swapped for 'time'), drops
    every dimension, coordinate, data variable, and attribute not named in
    the corresponding keep-list, writes the result to ``data_dir + output``,
    and returns the streamlined Dataset.
    """
    def lass_preprocessor(fds): # per-file datasets have dimension 'obs'
        return fds.swap_dims({'obs':'time'}) # ...so we pre-swap that for time
    # NOTE(review): newer xarray versions reject concat_dim together with
    # combine='by_coords' -- confirm against the pinned xarray version.
    ds = xr.open_mfdataset(data_dir + source, preprocess = lass_preprocessor, concat_dim='time', combine='by_coords')
    # Each loop iterates the keys of the Dataset it was started on, so
    # rebinding ds inside the loop body is safe.
    for key in ds.dims:
        if key not in keep_dims: ds = ds.drop_dims(key)
    for key in ds.coords:
        if key not in keep_coords: ds = ds.drop(key)  # NOTE(review): .drop is deprecated in favor of .drop_vars in xarray >= 0.14
    for key in ds.data_vars:
        if key not in keep_data_vars: ds = ds.drop(key)
    attrs_dict = ds.attrs.copy()  # copy so we can pop from ds.attrs while iterating
    for key in attrs_dict:
        if key not in keep_attrs: ds.attrs.pop(key)
    ds.to_netcdf(data_dir + output)
    return ds
# Glob-pattern components selecting the 2019 deployment files.
strRoot = 'rca/2019/depl*'  # deployment files under the data directory
strSite = 'SBPS*'  # NOTE(review): presumably the Slope Base Profiler Site code in the filename -- confirm
strPlatform = 'SF*'  # SF = shallow profiler platform (see filename anatomy notes above)
# particular to phsen the pH sensor
ds_phsen = load_and_save_streamlined(strRoot + strSite + strPlatform + 'phsen*.nc',
    'rca/simpler/osb_sp_phsen_2019.nc',
    ['time'],
    ['time', 'int_ctd_pressure'],
    ['ph_seawater'],
    ['node', 'id', 'geospatial_lat_min', 'geospatial_lon_min'])
# nitrate: dark-sample stream
ds_nutnr_a_dark = load_and_save_streamlined(strRoot + strSite + strPlatform + 'nutnr_a_dark*.nc',
    'rca/simpler/osb_sp_nutnr_a_dark_2019.nc',
    ['time', 'wavelength'],
    ['time', 'int_ctd_pressure'],
    ['nitrate_concentration'],
    ['node', 'id', 'geospatial_lat_min', 'geospatial_lon_min'])
# nitrate: regular sample stream
ds_nutnr_a_sample = load_and_save_streamlined(strRoot + strSite + strPlatform + 'nutnr_a_sample*.nc',
    'rca/simpler/osb_sp_nutnr_a_sample_2019.nc',
    ['time', 'wavelength'],
    ['time', 'int_ctd_pressure'],
    ['nitrate_concentration'],
    ['node', 'id', 'geospatial_lat_min', 'geospatial_lon_min'])
# ds_ctdpf = load_and_save_streamlined('rca/2019/depl*ctdpf*.nc', 'rca/simpler/osb_sp_ctdpf_2019.nc',
# ['time'],
# ['time', 'seawater_pressure'],
# ['seawater_temperature', 'practical_salinity',
# 'corrected_dissolved_oxygen', 'density'],
# ['node', 'id', 'geospatial_lat_min', 'geospatial_lon_min'])
# ds_parad = load_and_save_streamlined('rca/2019/depl*parad*.nc', 'rca/simpler/osb_sp_parad_2019.nc',
# ['time'],
# ['time', 'int_ctd_pressure'],
# ['par_counts_output'],
# ['node', 'id', 'geospatial_lat_min', 'geospatial_lon_min'])
# ds_spkir = load_and_save_streamlined('rca/2019/depl*spkir*.nc', 'rca/simpler/osb_sp_spkir_2019.nc',
# ['time', 'spectra'],
# ['time', 'int_ctd_pressure'],
# ['spkir_downwelling_vector'],
# ['node', 'id', 'geospatial_lat_min', 'geospatial_lon_min'])
# ds_optaa = load_and_save_streamlined('rca/2019/depl*optaa*.nc', 'rca/simpler/osb_sp_optaa_2019.nc',
# ['time', 'wavelength'],
# ['time', 'wavelength', 'int_ctd_pressure'],
# ['beam_attenuation', 'optical_absorption'],
# ['node', 'id', 'geospatial_lat_min', 'geospatial_lon_min'])
# ds_flort = load_and_save_streamlined('rca/2019/depl*flort*.nc', 'rca/simpler/osb_sp_flort_2019.nc',
# ['time'],
# ['time', 'int_ctd_pressure'],
# ['fluorometric_chlorophyll_a', 'fluorometric_cdom',
# 'total_volume_scattering_coefficient', 'seawater_scattering_coefficient',
# 'optical_backscatter'],
# ['node', 'id', 'geospatial_lat_min', 'geospatial_lon_min'])
```
| github_jupyter |
```
# since sift function is not supported in the latest version, we need to install the older version of opencv
# since sift function is not supported in the latest version, we need to install the older version of opencv
!pip install opencv-python==3.4.2.17
!pip install opencv-contrib-python==3.4.2.17
from google.colab import drive
drive.mount('/content/drive')
import cv2
import numpy as np
import matplotlib.pyplot as plt
from sklearn import svm
imageId = {}  # global map: subject-id string -> integer class label (filled by feature_extract)
from pathlib import Path
import sklearn.metrics as metrics
# Function definition for feature extraction
def feature_extract(file_path):
"""
This function takes in list of file paths and returns the training data and labels, by computing SIFT features.
"""
path = '/content/drive/MyDrive/CVAss22/Train' #path to training dataset
c = 0 #initialize a count variable
X_train=[] #initialize a list for training input
y_train=[] #initialize a list for training output/labels
for f in file_path:
fileStr = str(f) #convert object to string
Image = cv2.imread(fileStr) #read the image
sift = cv2.xfeatures2d.SIFT_create() #create SIFT object
keypoints, descriptors = sift.detectAndCompute(Image, None) #compute the keypoints & descriptors for image
fname = fileStr.replace(path, '') #extract only the name of the file, i.e. remove the whole path till parent directory
idx = fname.index('_') #extract the index position of '_'
subId = fname[:idx] #get the subject id
#create a dictionary for mapping image ids
if subId not in imageId.keys():
imageId[subId] = c
c += 1
num_des = 0 #number of descriptors varaible initialization
for d in descriptors:
if num_des <= 50: #comparing the number of descriptors with a threshold
X_train.append(d.astype(float)) #append the descriptor to the return data
y_train.append(imageId[subId]) #append image id of the subject to the labels to be returned
num_des += 1 #increment the number of descriptors variable
else:
break
return X_train, y_train #return the data and label
# Function definition for training the model, testing and displaying the results
def model():
"""
This function trains the model,
"""
train_dir='/content/drive/MyDrive/CVAss22/Train' #path to the main train directory
train_files = Path(str(train_dir)).rglob('*.pgm') #object created to path of training files
#for testing and computing accuracy
test_dir='/content/drive/MyDrive/CVAss22/Test' #path to the main test directory
test_files = Path(str(test_dir)).rglob('*.pgm') #object created to path of test files
#same as above (for plotting roc)
test_dir1='/content/drive/MyDrive/CVAss22/Test' #path to the main test directory
test_files1 = Path(str(test_dir1)).rglob('*.pgm') #object created to path of test files
X_train, y_train = feature_extract(train_files) #function call to 'feature_extract' to get training data and labels
#convert the matrices to numpy array
X_train = np.array(X_train)
y_train = np.array(y_train)
classifier = svm.SVC(kernel = 'rbf', C = 10, gamma = 0.00001, probability=True) #support vector classification with rbf (radial basis function) kernel, regularization parameter as 10, and kernel coefficient as 0.00001
classifier.fit(X_train, y_train) #train the classifier, to fit the model to the training data
sift = cv2.xfeatures2d.SIFT_create() #create SIFT object
count = 0 #initializing a counter to count the number of test files
path = '/content/drive/MyDrive/CVAss22/Test' #path to the test dataset
accuracy = 0 #initialize accuracy
for f in test_files:
fileStr = str(f) #convert object to string
Image = cv2.imread(fileStr) #read the image
count += 1 #increment the counter
keypoints, descriptors = sift.detectAndCompute(Image, None) #compute the keypoints & descriptors for image
temp = classifier.predict(descriptors) #predict labels for keypoints of test image
predicted = temp.astype(np.int64) #convert the predicted value to int64 datatype
counts = np.bincount(predicted) #count the occurence of each predicted value (labels assigned to keypoints)
predicted_subId = np.argmax(counts) #choose the subject id assigned to maximum of the keypoints
fname = fileStr.replace(path, '') #extract only the name of the file, i.e. remove the whole path till parent directory
idx = fname.index('_') #extract the index position of '_'
subId = fname[:idx] #get the subject id
actual_subId = imageId[subId] #assign the integer associated with the subject to actual subject id
if actual_subId == predicted_subId: #check if the actual subject id (ground truth) is equal to the predicted subject id
accuracy += 1 #if yes, increment the accuracy
print(fname + " - Actual subject id: " + str(actual_subId) + " - predicted subject id: " + str(predicted_subId))
accuracy = (accuracy / count) * 100 #compute the accuracy
print("\nAccuracy: %.2f" % accuracy + "%")
"""------------------------------------ROC curve-------------------------------------"""
X_test1, y_test1 = feature_extract(test_files1) #function call to 'feature_extract' to get test data and labels
#convert the matrices to numpy array
X_test1 = np.array(X_test1)
y_test1 = np.array(y_test1)
#calculate the false positive rate (fpr) and true positive rate (tpr) for the threshold
preds = classifier.predict_proba(X_test1)[:,1] #predict the probabilities
fpr, tpr, threshold = metrics.roc_curve(y_test1, preds, pos_label=1) #receiver operating characteristics (roc) curve for the model, returns fpr, tpr and threshold
roc_auc = metrics.auc(fpr, tpr) #compute arear under the curve (auc), based on fpr and tpr
#plot the roc curve
plt.subplots(1, figsize=(10,10))
plt.title('Receiver Operating Characteristic')
plt.plot(fpr, tpr)
plt.plot([0, 1], ls="--")
plt.plot([0, 0], [1, 0] , c=".7"), plt.plot([1, 1] , c=".7")
plt.ylabel('True Positive Rate')
plt.xlabel('False Positive Rate')
plt.show()
"""The code to plot the ROC curve is referred from:
https://stackoverflow.com/questions/25009284/how-to-plot-roc-curve-in-python"""
"""----------------------------------------------------------------------------------"""
model()
```
| github_jupyter |
<script async src="https://www.googletagmanager.com/gtag/js?id=UA-59152712-8"></script>
<script>
window.dataLayer = window.dataLayer || [];
function gtag(){dataLayer.push(arguments);}
gtag('js', new Date());
gtag('config', 'UA-59152712-8');
</script>
# 1D Alfven Wave `GiRaFFEfood` Initial Data for `GiRaFFE`
## This module provides another initial data option for `GiRaFFE`, drawn from [this paper](https://arxiv.org/abs/1310.3274) .
**Notebook Status:** <font color='green'><b> Validated </b></font>
**Validation Notes:** This tutorial notebook has been confirmed to be self-consistent with its corresponding NRPy+ module, as documented [below](#code_validation). The initial data has validated against the original `GiRaFFE`, as documented [here](Tutorial-Start_to_Finish_UnitTest-GiRaFFEfood_NRPy.ipynb).
### NRPy+ Source Code for this module: [GiRaFFEfood_NRPy/GiRaFFEfood_NRPy_Alfven_Wave.py](../../edit/in_progress/GiRaFFEfood_NRPy/GiRaFFEfood_NRPy_Alfven_Wave.py)
## Introduction:
### Alfvén Wave:
This is a flat-spacetime test with initial data
\begin{align}
A_x &= 0 \\
A_y &= \left \{ \begin{array}{lll}\gamma_\mu x - 0.015 & \mbox{if} & x \leq -0.1/\gamma_\mu \\
1.15 \gamma_\mu x - 0.03g(x) & \mbox{if} & -0.1/\gamma_\mu \leq x \leq 0.1/\gamma_\mu \\
1.3 \gamma_\mu x - 0.015 & \mbox{if} & x \geq 0.1/\gamma_\mu \end{array} \right. , \\
A_z &= \ y - \gamma_\mu (1-\mu)x ,
\end{align}
which generates the magnetic field in the wave frame,
\begin{align}
B'^{x'}(x') = &\ 1.0,\ B'^y(x') = 1.0, \\
B'^z(x') = &\ \left \{ \begin{array}{lll} 1.0 & \mbox{if} & x' \leq -0.1/\gamma_\mu \\
1.0+0.15 f(x') & \mbox{if} & -0.1/\gamma_\mu \leq x' \leq 0.1/\gamma_\mu \\
1.3 & \mbox{if} & x' \geq 0.1/\gamma_\mu \end{array} \right. .
\end{align}
The electric field in the wave frame is then given by
$$E'^{x'}(x') = -B'^z(0,x') \ \ , \ \ E'^y(x') = 0.0 \ \ , \ \ E'^z(x') = 1.0 .$$
These are converted to the grid frame by
\begin{align}
B^x(0,x) = &\ B'^{x'}(\gamma_\mu x) , \\
B^y(0,x) = &\ \gamma_\mu [ B'^y(\gamma_\mu x) - \mu E'^z(\gamma_\mu x) ] , \\
B^z(0,x) = &\ \gamma_\mu [ B'^z(\gamma_\mu x) + \mu E'^y(\gamma_\mu x) ] ,
\end{align}
and
\begin{align}
E^x(0,x) = &\ E'^{x'}(\gamma_\mu x) , \\
E^y(0,x) = &\ \gamma_\mu [ E'^y(\gamma_\mu x) + \mu B'^z(\gamma_\mu x) ] ,\\
E^z(0,x) = &\ \gamma_\mu [ E'^z(\gamma_\mu x) - \mu B'^y(\gamma_\mu x) ],
\end{align}
and the velocity is given by $$\mathbf{v} = \frac{\mathbf{E} \times \mathbf{B}}{B^2}$$ in flat spacetime. Additionally, $f(x)=1+\sin (5\pi x)$, $-1<\mu<1$ is the wave speed relative to the grid frame and $\gamma_\mu = (1-\mu^2)^{-1/2}$, and $g(x) = \cos (5\pi \gamma_\mu x)/\pi$.
For the eventual purpose of testing convergence, any quantity $Q$ evolves as $Q(t,x) = Q(0,x-\mu t)$
See the [Tutorial-GiRaFFEfood_NRPy](Tutorial-GiRaFFEfood_NRPy.ipynb) tutorial notebook for more general detail on how this is used.
<a id='toc'></a>
# Table of Contents:
$$\label{toc}$$
This notebook is organized as follows
1. [Step 1](#initializenrpy): Import core NRPy+ modules and set NRPy+ parameters
1. [Step 2](#set_a_i): Set the vector $A_i$
1. [Step 3](#set_vi): Calculate $v^i$ from $B^i$ and $E_i$
1. [Step 4](#code_validation): Code Validation against `GiRaFFEfood_NRPy.GiRaFFEfood_NRPy` NRPy+ Module
1. [Step 5](#latex_pdf_output): Output this notebook to $\LaTeX$-formatted PDF file
<a id='initializenrpy'></a>
# Step 1: Import core NRPy+ modules and set NRPy+ parameters \[Back to [top](#toc)\]
$$\label{initializenrpy}$$
Here, we will import the NRPy+ core modules and set the reference metric to Cartesian, set commonly used NRPy+ parameters, and set C parameters that will be set from outside the code eventually generated from these expressions. We will also set up a parameter to determine what initial data is set up, although it won't do much yet.
```
# Step 0: Add NRPy's directory to the path
# https://stackoverflow.com/questions/16780014/import-file-from-parent-directory
import os,sys
nrpy_dir_path = os.path.join("..")
if nrpy_dir_path not in sys.path:
    sys.path.append(nrpy_dir_path)

# Step 0.a: Import the NRPy+ core modules and set the reference metric to Cartesian
import sympy as sp # SymPy: The Python computer algebra package upon which NRPy+ depends
import NRPy_param_funcs as par # NRPy+: Parameter interface
import indexedexp as ixp # NRPy+: Symbolic indexed expression (e.g., tensors, vectors, etc.) support
import GiRaFFEfood_NRPy.GiRaFFEfood_NRPy_Common_Functions as gfcf # Some useful functions for GiRaFFE initial data.
import reference_metric as rfm # NRPy+: Reference metric support
par.set_parval_from_str("reference_metric::CoordSystem","Cartesian")
rfm.reference_metric() # build the reference-metric quantities (rfm.xx_to_Cart is used below)

# Step 1a: Set commonly used parameters.
thismodule = "GiRaFFEfood_NRPy_1D" # parameter-module name passed to par.Cparameters below
```
##### <a id='set_a_i'></a>
# Step 2: Set the vector $A_i$ \[Back to [top](#toc)\]
$$\label{set_a_i}$$
The vector potential is given as
\begin{align}
A_x &= 0 \\
A_y &= \left \{ \begin{array}{lll}\gamma_\mu x - 0.015 & \mbox{if} & x \leq -0.1/\gamma_\mu \\
1.15 \gamma_\mu x - 0.03g(x) & \mbox{if} & -0.1/\gamma_\mu \leq x \leq 0.1/\gamma_\mu \\
1.3 \gamma_\mu x - 0.015 & \mbox{if} & x \geq 0.1/\gamma_\mu \end{array} \right. , \\
A_z &= y - \gamma_\mu (1-\mu)x .
\end{align}
However, to take full advantage of NRPy+'s automated function generation capabilities, we want to write this without the `if` statements, replacing them with calls to `fabs()`. To do so, we will use the NRPy+ module `Min_Max_and_Piecewise_Expressions`.
```
mu_AW = par.Cparameters("REAL",thismodule,["mu_AW"], -0.5) # The wave speed relative to the grid frame, -1 < mu < 1
M_PI = par.Cparameters("#define",thismodule,["M_PI"], "") # pi, provided by the C macro M_PI in generated code
gammamu = sp.sympify(1)/sp.sqrt(sp.sympify(1)-mu_AW**2) # Lorentz factor gamma_mu = (1 - mu^2)^(-1/2)
bound = sp.Rational(1,10)/gammamu # the piecewise-branch boundary |x| = 0.1/gamma_mu
def g_AW(x):
    """Return g(x) = cos(5 pi gamma_mu x)/pi, used in the middle branch of A_y."""
    return sp.cos(sp.sympify(5)*M_PI*gammamu*x)/M_PI
```
Now, we can define the vector potential. We will rewrite $A_y$ to make use of the functions provided by `Min_Max_and_Piecewise_Expressions`. As shown below, we make sure that at each boundary, each $\leq$ is paired with a $>$. (This choice is arbitrary, we could just as easily choose $<$ and $\geq$.) This does not change the data since the function is continuous. However, it is necessary for the functions in `Min_Max_and_Piecewise_Expressions` to output the correct results.
\begin{align}
A_x &= 0 \\
A_y &= \left \{ \begin{array}{lll}\gamma_\mu x - 0.015 & \mbox{if} & x \leq -0.1/\gamma_\mu \\
1.15 \gamma_\mu x - 0.03g(x) & \mbox{if} & -0.1/\gamma_\mu < x \leq 0.1/\gamma_\mu \\
1.3 \gamma_\mu x - 0.015 & \mbox{if} & x > 0.1/\gamma_\mu \end{array} \right. , \\
A_z &= y - \gamma_\mu (1-\mu)x .
\end{align}
```
import Min_Max_and_Piecewise_Expressions as noif
def Ax_AW(x,y,z, **params):
    """A_x = 0 everywhere for the Alfven-wave initial data."""
    return sp.sympify(0)
def Ay_AW(x,y,z, **params):
    """Piecewise vector potential A_y, assembled without `if` statements."""
    # \gamma_\mu x - 0.015 if x <= -0.1/\gamma_\mu
    # 1.15 \gamma_\mu x - 0.03g(x) if -0.1/\gamma_\mu < x <= 0.1/\gamma_\mu
    # 1.3 \gamma_\mu x - 0.015 if x > 0.1/\gamma_\mu
    Ayleft = gammamu*x - sp.Rational(15,1000)
    Aycenter = sp.Rational(115,100)*gammamu*x - sp.Rational(3,100)*g_AW(x)
    Ayright = sp.Rational(13,10)*gammamu*x - sp.Rational(15,1000)
    # Each coord_* factor is 1 inside its region and 0 outside, so the sum
    # selects exactly one branch; <= is paired with > at each boundary.
    out = noif.coord_leq_bound(x,-bound)*Ayleft\
        +noif.coord_greater_bound(x,-bound)*noif.coord_leq_bound(x,bound)*Aycenter\
        +noif.coord_greater_bound(x,bound)*Ayright
    return out
def Az_AW(x,y,z, **params):
    """A_z = y - gamma_mu (1 - mu) x."""
    # y - \gamma_\mu (1-\mu)x
    return y-gammamu*(sp.sympify(1)-mu_AW)*x
```
<a id='set_vi'></a>
# Step 3: Calculate $v^i$ from $B^i$ and $E_i$ \[Back to [top](#toc)\]
$$\label{set_vi}$$
Now, we will set the magnetic and electric fields that we will need to define the initial velocities. First, we need to define $$f(x)=1+\sin (5\pi x);$$ note that in the definition of $B^i$, we need $f(x')$ where $x'=\gamma_\mu x$.
```
def f_AW(x):
    """Return f(x') = 1 + sin(5 pi x'), evaluated at x' = gamma_mu * x."""
    xprime = gammamu*x  # the B'^z definition needs f at the wave-frame coordinate
    return 1 + sp.sin(5*M_PI*xprime)
```
We will first set the magnetic field in the wave frame, once again rewriting $B'^z(x')$ to be compatible with `Min_Max_and_Piecewise_Expressions`:
\begin{align}
B'^{x'}(x') = &\ 1.0,\ B'^y(x') = 1.0, \\
B'^z(x') = &\ \left \{ \begin{array}{lll} 1.0 & \mbox{if} & x' \leq -0.1 \\
1.0+0.15 f(x') & \mbox{if} & -0.1 < x' \leq 0.1 \\
1.3 & \mbox{if} & x' > 0.1 \end{array} \right. .
\end{align}
Then, we will set the electric field in the wave frame:
\begin{align}
E'^{x'}(x') &= -B'^z(0,x'), \\
E'^y(x') &= 0.0, \\
E'^z(x') &= 1.0 .
\end{align}
Next, we must transform the fields into the grid frame. We'll do the magnetic fields first.
\begin{align}
B^x(0,x) = &\ B'^{x'}(\gamma_\mu x) , \\
B^y(0,x) = &\ \gamma_\mu [ B'^y(\gamma_\mu x) - \mu E'^z(\gamma_\mu x) ] , \\
B^z(0,x) = &\ \gamma_\mu [ B'^z(\gamma_\mu x) + \mu E'^y(\gamma_\mu x) ] ,
\end{align}
And finally the electric fields:
\begin{align}
E^x(0,x) = &\ E'^{x'}(\gamma_\mu x) , \\
E^y(0,x) = &\ \gamma_\mu [ E'^y(\gamma_\mu x) + \mu B'^z(\gamma_\mu x) ] ,\\
E^z(0,x) = &\ \gamma_\mu [ E'^z(\gamma_\mu x) - \mu B'^y(\gamma_\mu x) ],
\end{align}
```
#Step 3: Compute v^i from B^i and E_i
def ValenciavU_func_AW(**params):
    """Build the Valencia 3-velocity v^i = (E x B)/B^2 for the Alfven-wave test.

    Constructs the wave-frame fields B'^i and E'^i, boosts them into the
    grid frame with wave speed mu_AW, and hands the grid-frame fields to
    the common helper that forms the drift velocity.
    """
    x = rfm.xx_to_Cart[0]  # Cartesian x coordinate of the reference metric
    # Wave-frame B'^z branches (piecewise in x, boundaries at +/- 0.1/gamma_mu)
    Bzleft = sp.sympify(1)
    Bzcenter = sp.sympify(1) + sp.Rational(15,100)*f_AW(x)
    Bzright = sp.Rational(13,10)
    # Wave-frame magnetic field B'^i = (1, 1, piecewise)
    BpU = ixp.zerorank1()
    BpU[0] = sp.sympify(1)
    BpU[1] = sp.sympify(1)
    # Piecewise assembly: exactly one window factor is 1 at any x.
    BpU[2] = noif.coord_leq_bound(x,-bound)*Bzleft\
        +noif.coord_greater_bound(x,-bound)*noif.coord_leq_bound(x,bound)*Bzcenter\
        +noif.coord_greater_bound(x,bound)*Bzright
    # Wave-frame electric field E'^i = (-B'^z, 0, 1)
    EpU = ixp.zerorank1()
    EpU[0] = -BpU[2]
    EpU[1] = sp.sympify(0)
    EpU[2] = sp.sympify(1)
    # Boost the magnetic field to the grid frame (see the boost formulas above).
    BU = ixp.zerorank1()
    BU[0] = BpU[0]
    BU[1] = gammamu*(BpU[1]-mu_AW*EpU[2])
    BU[2] = gammamu*(BpU[2]+mu_AW*EpU[1])
    # Boost the electric field to the grid frame.
    EU = ixp.zerorank1()
    EU[0] = EpU[0]
    EU[1] = gammamu*(EpU[1]+mu_AW*BpU[2])
    EU[2] = gammamu*(EpU[2]-mu_AW*BpU[1])
    # In flat space, ED and EU are identical, so we can still use this function.
    return gfcf.compute_ValenciavU_from_ED_and_BU(EU, BU)
```
<a id='code_validation'></a>
# Step 4: Code Validation against `GiRaFFEfood_NRPy.GiRaFFEfood_NRPy` NRPy+ Module \[Back to [top](#toc)\]
$$\label{code_validation}$$
Here, as a code validation check, we verify agreement in the SymPy expressions for the `GiRaFFE` Aligned Rotator initial data equations we intend to use between
1. this tutorial and
2. the NRPy+ [`GiRaFFEfood_NRPy/GiRaFFEfood_NRPy_1D_tests.py`](../edit/GiRaFFEfood_NRPy/GiRaFFEfood_NRPy_1D_tests.py) module.
```
import GiRaFFEfood_NRPy.GiRaFFEfood_NRPy as gf  # the NRPy+ module we validate against
# Generate A_i (staggered) and the Valencia velocity from this notebook's functions...
A_awD = gfcf.Axyz_func_Cartesian(Ax_AW,Ay_AW,Az_AW,stagger_enable = True,)
Valenciav_awD = ValenciavU_func_AW()
# ...and generate the same initial data with the NRPy+ module (fills gf.AD, gf.ValenciavU).
gf.GiRaFFEfood_NRPy_generate_initial_data(ID_type = "AlfvenWave", stagger_enable = True)
def consistency_check(quantity1,quantity2,string):
    """Report whether two quantities agree; abort the run on any mismatch."""
    difference = quantity1 - quantity2
    if difference != 0:
        # Disagreement between the notebook and the NRPy+ module is fatal.
        print(string+" does not agree!")
        sys.exit(1)
    print(string+" is in agreement!")
print("Consistency check between GiRaFFEfood_NRPy tutorial and NRPy+ module:")
# Compare every component of the Valencia velocity and the vector potential.
for i in range(3):
    consistency_check(Valenciav_awD[i],gf.ValenciavU[i],"ValenciavU"+str(i))
    consistency_check(A_awD[i],gf.AD[i],"AD"+str(i))
```
<a id='latex_pdf_output'></a>
# Step 5: Output this notebook to $\LaTeX$-formatted PDF file \[Back to [top](#toc)\]
$$\label{latex_pdf_output}$$
The following code cell converts this Jupyter notebook into a proper, clickable $\LaTeX$-formatted PDF file. After the cell is successfully run, the generated PDF may be found in the root NRPy+ tutorial directory, with filename
[Tutorial-GiRaFFEfood_NRPy-Alfven_Wave.pdf](Tutorial-GiRaFFEfood_NRPy-Alfven_Wave.pdf) (Note that clicking on this link may not work; you may need to open the PDF file through another means.)
```
import cmdline_helper as cmd # NRPy+: Multi-platform Python command-line interface
# Convert this notebook into a LaTeX'ed PDF written to the parent tutorial directory.
cmd.output_Jupyter_notebook_to_LaTeXed_PDF("Tutorial-GiRaFFEfood_NRPy-Alfven_Wave",location_of_template_file=os.path.join(".."))
```
| github_jupyter |
# Introduction to scientific computing with Python
J.R. Johansson (jrjohansson at gmail.com), updated by M. V. dos Santos (marcelo.santos at df.ufcg.edu.br)
The latest version of
this [Jupyter notebook](https://jupyter.org/) lecture is available at [https://github.com/mvsantosdev/scientific-python-lectures.git](https://github.com/mvsantosdev/scientific-python-lectures.git).
The other notebooks in this lecture series are indexed at [http://jrjohansson.github.io](http://jrjohansson.github.io).
## The role of computing in science
Science has traditionally been divided into experimental and theoretical disciplines, but during the last several decades computing has emerged as a very important part of science. Scientific computing is often closely related to theory, but it also has many characteristics in common with experimental work. It is therefore often viewed as a new third branch of science. In most fields of science, computational work is an important complement to both experiments and theory, and nowadays a vast majority of both experimental and theoretical papers involve some numerical calculations, simulations or computer modeling.
<center>
<img src="https://github.com/mvsantosdev/scientific-python-lectures/blob/master/images/theory-experiment-computation.png?raw=true" width="300">
</center>
In experimental and theoretical sciences there are well established codes of conducts for how results and methods are published and made available to other scientists. For example, in theoretical sciences, derivations, proofs and other results are published in full detail, or made available upon request. Likewise, in experimental sciences, the methods used and the results are published, and all experimental data should be available upon request. It is considered unscientific to withhold crucial details in a theoretical proof or experimental method, that would hinder other scientists from replicating and reproducing the results.
In computational sciences there are not yet any well established guidelines for how source code and generated data should be handled. For example, it is relatively rare that source code used in simulations for published papers are provided to readers, in contrast to the open nature of experimental and theoretical work. And it is not uncommon that source code for simulation software is withheld and considered a competitive advantage (or unnecessary to publish).
However, this issue has recently started to attract increasing attention, and a number of editorials in high-profile journals have called for increased openness in computational sciences. Some prestigious journals, including Science, have even started to demand of authors to provide the source code for simulation software used in publications to readers upon request.
Discussions are also ongoing on how to facilitate distribution of scientific software, for example as supplementary materials to scientific papers.
### References
* [Reproducible Research in Computational Science](http://dx.doi.org/10.1126/science.1213847), Roger D. Peng, Science 334, 1226 (2011).
* [Shining Light into Black Boxes](http://dx.doi.org/10.1126/science.1218263), A. Morin et al., Science 336, 159-160 (2012).
* [The case for open computer programs](http://dx.doi.org/doi:10.1038/nature10836), D.C. Ince, Nature 482, 485 (2012).
## Requirements on scientific computing
**Replication** and **reproducibility** are two of the cornerstones in the scientific method. With respect to numerical work, complying with these concepts have the following practical implications:
* Replication: An author of a scientific paper that involves numerical calculations should be able to rerun the simulations and replicate the results upon request. Other scientist should also be able to perform the same calculations and obtain the same results, given the information about the methods used in a publication.
* Reproducibility: The results obtained from numerical simulations should be reproducible with an independent implementation of the method, or using a different method altogether.
In summary: A sound scientific result should be reproducible, and a sound scientific study should be replicable.
To achieve these goals, we need to:
* Keep and take note of *exactly* which source code and version that was used to produce data and figures in published papers.
* Record information of which version of external software that was used. Keep access to the environment that was used.
* Make sure that old codes and notes are backed up and kept for future reference.
* Be ready to give additional information about the methods used, and perhaps also the simulation codes, to an interested reader who requests it (even years after the paper was published!).
* Ideally codes should be published online, to make it easier for other scientists interested in the codes to access it.
### Tools for managing source code
Ensuring replicability and reproducibility of scientific simulations is a *complicated problem*, but there are good tools to help with this:
* Revision Control System (RCS) software.
* Good choices include:
* git - http://git-scm.com
* mercurial - http://mercurial.selenic.com. Also known as `hg`.
* subversion - http://subversion.apache.org. Also known as `svn`.
* Online repositories for source code. Available as both private and public repositories.
* Some good alternatives are
* Github - http://www.github.com
* Bitbucket - http://www.bitbucket.org
* Privately hosted repositories on the university's or department's servers.
#### Note
Repositories are also excellent for version controlling manuscripts, figures, thesis files, data files, lab logs, etc. Basically for any digital content that must be preserved and is frequently updated. Again, both public and private repositories are readily available. They are also excellent collaboration tools!
## What is Python?
[Python](http://www.python.org/) is a modern, general-purpose, object-oriented, high-level programming language.
General characteristics of Python:
* **clean and simple language:** Easy-to-read and intuitive code, easy-to-learn minimalistic syntax, maintainability scales well with size of projects.
* **expressive language:** Fewer lines of code, fewer bugs, easier to maintain.
Technical details:
* **dynamically typed:** No need to define the type of variables, function arguments or return types.
* **automatic memory management:** No need to explicitly allocate and deallocate memory for variables and data arrays. No memory leak bugs.
* **interpreted:** No need to compile the code. The Python interpreter reads and executes the python code directly.
Advantages:
* The main advantage is ease of programming, minimizing the time required to develop, debug and maintain the code.
* Well designed language that encourages many good programming practices:
* Modular and object-oriented programming, good system for packaging and re-use of code. This often results in more transparent, maintainable and bug-free code.
* Documentation tightly integrated with the code.
* A large standard library, and a large collection of add-on packages.
Disadvantages:
* Since Python is an interpreted and dynamically typed programming language, the execution of python code can be slow compared to compiled statically typed programming languages, such as C and Fortran.
* Somewhat decentralized, with different environment, packages and documentation spread out at different places. Can make it harder to get started.
## What makes python suitable for scientific computing?
<img src="https://github.com/mvsantosdev/scientific-python-lectures/blob/master/images/optimizing-what.png?raw=true" width="600">
* Python has a strong position in scientific computing:
* Large community of users, easy to find help and documentation.
* Extensive ecosystem of scientific libraries and environments
* numpy: http://numpy.scipy.org - Numerical Python
* scipy: http://www.scipy.org - Scientific Python
* matplotlib: http://www.matplotlib.org - graphics library
* Great performance due to close integration with time-tested and highly optimized codes written in C and Fortran:
* blas, atlas blas, lapack, arpack, Intel MKL, ...
* Good support for
* Parallel processing with processes and threads
* Interprocess communication (MPI)
* GPU computing (OpenCL and CUDA)
* Readily available and suitable for use on high-performance computing clusters.
* No license costs, no unnecessary use of research budget.
### The zen of Python
```
import this
```
### The scientific python software stack
<!-- <img src="files/images/scientific-python-stack.svg" width="300"> -->
<img src="https://github.com/mvsantosdev/scientific-python-lectures/blob/master/images/scientific-python-stack.png?raw=true" width="300">
### Python environments
Python is not only a programming language, but often also refers to the standard implementation of the interpreter (technically referred to as [CPython](http://en.wikipedia.org/wiki/CPython)) that actually runs the python code on a computer.
There are also many different environments through which the python interpreter can be used. Each environment has different advantages and is suitable for different workflows. One strength of python is that it is versatile and can be used in complementary ways, but it can be confusing for beginners so we will start with a brief survey of python environments that are useful for scientific computing.
### Python interpreter
The standard way to use the Python programming language is to use the Python interpreter to run python code. The python interpreter is a program that reads and execute the python code in files passed to it as arguments. At the command prompt, the command ``python`` is used to invoke the Python interpreter.
For example, to run a file ``my-program.py`` that contains python code from the command prompt, use::
$ python my-program.py
We can also start the interpreter by simply typing ``python`` at the command line, and interactively type python code into the interpreter.
<!-- <img src="files/images/python-screenshot.jpg" width="600"> -->
<img src="https://github.com/mvsantosdev/scientific-python-lectures/blob/master/images/python-screenshot.jpg?raw=true" width="600">
This is often how we want to work when developing scientific applications, or when doing small calculations. But the standard python interpreter is not very convenient for this kind of work, due to a number of limitations.
### IPython
IPython is an interactive shell that addresses the limitation of the standard python interpreter, and it is a work-horse for scientific use of python. It provides an interactive prompt to the python interpreter with a greatly improved user-friendliness.
<!-- <img src="files/images/ipython-screenshot.jpg" width="600"> -->
<img src="https://github.com/mvsantosdev/scientific-python-lectures/blob/master/images/ipython-screenshot.jpg?raw=true" width="600">
Some of the many useful features of IPython includes:
* Command history, which can be browsed with the up and down arrows on the keyboard.
* Tab auto-completion.
* In-line editing of code.
* Object introspection, and automatic extract of documentation strings from python objects like classes and functions.
* Good interaction with operating system shell.
* Support for multiple parallel back-end processes, that can run on computing clusters or cloud services like Amazon EC2.
### Jupyter notebook
[Jupyter notebook](https://jupyter.org/) is an HTML-based notebook environment for Python, similar to Mathematica or Maple. It is based on the IPython shell, but provides a cell-based environment with great interactivity, where calculations can be organized and documented in a structured way.
<img src="https://github.com/mvsantosdev/scientific-python-lectures/blob/master/images/jupyterpreview.png?raw=true" width="600">
Although using a web browser as graphical interface, IPython notebooks are usually run locally, from the same computer that runs the browser. To start a new IPython notebook session, run the following command:
$ jupyter notebook
from a directory where you want the notebooks to be stored. This will open a new browser window (or a new tab in an existing window) with an index page where existing notebooks are shown and from which new notebooks can be created.
### Spyder
[Spyder](http://code.google.com/p/spyderlib/) is a MATLAB-like IDE for scientific computing with python. It has the many advantages of a traditional IDE environment, for example that everything from code editing, execution and debugging is carried out in a single environment, and work on different calculations can be organized as projects in the IDE environment.
<!-- <img src="files/images/spyder-screenshot.jpg" width="800"> -->
<img src="https://github.com/mvsantosdev/scientific-python-lectures/blob/master/images/spyder-screenshot.jpg?raw=true" width="800">
Some advantages of Spyder:
* Powerful code editor, with syntax high-lighting, dynamic code introspection and integration with the python debugger.
* Variable explorer, IPython command prompt.
* Integrated documentation and help.
### Google Colab Notebooks
Colaboratory is a Google research project created to help disseminate machine learning education and research. It's a Jupyter notebook environment that requires no setup to use and runs entirely in the cloud.
<img src="https://github.com/mvsantosdev/scientific-python-lectures/blob/master/images/colab.png?raw=true" width="600">
## Versions of Python
There are currently two versions of python: Python 2 and Python 3. On January 1, 2020, the 2.x branch of the Python programming language reached its end of life and is no longer supported by its creators, the Python Software Foundation.
To see which version of Python you have, run
$ python --version
Python 3.7.4
## Installation
### Conda
The best way set-up an scientific Python environment is to use the cross-platform package manager `conda` from Continuum Analytics. First download and install miniconda http://conda.pydata.org/miniconda.html or Anaconda (see below). Next, to install the required libraries for these notebooks, simply run:
$ conda install ipython ipython-notebook spyder numpy scipy sympy matplotlib cython
This should be sufficient to get a working environment on any platform supported by `conda`.
## Further reading
* [Python](http://www.python.org). The official Python web site.
* [Python tutorials](http://docs.python.org/2/tutorial). The official Python tutorials.
* [Think Python](http://www.greenteapress.com/thinkpython). A free book on Python.
## Python and module versions
Since there are several different versions of Python and each Python package has its own release cycle and version number (for example scipy, numpy, matplotlib, etc., which we installed above and will discuss in detail in the following lectures), it is important for the reproducibility of an IPython notebook to record the versions of all these different software packages. If this is done properly it will be easy to reproduce the environment that was used to run a notebook, but if not it can be hard to know what was used to produce the results in a notebook.
To encourage the practice of recording Python and module versions in notebooks, I've created a simple IPython extension that produces a table with versions numbers of selected software components. I believe that it is a good practice to include this kind of table in every notebook you create.
To install this IPython extension, use `pip install version_information`:
```
# you only need to do this once
!pip install --upgrade version_information
```
or alternatively run (deprecated method):
# you only need to do this once
%install_ext http://raw.github.com/jrjohansson/version_information/master/version_information.py
Now, to load the extension and produce the version table
```
%load_ext version_information
%version_information numpy, scipy, matplotlib, sympy, pandas, version_information
```
| github_jupyter |
```
%matplotlib inline
```
Saving and loading models across devices in PyTorch
===================================================
There may be instances where you want to save and load your neural
networks across different devices.
Introduction
------------
Saving and loading models across devices is relatively straightforward
using PyTorch. In this recipe, we will experiment with saving and
loading models across CPUs and GPUs.
Setup
-----
In order for every code block to run properly in this recipe, you must
first change the runtime to “GPU” or higher. Once you do, we need to
install ``torch`` if it isn’t already available.
::
pip install torch
Steps
-----
1. Import all necessary libraries for loading our data
2. Define and initialize the neural network
3. Save on a GPU, load on a CPU
4. Save on a GPU, load on a GPU
5. Save on a CPU, load on a GPU
6. Saving and loading ``DataParallel`` models
1. Import necessary libraries for loading our data
~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
For this recipe, we will use ``torch`` and its subsidiaries ``torch.nn``
and ``torch.optim``.
```
import torch
import torch.nn as nn
import torch.optim as optim
```
2. Define and initialize the neural network
~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
For sake of example, we will create a neural network for training
images. To learn more see the Defining a Neural Network recipe.
```
class Net(nn.Module):
    """Small LeNet-style CNN producing 10-class logits for 3x32x32 images.

    Architecture: conv(3->6, k5) -> pool -> conv(6->16, k5) -> pool ->
    three fully connected layers (400 -> 120 -> 84 -> 10).
    """

    def __init__(self):
        super(Net, self).__init__()
        self.conv1 = nn.Conv2d(3, 6, 5)
        self.pool = nn.MaxPool2d(2, 2)
        self.conv2 = nn.Conv2d(6, 16, 5)
        self.fc1 = nn.Linear(16 * 5 * 5, 120)
        self.fc2 = nn.Linear(120, 84)
        self.fc3 = nn.Linear(84, 10)

    def forward(self, x):
        # BUGFIX: the original called F.relu, but this notebook never imports
        # torch.nn.functional as F, so forward() raised NameError at runtime.
        # nn.functional is the same module, reached via the existing nn import.
        x = self.pool(nn.functional.relu(self.conv1(x)))
        x = self.pool(nn.functional.relu(self.conv2(x)))
        # Flatten everything after the batch dimension for the dense head.
        x = x.view(-1, 16 * 5 * 5)
        x = nn.functional.relu(self.fc1(x))
        x = nn.functional.relu(self.fc2(x))
        x = self.fc3(x)
        return x
# Instantiate the network and print its layer summary.
net = Net()
print(net)
```
3. Save on GPU, Load on CPU
~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
When loading a model on a CPU that was trained with a GPU, pass
``torch.device('cpu')`` to the ``map_location`` argument in the
``torch.load()`` function.
```
# Specify a path to save to
PATH = "model.pt"
# Save
torch.save(net.state_dict(), PATH)
# Load
# map_location=torch.device('cpu') remaps GPU-saved tensor storages onto the CPU.
device = torch.device('cpu')
model = Net()
model.load_state_dict(torch.load(PATH, map_location=device))
```
In this case, the storages underlying the tensors are dynamically
remapped to the CPU device using the ``map_location`` argument.
4. Save on GPU, Load on GPU
~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
When loading a model on a GPU that was trained and saved on GPU, simply
convert the initialized model to a CUDA optimized model using
``model.to(torch.device('cuda'))``.
Be sure to use the ``.to(torch.device('cuda'))`` function on all model
inputs to prepare the data for the model.
```
# Save
torch.save(net.state_dict(), PATH)
# Load
device = torch.device("cuda")
model = Net()
# Tensors load onto the device they were saved from; .to(device) then moves
# the model's parameters onto the current GPU.
model.load_state_dict(torch.load(PATH))
model.to(device)
```
Note that calling ``my_tensor.to(device)`` returns a new copy of
``my_tensor`` on GPU. It does NOT overwrite ``my_tensor``. Therefore,
remember to manually overwrite tensors:
``my_tensor = my_tensor.to(torch.device('cuda'))``.
5. Save on CPU, Load on GPU
~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
When loading a model on a GPU that was trained and saved on CPU, set the
``map_location`` argument in the ``torch.load()`` function to
``cuda:device_id``. This loads the model to a given GPU device.
Be sure to call ``model.to(torch.device('cuda'))`` to convert the
model’s parameter tensors to CUDA tensors.
Finally, also be sure to use the ``.to(torch.device('cuda'))`` function
on all model inputs to prepare the data for the CUDA optimized model.
```
# Save
torch.save(net.state_dict(), PATH)
# Load
device = torch.device("cuda")
model = Net()
# Choose whatever GPU device number you want
# map_location="cuda:0" remaps the CPU-saved storages directly onto GPU 0.
model.load_state_dict(torch.load(PATH, map_location="cuda:0"))
# Make sure to call input = input.to(device) on any input tensors that you feed to the model
model.to(device)
```
6. Saving ``torch.nn.DataParallel`` Models
~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
``torch.nn.DataParallel`` is a model wrapper that enables parallel GPU
utilization.
To save a ``DataParallel`` model generically, save the
``model.module.state_dict()``. This way, you have the flexibility to
load the model any way you want to any device you want.
```
# Save
# Saving model.module.state_dict() strips the DataParallel wrapper, so the
# checkpoint can later be loaded into a plain (unwrapped) model on any device.
torch.save(net.module.state_dict(), PATH)
# Load to whatever device you want
```
Congratulations! You have successfully saved and loaded models across
devices in PyTorch.
Learn More
----------
Take a look at these other recipes to continue your learning:
- TBD
- TBD
| github_jupyter |
```
import scanpy as sc
import numpy as np
import scipy as sp
import pandas as pd
import matplotlib.pyplot as plt
from matplotlib import rcParams
from matplotlib import colors
import seaborn as sb
import rpy2.rinterface_lib.callbacks
import logging
import scrublet as scr
from rpy2.robjects import pandas2ri
import anndata2ri
# Ignore R warning messages
#Note: this can be commented out to get more verbose R output
rpy2.rinterface_lib.callbacks.logger.setLevel(logging.ERROR)
# Automatically convert rpy2 outputs to pandas dataframes
pandas2ri.activate()
anndata2ri.activate()
# IPython magic: enable the %%R cell magic for inline R code below.
%load_ext rpy2.ipython
plt.rcParams['figure.figsize']=(8,8) #rescale figures
sc.settings.verbosity = 3
#sc.set_figure_params(dpi=200, dpi_save=300)
sc.logging.print_header()
%%R
# Load libraries from correct lib Paths for my environment - ignore this!
.libPaths("/home/spuccio/anaconda3/envs/singlecell/lib/R/library/")
library(clustree)
# Build a custom colormap: light greys for low values, reds for high values.
colors2 = plt.cm.Reds(np.linspace(0, 1, 128))
colors3 = plt.cm.Greys_r(np.linspace(0.7,0.8,20))
colorsComb = np.vstack([colors3, colors2])
mymap = colors.LinearSegmentedColormap.from_list('my_colormap', colorsComb)
# Load the cleaned CD4 AnnData object produced by the DESC clustering step.
#adata = sc.read_h5ad("/mnt/lugli/spuccio/SP028_Autoimmunity/Cariplo/IBD_counts/h5files/DESC_obj_CD4_dirty.h5ad")
adata =sc.read_h5ad("/mnt/lugli/spuccio/SP028_Autoimmunity/Cariplo/IBD_counts/h5files/CD4_after_DESC_clean.h5ad")
adata.X
sc.set_figure_params(dpi=100, color_map = 'viridis_r')
# Differential expression per DESC cluster (resolution 0.6) before imputation.
sc.tl.rank_genes_groups(adata, 'desc_0.6', method='t-test',use_raw=True)
sc.pl.rank_genes_groups(adata, n_genes=25, sharey=False)
result = adata.uns['rank_genes_groups']
groups = result['names'].dtype.names
# Export one column per (cluster, statistic) pair to CSV.
pd.DataFrame(
    {group + '_' + key[:]: result[key][group]
     for group in groups for key in ['names','pvals','pvals_adj','logfoldchanges']}).to_csv("/mnt/lugli/spuccio/SP028_Autoimmunity/Cariplo/CD4_res06_DEGS_beforeMAGIC.csv",header=True,index=False)
# Normalize counts per cell, then square-root transform before MAGIC.
sc.pp.normalize_total(adata, target_sum=1e4)
sc.pp.sqrt(adata)
import scanpy.external as sce
# MAGIC imputation, restricted to genes with nonzero total expression.
adata_magic = sce.pp.magic(adata[:, adata[:,].to_df().sum(axis=0) > 0.01], name_list="all_genes", knn=5,copy=True,n_jobs=20)
adata_magic.raw = adata_magic
#pd.DataFrame(adata_magic.X,index=adata_magic.obs.index,columns=adata_magic.var.index).round(2).to_csv("/mnt/lugli/spuccio/SP028_Autoimmunity/Cariplo/IBD_counts/h5files/CD4_magic_imputed.tsv",sep="\t",header=True,index=True)
# NOTE(review): this writes `adata` (not `adata_magic`) squared back onto the
# sqrt scale, yet the filename says "magic_imputed" — confirm which matrix was
# meant to be exported (the commented-out line above exports adata_magic).
pd.DataFrame(adata.X,index=adata.obs.index,columns=adata.var.index).pow(2).round(3).to_csv("/mnt/lugli/spuccio/SP028_Autoimmunity/Cariplo/IBD_counts/h5files/CD4_magic_imputed.tsv",sep="\t",header=True,index=True)
#a = np.power(pd.DataFrame(adata.X,index=adata.obs.index,columns=adata.var.index), 2)
#a.round(2)
sc.set_figure_params(dpi=100, color_map = 'viridis_r')
# Repeat the differential-expression analysis on the MAGIC-imputed data.
sc.tl.rank_genes_groups(adata_magic, 'desc_0.6', method='t-test',use_raw=True)
sc.pl.rank_genes_groups(adata_magic, n_genes=25, sharey=False)
result = adata_magic.uns['rank_genes_groups']
groups = result['names'].dtype.names
pd.DataFrame(
    {group + '_' + key[:]: result[key][group]
     for group in groups for key in ['names','pvals','pvals_adj','logfoldchanges']}).to_csv("/mnt/lugli/spuccio/SP028_Autoimmunity/Cariplo/CD4_res06_DEGS_afterMAGIC.csv",header=True,index=False)
groups = result['names'].dtype.names
# Display the same table inline for inspection.
pd.DataFrame(
    {group + '_' + key[:]: result[key][group]
     for group in groups for key in ['names','pvals','pvals_adj','logfoldchanges']})
adata_magic.uns['rank_genes_groups']
```
| github_jupyter |
# Deep learning for computer vision
This notebook will teach you to build and train convolutional networks for image recognition. Brace yourselves.
```
# if you're running in colab,
# 1. go to Runtime -> Change Runtime Type -> GPU
# 2. Run this to fetch the Tiny ImageNet download helper used below
!wget https://raw.githubusercontent.com/yandexdataschool/Practical_DL/spring2019/week03_convnets/tiny_img.py -O tiny_img.py
```
# Tiny ImageNet dataset
This week, we shall focus on the image recognition problem on Tiny Image Net dataset
* 100k images of shape 3x64x64
* 200 different classes: snakes, spiders, cats, trucks, grasshoppers, gulls, etc.
```
import os
import torch, torchvision
from torchvision import transforms
from torch.utils.data import DataLoader
from tiny_img import download_tinyImg200
# Download Tiny ImageNet only on the first run.
if not os.path.exists('./tiny-imagenet-200/'):
    download_tinyImg200('.')
device = 'cuda' if torch.cuda.is_available() else 'cpu'
dataset = torchvision.datasets.ImageFolder('tiny-imagenet-200/train', transform=transforms.ToTensor())
test_dataset = torchvision.datasets.ImageFolder('tiny-imagenet-200/val', transform=transforms.ToTensor())
# Split the 100k training images 80k/20k, then split the 20k into 10k test / 10k val.
# NOTE(review): the ImageFolder over 'val' above is immediately overwritten here,
# so test/val both come from the train folder — confirm this is intended.
train_dataset, val_dataset = torch.utils.data.random_split(dataset, [80000, 20000])
test_dataset, val_dataset = torch.utils.data.random_split(val_dataset, [10000, 10000])
import matplotlib.pyplot as plt
%matplotlib inline
# Show the first ten training images with their integer class labels.
for i in range(10):
    xi, yi = train_dataset[i]
    # CHW tensor -> HWC numpy array for imshow.
    plt.imshow(xi.data.numpy().transpose(1, 2, 0))
    plt.title('class = %i' % yi)
    plt.show()
```
# Building a network
Simple neural networks with layers applied on top of one another can be implemented as `torch.nn.Sequential` - just add a list of pre-built modules and let it train.
```
import torch, torch.nn as nn
import torch.nn.functional as F
# a special module that converts [batch, channel, w, h] to [batch, units]
class Flatten(nn.Module):
    """Flatten every dimension after the batch dimension into one vector."""

    def forward(self, input):
        # reshape() handles non-contiguous inputs (e.g. after a transpose),
        # where the original view() would raise a RuntimeError; for contiguous
        # tensors it is the same zero-copy operation.
        return input.reshape(input.shape[0], -1)
```
Let's start with a dense network for our baseline:
```
model = nn.Sequential()
# reshape from "images" to flat vectors
model.add_module('flatten', Flatten())
# dense "head"
model.add_module('dense1', nn.Linear(3 * 64 * 64, 1064))
model.add_module('dense2', nn.Linear(1064, 512))
model.add_module('dropout0', nn.Dropout(0.05))
model.add_module('dense3', nn.Linear(512, 256))
model.add_module('dropout1', nn.Dropout(0.05))
model.add_module('dense4', nn.Linear(256, 64))
model.add_module('dropout2', nn.Dropout(0.05))
# NOTE(review): this is the only nonlinearity in the stack, so dense1..dense4
# compose into a single linear map (plus dropout). Possibly intentional for a
# weak baseline, but confirm a ReLU was not meant to follow each Linear.
model.add_module('dense1_relu', nn.ReLU())
model.add_module('dense2_logits', nn.Linear(64, 200)) # logits for 200 classes
model = model.to(device=device)
```
As in our basic tutorial, we train our model with negative log-likelihood aka crossentropy.
```
def compute_loss(X_batch, y_batch):
    """Cross-entropy loss of the global `model` on one minibatch.

    Casts the inputs to float32 images / int64 labels on the global `device`
    before the forward pass.
    """
    images = torch.as_tensor(X_batch, dtype=torch.float32, device=device)
    labels = torch.as_tensor(y_batch, dtype=torch.int64, device=device)
    # cross_entropy already averages over the batch; the extra .mean() is a
    # no-op kept for parity with the original code.
    return F.cross_entropy(model(images), labels).mean()
```
### Training on minibatches
* We got 100k images, that's way too many for a full-batch SGD. Let's train on minibatches instead
* Below is a function that splits the training sample into minibatches
```
# Plain SGD on the baseline model; the lists accumulate per-batch diagnostics
# across all epochs (the prints below average over the most recent epoch).
opt = torch.optim.SGD(model.parameters(), lr=0.01)
train_loss = []
val_accuracy = []
import numpy as np
import time
num_epochs = 100 # total number of full passes over data until training finishes
batch_size = 64  # number of images in one minibatch
# FIX: iterate range(num_epochs) instead of the hard-coded range(100) so the
# constant above actually controls training length.
for epoch in range(num_epochs):
    start_time = time.time()
    model.train(True) # enable dropout / batch_norm training behavior
    for (X_batch, y_batch) in DataLoader(train_dataset, batch_size=batch_size, shuffle=True):
        # train on batch: forward, backward, parameter step, reset gradients
        loss = compute_loss(X_batch, y_batch)
        loss.backward()
        opt.step()
        opt.zero_grad()
        train_loss.append(loss.cpu().data.numpy())
    model.train(False) # disable dropout / use averages for batch_norm
    # FIX: run validation under no_grad so autograd graphs are not built,
    # saving memory and time; predictions are unchanged.
    with torch.no_grad():
        for X_batch, y_batch in torch.utils.data.DataLoader(val_dataset, batch_size=batch_size):
            logits = model(torch.as_tensor(X_batch, device=device, dtype=torch.float32))
            y_pred = logits.max(1)[1].data
            val_accuracy.append(np.mean((y_batch.cpu() == y_pred.cpu()).numpy()))
    # Then we print the results for this epoch:
    print("Epoch {} of {} took {:.3f}s".format(
        epoch + 1, num_epochs, time.time() - start_time))
    print(" training loss (in-iteration): \t{:.6f}".format(
        np.mean(train_loss[-len(train_dataset) // batch_size :])))
    print(" validation accuracy: \t\t\t{:.2f} %".format(
        np.mean(val_accuracy[-len(val_dataset) // batch_size :]) * 100))
```
Don't wait for full 100 epochs. You can interrupt training after 5-20 epochs once validation accuracy stops going up.
```
```
```
```
```
```
```
```
```
```
### Final test
```
model.train(False) # disable dropout / use averages for batch_norm
test_batch_acc = []
# NOTE(review): device='cuda' is hard-coded here, while the rest of the
# notebook uses the `device` variable — this cell fails on CPU-only runs.
for X_batch, y_batch in DataLoader(test_dataset, batch_size=batch_size):
    logits = model(torch.as_tensor(X_batch, device='cuda', dtype=torch.float32))
    y_pred = logits.max(1)[1].data
    test_batch_acc.append(np.mean( (y_batch.cpu() == y_pred.cpu()).numpy() ))
# Mean per-batch accuracy over the whole test set.
test_accuracy = np.mean(test_batch_acc)
print("Final results:")
print(" test accuracy:\t\t{:.2f} %".format(
    test_accuracy * 100))
# Tiered feedback based on the final accuracy.
if test_accuracy * 100 > 70:
    print("U'r freakin' amazin'!")
elif test_accuracy * 100 > 50:
    print("Achievement unlocked: 110lvl Warlock!")
elif test_accuracy * 100 > 40:
    print("Achievement unlocked: 80lvl Warlock!")
elif test_accuracy * 100 > 30:
    print("Achievement unlocked: 70lvl Warlock!")
elif test_accuracy * 100 > 20:
    print("Achievement unlocked: 60lvl Warlock!")
else:
    print("We need more magic! Follow instructons below")
```
## Task I: small convolution net
### First step
Let's create a mini-convolutional network with roughly such architecture:
* Input layer
* 3x3 convolution with 128 filters and _ReLU_ activation
* 2x2 pooling (or set previous convolution stride to 3)
* Flatten
* Dense layer with 1024 neurons and _ReLU_ activation
* 30% dropout
* Output dense layer.
__Convolutional layers__ in torch are just like all other layers, but with a specific set of parameters:
__`...`__
__`model.add_module('conv1', nn.Conv2d(in_channels=3, out_channels=128, kernel_size=3)) # convolution`__
__`model.add_module('pool1', nn.MaxPool2d(2)) # max pooling 2x2`__
__`...`__
Once you're done (and compute_loss no longer raises errors), train it with __Adam__ optimizer with default params (feel free to modify the code above).
If everything is right, you should get at least __16%__ validation accuracy.
__HACK_OF_THE_DAY__ :the number of channels must be in the order of the number of class_labels
```
# Exercise template: the angle-bracket placeholders below must be replaced
# with a real convolutional architecture before this cell can run.
model = nn.Sequential()
<describe convnet here>
model.add_module('dense1_logits', nn.Linear(<...>, 200)) # logits for 200 classes
opt = torch.optim.SGD(model.parameters(), lr=0.01)
train_loss = []
val_accuracy = []
# torchsummary prints a Keras-style per-layer shape/parameter summary.
from torchsummary import summary
summary(model.cuda(), (3, 64, 64))
```
## Train it ##
```
num_epochs = 100  # total number of full passes over data until training finishes
batch_size = 64   # number of images in one minibatch

for epoch in range(num_epochs):  # was range(100): honor the num_epochs setting above
    start_time = time.time()
    model.train(True)  # enable dropout / batch_norm training behavior
    for (X_batch, y_batch) in DataLoader(train_dataset, batch_size=batch_size, shuffle=True):
        # train on batch
        loss = compute_loss(X_batch, y_batch)
        loss.backward()
        opt.step()
        opt.zero_grad()
        train_loss.append(loss.cpu().data.numpy())

    model.train(False)  # disable dropout / use averages for batch_norm
    for X_batch, y_batch in torch.utils.data.DataLoader(val_dataset, batch_size=batch_size):
        logits = model(torch.as_tensor(X_batch, device=device, dtype=torch.float32))
        y_pred = logits.max(1)[1].data
        val_accuracy.append(np.mean((y_batch.cpu() == y_pred.cpu()).numpy()))

    # Then we print the results for this epoch:
    print("Epoch {} of {} took {:.3f}s".format(
        epoch + 1, num_epochs, time.time() - start_time))
    print(" training loss (in-iteration): \t{:.6f}".format(
        np.mean(train_loss[-len(train_dataset) // batch_size :])))
    print(" validation accuracy: \t\t\t{:.2f} %".format(
        np.mean(val_accuracy[-len(val_dataset) // batch_size :]) * 100))

# Final test-set evaluation.
model.train(False)  # disable dropout / use averages for batch_norm
test_batch_acc = []
for X_batch, y_batch in DataLoader(test_dataset, batch_size=batch_size):
    logits = model(torch.as_tensor(X_batch, device='cuda', dtype=torch.float32))
    y_pred = logits.max(1)[1].data
    test_batch_acc.append(np.mean((y_batch.cpu() == y_pred.cpu()).numpy()))
test_accuracy = np.mean(test_batch_acc)

print("Final results:")
print(" test accuracy:\t\t{:.2f} %".format(
    test_accuracy * 100))
if test_accuracy * 100 > 70:
    print("U'r freakin' amazin'!")
elif test_accuracy * 100 > 50:
    print("Achievement unlocked: 110lvl Warlock!")
elif test_accuracy * 100 > 40:
    print("Achievement unlocked: 80lvl Warlock!")
elif test_accuracy * 100 > 30:
    print("Achievement unlocked: 70lvl Warlock!")
elif test_accuracy * 100 > 20:
    print("Achievement unlocked: 60lvl Warlock!")
else:
    print("We need more magic! Follow instructions below")  # fixed typo: "instructons"
```
```
```
```
```
```
```
```
```
```
```
__Hint:__ If you don't want to compute shapes by hand, just plug in any shape (e.g. 1 unit) and run compute_loss. You will see something like this:
__`RuntimeError: size mismatch, m1: [5 x 1960], m2: [1 x 64] at /some/long/path/to/torch/operation`__
See the __1960__ there? That's your actual input shape.
## Task 2: adding normalization
* Add batch norm (with default params) between convolution and ReLU
* nn.BatchNorm*d (1d for dense, 2d for conv)
* usually better to put them after linear/conv but before nonlinearity
* Re-train the network with the same optimizer, it should get at least 20% validation accuracy at peak.
To know more about **batch_norm** and **data covariate shift**
https://towardsdatascience.com/batch-normalization-in-neural-networks-1ac91516821c
https://www.youtube.com/watch?v=nUUqwaxLnWs
```
# Skeleton cell for Task 2: same as Task I but with BatchNorm layers added
# between each conv/linear layer and its nonlinearity.
model = nn.Sequential()
# Placeholder -- build the convnet here, inserting nn.BatchNorm2d after convs.
<your model here>
model.add_module('dense1_logits', nn.Linear(<...>, 200)) # logits for 200 classes
opt = torch.optim.SGD(model.parameters(), lr=0.01)
train_loss = []      # running per-batch training losses
val_accuracy = []    # running per-batch validation accuracies
< YOUR CODE: training loop. it's okay to copy from above :) >
< YOUR CODE: evaluate final test error >
```
```
```
```
```
```
```
```
```
```
```
```
```
```
```
## Task 3: Data Augmentation
**Aguamenti — a spell used to produce water from a wand (Harry Potter Wiki)**
<img src="https://github.com/yandexdataschool/mlhep2019/blob/master/notebooks/day-3/HagridsHut_PM_B6C28_Hagrid_sHutFireHarryFang.jpg?raw=1" style="width:80%">
There's a powerful torch tool for image preprocessing useful to do data preprocessing and augmentation.
Here's how it works: we define a pipeline that
* makes random crops of data (augmentation)
* randomly flips image horizontally (augmentation)
* then normalizes it (preprocessing)
When testing, we don't need random crops, just normalize with same statistics.
```
import torchvision
from torchvision import transforms

# Per-channel normalization statistics.
# NOTE(review): these are the classic CIFAR-10 mean/std values -- presumably
# reused here for Tiny ImageNet; confirm they fit this dataset.
means = np.array((0.4914, 0.4822, 0.4465))
stds = np.array((0.2023, 0.1994, 0.2010))

transform_augment = transforms.Compose([
# describe transformation here (e.g. RandomCrop, RandomHorizontalFlip, ToTensor, Normalize)
])

# Load the training folder and split 90k/10k into train and validation sets.
dataset = torchvision.datasets.ImageFolder('tiny-imagenet-200/train', transform=transform_augment)
train_dataset, val_dataset = torch.utils.data.random_split(dataset, [90000, 10000])
num_epochs = 100  # total number of full passes over data until training finishes
batch_size = 64   # number of images in one minibatch

for epoch in range(num_epochs):  # was range(100): honor the num_epochs setting above
    start_time = time.time()
    model.train(True)  # enable dropout / batch_norm training behavior
    for (X_batch, y_batch) in DataLoader(train_dataset, batch_size=batch_size, shuffle=True):
        # train on batch
        loss = compute_loss(X_batch, y_batch)
        loss.backward()
        opt.step()
        opt.zero_grad()
        train_loss.append(loss.cpu().data.numpy())

    model.train(False)  # disable dropout / use averages for batch_norm
    for X_batch, y_batch in torch.utils.data.DataLoader(val_dataset, batch_size=batch_size):
        logits = model(torch.as_tensor(X_batch, device=device, dtype=torch.float32))
        y_pred = logits.max(1)[1].data
        val_accuracy.append(np.mean((y_batch.cpu() == y_pred.cpu()).numpy()))

    # Then we print the results for this epoch:
    print("Epoch {} of {} took {:.3f}s".format(
        epoch + 1, num_epochs, time.time() - start_time))
    print(" training loss (in-iteration): \t{:.6f}".format(
        np.mean(train_loss[-len(train_dataset) // batch_size :])))
    print(" validation accuracy: \t\t\t{:.2f} %".format(
        np.mean(val_accuracy[-len(val_dataset) // batch_size :]) * 100))
```
For the test data we need __only normalization__ — no random cropping or flipping
```
# Test-time pipeline: no augmentation -- only tensor conversion and
# normalization with the same statistics used for training.
transform_test = transforms.Compose([
    transforms.ToTensor(),
    transforms.Normalize(means, stds),
])
test_dataset = <YOUR CODE>  # placeholder: build the test-split dataset here
<evaluate your final accuracy here>
```
| github_jupyter |
# Exercises For Session 3
Enter your answers in the preallocated empty cells.
### Exercise 1: Variables, Numbers and Strings
You have a variable of type *int* and another one of type *float*.
```
# Two example variables: an int and a float.
a = 40
b = 4.0
print(type(a))  # <class 'int'>
print(type(b))  # <class 'float'>
```
Convert the integer *a* to a *float*, and viceversa.
Sum, subtract, multiply and divide the two variable above by one another. Print the output of each operation.
Now, *a* is a *float* and *b* is an *integer*. Divide one by the other to get an *integer*. (Hint: `a / b` won't work — note that `/` is different from `//`.)
Let's now move onto **strings**. Create two strings, one with the text *Hello* and the other with *World*. Print them.
Sum the strings above to obtain *Hello, World!*. Print the result.
### Exercise 2: Lists
Create a list including *a*, *b*, and *"Hello, World!"*. Print the entire list and its third element separately. (Hint = the index of the first element in Python is 0)
Remove the element that is not a number, and replace it with an *int*.
Print the length of the list defined above.
### Exercise 3: Tuples, sets and dictionaries
Create a tuple and a dictionary including *a*, *b*, and *"Hello, World!"*.
Add another *int* to the tuple.
Print the value associated to the first key of the dictionary defined above.
### Exercise 4: Functions and logical statements
Create a function that takes an *int*, a *float* and a *string* as inputs and returns a list containing the three of them.
Use a lambda function to achieve the same result.
Let's move into something slightly more involving. This function checks the type of the input.
```
def func_2(x):
    """Print a message describing the type of *x*.

    Only int and float inputs are reported; any other type falls through
    silently (the exercise asks the reader to add a string branch).
    """
    kind = type(x)
    if kind == int:
        print(f"{x} is an integer")
    elif kind == float:
        print(f"{x} is a float")
```
Notice that the function only checks if the input is an *int* or a *float*. Add an extra condition to check if the input is a *string*
### Exercise 5: For and While Loops
Python allows you to loop over several types of objects. A simple loop looks like
```
# Print 0..9 on a single line, separated by spaces.
for x in range(10):
    print(x, end=" ")
```
Modify the script above to loop over a list containing both numbers (*int* and/or *float*) and *string*. Print the type of the input at each iteration.
The next chunk of code fills in an empty list with a sequence of integers.
```
# Fill an empty list with the integers 0..9 by appending in a for loop.
list_1 = []
for i in range(10):
    list_1.append(i)
print(list_1)
```
Produce the same result using a while loop instead (Hint: create a new list, call it *list_2*. Make sure to update the counter at each iteration of the loop).
| github_jupyter |
# Understanding PaccMann
```
%%capture
# import all the needed libraries
import numpy as np
import pandas as pd
import tempfile
from rdkit import Chem
from sklearn.model_selection import train_test_split
import ipywidgets as widgets
from ipywidgets import interact, interact_manual
from IPython.display import SVG, display
from depiction.models.examples.paccmann import PaccMannSmiles, PaccMannCellLine
from depiction.models.examples.paccmann.smiles import (
get_smiles_language, smiles_attention_to_svg,
process_smiles, get_atoms
)
from depiction.core import Task, DataType
from depiction.interpreters.u_wash.u_washer import UWasher
cache_dir = tempfile.mkdtemp()
```
## Data
```
# Parse data from GDSC
# drugs: SMILES strings, indexed by column 1 (presumably the drug name)
drugs = pd.read_csv(
    '../data/paccmann/gdsc.smi', sep='\t',
    index_col=1, header=None,
    names=['smiles']
)
# cell lines: per-cell-line profiles; gene columns start at position 3
cell_lines = pd.read_csv('../data/paccmann/gdsc.csv.gz', index_col=1)
genes = cell_lines.columns[3:].tolist()
# sensitivity data: drug / cell-line effectiveness records
drug_sensitivity = pd.read_csv('../data/paccmann/gdsc_sensitivity.csv.gz', index_col=0)
# labels for the binary classifier's two classes
class_names = ['Not Effective', 'Effective']
```
## Interpretability on the drug level for a cell line of interest
### LIME and Anchor
```
# pick a cell line
selected_cell_line = 'NCI-H1648'

# filter and prepare data: keep only sensitivity records for this cell line
selected_drug_sensitivity = drug_sensitivity[
    drug_sensitivity['cell_line'] == selected_cell_line
]
# align drugs and sensitivity on a common drug index, dropping unmatched rows
selected_drugs = drugs.reindex(selected_drug_sensitivity['drug']).dropna()
selected_drug_sensitivity = selected_drug_sensitivity.set_index('drug').reindex(
    selected_drugs.index
).dropna()

# setup a classifier for the specific cell line (its gene profile is fixed;
# the classifier then varies only over drugs)
classifier = PaccMannSmiles(cell_lines.loc[selected_cell_line][genes].values, cache_dir=cache_dir)
# interpretability methods
def interpret_smiles_with_lime(example):
    """Explain the classifier's prediction for one SMILES string with LIME,
    treating the SMILES as character-level text (bow=False, char_level=True)."""
    explanation_configs = {
        'labels': (1,),  # explain the 'Effective' class only
    }
    interpreter_params = {
        'class_names': class_names,
        'split_expression': list,  # split the SMILES into single characters
        'bow': False,
        'char_level': True
    }
    explainer = UWasher('lime', classifier, **interpreter_params)
    explainer.interpret(example, explanation_configs=explanation_configs)
def interpret_smiles_with_anchor(example):
    """Explain one SMILES prediction with Anchors, using the SMILES language
    for perturbations and '*' as the unknown token."""
    explanation_configs = {
        'use_proba': False,
        'batch_size': 32,
    }
    interpreter_params = {
        'class_names': class_names,
        'nlp': get_smiles_language(),
        'unk_token': '*',
        'sep_token': '',
        'use_unk_distribution': True
    }
    explainer = UWasher('anchors', classifier, **interpreter_params)

    def predict_wrapper(samples):
        # Anchors expects hard class labels, not probabilities.
        return np.argmax(classifier.predict(samples), axis=1)

    explainer.interpret(example, explanation_configs=explanation_configs, callback=predict_wrapper)
def interpret_smiles(interpreter, drug):
    """Dispatch to the LIME or Anchors explainer for the selected drug."""
    if interpreter == 'lime':
        interpret_smiles_with_lime(drugs.loc[drug].item())
    else:
        interpret_smiles_with_anchor(drugs.loc[drug].item())

# Interactive widget: pick the interpreter and the drug, then run manually.
interact_manual(
    interpret_smiles, interpreter=['lime', 'anchor'],
    drug=drugs.index
);
```
### What about PaccMann's attention?
```
# pick a cell line
selected_cell_line = 'NCI-H1648'
# setup a classifier for the specific cell line
classifier = PaccMannSmiles(cell_lines.loc[selected_cell_line][genes].values, cache_dir=cache_dir)

def attention_smiles(drug):
    """Render PaccMann's SMILES attention for *drug* as an SVG molecule."""
    try:
        smiles = drugs.loc[drug].item()
        molecule = Chem.MolFromSmiles(smiles)
        atoms = get_atoms(smiles)
        _ = classifier.predict([smiles])  # forward pass to populate the attention
        smiles_attention = next(classifier.predictor.predictions)['smiles_attention'][0]
        display(SVG(smiles_attention_to_svg(smiles_attention, atoms, molecule)))
    except Exception:  # was a bare `except:`, which also swallows KeyboardInterrupt
        print('Structure visualization not supported')

interact(
    attention_smiles,
    drug=drugs.index
);
```
## Interpretability on the cell line level for a drug of interest
### LIME and Anchor
```
# pick a drug
selected_drug = 'Imatinib'

# filter and prepare data: keep only cell lines tested against this drug
selected_drug_sensitivity = drug_sensitivity[
    drug_sensitivity['drug'] == selected_drug
]
selected_cell_lines = cell_lines.reindex(selected_drug_sensitivity['cell_line']).dropna()
selected_drug_sensitivity = selected_drug_sensitivity.set_index('cell_line').reindex(
    selected_cell_lines.index
).dropna()

# train/test then test/valid split of gene profiles vs. effectiveness labels
X_train, X_test, y_train, y_test = train_test_split(
    selected_cell_lines[genes].values, selected_drug_sensitivity['effective'].values
)
X_test, X_valid, y_test, y_valid = train_test_split(
    X_test, y_test
)

# setup a classifier for the specific drug (its SMILES is fixed; the
# classifier then varies only over cell lines)
classifier = PaccMannCellLine(drugs.loc[selected_drug].item(), cache_dir=cache_dir)
# interpretability methods
def interpret_cell_line_with_lime(example):
    """Explain one cell line's prediction with LIME on tabular gene features."""
    explanation_configs = {
        'labels': (1,),  # explain the 'Effective' class only
    }
    interpreter_params = {
        'training_data': X_train,
        'training_labels': y_train,
        'feature_names': genes,
        'class_names': class_names,
        'discretize_continuous': False,
        'sample_around_instance': True
    }
    explainer = UWasher('lime', classifier, **interpreter_params)
    explainer.interpret(example, explanation_configs=explanation_configs)
def interpret_cell_line_with_anchor(example):
    """Explain one cell line's prediction with Anchors on tabular features."""
    explanation_configs = {}
    interpreter_params = {
        'feature_names': genes,
        'class_names': class_names,
        'categorical_names': {}  # none declared categorical -- presumably all continuous
    }
    explainer = UWasher('anchors', classifier, **interpreter_params)
    # The tabular Anchors explainer must first be fitted to the data splits.
    explainer.explainer.fit(
        X_train, y_train, X_valid, y_valid
    )

    def predict_wrapper(samples):
        # Anchors expects hard class labels, not probabilities.
        return np.argmax(classifier.predict(samples), axis=1)

    explainer.interpret(example, explanation_configs=explanation_configs, callback=predict_wrapper)
def interpret_cell_line(interpreter, cell_line):
    """Dispatch to the LIME or Anchors explainer for the selected cell line."""
    if interpreter == 'lime':
        interpret_cell_line_with_lime(
            cell_lines.loc[cell_line][genes].values
        )
    else:
        interpret_cell_line_with_anchor(
            cell_lines.loc[cell_line][genes].values
        )

# Interactive widget: pick the interpreter and the cell line, run manually.
interact_manual(
    interpret_cell_line, interpreter=['lime', 'anchor'],
    cell_line=cell_lines.index
);
```
### What about PaccMann's attention?
```
# pick a drug
selected_drug = 'Imatinib'
classifier = PaccMannCellLine(drugs.loc[selected_drug].item(), cache_dir=cache_dir)

def attention_cell_line(cell_line, top_k=10):
    """Bar-plot the top_k genes by PaccMann's gene attention for *cell_line*."""
    try:
        _ = classifier.predict([cell_lines.loc[cell_line][genes].values])  # forward pass
        gene_attention = next(classifier.predictor.predictions)['gene_attention'][0]
        pd.Series(dict(zip(genes, gene_attention))).sort_values(ascending=False)[:top_k].plot.bar()
    except Exception:  # was a bare `except:`, which also swallows KeyboardInterrupt
        print('Cell line visualization not supported')

interact(
    attention_cell_line, cell_line=cell_lines.index,
    top_k=(1, 30, 1)  # slider: 1 to 30 genes
);
```
| github_jupyter |
# Charge Line
We'll be creating a 2D design and adding a single transmon qcomponent with a charge line.
Create a standard pocket transmon qubit with charge line for a ground plane,
with two pads connected by a junction.
```
# So, let us dive right in. For convenience, let's begin by enabling
# automatic reloading of modules when they change.
%load_ext autoreload
%autoreload 2

import qiskit_metal as metal
from qiskit_metal import designs, draw
from qiskit_metal import MetalGUI, Dict, open_docs

# Each time you create a new quantum circuit design,
# you start by instantiating a QDesign class.
# The design class `DesignPlanar` is best for 2D circuit designs.
design = designs.DesignPlanar()

#Launch Qiskit Metal GUI to interactively view, edit, and simulate QDesign: Metal GUI
gui = MetalGUI(design)

# To force overwrite a QComponent with an existing name.
# This is useful when re-running cells in a notebook.
design.overwrite_enabled = True
```
### A transmon qubit with charge line.
You can create a ready-made transmon qubit from the QComponent Library, `qiskit_metal.qlibrary.qubits`.
`transmon_pocket_cl.py` is the file containing our qubit so `transmon_pocket_cl` is the module we import.
The `TransmonPocketCL` class is our transmon qubit. Like all quantum components, `TransmonPocketCL` inherits from `QComponent`.
Connector lines can be added using the `connection_pads` dictionary.
Each connector pad has a name and a list of default properties.
A charge line can be added with options that start with "cl_" as shown in the next two cells.
```
from qiskit_metal.qlibrary.qubits.transmon_pocket_cl import TransmonPocketCL

# Be aware of the default_options that can be overridden by user.
TransmonPocketCL.get_template_options(design)

# Options for a transmon pocket with three connection pads and a charge line.
transmon_options_cl = dict(
    pos_x = '1mm',
    pos_y = '2mm',
    orientation = '180',  # rotate the pocket by 180 degrees
    connection_pads=dict(
        a = dict(loc_W=+1, loc_H=-1, pad_width='70um', cpw_extend = '50um'),
        b = dict(loc_W=-1, loc_H=-1, pad_width='125um', cpw_extend = '50um', pad_height='60um'),
        c = dict(loc_W=+1, loc_H=+1, pad_width='110um', cpw_extend = '50um')
    ),
    # What side of the pocket the charge line is.
    # -180 to +180 from the 'west edge', will round to the nearest 90.
    cl_pocket_edge = '90',
    cl_off_center = '-40um',
    gds_cell_name='FakeJunction_01',
)

# Create a new Transmon Pocket object with name 'Q1'
q1 = TransmonPocketCL(design, 'Q1', options=transmon_options_cl)

gui.rebuild()    # rebuild the design and plot
gui.autoscale()  # resize GUI to see QComponent
gui.zoom_on_components(['Q1']) #Can also gui.zoom_on_components([q1.name])
```
Let's see what the Q1 object looks like
```
q1 #print Q1 information
```
Save screenshot as a .png formatted file.
```
gui.screenshot()  # save a screenshot as a .png file (per the cell text above it)

# Screenshot the canvas only as a .png formatted file.
gui.figure.savefig('shot.png')

# Display the saved image inline in the notebook at a fixed width.
from IPython.display import Image, display
_disp_ops = dict(width=500)
display(Image('shot.png', **_disp_ops))
```
## Closing the Qiskit Metal GUI
```
gui.main_window.close()
```
| github_jupyter |
# dftdecompose - Illustrate the decomposition of the image in primitive 2-D waves
This demonstration illustrates the decomposition of a step function image into cossenoidal waves of increasing frequencies.
```
%matplotlib inline
import numpy as np
import matplotlib.pyplot as plt
from numpy.fft import fft2
from numpy.fft import ifft2
import sys,os
ia898path = os.path.abspath('/etc/jupyterhub/ia898_1s2017/')
if ia898path not in sys.path:
sys.path.append(ia898path)
import ia898.src as ia
# Synthetic test image: 128x128, value 50 everywhere, with bright (200)
# vertical bands on the left and right thirds -- a step function along x.
f = 50 * np.ones((128,128))
f[:, : 32] = 200
f[:,64+32: ] = 200
plt.imshow(f,cmap='gray')
plt.title('Original image')
plt.colorbar()
plt.show()
```
- Demonstração da recontrução parcial cumulativa das "telhas" primitivas da imagem sintética acima. É exibida cada telha primitiva, fazendo a reconstrução da iDFT de apenas valores F(u,0) e F(-u,0) para u entre 0 e M-1.
```
H,W = f.shape
N = W
# One subplot row per reconstruction step (u = 0 .. N/2), two panels per row.
# Was `(W//2)//(2/2)+1`: dividing by (2/2) == 1.0 produced a float, which
# plt.subplot rejects on modern matplotlib; integer arithmetic gives the
# same count as an int.
rows = W // 2 + 1
plt.figure(figsize=[4, rows * 2])

# 1) Compute F = DFT(f) (Discrete Fourier Transform) and display its spectrum.
F = fft2(f)
E = ia.dftview(F)
ia.adshow(E, title='DFT')

# 2) Start the reconstruction from the DC term alone: Faux[0,0] = F[0,0].
Faux = np.zeros_like(F)
Faux[0,0] = F[0,0]
plt.subplot(rows, 2, 1)
plt.imshow(np.real(ifft2(Faux)), cmap='gray')
plt.title("DFT inverse (u=0)")

Fsma = np.zeros_like(F)  # cumulative spectrum, built up term by term
Fsma = Fsma + Faux
plt.subplot(rows, 2, 2)
plt.imshow(np.real(ifft2(Fsma)), cmap='gray')
plt.title("Acumulative (u=%s)" % 0)

# 3) For u = 1 .. N/2-1, add the conjugate pair of columns F[:,u] and F[:,N-u]
#    (recall -u = N-u, since F is periodic) and show both the individual
#    cosine wave and the cumulative reconstruction.
row_count = 2
for u in range(1, N // 2):
    Faux = np.zeros_like(F)
    Faux[:, u] = F[:, u]
    Faux[:, N - u] = F[:, N - u]  # -u = N-u

    row_count = row_count + 1
    plt.subplot(rows, 2, row_count)
    plt.imshow(np.real(ifft2(Faux)), cmap='gray')
    plt.title("DFT inverse (u=%s)" % u)

    row_count = row_count + 1
    Fsma = Fsma + Faux
    plt.subplot(rows, 2, row_count)
    plt.imshow(np.real(ifft2(Fsma)), cmap='gray')
    plt.title("Acumulative (u=%s)" % u)

plt.tight_layout()
plt.show()

# Compare the original image against the accumulated reconstruction.
diff = np.abs(np.abs(ifft2(Fsma)) - f).sum()
print('Difference between original image and reconstructed: ', diff, " (almost zero)")
```
## Contributions
Lucas de Vasconcellos Teixeira, 1st semester 2017
| github_jupyter |
> Texto fornecido sob a Creative Commons Attribution license, CC-BY. Todo o código está disponível sob a FSF-approved BSD-3 license.<br>
> (c) Original por Lorena A. Barba, Gilbert F. Forsyth em 2017, traduzido por Felipe N. Schuch em 2020.<br>
> [@LorenaABarba](https://twitter.com/LorenaABarba) - [@fschuch](https://twitter.com/fschuch)
12 passos para Navier-Stokes
======
***
Essa tarefa é um complemento para as aulas do primeiro módulo interativo online [CFD com Python](https://github.com/fschuch/CFDPython-BR), por Prof. Lorena A. Barba, denominado **12 Passos para Navier-Stokes**. Esse notebook foi escrito pelo estudante de graduação Gilbert Forsyth.
Operações com arranjos em NumPy
----------------
Para aplicações computacionais mais intensivas, o uso das funções embutidas em NumPy podem fornecer um aumento na velocidade de execução de muitas vezes. Como um exemplo simples, considere a seguinte equação:
$$
u^{n+1}_i = u^n_i-u^n_{i-1}
$$
Agora, para um dado vetor $u^n = [0, 1, 2, 3, 4, 5]\ \ $, nós podemos calcular o valor de $u^{n+1}$ ao iterar sobre os valores de $u^{n+1}$ com um laço `for`.
```
import numpy

# u^n = [0, 1, 2, 3, 4, 5]; print each forward difference u[i] - u[i-1].
u = numpy.array((0, 1, 2, 3, 4, 5))
for i in range(1, len(u)):
    print(u[i] - u[i-1])
```
Esse é o resultado esperado, e o tempo de execução foi praticamente instantâneo. Se efetuamos o mesmo procedimento como uma operação de arranjos, em vez de calcularmos $u^n_i-u^n_{i-1}$ separadamente por 5 vezes, podemos fatiar o arranjo $u$ e calcular cada operação com uma linha de comando:
```
# Vectorized equivalent: elementwise u[i] - u[i-1] for i = 1..len(u)-1.
u[1:] - u[0:-1]
```
O que esse comando diz é para subtrair o 0.º, 1.º, 2.º, 3.º, 4.º e 5.º elementos de $u$ do 1.º, 2.º, 3.º, 4.º, 5.º e 6.º elementos de $u$.
### Aumento de Velocidade
Para o arranjo de 6 elementos, o benefício da operação de arranjos é bastante pequena. Não haverá diferença significativa no tempo de execução porque existem apenas algumas poucas operações ocorrento. Mas se revisitarmos a equação de convecção linear 2D, podemos ver um ganho de velocidade substancial.
```
# Grid and time-step parameters for 2D linear convection.
nx = 81
ny = 81
nt = 100            # number of time steps
c = 1               # wave speed
dx = 2 / (nx - 1)
dy = 2 / (ny - 1)
sigma = .2          # stability factor used to size dt
dt = sigma * dx

x = numpy.linspace(0, 2, nx)
y = numpy.linspace(0, 2, ny)

u = numpy.ones((nx, ny))  ## create an ny x nx array of ones
un = numpy.ones((nx, ny))

### Assign the initial condition: u = 2 inside the square 0.5 <= x,y <= 1
u[int(.5 / dx):int(1 / dx + 1), int(.5 / dy): int(1 / dy + 1)] = 2
```
Com nossa condição inicial definida, vamos primeiro executar a forma original de dois laços `for` aninhados, fazendo uso da função "mágica" do Notebook `%%timeit`, a qual vai nos ajudar a mensurar a performace do nosso código.
**Nota:** A função mágica `%%timeit` vai executar o código diversas vezes e nos fornecer como resultado o tempo médio de execução. Se tivermos uma figura sendo produzida dentro da célula onde executamos `%%timeit`, ela será executada repetitivamente, o que pode causar uma leve confusão.
O tempo de execução abaixo vai variar de máquina para máquina. Não espere que seus tempos correspondam e esses, mas você _deve_ verificar a mesma tendência geral de diminuição no tempo de execução conforme mudamos para operações de arranjos.
```
%%timeit
# "Raw" triple-nested-loop version of the 2D linear convection update,
# timed for comparison with the vectorized version below.
u = numpy.ones((nx, ny))
u[int(.5 / dx):int(1 / dx + 1), int(.5 / dy): int(1 / dy + 1)] = 2

for n in range(nt + 1):  ## loop over the number of time steps
    un = u.copy()
    row, col = u.shape
    for i in range(1, row):
        for j in range(1, col):
            # Upwind update using the i-1 and j-1 neighbours.
            u[i, j] = (un[i, j] - (c * dt / dx *
                       (un[i, j] - un[i - 1, j])) -
                       (c * dt / dy *
                       (un[i, j] - un[i, j - 1])))
    # Boundary conditions: u = 1 on all four edges.
    u[0, :] = 1
    u[-1, :] = 1
    u[:, 0] = 1
    u[:, -1] = 1
```
Com o código "bruto" acima, observe a média de execução atingida. Tenha em mente que com esses três laços aninhados, que tudo declarado dentro do laço **j** está sendo executado mais de 650.000 vezes. Vamos comparar isso com o desempenho do mesmo código implementado com operações de arranjos:
```
%%timeit
u = numpy.ones((ny, nx))
u[int(.5 / dx):int(1 / dx + 1), int(.5 / dy): int(1 / dy + 1)] = 2
for n in range(nt + 1): ##Laço sobre o número de passos
un = u.copy()
u[1:, 1:] = (un[1:, 1:] - (c * dt / dx * (un[1:, 1:] - un[0:-1, 1])) -
(c * dt / dy * (un[1:, 1:] - un[1, 0:-1])))
u[0, :] = 1
u[-1, :] = 1
u[:, 0] = 1
u[:, -1] = 1
```
Como você pode ver, o tempo de execução caiu substancialmente. Pode não ter parecido muito tanto nesse exemplo, mas tenha em mente que o ganho de velocidade vai crescer exponencialmente com o tamanho e complexidade do problema que está sendo resolvido.
```
from IPython.core.display import HTML
def css_styling():
    """Load the notebook's custom CSS and return it wrapped in an HTML object.

    Uses a context manager so the file handle is closed deterministically
    (the original `open(...).read()` leaked the handle).
    """
    with open("../styles/custom.css", "r") as style_file:
        styles = style_file.read()
    return HTML(styles)
```
> A célula acima executa o estilo para esse notebook. Nós modificamos o estilo encontrado no GitHub de [CamDavidsonPilon](https://github.com/CamDavidsonPilon), [@Cmrn_DP](https://twitter.com/cmrn_dp).
| github_jupyter |
# Sympy: Symbolic Mathematics in Python
SymPy is a Python library for symbolic mathematics. It aims to be an alternative to systems such as Mathematica and Wolfram Alpha while keeping the code as simple as possible and easily extensible. SymPy is written entirely in Python and does not require any external libraries.
SymPy documentation and packages for installation can be found at https://www.sympy.org/. Much of this material is drawn from the SciPy lecture notes, found here: https://www.scipy-lectures.org/packages/sympy.html.
```
import sympy as sym
```
SymPy allows for control of the display of the output. From here we can use the following setting for fancy printing:
```
sym.init_printing()
```
## First Steps
SymPy defines three numerical types: `Real`, `Rational` and `Integer`.
The Rational class represents a rational number as a pair of two Integers: the numerator and the denominator, so `Rational(1, 2)` represents $\frac{1}{2}$, `Rational(5, 2)` represents $\frac{5}{2}$ and so on.
```
a = sym.Rational(1,2)
a
a*2
```
SymPy uses `mpmath` in the background, which makes it possible to perform computations using arbitrary-precision arithmetic. That way, some constants such as $e$, $\pi$, $\infty$, are treated as symbols and can be evaluated with arbitrary precision:
```
sym.pi**2
sym.pi.evalf()
(sym.pi + sym.exp(1)).evalf()
```
`evalf` evaluates the expression to a floating-point number. This can be potentially up to $n$ precision, as needed:
```
sym.pi.evalf(50)
```
## Symbols
In contrast to many other computer algebra systems, in SymPy you have to declare symbolic variables explicitly:
```
x = sym.Symbol("x")
y = sym.Symbol("y")
```
Then you can manipulate them:
```
x + y + x - y
(x + y)**2
```
Multiple symbols can be defined at once, as:
```
x, y, z = sym.symbols("x y z")
x
```
## Basic Operations
Here we show some of the most basic operations needed for expression manipulation in SymPy.
### Substitution
One of the most common things you might want to do with a mathematical expression is *substitution*. Substitution replaces all instances of something in an expression with something else. It is achieved using the `subs` method.
```
expr = sym.cos(x) + 1
expr.subs(x, y)
```
Substitution is usually done for one of two reasons:
1. Evaluating an expression at a point. For instance, if our expression is $\cos(x)+1$ and we want to evaluate it at the point $x=0$, such that we get $\cos(0)+1=2$.
```
expr.subs(x, 0)
```
2. Replacing a subexpression with another subexpression. There are a number of reasons why we want to do this. The first is that if we are trying to build an expression with symmetry, such as $x^{x^x}$.
```
expr = x ** y
expr
expr = expr.subs(y, x**y)
expr
expr = expr.subs(y, x**x)
expr
```
Another case is if we want to perform controlled simplification, or a simplification that SymPy is otherwise unable to do. For example, take $\sin(2x)+\cos(2x)$, and we want to replace $\sin(2x)$ with $2\sin(x)\cos(x)$. The function `expand_trig()` achieves this, but will also expand $\cos(2x)$, which we may not want. One of the easiest ways to prevent this is a manual substitution:
```
expr = sym.sin(2*x) + sym.cos(2*x)
sym.expand_trig(expr)
expr.subs(sym.sin(2*x), 2*sym.sin(x)*sym.cos(x))
```
### Converting Strings to SymPy Expressions
The function `sympify` can be used to convert Python strings into SymPy expressions:
```
str_expr = "x**2 + 3*x - 1/2"
sym.sympify(str_expr)
```
**WARNING**: `sympify` uses `eval`. Do not use it on unsanitized input.
### lambdify
`subs` and `evalf` are good if you wish to do simple evaluation, but if you intend to evaluate an expression at many points, there are more efficient ways. For example, if you wanted to evaluate an expression with 1000 points, using SymPy would be far slower than it needs to be, especially if you care about machine precision. Instead, use libraries like `NumPy` and `SciPy`.
Alternatively, a SymPy expression can be converted to be numerically evaluated using `lambdify`. This acts as a lambda function, except it converts the SymPy names to the names of the given numerical library, usually NumPy.
```
import numpy as np
a = np.arange(10)
expr = sym.sin(x)
f = sym.lambdify(x, expr, "numpy")
f(a)
```
## Algebraic Manipulations
SymPy is capable of performing powerful algebraic manipulations. We'll take a look into some of the most frequently used: expand and simplify.
When we expand an algebraic expression, SymPy will try to denest powers and multiplications:
```
sym.expand((x + y) ** 3)
```
Further options can be given in the form of keyword arguments:
```
sym.expand(x + y, complex=True)
sym.expand(sym.cos(x + y), trig=True)
```
We can use simplify to transform an expression into a *simpler* form:
```
sym.simplify((x + x * y) / x)
sym.simplify(sym.gamma(x) / sym.gamma(x - 2))
```
Where `sym.gamma(x)` is $\Gamma(x)$, the gamma function.
`collect()` collects common powers of a term in an expression, for example:
```
expr = x*y + x - 3 + 2*x**2 - z*x**2 + x**3
expr
sym.collect(expr, x)
```
### Power Simplifications
Before we introduce the power simplification functions, we will cover the basic mathematical identities held by powers. There are three kinds of identities satisfied by exponents:
1. $x^ax^b=x^{a+b}$
2. $x^ay^a=(xy)^a$
3. $(x^a)^b=x^{ab}$
Identity 1 is always true. Identities 2 and 3 are not always true, with specific examples not covered in this material.
```
a, b = sym.symbols("a b", real=True)
x, y = sym.symbols("x y", positive=True)
sym.powsimp(x**a*x**b)
sym.powsimp(x**a*y**a)
```
### Exponential and logarithms
Logarithms have similar issues as powers, there are two main identities:
1. $\log(xy)=\log(x) + \log(y)$
2. $\log(x^n)=n\log(x)$
Neither identity is true for arbitrary complex $x$ and $y$, due to the branch cut in the complex plane for the complex logarithm. The identities hold if $x$ and $y$ are positive and $n$ is real.
```
n = sym.Symbol("n", real=True)
```
Note that identity:
$$
\log\left(\frac{x}{y}\right)=\log(x)-\log(y)
$$
is a special case of identities 1 and 2, and holds if $x$ and $y$ are positive. In addition,
$$
\log(e^x)=x\log(e)=x
$$
and holds when $x$ is real.
```
sym.expand_log(sym.log(x*y))
sym.expand_log(sym.log(x/y))
```
### Special Functions
```
sym.factorial(n)
sym.binomial(n, k)
sym.gamma(z)
```
## Calculus
Now we get into the juicy stuff! SymPy can perform a host of impressive calculus operations:
### Limits
Limits are easy to use in SymPy, they follow the syntax `limit(function, variable, point)`, so to compute the limit of $f(x)$ as $x \to 0$, you would issue `limit(f, x, 0)`:
```
sym.limit(sym.sin(x) / x, x, 0)
```
We can also calculate the limit at infinity:
$$
\lim_{x \to \infty} x
$$
```
sym.limit(x, x, sym.oo)
```
$$
\lim_{x \to \infty} \frac{1}{x}
$$
```
sym.limit(1 / x, x, sym.oo)
```
$$
\lim_{x \to 0} x^x
$$
```
sym.limit(x**x, x, 0)
```
`limit` has an unevaluated counterpart, `Limit`:
```
expr = sym.Limit((sym.cos(x) - 1)/x, x, 0)
expr
expr.doit()
```
### Differentiation
You can differentiate any SymPy expression using `diff(func, var)`, for example:
$$
\frac{\text{d}\cos(x)}{\text{d}x}=-\sin(x)
$$
```
sym.diff(sym.cos(x), x)
```
$$
\frac{\text{d}\sin(2x)}{\text{d}x}=2\cos(2x)
$$
```
sym.diff(sym.sin(2*x), x)
sym.diff(sym.tan(x), x)
```
Higher derivatives can be calculated by adding the parameter $n$ to the method, as follows:
```
sym.diff(sym.sin(2*x), x, 2)
sym.diff(sym.sin(2*x), x, 3)
```
`diff` can also take multiple derivatives at once. To take multiple derivatives, pass the variable as many times as you wish to differentiate, or pass a number:
```
sym.diff(x**4, x, x, x)
```
You can also take derivatives with respect to many variables at once. Just pass each derivative in order, using the same syntax as for single variable derivatives:
```
expr = sym.exp(x*y*z)
expr
sym.diff(expr, x, y, y, z, z, x)
```
To create an unevaluated derivative, use the `Derivative` class. It has the same syntax as `diff()`:
```
deriv = sym.Derivative(expr, x, y, y, z, z, x)
deriv
deriv.doit()
```
### Series Expansion
SymPy also knows how to compute the Taylor series of an expression at a point. Use `series(expr, var)`, like so:
```
sym.series(sym.cos(x), x)
sym.series(1 / sym.cos(x), x)
expr = sym.exp(sym.sin(x))
expr.series(x, 0, 4)
```
### Integration
SymPy has support for indefinite and definite integration of transcendental elementary and special functions via `integrate()`, which uses the powerful extended Risch-Norman algorithm, some heuristics and pattern matching. You can integrate elementary functions:
$$
\int 6x^5 \,dx
$$
```
sym.integrate(6 * x**5, x)
```
$$
\int \sin(x) \,dx
$$
```
sym.integrate(sym.sin(x), x)
```
$$
\int \log(x) \,dx
$$
```
sym.integrate(sym.log(x), x)
```
$$
\int \left(2x + \sinh(x)\right) \,dx
$$
```
sym.integrate(2*x + sym.sinh(x), x)
```
Also special functions are handled rather nicely:
$$
\int e^{-x^2} \operatorname{erf}(x) \,dx
$$
```
sym.integrate(sym.exp(-x**2) * sym.erf(x), x)
```
It is possible to compute definite integrals assuming that the lower and upper bounds of an interval are provided:
$$
\int_{-1}^1 x^3 dx
$$
```
sym.integrate(x**3, (x, -1, 1))
```
$$
\int_0^{\pi/2} \sin(x) dx
$$
```
sym.integrate(sym.sin(x), (x, 0, sym.pi / 2))
```
$$
\int_{-\pi/2}^{\pi/2} \cos(x) dx
$$
```
sym.integrate(sym.cos(x), (x, -sym.pi / 2, sym.pi / 2))
```
Definite integrals also support improper integrals:
$$
\int_0^{\infty} e^{-x} dx
$$
```
# Improper integral of exp(-x) over [0, oo); the stray trailing "b" in the
# original was a typo that made the line a SyntaxError.
sym.integrate(sym.exp(-x), (x, 0, sym.oo))
```
As with `Derivative`, you can create an unevaluated integral using `Integral`. To later evaluate this integral, call `doit()`:
```
expr = sym.Integral(sym.log(x)**2, x)
expr
expr.doit()
y = sym.symbols("y")
integ = sym.Integral(x**y*sym.exp(-x), (x, 0, sym.oo))
integ.doit()
```
### Finite Differences
So far we have looked at expressions with analytic derivatives and primitive functions respectively. But what if we want to have an expression to estimate a derivative of a curve for which we lack a closed form representation, or for which we don't know the functional values for yet. One approach is using a *finite difference* approach.
```
f, g = sym.symbols("f g", cls=sym.Function)
sym.differentiate_finite(f(x)*g(x))
```
If we want to expand the intermediate derivative we can pass the flag `evaluate=True`:
```
sym.differentiate_finite(f(x)*g(x), evaluate=True)
```
This method can be applied to `Derivative` instances as such:
```
f = sym.Function("f")
dfdx = f(x).diff(x)
dfdx.as_finite_difference()
h = sym.Symbol("h")
d2fdx2 = f(x).diff(x, 2)
d2fdx2.as_finite_difference([-3*h, -h, 2*h])
```
## Equation solving
SymPy is able to solve algebraic equations, in one and several variables using the `solveset()` function:
```
# Solve x**4 - 1 = 0 for x; the stray "b" in the original argument list
# was a typo that made the call a SyntaxError.
sym.solveset(x**4 - 1, x)
```
As you can see it takes as a first argument an expression that is supposed to be equal to $0$. It also has limited support for transcendental equations:
```
sym.solveset(sym.exp(x) + 1, x)
```
## Systems of linear equations
SymPy is able to solve a large part of polynomial equations, and is also capable of solving multiple equations with respect to multiple variables, by providing a tuple as a secondary argument. To do this we use the `solve()` command:
```
(x + 5 * y - 2, -3 * x + 6 * y - 15)
sol = sym.solve((x + 5 * y - 2, -3 * x + 6 * y - 15), (x, y))
sol
```
Another alternative in the case of polynomial equations is _factor_. Factor returns the polynomial factorized into irreducible terms, and is capable of computing the factorization over various domains:
```
f = x ** 4 - 3 * x ** 2 + 1
sym.factor(f)
sym.factor(f, modulus=5)
```
SymPy is also able to solve boolean equations, that is, to decide if a certain boolean equation is satisfiable or not. For this, we have the function `satisfiable()`:
```
sym.satisfiable(x & ~x)
```
## Linear Algebra
Now we've covered most of the basics, we probably want to scale the concepts mentioned previously up to *vectors* and *matrices*:
## Matrices
Matrices are created as instances from a `Matrix` class:
```
sym.Matrix([[1, 0], [0, 1]])
```
Unlike a NumPy array, you can also put Symbols in it:
```
x, y = sym.symbols("x, y")
A = sym.Matrix([[1, x], [y, 1]])
A
A**2
```
SymPy also allows different manners of matrix creation, such as a list of values with the dimensional inputs separately:
```
sym.Matrix(2, 3, [1, 2, 3, 4, 5, 6])
```
More interestingly, we can use a 2-variable function or `lambda` to make one. Here we create an indicator function which is 1 on the diagonal and then use it to make the *identity matrix*:
```
def f(i, j):
    """Diagonal indicator (Kronecker delta): 1 when i == j, 0 otherwise."""
    return 1 if i == j else 0
sym.Matrix(4,4,f)
sym.Matrix(3, 4, lambda i,j: 1 - (i+j) % 2)
```
There are a number of built-in special constructors for quick matrix construction - such as `eye`, `zeros`, `ones` and `diag`:
```
sym.eye(3)
sym.zeros(2)
```
### Differential Equations
SymPy is capable of solving some Ordinary Differential Equations (ODEs). To solve differential equations, SymPy uses a function called `dsolve()`. First we create an undefined function by passing `cls=Function` to the `symbols` function:
```
f,g = sym.symbols("f g", cls=sym.Function)
f(x)
```
$f$ and $g$ are now undefined functions. We can call $f(x)$, and it will represent an unknown function.
```
f(x).diff(x, x) + f(x)
sym.dsolve(f(x).diff(x, x) + f(x), f(x))
```
Keyword arguments can be given to this function in order to help it find the best possible resolution method. For example, if you know that we are dealing with separable equations, you can use the keyword `hint='separable'` to force `dsolve` to resolve it as such:
```
sym.dsolve(sym.sin(x) * sym.cos(f(x)) + sym.cos(x) * sym.sin(f(x)) * f(x).diff(x),
f(x), hint="separable")
h = x*f(x).diff(x) + f(x)-f(x)**2
sym.dsolve(h, f(x))
```
We've barely scratched the surface of SymPy, please see the documentation and the main GitHub repository here for more information: https://github.com/sympy/sympy/wiki/Tutorial.
## Tasks
### Task 1
| github_jupyter |
# Gradient Checking
Welcome to the final assignment for this week! In this assignment you will learn to implement and use gradient checking.
You are part of a team working to make mobile payments available globally, and are asked to build a deep learning model to detect fraud--whenever someone makes a payment, you want to see if the payment might be fraudulent, such as if the user's account has been taken over by a hacker.
But backpropagation is quite challenging to implement, and sometimes has bugs. Because this is a mission-critical application, your company's CEO wants to be really certain that your implementation of backpropagation is correct. Your CEO says, "Give me a proof that your backpropagation is actually working!" To give this reassurance, you are going to use "gradient checking".
Let's do it!
```
# Packages
# NOTE: the gc_utils import was split across two physical lines without a
# continuation in the original transcription (a SyntaxError); it is wrapped
# in parentheses so it can legally span multiple lines.
import numpy as np
from testCases import *
from gc_utils import (sigmoid, relu, dictionary_to_vector,
                      vector_to_dictionary, gradients_to_vector)
```
## 1) How does gradient checking work?
Backpropagation computes the gradients $\frac{\partial J}{\partial \theta}$, where $\theta$ denotes the parameters of the model. $J$ is computed using forward propagation and your loss function.
Because forward propagation is relatively easy to implement, you're confident you got that right, and so you're almost 100% sure that you're computing the cost $J$ correctly. Thus, you can use your code for computing $J$ to verify the code for computing $\frac{\partial J}{\partial \theta}$.
Let's look back at the definition of a derivative (or gradient):
$$ \frac{\partial J}{\partial \theta} = \lim_{\varepsilon \to 0} \frac{J(\theta + \varepsilon) - J(\theta - \varepsilon)}{2 \varepsilon} \tag{1}$$
If you're not familiar with the "$\displaystyle \lim_{\varepsilon \to 0}$" notation, it's just a way of saying "when $\varepsilon$ is really really small."
We know the following:
- $\frac{\partial J}{\partial \theta}$ is what you want to make sure you're computing correctly.
- You can compute $J(\theta + \varepsilon)$ and $J(\theta - \varepsilon)$ (in the case that $\theta$ is a real number), since you're confident your implementation for $J$ is correct.
Lets use equation (1) and a small value for $\varepsilon$ to convince your CEO that your code for computing $\frac{\partial J}{\partial \theta}$ is correct!
## 2) 1-dimensional gradient checking
Consider a 1D linear function $J(\theta) = \theta x$. The model contains only a single real-valued parameter $\theta$, and takes $x$ as input.
You will implement code to compute $J(.)$ and its derivative $\frac{\partial J}{\partial \theta}$. You will then use gradient checking to make sure your derivative computation for $J$ is correct.
<img src="images/1Dgrad_kiank.png" style="width:600px;height:250px;">
<caption><center> <u> **Figure 1** </u>: **1D linear model**<br> </center></caption>
The diagram above shows the key computation steps: First start with $x$, then evaluate the function $J(x)$ ("forward propagation"). Then compute the derivative $\frac{\partial J}{\partial \theta}$ ("backward propagation").
**Exercise**: implement "forward propagation" and "backward propagation" for this simple function. I.e., compute both $J(.)$ ("forward propagation") and its derivative with respect to $\theta$ ("backward propagation"), in two separate functions.
```
# GRADED FUNCTION: forward_propagation
# GRADED FUNCTION: forward_propagation
def forward_propagation(x, theta):
    """
    Forward pass for the 1D linear model of Figure 1: J(theta) = theta * x.

    Arguments:
    x -- a real-valued input
    theta -- our parameter, a real number as well

    Returns:
    J -- the cost, J(theta) = theta * x
    """
    ### START CODE HERE ### (approx. 1 line)
    J = np.dot(theta, x)
    ### END CODE HERE ###
    return J

# Quick sanity check: J(4) with x = 2 should be 8.
x, theta = 2, 4
J = forward_propagation(x, theta)
print(f"J = {J}")
```
**Expected Output**:
<table style=>
<tr>
<td> ** J ** </td>
<td> 8</td>
</tr>
</table>
**Exercise**: Now, implement the backward propagation step (derivative computation) of Figure 1. That is, compute the derivative of $J(\theta) = \theta x$ with respect to $\theta$. To save you from doing the calculus, you should get $dtheta = \frac { \partial J }{ \partial \theta} = x$.
```
# GRADED FUNCTION: backward_propagation
# GRADED FUNCTION: backward_propagation
def backward_propagation(x, theta):
    """
    Analytic derivative of J(theta) = theta * x with respect to theta
    (see Figure 1); dJ/dtheta is simply x.

    Arguments:
    x -- a real-valued input
    theta -- our parameter, a real number as well

    Returns:
    dtheta -- the gradient of the cost with respect to theta
    """
    ### START CODE HERE ### (approx. 1 line)
    dtheta = x
    ### END CODE HERE ###
    return dtheta

# Quick sanity check: the gradient equals the input x.
x, theta = 2, 4
dtheta = backward_propagation(x, theta)
print(f"dtheta = {dtheta}")
```
**Expected Output**:
<table>
<tr>
<td> ** dtheta ** </td>
<td> 2 </td>
</tr>
</table>
**Exercise**: To show that the `backward_propagation()` function is correctly computing the gradient $\frac{\partial J}{\partial \theta}$, let's implement gradient checking.
**Instructions**:
- First compute "gradapprox" using the formula above (1) and a small value of $\varepsilon$. Here are the Steps to follow:
1. $\theta^{+} = \theta + \varepsilon$
2. $\theta^{-} = \theta - \varepsilon$
3. $J^{+} = J(\theta^{+})$
4. $J^{-} = J(\theta^{-})$
5. $gradapprox = \frac{J^{+} - J^{-}}{2 \varepsilon}$
- Then compute the gradient using backward propagation, and store the result in a variable "grad"
- Finally, compute the relative difference between "gradapprox" and the "grad" using the following formula:
$$ difference = \frac {\mid\mid grad - gradapprox \mid\mid_2}{\mid\mid grad \mid\mid_2 + \mid\mid gradapprox \mid\mid_2} \tag{2}$$
You will need 3 Steps to compute this formula:
- 1'. compute the numerator using np.linalg.norm(...)
- 2'. compute the denominator. You will need to call np.linalg.norm(...) twice.
- 3'. divide them.
- If this difference is small (say less than $10^{-7}$), you can be quite confident that you have computed your gradient correctly. Otherwise, there may be a mistake in the gradient computation.
```
# GRADED FUNCTION: gradient_check
def gradient_check(x, theta, epsilon = 1e-7):
"""
Implement the backward propagation presented in Figure 1.
Arguments:
x -- a real-valued input
theta -- our parameter, a real number as well
epsilon -- tiny shift to the input to compute approximated
radient with formula(1)
Returns:
difference -- difference (2) between the approximated
gradient and the backward propagation gradient
"""
# Compute gradapprox using left side of formula (1).
#epsilon is small enough, you don't need to worry about the limit.
### START CODE HERE ### (approx. 5 lines)
thetaplus = theta + epsilon # Step 1
thetaminus = theta - epsilon # Step 2
J_plus = forward_propagation(x, thetaplus) # Step 3
J_minus = forward_propagation(x, thetaminus) # Step 4
gradapprox = (J_plus - J_minus)/ (2*epsilon) # Step 5
### END CODE HERE ###
# Check if gradapprox is close enough to the output
#of backward_propagation()
### START CODE HERE ### (approx. 1 line)
grad = backward_propagation(x, theta)
### END CODE HERE ###
### START CODE HERE ### (approx. 1 line)
numerator = np.linalg.norm(grad - gradapprox) # Step 1'
denominator = np.linalg.norm(grad) + np.linalg.norm(gradapprox) # Step 2'
difference = np.divide(numerator, denominator) # Step 3'
### END CODE HERE ###
if difference < 1e-7:
print ("The gradient is correct!")
else:
print ("The gradient is wrong!")
return difference
x, theta = 2, 4
difference = gradient_check(x, theta)
print("difference = " + str(difference))
```
**Expected Output**:
The gradient is correct!
<table>
<tr>
<td> ** difference ** </td>
<td> 2.9193358103083e-10 </td>
</tr>
</table>
Congrats, the difference is smaller than the $10^{-7}$ threshold. So you can have high confidence that you've correctly computed the gradient in `backward_propagation()`.
Now, in the more general case, your cost function $J$ has more than a single 1D input. When you are training a neural network, $\theta$ actually consists of multiple matrices $W^{[l]}$ and biases $b^{[l]}$! It is important to know how to do a gradient check with higher-dimensional inputs. Let's do it!
## 3) N-dimensional gradient checking
The following figure describes the forward and backward propagation of your fraud detection model.
<img src="images/NDgrad_kiank.png" style="width:600px;height:400px;">
<caption><center> <u> **Figure 2** </u>: **deep neural network**<br>*LINEAR -> RELU -> LINEAR -> RELU -> LINEAR -> SIGMOID*</center></caption>
Let's look at your implementations for forward propagation and backward propagation.
```
def forward_propagation_n(X, Y, parameters):
    """
    Implements the forward propagation (and computes the cost) presented in Figure 3.

    Arguments:
    X -- training set for m examples
    Y -- labels for m examples
    parameters -- python dictionary containing your parameters "W1", "b1",
    "W2", "b2", "W3", "b3":
    W1 -- weight matrix of shape (5, 4)
    b1 -- bias vector of shape (5, 1)
    W2 -- weight matrix of shape (3, 5)
    b2 -- bias vector of shape (3, 1)
    W3 -- weight matrix of shape (1, 3)
    b3 -- bias vector of shape (1, 1)

    Returns:
    cost -- the logistic (cross-entropy) cost averaged over the m examples
    cache -- intermediate values needed by backward_propagation_n
    """
    # retrieve parameters
    m = X.shape[1]
    W1 = parameters["W1"]
    b1 = parameters["b1"]
    W2 = parameters["W2"]
    b2 = parameters["b2"]
    W3 = parameters["W3"]
    b3 = parameters["b3"]

    # LINEAR -> RELU -> LINEAR -> RELU -> LINEAR -> SIGMOID
    Z1 = np.dot(W1, X) + b1
    A1 = relu(Z1)
    Z2 = np.dot(W2, A1) + b2
    A2 = relu(Z2)
    Z3 = np.dot(W3, A2) + b3
    A3 = sigmoid(Z3)

    # Cost: the expression is parenthesised so it may legally span two lines;
    # the original transcription broke the line right after "+", which is a
    # SyntaxError in Python.
    logprobs = (np.multiply(-np.log(A3), Y)
                + np.multiply(-np.log(1 - A3), 1 - Y))
    cost = 1./m * np.sum(logprobs)

    cache = (Z1, A1, W1, b1, Z2, A2, W2, b2, Z3, A3, W3, b3)

    return cost, cache
```
Now, run backward propagation.
```
def backward_propagation_n(X, Y, cache):
    """
    Implement the backward propagation presented in figure 2.

    NOTE: this implementation contains deliberate errors (in dW2 and db1)
    that the gradient-check exercise below is meant to detect -- do not
    "fix" them before running gradient_check_n(), or the expected output
    of the exercise will no longer match.

    Arguments:
    X -- input datapoint, of shape (input size, 1)
    Y -- true "label"
    cache -- cache output from forward_propagation_n()

    Returns:
    gradients -- A dictionary with the gradients of the cost with respect
    to each parameter, activation and pre-activation variables.
    """
    m = X.shape[1]
    (Z1, A1, W1, b1, Z2, A2, W2, b2, Z3, A3, W3, b3) = cache

    # Output layer: sigmoid + cross-entropy gives dZ3 = A3 - Y.
    dZ3 = A3 - Y
    dW3 = 1./m * np.dot(dZ3, A2.T)
    db3 = 1./m * np.sum(dZ3, axis=1, keepdims = True)

    dA2 = np.dot(W3.T, dZ3)
    dZ2 = np.multiply(dA2, np.int64(A2 > 0))  # ReLU gradient: pass where A2 > 0
    dW2 = 1./m * np.dot(dZ2, A1.T) * 2  # DELIBERATE BUG for the exercise: the trailing "* 2" is wrong
    db2 = 1./m * np.sum(dZ2, axis=1, keepdims = True)

    dA1 = np.dot(W2.T, dZ2)
    dZ1 = np.multiply(dA1, np.int64(A1 > 0))  # ReLU gradient: pass where A1 > 0
    dW1 = 1./m * np.dot(dZ1, X.T)
    db1 = 4./m * np.sum(dZ1, axis=1, keepdims = True)  # DELIBERATE BUG for the exercise: should be 1./m

    gradients = {"dZ3": dZ3, "dW3": dW3, "db3": db3,
                 "dA2": dA2, "dZ2": dZ2, "dW2": dW2, "db2": db2,
                 "dA1": dA1, "dZ1": dZ1, "dW1": dW1, "db1": db1}

    return gradients
```
You obtained some results on the fraud detection test set but you are not 100% sure of your model. Nobody's perfect! Let's implement gradient checking to verify if your gradients are correct.
**How does gradient checking work?**.
As in 1) and 2), you want to compare "gradapprox" to the gradient computed by backpropagation. The formula is still:
$$ \frac{\partial J}{\partial \theta} = \lim_{\varepsilon \to 0} \frac{J(\theta + \varepsilon) - J(\theta - \varepsilon)}{2 \varepsilon} \tag{1}$$
However, $\theta$ is not a scalar anymore. It is a dictionary called "parameters". We implemented a function "`dictionary_to_vector()`" for you. It converts the "parameters" dictionary into a vector called "values", obtained by reshaping all parameters (W1, b1, W2, b2, W3, b3) into vectors and concatenating them.
The inverse function is "`vector_to_dictionary`" which outputs back the "parameters" dictionary.
<img src="images/dictionary_to_vector.png" style="width:600px;height:400px;">
<caption><center> <u> **Figure 2** </u>: **dictionary_to_vector() and vector_to_dictionary()**<br> You will need these functions in gradient_check_n()</center></caption>
We have also converted the "gradients" dictionary into a vector "grad" using gradients_to_vector(). You don't need to worry about that.
**Exercise**: Implement gradient_check_n().
**Instructions**: Here is pseudo-code that will help you implement the gradient check.
For each i in num_parameters:
- To compute `J_plus[i]`:
1. Set $\theta^{+}$ to `np.copy(parameters_values)`
2. Set $\theta^{+}_i$ to $\theta^{+}_i + \varepsilon$
3. Calculate $J^{+}_i$ using to `forward_propagation_n(x, y, vector_to_dictionary(`$\theta^{+}$ `))`.
- To compute `J_minus[i]`: do the same thing with $\theta^{-}$
- Compute $gradapprox[i] = \frac{J^{+}_i - J^{-}_i}{2 \varepsilon}$
Thus, you get a vector gradapprox, where gradapprox[i] is an approximation of the gradient with respect to `parameter_values[i]`. You can now compare this gradapprox vector to the gradients vector from backpropagation. Just like for the 1D case (Steps 1', 2', 3'), compute:
$$ difference = \frac {\| grad - gradapprox \|_2}{\| grad \|_2 + \| gradapprox \|_2 } \tag{3}$$
```
# GRADED FUNCTION: gradient_check_n
def gradient_check_n(parameters, gradients, X, Y, epsilon = 1e-7):
"""
Checks if backward_propagation_n computes correctly the gradient
of the cost output by forward_propagation_n
Arguments:
parameters -- python dictionary containing your parameters "W1", "b1", "W2", "b2", "W3", "b3":
grad -- output of backward_propagation_n, contains gradients of
the cost with respect to the parameters.
x -- input datapoint, of shape (input size, 1)
y -- true "label"
epsilon -- tiny shift to the input to compute approximated gradient
with formula(1)
Returns:
difference -- difference (2) between the approximated gradient and
the backward propagation gradient
"""
# Set-up variables
parameters_values, _ = dictionary_to_vector(parameters)
grad = gradients_to_vector(gradients)
num_parameters = parameters_values.shape[0]
J_plus = np.zeros((num_parameters, 1))
J_minus = np.zeros((num_parameters, 1))
gradapprox = np.zeros((num_parameters, 1))
# Compute gradapprox
for i in range(num_parameters):
# Compute J_plus[i]. Inputs: "parameters_values, epsilon". Output = "J_plus[i]".
# "_" is used because the function you have to outputs t
#wo parameters but we only care about the first one
### START CODE HERE ### (approx. 3 lines)
thetaplus = np.copy(parameters_values) # Step 1
thetaplus[i][0] = thetaplus[i][0] + epsilon # Step 2
J_plus[i], _ = forward_propagation_n(X, Y, vector_to_dictionary(thetaplus))# Step 3
### END CODE HERE ###
# Compute J_minus[i]. Inputs: "parameters_values, epsilon". Output = "J_minus[i]".
### START CODE HERE ### (approx. 3 lines)
thetaminus = np.copy(parameters_values) # Step 1
thetaminus[i][0] = thetaminus[i][0] - epsilon # Step 2
J_minus[i], _ = forward_propagation_n(X, Y, vector_to_dictionary(thetaminus)) # Step 3
### END CODE HERE ###
# Compute gradapprox[i]
### START CODE HERE ### (approx. 1 line)
gradapprox[i] = (J_plus[i] - J_minus[i])/ (2*epsilon)
### END CODE HERE ###
# Compare gradapprox to backward propagation gradients by computing difference.
### START CODE HERE ### (approx. 1 line)
numerator = np.linalg.norm(grad - gradapprox) # Step 1'
denominator = np.linalg.norm(grad) + np.linalg.norm(gradapprox) # Step 2'
difference = numerator/denominator # Step 3'
### END CODE HERE ###
if difference > 2e-7:
print ("\033[93m" + "There is a mistake in the backward propagation!
difference = " + str(difference) + "\033[0m")
else:
print ("\033[92m" + "Your backward propagation works perfectly fine!
difference = " + str(difference) + "\033[0m")
return difference
X, Y, parameters = gradient_check_n_test_case()
cost, cache = forward_propagation_n(X, Y, parameters)
gradients = backward_propagation_n(X, Y, cache)
difference = gradient_check_n(parameters, gradients, X, Y)
```
**Expected output**:
<table>
<tr>
<td> ** There is a mistake in the backward propagation!** </td>
<td> difference = 0.285093156781 </td>
</tr>
</table>
It seems that there were errors in the `backward_propagation_n` code we gave you! Good that you've implemented the gradient check. Go back to `backward_propagation` and try to find/correct the errors *(Hint: check dW2 and db1)*. Rerun the gradient check when you think you've fixed it. Remember you'll need to re-execute the cell defining `backward_propagation_n()` if you modify the code.
Can you get gradient check to declare your derivative computation correct? Even though this part of the assignment isn't graded, we strongly urge you to try to find the bug and re-run gradient check until you're convinced backprop is now correctly implemented.
**Note**
- Gradient Checking is slow! Approximating the gradient with $\frac{\partial J}{\partial \theta} \approx \frac{J(\theta + \varepsilon) - J(\theta - \varepsilon)}{2 \varepsilon}$ is computationally costly. For this reason, we don't run gradient checking at every iteration during training. Just a few times to check if the gradient is correct.
- Gradient Checking, at least as we've presented it, doesn't work with dropout. You would usually run the gradient check algorithm without dropout to make sure your backprop is correct, then add dropout.
Congrats, you can be confident that your deep learning model for fraud detection is working correctly! You can even use this to convince your CEO. :)
<font color='blue'>
**What you should remember from this notebook**:
- Gradient checking verifies closeness between the gradients from backpropagation and the numerical approximation of the gradient (computed using forward propagation).
- Gradient checking is slow, so we don't run it in every iteration of training. You would usually run it only to make sure your code is correct, then turn it off and use backprop for the actual learning process.
| github_jupyter |
<a href="https://colab.research.google.com/github/https-deeplearning-ai/tensorflow-1-public/blob/adding_C3/C3/W1/assignment/C3_W1_Assignment_Solution.ipynb" target="_parent"><img src="https://colab.research.google.com/assets/colab-badge.svg" alt="Open In Colab"/></a>
##### Copyright 2019 The TensorFlow Authors.
```
#@title Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# https://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
```
**Note:** This notebook can run using TensorFlow 2.5.0
```
#!pip install tensorflow==2.5.0
# bbc-text.csv
!gdown --id 1rX10xeI3eUJmOLsc4pOPY6AnCLO8DxNj
import csv
from tensorflow.keras.preprocessing.text import Tokenizer
from tensorflow.keras.preprocessing.sequence import pad_sequences
#Stopwords list from https://github.com/Yoast/YoastSEO.js/blob/develop/src/config/stopwords.js
# Convert it to a Python list and paste it here
# stopwords = #YOUR CODE HERE
stopwords = [ "a", "about", "above", "after", "again", "against", "all", "am", "an", "and", "any", "are", "as", "at", "be", "because", "been", "before", "being", "below", "between", "both", "but", "by", "could", "did", "do", "does", "doing", "down", "during", "each", "few", "for", "from", "further", "had", "has", "have", "having", "he", "he'd", "he'll", "he's", "her", "here", "here's", "hers", "herself", "him", "himself", "his", "how", "how's", "i", "i'd", "i'll", "i'm", "i've", "if", "in", "into", "is", "it", "it's", "its", "itself", "let's", "me", "more", "most", "my", "myself", "nor", "of", "on", "once", "only", "or", "other", "ought", "our", "ours", "ourselves", "out", "over", "own", "same", "she", "she'd", "she'll", "she's", "should", "so", "some", "such", "than", "that", "that's", "the", "their", "theirs", "them", "themselves", "then", "there", "there's", "these", "they", "they'd", "they'll", "they're", "they've", "this", "those", "through", "to", "too", "under", "until", "up", "very", "was", "we", "we'd", "we'll", "we're", "we've", "were", "what", "what's", "when", "when's", "where", "where's", "which", "while", "who", "who's", "whom", "why", "why's", "with", "would", "you", "you'd", "you'll", "you're", "you've", "your", "yours", "yourself", "yourselves" ]
# Load the BBC text dataset: one category label and one
# stopword-stripped sentence per CSV row.
sentences = []
labels = []
with open("./bbc-text.csv", 'r') as csvfile:
    ### START CODE HERE
    reader = csv.reader(csvfile, delimiter=',')
    next(reader)  # skip the first row (presumably a header -- confirm against the file)
    for row in reader:
        labels.append(row[0])  # column 0: category label
        sentence = row[1]      # column 1: article text
        # Remove stopwords by replacing each " word " occurrence with a space.
        for word in stopwords:
            token = " " + word + " "
            sentence = sentence.replace(token, " ")
        # NOTE(review): both replace() arguments render as a single space here;
        # the original likely collapsed a double space ("  " -> " ") -- confirm.
        sentence = sentence.replace(" ", " ")
        sentences.append(sentence)
    ### END CODE HERE
print(len(sentences))
print(sentences[0])
#Expected output
# 2225
# tv future hands viewers home theatre systems plasma high-definition tvs digital video recorders moving living room way people watch tv will radically different five years time. according expert panel gathered annual consumer electronics show las vegas discuss new technologies will impact one favourite pastimes. us leading trend programmes content will delivered viewers via home networks cable satellite telecoms companies broadband service providers front rooms portable devices. one talked-about technologies ces digital personal video recorders (dvr pvr). set-top boxes like us s tivo uk s sky+ system allow people record store play pause forward wind tv programmes want. essentially technology allows much personalised tv. also built-in high-definition tv sets big business japan us slower take off europe lack high-definition programming. not can people forward wind adverts can also forget abiding network channel schedules putting together a-la-carte entertainment. us networks cable satellite companies worried means terms advertising revenues well brand identity viewer loyalty channels. although us leads technology moment also concern raised europe particularly growing uptake services like sky+. happens today will see nine months years time uk adam hume bbc broadcast s futurologist told bbc news website. likes bbc no issues lost advertising revenue yet. pressing issue moment commercial uk broadcasters brand loyalty important everyone. will talking content brands rather network brands said tim hanlon brand communications firm starcom mediavest. reality broadband connections anybody can producer content. added: challenge now hard promote programme much choice. means said stacey jolna senior vice president tv guide tv group way people find content want watch simplified tv viewers. means networks us terms channels take leaf google s book search engine future instead scheduler help people find want watch. 
kind channel model might work younger ipod generation used taking control gadgets play them. might not suit everyone panel recognised. older generations comfortable familiar schedules channel brands know getting. perhaps not want much choice put hands mr hanlon suggested. end kids just diapers pushing buttons already - everything possible available said mr hanlon. ultimately consumer will tell market want. 50 000 new gadgets technologies showcased ces many enhancing tv-watching experience. high-definition tv sets everywhere many new models lcd (liquid crystal display) tvs launched dvr capability built instead external boxes. one example launched show humax s 26-inch lcd tv 80-hour tivo dvr dvd recorder. one us s biggest satellite tv companies directtv even launched branded dvr show 100-hours recording capability instant replay search function. set can pause rewind tv 90 hours. microsoft chief bill gates announced pre-show keynote speech partnership tivo called tivotogo means people can play recorded programmes windows pcs mobile devices. reflect increasing trend freeing multimedia people can watch want want.
# Fit a word-level tokenizer on the cleaned articles; unseen words map to <OOV>.
tokenizer = Tokenizer(oov_token="<OOV>") ### YOUR CODE HERE
tokenizer.fit_on_texts(sentences) ### YOUR CODE HERE)
word_index = tokenizer.word_index ### YOUR CODE HERE
print(len(word_index)) ### YOUR CODE HERE)
# Expected output
# 29714
# Convert each article to a sequence of word ids and pad to equal length
# (zero-padding appended at the end of each sequence).
sequences = tokenizer.texts_to_sequences(sentences) ### YOUR CODE HERE
padded = pad_sequences(sequences, padding='post') ### YOUR CODE HERE
print(padded[0])
print(padded.shape)
# Expected output
# [ 96 176 1158 ... 0 0 0]
# (2225, 2442)
# A separate tokenizer maps the five category names to integer ids (1-based).
### START CODE HERE
label_tokenizer = Tokenizer()
label_tokenizer.fit_on_texts(labels)
label_word_index = label_tokenizer.word_index
label_seq = label_tokenizer.texts_to_sequences(labels)
### END CODE HERE
print(label_seq)
print(label_word_index)
# Expected Output
# [[4], [2], [1], [1], [5], [3], [3], [1], [1], [5], [5], [2], [2], [3], [1], [2], [3], [1], [2], [4], [4], [4], [1], [1], [4], [1], [5], [4], [3], [5], [3], [4], [5], [5], [2], [3], [4], [5], [3], [2], [3], [1], [2], [1], [4], [5], [3], [3], [3], [2], [1], [3], [2], [2], [1], [3], [2], [1], [1], [2], [2], [1], [2], [1], [2], [4], [2], [5], [4], [2], [3], [2], [3], [1], [2], [4], [2], [1], [1], [2], [2], [1], [3], [2], [5], [3], [3], [2], [5], [2], [1], [1], [3], [1], [3], [1], [2], [1], [2], [5], [5], [1], [2], [3], [3], [4], [1], [5], [1], [4], [2], [5], [1], [5], [1], [5], [5], [3], [1], [1], [5], [3], [2], [4], [2], [2], [4], [1], [3], [1], [4], [5], [1], [2], [2], [4], [5], [4], [1], [2], [2], [2], [4], [1], [4], [2], [1], [5], [1], [4], [1], [4], [3], [2], [4], [5], [1], [2], [3], [2], [5], [3], [3], [5], [3], [2], [5], [3], [3], [5], [3], [1], [2], [3], [3], [2], [5], [1], [2], [2], [1], [4], [1], [4], [4], [1], [2], [1], [3], [5], [3], [2], [3], [2], [4], [3], [5], [3], [4], [2], [1], [2], [1], [4], [5], [2], [3], [3], [5], [1], [5], [3], [1], [5], [1], [1], [5], [1], [3], [3], [5], [4], [1], [3], [2], [5], [4], [1], [4], [1], [5], [3], [1], [5], [4], [2], [4], [2], [2], [4], [2], [1], [2], [1], [2], [1], [5], [2], [2], [5], [1], [1], [3], [4], [3], [3], [3], [4], [1], [4], [3], [2], [4], [5], [4], [1], [1], [2], [2], [3], [2], [4], [1], [5], [1], [3], [4], [5], [2], [1], [5], [1], [4], [3], [4], [2], [2], [3], [3], [1], [2], [4], [5], [3], [4], [2], [5], [1], [5], [1], [5], [3], [2], [1], [2], [1], [1], [5], [1], [3], [3], [2], [5], [4], [2], [1], [2], [5], [2], [2], [2], [3], [2], [3], [5], [5], [2], [1], [2], [3], [2], [4], [5], [2], [1], [1], [5], [2], [2], [3], [4], [5], [4], [3], [2], [1], [3], [2], [5], [4], [5], [4], [3], [1], [5], [2], [3], [2], [2], [3], [1], [4], [2], [2], [5], [5], [4], [1], [2], [5], [4], [4], [5], [5], [5], [3], [1], [3], [4], [2], [5], [3], [2], [5], [3], [3], [1], [1], [2], [3], [5], [2], [1], [2], [2], [1], [2], [3], [3], 
[3], [1], [4], [4], [2], [4], [1], [5], [2], [3], [2], [5], [2], [3], [5], [3], [2], [4], [2], [1], [1], [2], [1], [1], [5], [1], [1], [1], [4], [2], [2], [2], [3], [1], [1], [2], [4], [2], [3], [1], [3], [4], [2], [1], [5], [2], [3], [4], [2], [1], [2], [3], [2], [2], [1], [5], [4], [3], [4], [2], [1], [2], [5], [4], [4], [2], [1], [1], [5], [3], [3], [3], [1], [3], [4], [4], [5], [3], [4], [5], [2], [1], [1], [4], [2], [1], [1], [3], [1], [1], [2], [1], [5], [4], [3], [1], [3], [4], [2], [2], [2], [4], [2], [2], [1], [1], [1], [1], [2], [4], [5], [1], [1], [4], [2], [4], [5], [3], [1], [2], [3], [2], [4], [4], [3], [4], [2], [1], [2], [5], [1], [3], [5], [1], [1], [3], [4], [5], [4], [1], [3], [2], [5], [3], [2], [5], [1], [1], [4], [3], [5], [3], [5], [3], [4], [3], [5], [1], [2], [1], [5], [1], [5], [4], [2], [1], [3], [5], [3], [5], [5], [5], [3], [5], [4], [3], [4], [4], [1], [1], [4], [4], [1], [5], [5], [1], [4], [5], [1], [1], [4], [2], [3], [4], [2], [1], [5], [1], [5], [3], [4], [5], [5], [2], [5], [5], [1], [4], [4], [3], [1], [4], [1], [3], [3], [5], [4], [2], [4], [4], [4], [2], [3], [3], [1], [4], [2], [2], [5], [5], [1], [4], [2], [4], [5], [1], [4], [3], [4], [3], [2], [3], [3], [2], [1], [4], [1], [4], [3], [5], [4], [1], [5], [4], [1], [3], [5], [1], [4], [1], [1], [3], [5], [2], [3], [5], [2], [2], [4], [2], [5], [4], [1], [4], [3], [4], [3], [2], [3], [5], [1], [2], [2], [2], [5], [1], [2], [5], [5], [1], [5], [3], [3], [3], [1], [1], [1], [4], [3], [1], [3], [3], [4], [3], [1], [2], [5], [1], [2], [2], [4], [2], [5], [5], [5], [2], [5], [5], [3], [4], [2], [1], [4], [1], [1], [3], [2], [1], [4], [2], [1], [4], [1], [1], [5], [1], [2], [1], [2], [4], [3], [4], [2], [1], [1], [2], [2], [2], [2], [3], [1], [2], [4], [2], [1], [3], [2], [4], [2], [1], [2], [3], [5], [1], [2], [3], [2], [5], [2], [2], [2], [1], [3], [5], [1], [3], [1], [3], [3], [2], [2], [1], [4], [5], [1], [5], [2], [2], [2], [4], [1], [4], [3], [4], [4], [4], [1], [4], [4], [5], 
[5], [4], [1], [5], [4], [1], [1], [2], [5], [4], [2], [1], [2], [3], [2], [5], [4], [2], [3], [2], [4], [1], [2], [5], [2], [3], [1], [5], [3], [1], [2], [1], [3], [3], [1], [5], [5], [2], [2], [1], [4], [4], [1], [5], [4], [4], [2], [1], [5], [4], [1], [1], [2], [5], [2], [2], [2], [5], [1], [5], [4], [4], [4], [3], [4], [4], [5], [5], [1], [1], [3], [2], [5], [1], [3], [5], [4], [3], [4], [4], [2], [5], [3], [4], [3], [3], [1], [3], [3], [5], [4], [1], [3], [1], [5], [3], [2], [2], [3], [1], [1], [1], [5], [4], [4], [2], [5], [1], [3], [4], [3], [5], [4], [4], [2], [2], [1], [2], [2], [4], [3], [5], [2], [2], [2], [2], [2], [4], [1], [3], [4], [4], [2], [2], [5], [3], [5], [1], [4], [1], [5], [1], [4], [1], [2], [1], [3], [3], [5], [2], [1], [3], [3], [1], [5], [3], [2], [4], [1], [2], [2], [2], [5], [5], [4], [4], [2], [2], [5], [1], [2], [5], [4], [4], [2], [2], [1], [1], [1], [3], [3], [1], [3], [1], [2], [5], [1], [4], [5], [1], [1], [2], [2], [4], [4], [1], [5], [1], [5], [1], [5], [3], [5], [5], [4], [5], [2], [2], [3], [1], [3], [4], [2], [3], [1], [3], [1], [5], [1], [3], [1], [1], [4], [5], [1], [3], [1], [1], [2], [4], [5], [3], [4], [5], [3], [5], [3], [5], [5], [4], [5], [3], [5], [5], [4], [4], [1], [1], [5], [5], [4], [5], [3], [4], [5], [2], [4], [1], [2], [5], [5], [4], [5], [4], [2], [5], [1], [5], [2], [1], [2], [1], [3], [4], [5], [3], [2], [5], [5], [3], [2], [5], [1], [3], [1], [2], [2], [2], [2], [2], [5], [4], [1], [5], [5], [2], [1], [4], [4], [5], [1], [2], [3], [2], [3], [2], [2], [5], [3], [2], [2], [4], [3], [1], [4], [5], [3], [2], [2], [1], [5], [3], [4], [2], [2], [3], [2], [1], [5], [1], [5], [4], [3], [2], [2], [4], [2], [2], [1], [2], [4], [5], [3], [2], [3], [2], [1], [4], [2], [3], [5], [4], [2], [5], [1], [3], [3], [1], [3], [2], [4], [5], [1], [1], [4], [2], [1], [5], [4], [1], [3], [1], [2], [2], [2], [3], [5], [1], [3], [4], [2], [2], [4], [5], [5], [4], [4], [1], [1], [5], [4], [5], [1], [3], [4], [2], [1], [5], [2], [2], 
[5], [1], [2], [1], [4], [3], [3], [4], [5], [3], [5], [2], [2], [3], [1], [4], [1], [1], [1], [3], [2], [1], [2], [4], [1], [2], [2], [1], [3], [4], [1], [2], [4], [1], [1], [2], [2], [2], [2], [3], [5], [4], [2], [2], [1], [2], [5], [2], [5], [1], [3], [2], [2], [4], [5], [2], [2], [2], [3], [2], [3], [4], [5], [3], [5], [1], [4], [3], [2], [4], [1], [2], [2], [5], [4], [2], [2], [1], [1], [5], [1], [3], [1], [2], [1], [2], [3], [3], [2], [3], [4], [5], [1], [2], [5], [1], [3], [3], [4], [5], [2], [3], [3], [1], [4], [2], [1], [5], [1], [5], [1], [2], [1], [3], [5], [4], [2], [1], [3], [4], [1], [5], [2], [1], [5], [1], [4], [1], [4], [3], [1], [2], [5], [4], [4], [3], [4], [5], [4], [1], [2], [4], [2], [5], [1], [4], [3], [3], [3], [3], [5], [5], [5], [2], [3], [3], [1], [1], [4], [1], [3], [2], [2], [4], [1], [4], [2], [4], [3], [3], [1], [2], [3], [1], [2], [4], [2], [2], [5], [5], [1], [2], [4], [4], [3], [2], [3], [1], [5], [5], [3], [3], [2], [2], [4], [4], [1], [1], [3], [4], [1], [4], [2], [1], [2], [3], [1], [5], [2], [4], [3], [5], [4], [2], [1], [5], [4], [4], [5], [3], [4], [5], [1], [5], [1], [1], [1], [3], [4], [1], [2], [1], [1], [2], [4], [1], [2], [5], [3], [4], [1], [3], [4], [5], [3], [1], [3], [4], [2], [5], [1], [3], [2], [4], [4], [4], [3], [2], [1], [3], [5], [4], [5], [1], [4], [2], [3], [5], [4], [3], [1], [1], [2], [5], [2], [2], [3], [2], [2], [3], [4], [5], [3], [5], [5], [2], [3], [1], [3], [5], [1], [5], [3], [5], [5], [5], [2], [1], [3], [1], [5], [4], [4], [2], [3], [5], [2], [1], [2], [3], [3], [2], [1], [4], [4], [4], [2], [3], [3], [2], [1], [1], [5], [2], [1], [1], [3], [3], [3], [5], [3], [2], [4], [2], [3], [5], [5], [2], [1], [3], [5], [1], [5], [3], [3], [2], [3], [1], [5], [5], [4], [4], [4], [4], [3], [4], [2], [4], [1], [1], [5], [2], [4], [5], [2], [4], [1], [4], [5], [5], [3], [3], [1], [2], [2], [4], [5], [1], [3], [2], [4], [5], [3], [1], [5], [3], [3], [4], [1], [3], [2], [3], [5], [4], [1], [3], [5], [5], [2], [1], 
[4], [4], [1], [5], [4], [3], [4], [1], [3], [3], [1], [5], [1], [3], [1], [4], [5], [1], [5], [2], [2], [5], [5], [5], [4], [1], [2], [2], [3], [3], [2], [3], [5], [1], [1], [4], [3], [1], [2], [1], [2], [4], [1], [1], [2], [5], [1], [1], [4], [1], [2], [3], [2], [5], [4], [5], [3], [2], [5], [3], [5], [3], [3], [2], [1], [1], [1], [4], [4], [1], [3], [5], [4], [1], [5], [2], [5], [3], [2], [1], [4], [2], [1], [3], [2], [5], [5], [5], [3], [5], [3], [5], [1], [5], [1], [3], [3], [2], [3], [4], [1], [4], [1], [2], [3], [4], [5], [5], [3], [5], [3], [1], [1], [3], [2], [4], [1], [3], [3], [5], [1], [3], [3], [2], [4], [4], [2], [4], [1], [1], [2], [3], [2], [4], [1], [4], [3], [5], [1], [2], [1], [5], [4], [4], [1], [3], [1], [2], [1], [2], [1], [1], [5], [5], [2], [4], [4], [2], [4], [2], [2], [1], [1], [3], [1], [4], [1], [4], [1], [1], [2], [2], [4], [1], [2], [4], [4], [3], [1], [2], [5], [5], [4], [3], [1], [1], [4], [2], [4], [5], [5], [3], [3], [2], [5], [1], [5], [5], [2], [1], [3], [4], [2], [1], [5], [4], [3], [3], [1], [1], [2], [2], [2], [2], [2], [5], [2], [3], [3], [4], [4], [5], [3], [5], [2], [3], [1], [1], [2], [4], [2], [4], [1], [2], [2], [3], [1], [1], [3], [3], [5], [5], [3], [2], [3], [3], [2], [4], [3], [3], [3], [3], [3], [5], [5], [4], [3], [1], [3], [1], [4], [1], [1], [1], [5], [4], [5], [4], [1], [4], [1], [1], [5], [5], [2], [5], [5], [3], [2], [1], [4], [4], [3], [2], [1], [2], [5], [1], [3], [5], [1], [1], [2], [3], [4], [4], [2], [2], [1], [3], [5], [1], [1], [3], [5], [4], [1], [5], [2], [3], [1], [3], [4], [5], [1], [3], [2], [5], [3], [5], [3], [1], [3], [2], [2], [3], [2], [4], [1], [2], [5], [2], [1], [1], [5], [4], [3], [4], [3], [3], [1], [1], [1], [2], [4], [5], [2], [1], [2], [1], [2], [4], [2], [2], [2], [2], [1], [1], [1], [2], [2], [5], [2], [2], [2], [1], [1], [1], [4], [2], [1], [1], [1], [2], [5], [4], [4], [4], [3], [2], [2], [4], [2], [4], [1], [1], [3], [3], [3], [1], [1], [3], [3], [4], [2], [1], [1], [1], [1], [2], 
[1], [2], [2], [2], [2], [1], [3], [1], [4], [4], [1], [4], [2], [5], [2], [1], [2], [4], [4], [3], [5], [2], [5], [2], [4], [3], [5], [3], [5], [5], [4], [2], [4], [4], [2], [3], [1], [5], [2], [3], [5], [2], [4], [1], [4], [3], [1], [3], [2], [3], [3], [2], [2], [2], [4], [3], [2], [3], [2], [5], [3], [1], [3], [3], [1], [5], [4], [4], [2], [4], [1], [2], [2], [3], [1], [4], [4], [4], [1], [5], [1], [3], [2], [3], [3], [5], [4], [2], [4], [1], [5], [5], [1], [2], [5], [4], [4], [1], [5], [2], [3], [3], [3], [4], [4], [2], [3], [2], [3], [3], [5], [1], [4], [2], [4], [5], [4], [4], [1], [3], [1], [1], [3], [5], [5], [2], [3], [3], [1], [2], [2], [4], [2], [4], [4], [1], [2], [3], [1], [2], [2], [1], [4], [1], [4], [5], [1], [1], [5], [2], [4], [1], [1], [3], [4], [2], [3], [1], [1], [3], [5], [4], [4], [4], [2], [1], [5], [5], [4], [2], [3], [4], [1], [1], [4], [4], [3], [2], [1], [5], [5], [1], [5], [4], [4], [2], [2], [2], [1], [1], [4], [1], [2], [4], [2], [2], [1], [2], [3], [2], [2], [4], [2], [4], [3], [4], [5], [3], [4], [5], [1], [3], [5], [2], [4], [2], [4], [5], [4], [1], [2], [2], [3], [5], [3], [1]]
# {'sport': 1, 'business': 2, 'politics': 3, 'tech': 4, 'entertainment': 5}
```
| github_jupyter |
```
import os
import json
# Load the cached Twitter friends data (user -> list of friend ids) and
# convert each friend list to a set for fast intersection/union later.
data_folder = os.path.join(os.path.expanduser("~"), "Data", "twitter")
friends_filename = os.path.join(data_folder, "python_friends.json")
with open(friends_filename) as inf:
    friends = json.load(inf)
friends = {user: set(friends[user]) for user in friends}
def compute_similarity(friends1, friends2):
    """Jaccard similarity between two friend collections.

    Returns |intersection| / |union|. Fix over the original: when both
    collections are empty the union is empty and the original raised
    ZeroDivisionError; similarity is defined as 0.0 in that case.
    """
    set_friends1 = set(friends1)
    set_friends2 = set(friends2)
    union = set_friends1 | set_friends2
    if not union:
        return 0.0  # two users with no friends at all: no evidence of similarity
    return len(set_friends1 & set_friends2) / len(union)
import networkx as nx
def create_graph(friends, threshold=0):
    """Build an undirected user-similarity graph.

    Nodes are users; an edge joins two users whose Jaccard friend
    similarity is >= threshold, with the similarity stored as the edge's
    'weight' attribute.

    Improvement over the original: the double loop evaluated every
    ordered pair, computing each similarity twice and adding each edge
    twice; iterating unordered pairs once yields an identical graph in
    half the work.
    """
    from itertools import combinations

    G = nx.Graph()
    for user1, user2 in combinations(friends, 2):
        weight = compute_similarity(friends[user1], friends[user2])
        if weight >= threshold:
            G.add_node(user1)
            G.add_node(user2)
            G.add_edge(user1, user2, weight=weight)
    return G
# Draw the full similarity graph (threshold 0) with a spring layout;
# edge width is proportional to the Jaccard similarity weight.
G = create_graph(friends, 0)
%matplotlib inline
from matplotlib import pyplot as plt
plt.figure(figsize=(10,10))
pos = nx.spring_layout(G)
nx.draw_networkx_nodes(G, pos, node_size=500)
edgewidth = [ d['weight'] for (u,v,d) in G.edges(data=True)]
nx.draw_networkx_edges(G, pos, width=edgewidth)
# Count nodes per connected component at two thresholds.
# Fix: nx.connected_component_subgraphs was removed in networkx 2.4;
# build the component subgraphs explicitly (identical output on older versions).
G = create_graph(friends, 0.1)
sub_graphs = (G.subgraph(c) for c in nx.connected_components(G))
for i, sub_graph in enumerate(sub_graphs):
    n_nodes = len(sub_graph.nodes())
    print("Subgraph {0} has {1} nodes".format(i, n_nodes))
G = create_graph(friends, 0.15)
sub_graphs = (G.subgraph(c) for c in nx.connected_components(G))
for i, sub_graph in enumerate(sub_graphs):
    n_nodes = len(sub_graph.nodes())
    print("Subgraph {0} has {1} nodes".format(i, n_nodes))
# Colour each node by the index of its connected component.
# Fix: nx.connected_component_subgraphs was removed in networkx 2.4;
# enumerate the component node sets directly (same labelling).
label_dict = {}
for i, component in enumerate(nx.connected_components(G)):
    for node in component:
        label_dict[node] = i
labels = [label_dict[node] for node in G.nodes()]
plt.figure(figsize=(10,10))
nx.draw(G, node_color=labels, cmap=plt.cm.Paired, node_size=500)
# Draw the components on one shared layout, then again as one subplot each.
# Fixes: nx.connected_component_subgraphs was removed in networkx 2.4 (a list
# comprehension of G.subgraph also lets us reuse the result instead of
# recreating the generator); fig.add_subplot is 1-indexed, so the original
# `i` argument raised on the first component — use i + 1 and round the row
# count up so an odd number of components still fits.
sub_graphs = [G.subgraph(c) for c in nx.connected_components(G)]
plt.figure(figsize=(10, 10))
pos = nx.spring_layout(G)
for i, sub_graph in enumerate(sub_graphs):
    nodes = sub_graph.nodes()
    edges = sub_graph.edges()
    nx.draw_networkx_nodes(G, pos, nodes, node_size=500)
    nx.draw_networkx_edges(G, pos, edges)
n_subgraphs = nx.number_connected_components(G)
fig = plt.figure(figsize=(20, (n_subgraphs * 3)))
for i, sub_graph in enumerate(sub_graphs):
    ax = fig.add_subplot((n_subgraphs + 1) // 2, 2, i + 1)
    ax.get_xaxis().set_visible(False)
    ax.get_yaxis().set_visible(False)
    pos = nx.spring_layout(G)
    nx.draw_networkx_nodes(G, pos, sub_graph.nodes(), ax=ax, node_size=500)
    nx.draw_networkx_edges(G, pos, sub_graph.edges(), ax=ax)
#from sklearn.metrics import silhouette_score
import numpy as np
def compute_silhouette(threshold, friends):
    """Silhouette score of the graph's connected-component clustering.

    Builds the similarity graph at `threshold`, labels each node by its
    connected component, and scores the clustering using 1 - adjacency
    as a precomputed distance matrix. Returns -99 when the silhouette is
    undefined (empty graph, or a degenerate number of components).
    """
    G = create_graph(friends, threshold=threshold)
    if len(G.nodes()) == 0:
        return -99  # Invalid graph
    if not (2 <= nx.number_connected_components(G) < len(G.nodes()) - 1):
        return -99  # Invalid number of components, Silhouette not defined
    label_dict = {}
    # Fix: nx.connected_component_subgraphs was removed in networkx 2.4;
    # iterate the component node sets directly (same labelling).
    for i, component in enumerate(nx.connected_components(G)):
        for node in component:
            label_dict[node] = i
    labels = np.array([label_dict[node] for node in G.nodes()])
    X = nx.to_scipy_sparse_matrix(G).todense()
    X = 1 - X  # similarity -> distance
    # NOTE(review): with the sklearn import commented out, this resolves to
    # the hand-rolled silhouette_score defined later in the notebook, so the
    # cells must be executed out of order for this call to succeed — confirm.
    return silhouette_score(X, labels, metric='precomputed')
print(compute_silhouette(0.1, friends))
from scipy.optimize import minimize #(fun, x0, args=(),
def invert(func):
    """Wrap *func* so it returns the negation of its result.

    Useful for feeding a maximisation objective to a minimiser such as
    scipy.optimize.minimize.
    """
    def negated(*args, **kwargs):
        return -func(*args, **kwargs)
    return negated
# Maximise the silhouette over the threshold by minimising its negation.
result = minimize(invert(compute_silhouette), 0.1, method='nelder-mead', args=(friends,), options={'maxiter':10, })
print(result)
# Inspect the components near the optimised threshold.
G = create_graph(friends, threshold=0.135)
# NOTE(review): connected_component_subgraphs was removed in networkx 2.4;
# on current versions use (G.subgraph(c) for c in nx.connected_components(G)).
sub_graphs = nx.connected_component_subgraphs(G)
for i, sub_graph in enumerate(sub_graphs):
    n_nodes = len(sub_graph.nodes())
    print("Subgraph {0} has {1} nodes".format(i, n_nodes))
labels  # notebook display of the component labels from the earlier cell
X = 1-nx.to_scipy_sparse_matrix(G).todense()  # distance matrix = 1 - adjacency
def silhouette_score(X, labels, metric='precomputed'):
    """Mean silhouette coefficient over all samples.

    Hand-rolled replacement for sklearn.metrics.silhouette_score (the
    sklearn import is commented out above). X is a precomputed pairwise
    distance matrix.
    """
    labels = np.array(labels)
    print(labels.shape)  # debug output
    return np.mean(silhouette_samples(X, labels, metric=metric))
def silhouette_samples(X, labels, metric='precomputed'):
    """Silhouette coefficient for each sample.

    X is treated as a precomputed pairwise distance matrix; `metric` is
    accepted for API compatibility but not otherwise used here.
    """
    print(X.shape)  # debug output
    distances = X #pairwise_distances(X, metric=metric, **kwds)
    n = labels.shape[0]
    # A[i]: mean distance from sample i to the rest of its own cluster.
    A = np.array([_intra_cluster_distance(distances[i], labels, i)
                  for i in range(n)])
    # B[i]: mean distance from sample i to its nearest foreign cluster.
    B = np.array([_nearest_cluster_distance(distances[i], labels, i)
                  for i in range(n)])
    sil_samples = (B - A) / np.maximum(A, B)
    # nan values are for clusters of size 1, and should be 0
    return np.nan_to_num(sil_samples)
def _intra_cluster_distance(distances_row, labels, i):
"""Calculate the mean intra-cluster distance for sample i.
Parameters
----------
distances_row : array, shape = [n_samples]
Pairwise distance matrix between sample i and each sample.
labels : array, shape = [n_samples]
label values for each sample
i : int
Sample index being calculated. It is excluded from calculation and
used to determine the current label
Returns
-------
a : float
Mean intra-cluster distance for sample i
"""
mask = (labels == labels[i])
mask[i] = False
mask = mask.reshape(distances_row.shape)
#print("Cluster {}".format(i))
#print(mask)
#print(distances_row.flatten())
#print(distances_row.flatten()[mask])
a = np.mean(distances_row[mask])
return a
def _nearest_cluster_distance(distances_row, labels, i):
"""Calculate the mean nearest-cluster distance for sample i.
Parameters
----------
distances_row : array, shape = [n_samples]
Pairwise distance matrix between sample i and each sample.
labels : array, shape = [n_samples]
label values for each sample
i : int
Sample index being calculated. It is used to determine the current
label.
Returns
-------
b : float
Mean nearest-cluster distance for sample i
"""
label = labels[i]
b = np.min([np.mean(distances_row[(labels == cur_label).reshape(distances_row.shape)])
for cur_label in set(labels) if not cur_label == label])
return b
silhouette_score(X, labels, metric='precomputed')
```
| github_jupyter |
```
import requests
import pandas as pd
import matplotlib
import numpy as np
from config import DATA_HOLIDAYS_PROCESSED_FILE
API_KEY = '##b13b8edeebf4b7dd082e7df702fc94702bef7327##'
END_POINT='https://calendarific.com/api/v2/holidays?&api_key='
YEARS = ['2018','2019']
COUNTRY = 'SG'
def f(x, hl):
    """Indicator: 1 if *x* is contained in *hl*, else 0."""
    return 1 if x in hl else 0
def f_remove(x, hl):
    """Return 0 when *x* is in *hl*, otherwise *x*'s second element."""
    return 0 if x in hl else x[1]
# Fetch the Singapore public-holiday dates for 2018-2019 from the
# Calendarific API and flag them on a daily timeline.
# NOTE(review): the API key is hard-coded in this notebook — move it to a
# secrets/config file before sharing.
holidays_list = []
for year in YEARS:
    url = END_POINT+API_KEY+'&country='+COUNTRY+"&year="+year
    holidays_list_dicts = requests.get(url).json()['response']['holidays']
    holidays_list.extend([x['date']['iso'] for x in holidays_list_dicts])
#clean it up: keep only the date part of each ISO timestamp
holidays_list = [x.split('T')[0] for x in holidays_list]
earliest_date = '2018-01-01'
latest_date = '2019-12-31'
holidays_list_datetime = pd.to_datetime(holidays_list)
# One row per calendar day; 'holiday' is 1 on public holidays.
data = pd.DataFrame({'timestamp':pd.date_range(start=earliest_date, end=latest_date, freq='D')})
data['holiday'] = data['timestamp'].apply(lambda x: f(x, holidays_list_datetime))
# School recess weeks, hard-coded from the academic calendar
# (March and late-September/October weeks of 2018 and 2019).
school_holiday = pd.to_datetime(['2018-03-05',
                                 '2018-03-06',
                                 '2018-03-07',
                                 '2018-03-08',
                                 '2018-03-09',
                                 '2018-10-01',
                                 '2018-10-02',
                                 '2018-10-03',
                                 '2018-10-04',
                                 '2018-10-05',
                                 '2019-03-04',
                                 '2019-03-05',
                                 '2019-03-06',
                                 '2019-03-07',
                                 '2019-03-08',
                                 '2019-09-30',
                                 '2019-10-01',
                                 '2019-10-02',
                                 '2019-10-03',
                                 '2019-10-04'])
data['school_holiday'] = data['timestamp'].apply(lambda x: f(x, school_holiday))
#get teaching dates from NTU calendar: each range is one teaching block
# (the two halves of each semester, split by the recess week).
teaching0 = pd.date_range(start='2018-01-15', end='2018-03-02', freq='D')
teaching1 = pd.date_range(start='2018-03-12', end='2018-05-11', freq='D')
teaching2 = pd.date_range(start='2018-08-13', end='2018-09-28', freq='D')
teaching3 = pd.date_range(start='2018-10-08', end='2018-12-07', freq='D')
teaching4 = pd.date_range(start='2019-01-14', end='2019-03-01', freq='D')
teaching5 = pd.date_range(start='2019-03-11', end='2019-05-10', freq='D')
teaching6 = pd.date_range(start='2019-08-13', end='2019-11-15', freq='D')
teaching_time_datetime = teaching0.append(teaching1).append(teaching2).append(teaching3).append(teaching4).append(teaching5).append(teaching6)
data['teaching_time'] = data['timestamp'].apply(lambda x: f(x,teaching_time_datetime))
# take out public holidays and weekends (no teaching)
data['dayofweek'] = np.array(data['timestamp'].dt.dayofweek, dtype=np.uint8)
data['weekend_days'] = data['dayofweek'].apply(lambda x: 1 if x>=5 else 0)  # Sat(5)/Sun(6) flag
# data['teaching_time'] = data[['timestamp','teaching_time']].apply(lambda x: f_remove(x, holidays_list_datetime), axis=1)
# data['teaching_time'] = data[['teaching_time','weekend_days']].apply(lambda x: 0 if x[1]==1 else x[0], axis=1)
# Plot the three boolean indicator series; the sparse y-tick labels fake a
# False/True axis over the 0-1 value range.
visual = data.set_index('timestamp')
idx = ["","False","","","","","True"]
ax = visual[['holiday', 'school_holiday', 'teaching_time']].plot(color = ['C0', 'C9', 'C8'])
font = {'family' : 'Arial',
        'size'   : 18}
ax.set_xlabel("")
ax.set_ylabel("Boolean [-]")
ax.legend(["Public Holiday", 'School Holiday', "Working Day"]);
ax.set_yticklabels(idx)
matplotlib.rc('font', **font)
# Same plot for the teaching-time indicator alone.
# NOTE(review): only one series is plotted here but the legend still lists
# three labels — looks like a copy-paste leftover; confirm intent.
visual = data.set_index('timestamp')
idx = ["","False","","","","","True"]
ax = visual[['teaching_time']].plot(color = ['C0', 'C9', 'C8'])
font = {'family' : 'Arial',
        'size' : 18}
ax.set_xlabel("")
ax.set_ylabel("Boolean [-]")
ax.legend(["Public Holiday", 'School Holiday', "Working Day"]);
ax.set_yticklabels(idx)
matplotlib.rc('font', **font)
# Persist the processed indicator columns for downstream use.
data[['timestamp','holiday', 'school_holiday', 'teaching_time']].to_csv(DATA_HOLIDAYS_PROCESSED_FILE, index=False)
```
| github_jupyter |
# TREC-50
# BERT finetuning
## Library
```
# !pip install transformers==4.8.2
# !pip install datasets==1.7.0
import os
import time
import pickle
import numpy as np
import torch
from sklearn.model_selection import train_test_split
from sklearn.metrics import accuracy_score, recall_score, precision_score, f1_score
from transformers import BertTokenizer, BertTokenizerFast
from transformers import BertForSequenceClassification, AdamW
from transformers import Trainer, TrainingArguments
from transformers import EarlyStoppingCallback
from transformers.data.data_collator import DataCollatorWithPadding
from datasets import load_dataset, Dataset, concatenate_datasets
device = torch.device('cuda') if torch.cuda.is_available() else torch.device('cpu')
device
```
## Global variables
```
# Hyper-parameters follow Sun et al., "How to Fine-Tune BERT for Text
# Classification?"; paths are machine-specific.
BATCH_SIZE = 24 # cf. paper Sun et al.
NB_EPOCHS = 4 # cf. paper Sun et al.
CURRENT_PATH = '~/Results/BERT_finetune' # put your path here
RESULTS_FILE = os.path.join(CURRENT_PATH, 'trec-50_results.pkl')
RESULTS_DIR = os.path.join(CURRENT_PATH,'trec-50/')
CACHE_DIR = '~/Data/huggignface/' # put your path here
```
## Dataset
```
# download dataset
raw_datasets = load_dataset('trec', cache_dir=CACHE_DIR)
# tokenize
tokenizer = BertTokenizer.from_pretrained("bert-base-uncased")
def tokenize_function(examples):
    """Tokenise a batch of TREC questions with padding and truncation."""
    return tokenizer(examples["text"], padding=True, truncation=True)
tokenized_datasets = raw_datasets.map(tokenize_function, batched=True)
# Use the fine-grained (50-class) label column as the training target.
tokenized_datasets = tokenized_datasets.rename_column('label-fine', 'labels') # 'label-fine'
tokenized_datasets.set_format(type='torch', columns=['input_ids', 'attention_mask', 'labels'])
# 80/20 train/validation split of the shuffled training set.
train_dataset = tokenized_datasets["train"].shuffle(seed=42)
train_val_datasets = train_dataset.train_test_split(train_size=0.8)
train_dataset = train_val_datasets['train']
val_dataset = train_val_datasets['test']
test_dataset = tokenized_datasets["test"].shuffle(seed=42)
# get number of labels
num_labels = len(set(train_dataset['labels'].tolist()))
# Overridden with the full TREC-50 label count: the 80% train split may not
# contain every class, so the computed value above can undercount.
num_labels = 50
# num_labels
```
## Model
#### Model
```
# Pretrained BERT encoder with a freshly initialised classification head
# sized for the 50 fine-grained TREC labels; moved to GPU when available.
model = BertForSequenceClassification.from_pretrained("bert-base-uncased", num_labels=num_labels)
model.to(device)
```
#### Training
```
# Training configuration; hyper-parameter choices follow Sun et al.
training_args = TrainingArguments(
    # output
    output_dir=RESULTS_DIR,
    # params
    num_train_epochs=NB_EPOCHS,              # nb of epochs
    per_device_train_batch_size=BATCH_SIZE,  # batch size per device during training
    per_device_eval_batch_size=BATCH_SIZE,   # cf. paper Sun et al.
    learning_rate=2e-5,                      # cf. paper Sun et al.
    # warmup_steps=500,                      # number of warmup steps for learning rate scheduler
    warmup_ratio=0.1,                        # cf. paper Sun et al.
    weight_decay=0.01,                       # strength of weight decay
    # # eval
    evaluation_strategy="steps",             # cf. paper Sun et al.
    eval_steps=50,                           # 20 # cf. paper Sun et al.
    # evaluation_strategy='no',              # no more evaluation, takes time
    # log
    logging_dir=RESULTS_DIR+'logs',
    logging_strategy='steps',
    logging_steps=50,                        # 20
    # save: keep only the single best checkpoint (by eval loss)
    save_strategy='steps',
    save_total_limit=1,
    # save_steps=20,                         # default 500
    load_best_model_at_end=True,             # cf. paper Sun et al.
    metric_for_best_model='eval_loss'
)
def compute_metrics(p):
    """Validation accuracy from a Trainer (logits, labels) prediction tuple."""
    logits, gold = p
    predicted = np.argmax(logits, axis=1)
    return {"val_accuracy": accuracy_score(y_true=gold, y_pred=predicted)}
# Fine-tune; best checkpoint (lowest eval loss) is reloaded at the end
# because load_best_model_at_end=True in the TrainingArguments.
trainer = Trainer(
    model=model,
    args=training_args,
    tokenizer=tokenizer,
    train_dataset=train_dataset,
    eval_dataset=val_dataset,
    # compute_metrics=compute_metrics,
    # callbacks=[EarlyStoppingCallback(early_stopping_patience=5)]
)
results = trainer.train()
# Wall-clock training time reported by the Trainer, and its per-epoch average.
training_time = results.metrics["train_runtime"]
training_time_per_epoch = training_time / training_args.num_train_epochs
training_time_per_epoch
trainer.save_model(os.path.join(RESULTS_DIR, 'checkpoint_best_model'))
```
## Results
```
# load model (the best checkpoint saved above)
model_file = os.path.join(RESULTS_DIR, 'checkpoint_best_model')
finetuned_model = BertForSequenceClassification.from_pretrained(model_file, num_labels=num_labels)
finetuned_model.to(device)
finetuned_model.eval()
# compute test acc: predict on the held-out test split and score argmax preds
test_trainer = Trainer(finetuned_model, data_collator=DataCollatorWithPadding(tokenizer))
raw_preds, labels, _ = test_trainer.predict(test_dataset)
preds = np.argmax(raw_preds, axis=1)
test_acc = accuracy_score(y_true=labels, y_pred=preds)
# save results
results_d = {}
results_d['accuracy'] = test_acc
results_d['training_time'] = training_time
results_d
# save results (pickled dict with accuracy and training time)
with open(RESULTS_FILE, 'wb') as fh:
    pickle.dump(results_d, fh)
```
| github_jupyter |
```
import pandas as pd
import numpy as np
from itertools import product
import matplotlib.pyplot as plt
def plot_feat_importance(path_out, imp, oob, oos, method, tag=0, sim_num=0, **kwargs):
    """Horizontal bar plot of feature importances (mean +/- std).

    For MDI importances the x-axis is clipped to the maximum total
    importance and a dotted red line marks the uniform-importance level
    1/n_features.

    Fixes over the original (all matplotlib keyword typos):
    plt.figure(figure=...) -> figsize=..., plot(kin='barh') -> kind='barh',
    and error_kw={'error': 'r'} -> {'ecolor': 'r'}.
    """
    plt.figure(figsize=(10, imp.shape[0] / 5.))
    imp = imp.sort_values('mean', ascending=True)
    ax = imp['mean'].plot(kind='barh', color='b', alpha=.25, xerr=imp['std'],
                          error_kw={'ecolor': 'r'})
    if method == 'MDI':
        plt.xlim([0, imp.sum(axis=1).max()])
        plt.axvline(1. / imp.shape[0], linewidth=1, color='r', linestyle='dotted')
    ax.get_yaxis().set_visible(False)
def test_func(n_features=40, n_informative=10, n_redundant=10, n_estimators=1000,
              n_samples=10000, n_splits=10):
    """Run feature-importance experiments (MDI/MDA/SFI) on synthetic data
    and write a per-configuration summary to stats.csv.

    Fixes over the original: `out` is initialised before the loop (it was
    used without being defined), `kwargs.udpate` -> `kwargs.update`, the
    sort key typo 'max_sampels' -> 'max_samples', the summary-column
    selection uses a proper list (out[[...]]), and the output file
    extension is '.csv' (was '.cvs').
    """
    X, cont = get_test_data(n_features, n_informative, n_redundant, n_samples)
    config = {'min_w_leaf': [0.], 'scoring': ['accuracy'], 'method': ['MDI', 'MDA', 'SFI'],
              'max_samples': [1.]}
    jobs = [dict(zip(config.keys(), conf)) for conf in product(*config.values())]
    kwargs = {'path_out': './test_func/', 'n_estimators': n_estimators,
              'tag': 'test_func', 'n_splits': n_splits}
    out = []  # one summary row per job
    for job in jobs:
        job['sim_num'] = job['method'] + '_' + job['scoring'] + '_' + '%.2f' % job['min_w_leaf'] \
            + '_' + str(job['max_samples'])
        print(job['sim_num'])
        kwargs.update(job)
        imp, oob, oos = feat_importance(X=X, cont=cont, **kwargs)
        plot_feat_importance(imp=imp, oob=oob, oos=oos, **kwargs)
        # Normalised importance aggregated by feature type (I/R/N name prefix).
        df0 = imp[['mean']] / imp['mean'].abs().sum()
        df0['type'] = [i[0] for i in df0.index]
        df0 = df0.groupby('type')['mean'].sum().to_dict()
        df0.update({'oob': oob, 'oos': oos})
        df0.update(job)
        out.append(df0)
    out = pd.DataFrame(out).sort_values(['method', 'scoring', 'min_w_leaf', 'max_samples'])
    out = out[['method', 'scoring', 'min_w_leaf', 'max_samples', 'I', 'R', 'N', 'oob', 'oos']]
    out.to_csv(kwargs['path_out'] + 'stats.csv')
```
# 8.1
```
from sklearn.datasets import make_classification
def get_test_data(n_features=40, n_informative=10, n_redundant=10, n_samples=10000):
    """Generate a labelled classification dataset indexed by business days.

    Columns are named I_* (informative), R_* (redundant) and N_* (noise).
    The returned `cont` frame carries the label ('bin'), uniform weights
    ('w') and an event-end column ('t1').

    Fix over the original: `pd.datetime` and the
    DatetimeIndex(periods=..., end=...) constructor form were removed
    from modern pandas; pd.date_range builds the same business-day index.
    """
    X, cont = make_classification(n_samples=n_samples, n_features=n_features,
                                  n_informative=n_informative, n_redundant=n_redundant,
                                  random_state=0, shuffle=False)
    time_idx = pd.date_range(end=pd.Timestamp.today(), periods=n_samples,
                             freq=pd.tseries.offsets.BDay())
    X = pd.DataFrame(X, index=time_idx)
    cont = pd.Series(cont, index=time_idx).to_frame('bin')
    # Create name of columns
    columns = ['I_' + str(i) for i in range(n_informative)]
    columns += ['R_' + str(i) for i in range(n_redundant)]
    columns += ['N_' + str(i) for i in range(n_features - len(columns))]
    X.columns = columns
    cont['w'] = 1. / cont.shape[0]  # equal observation weights
    cont['t1'] = pd.Series(cont.index, index=cont.index)
    return X, cont
X, cont = get_test_data()
X.head()
cont.head()
def get_e_vec(dot, var_thres):
    """Eigen-decompose a symmetric matrix and keep the leading components.

    Parameters
    ----------
    dot : pd.DataFrame
        Symmetric matrix (e.g. Z'Z of standardised features).
    var_thres : float
        Fraction of cumulative eigenvalue mass to retain.

    Returns
    -------
    (pd.Series, pd.DataFrame)
        Retained positive eigenvalues, indexed PC_1..PC_k in descending
        order, and the matching eigenvector columns.
    """
    raw_val, raw_vec = np.linalg.eigh(dot)
    order = raw_val.argsort()[::-1]  # eigh returns ascending order; flip it
    raw_val, raw_vec = raw_val[order], raw_vec[:, order]
    names = ['PC_' + str(k + 1) for k in range(raw_val.shape[0])]
    e_val = pd.Series(raw_val, index=names)
    e_vec = pd.DataFrame(raw_vec, index=dot.index, columns=e_val.index)
    # Keep only strictly positive eigenvalues.
    positive = e_val > 0
    e_vec = e_vec.loc[:, positive]
    e_val = e_val.loc[positive]
    # Truncate at the requested cumulative-variance threshold.
    cum_var = e_val.cumsum() / e_val.sum()
    dim = cum_var.values.searchsorted(var_thres)
    return e_val.iloc[:dim + 1], e_vec.iloc[:, :dim + 1]
def orth_feats(dfX, var_thres=.95):
    """Project standardized features onto principal components.

    Columns of `dfX` are z-scored, the Gram matrix is eigen-decomposed via
    `get_e_vec`, and the standardized data is projected onto the components
    that explain at least `var_thres` of the total variance.
    """
    # Column-wise z-scores (subtract mean, divide by sample std)
    dfZ = (dfX - dfX.mean()) / dfX.std()
    gram = pd.DataFrame(np.dot(dfZ.T, dfZ), index=dfX.columns, columns=dfX.columns)
    e_val, e_vec = get_e_vec(gram, var_thres)
    return pd.DataFrame(np.dot(dfZ, e_vec), index=dfZ.index, columns=e_vec.columns)
# Orthogonalize the features via PCA and inspect the result
dfP = orth_feats(X)
dfP.shape
dfP.head()
from sklearn.metrics import log_loss, accuracy_score
from finance_ml.model_selection import PurgedKFold
from finance_ml.model_selection import cv_score
def feat_imp_MDI(forest, feat_names):
    """Mean Decrease Impurity importance from a fitted tree ensemble.

    Collects per-tree `feature_importances_`, treats zeros as missing
    (a zero only means the feature was never used for a split in that
    tree), and returns the cross-tree mean and scaled std, normalized so
    the means sum to one.
    """
    rows = [tree.feature_importances_ for tree in forest.estimators_]
    imp_df = pd.DataFrame(rows, columns=feat_names)
    imp_df = imp_df.replace(0, np.nan)  # 0 == feature unused for splitting
    n_trees = imp_df.shape[0]
    imp = pd.concat({'mean': imp_df.mean(),
                     'std': imp_df.std() * np.sqrt(n_trees)},
                    axis=1)
    return imp / imp['mean'].sum()
def feat_imp_MDA(clf, X, y, n_splits, sample_weight, t1, pct_embargo, scoring='neg_log_loss'):
    """Mean Decrease Accuracy (permutation) importance with purged CV.

    For each fold, scores the fitted classifier on the test set, then
    re-scores after shuffling each feature column in turn.  Importance is
    the score drop normalized by the maximum achievable improvement.

    Returns
    -------
    imp : pd.DataFrame with 'mean' and 'std' columns, one row per feature.
    float : average unpermuted out-of-sample score across folds.

    Raises
    ------
    ValueError if `scoring` is not 'neg_log_loss' or 'accuracy'.
    """
    if scoring not in ['neg_log_loss', 'accuracy']:
        raise ValueError('wrong scoring method')
    cv_gen = PurgedKFold(n_splits=n_splits, t1=t1, pct_embargo=pct_embargo)
    index = np.arange(n_splits)
    scores = pd.Series(index=index, dtype=float)  # explicit dtype avoids pandas warning
    scores_perm = pd.DataFrame(index=index, columns=X.columns)
    for idx, (train, test) in zip(index, cv_gen.split(X=X)):
        X_train = X.iloc[train]
        y_train = y.iloc[train]
        w_train = sample_weight.iloc[train]
        X_test = X.iloc[test]
        y_test = y.iloc[test]
        w_test = sample_weight.iloc[test]
        clf_fit = clf.fit(X_train, y_train, sample_weight=w_train.values)
        # Baseline (unpermuted) score for this fold
        if scoring == 'neg_log_loss':
            prob = clf_fit.predict_proba(X_test)
            scores.loc[idx] = -log_loss(y_test, prob, sample_weight=w_test.values,
                                        labels=clf_fit.classes_)
        else:
            pred = clf_fit.predict(X_test)
            scores.loc[idx] = accuracy_score(y_test, pred, sample_weight=w_test.values)
        for col in X.columns:
            X_test_ = X_test.copy(deep=True)
            # Shuffle one feature to destroy its information content
            np.random.shuffle(X_test_[col].values)
            if scoring == 'neg_log_loss':
                prob = clf_fit.predict_proba(X_test_)
                # BUG FIX: was `w_test.value` (AttributeError) -> `w_test.values`
                scores_perm.loc[idx, col] = -log_loss(y_test, prob, sample_weight=w_test.values,
                                                      labels=clf_fit.classes_)
            else:
                pred = clf_fit.predict(X_test_)
                scores_perm.loc[idx, col] = accuracy_score(y_test, pred, sample_weight=w_test.values)
    # (original score) - (permuted score): positive means the feature helped
    imprv = (-scores_perm).add(scores, axis=0)
    # Normalize by the maximum possible improvement
    if scoring == 'neg_log_loss':
        max_imprv = -scores_perm
    else:
        max_imprv = 1. - scores_perm
    imp = imprv / max_imprv
    imp = pd.DataFrame({'mean': imp.mean(), 'std': imp.std() * np.sqrt(imp.shape[0])})
    return imp, scores.mean()
def aux_feat_imp_SFI(feat_names, clf, X, cont, scoring, cv_gen):
    """Single Feature Importance: CV score of `clf` fit on one feature at a time.

    Returns a DataFrame indexed by feature with 'mean' and scaled 'std'
    of the cross-validation scores.
    """
    imp = pd.DataFrame(columns=['mean', 'std'])
    for name in feat_names:
        # Train and score using this single feature only
        fold_scores = cv_score(clf, X=X[[name]], y=cont['bin'],
                               sample_weight=cont['w'], scoring=scoring,
                               cv_gen=cv_gen)
        imp.loc[name, 'mean'] = fold_scores.mean()
        imp.loc[name, 'std'] = fold_scores.std() * np.sqrt(fold_scores.shape[0])
    return imp
from sklearn.tree import DecisionTreeClassifier
from sklearn.ensemble import BaggingClassifier
from finance_ml.multiprocessing import mp_pandas_obj
from finance_ml.model_selection import PurgedKFold
def feat_importance(X, cont, clf=None, n_estimators=1000, n_splits=10, max_samples=1.,
                    num_threads=24, pct_embargo=0., scoring='accuracy',
                    method='SFI', min_w_leaf=0., **kwargs):
    """Compute feature importance via 'MDI', 'MDA' or 'SFI' with purged CV.

    Returns
    -------
    imp : pd.DataFrame with 'mean'/'std' per feature.
    oob : float or None, out-of-bag score of the fitted classifier.
    oos : float, mean out-of-sample CV score.

    Raises
    ------
    ValueError if `method` is not one of 'MDI', 'MDA', 'SFI'.
    """
    n_jobs = (-1 if num_threads > 1 else 1)
    # Default classifier: bagged single-feature decision trees
    # (max_features=1 limits masking effects between correlated features)
    if clf is None:
        base_clf = DecisionTreeClassifier(criterion='entropy', max_features=1,
                                          class_weight='balanced',
                                          min_weight_fraction_leaf=min_w_leaf)
        clf = BaggingClassifier(base_estimator=base_clf, n_estimators=n_estimators,
                                max_features=1., max_samples=max_samples,
                                oob_score=True, n_jobs=n_jobs)
    fit_clf = clf.fit(X, cont['bin'], sample_weight=cont['w'].values)
    # Out-of-bag score, if the classifier computes one
    oob = fit_clf.oob_score_ if hasattr(fit_clf, 'oob_score_') else None
    if method == 'MDI':
        imp = feat_imp_MDI(fit_clf, feat_names=X.columns)
        oos = cv_score(clf, X=X, y=cont['bin'], n_splits=n_splits,
                       sample_weight=cont['w'], t1=cont['t1'],
                       pct_embargo=pct_embargo, scoring=scoring).mean()
    elif method == 'MDA':
        imp, oos = feat_imp_MDA(clf, X=X, y=cont['bin'], n_splits=n_splits,
                                sample_weight=cont['w'], t1=cont['t1'],
                                pct_embargo=pct_embargo, scoring=scoring)
    elif method == 'SFI':
        cv_gen = PurgedKFold(n_splits=n_splits, t1=cont['t1'], pct_embargo=pct_embargo)
        # BUG FIX: take the mean so `oos` is a scalar, consistent with the
        # MDI/MDA branches (previously returned the full array of fold scores)
        oos = cv_score(clf, X=X, y=cont['bin'], sample_weight=cont['w'],
                       scoring=scoring, cv_gen=cv_gen).mean()
        clf.n_jobs = 1  # parallelism is handled by mp_pandas_obj, not the classifier
        imp = mp_pandas_obj(aux_feat_imp_SFI, ('feat_names', X.columns),
                            num_threads, clf=clf, X=X, cont=cont,
                            scoring=scoring, cv_gen=cv_gen)
    else:
        # Previously an unknown method fell through to a NameError on `imp`
        raise ValueError("method must be 'MDI', 'MDA' or 'SFI', got %r" % method)
    return imp, oob, oos
%%time
# Exercise 8.1: compare MDI / MDA / SFI importances on the PCA features (dfP)
from sklearn.ensemble import RandomForestClassifier
clf = RandomForestClassifier(oob_score=True, n_estimators=100)
imp_MDI, oob_MDI, oos_MDI = feat_importance(dfP, cont, clf=clf, method='MDI')
print(imp_MDI.head())
print(oob_MDI)
print(oos_MDI)
%%time
clf = RandomForestClassifier(oob_score=True, n_estimators=100)
imp_MDA, oob_MDA, oos_MDA = feat_importance(dfP, cont, clf=clf, method='MDA')
print(imp_MDA.head())
print(oob_MDA)
print(oos_MDA)
%%time
clf = RandomForestClassifier(oob_score=True, n_estimators=100)
imp_SFI, oob_SFI, oos_SFI = feat_importance(dfP, cont, clf=clf, method='SFI')
print(imp_SFI.head())
print(oob_SFI)
print(oos_SFI)
# Rank features by mean importance for each method
imp_SFI.sort_values('mean', ascending=False).index
imp_MDI.sort_values('mean', ascending=False).index
imp_MDA.sort_values('mean', ascending=False).index
```
They somewhat agree with each other because PCA reduces duplication among the features. Only SFI is unaffected by substitution effects.
# 8.2
```
# Exercise 8.2: stack the raw features with their PCA projections
X_tilde = pd.concat((X, dfP), axis=1)
X_tilde.shape
%%time
from sklearn.ensemble import RandomForestClassifier
clf = RandomForestClassifier(oob_score=True, n_estimators=100)
imp_MDI, oob_MDI, oos_MDI = feat_importance(X_tilde, cont, clf=clf, method='MDI')
print(imp_MDI.head())
print(oob_MDI)
print(oos_MDI)
%%time
clf = RandomForestClassifier(oob_score=True, n_estimators=100)
imp_MDA, oob_MDA, oos_MDA = feat_importance(X_tilde, cont, clf=clf, method='MDA')
print(imp_MDA.head())
print(oob_MDA)
print(oos_MDA)
%%time
clf = RandomForestClassifier(oob_score=True, n_estimators=100)
imp_SFI, oob_SFI, oos_SFI = feat_importance(X_tilde, cont, clf=clf, method='SFI')
print(imp_SFI.head())
print(oob_SFI)
print(oos_SFI)
# Compare feature rankings across the three methods
print("MDI")
print(imp_MDI.sort_values('mean', ascending=False).index)
print("MDA")
print(imp_MDA.sort_values('mean', ascending=False).index)
print("SFI")
print(imp_SFI.sort_values('mean', ascending=False).index)
```
They do not agree with each other because of substitution effects.
# 8.3
MDI and MDA change as the columns change.
| github_jupyter |
# A neural-network framework for modelling auditory sensory cells and synapses
Python notebook for reproducing the evaluation results of the proposed CoNNear IHC-ANF model.
**Light version - uses the CoNNear cochlear model**
## Prerequisites
To run the light version of the notebook, you just need numpy, scipy, keras and tensorflow.
## Import required python packages and functions
Import required python packages and define the necessary parameters.
**Notice that for all the simulations, the reference IHC model operates at 100 kHz, while the reference AN model and all the CoNNear models operate at 20 kHz.**
```
import scipy.signal as sp_sig
import numpy as np
import keras
import tensorflow as tf
import matplotlib.pyplot as plt
from Verhulstetal2018 import inner_hair_cell2018 as ihc_ref
from Verhulstetal2018 import auditory_nerve2018 as anf_ref
from extra_functions import *
from time import time
# Define model specific variables
fs_connear = 20e3
fs_tl = 100e3
p0 = 2e-5 # calibrate to 2e-5 Pascal
# load CFs
CF_connear = np.loadtxt('connear/cf.txt')*1e3
# scaling values for the CoNNear models
cochlea_scaling = 1e6
ihc_scaling = 1e1
an_scaling = 1e-2
# CoNNear model directory
modeldir = 'connear/'
# reference model parameters
magic_constant=0.118 # constant used for the estimation of the IHC output
# Define the CoNNear cochlea model hyperparameters
context_left_cochlea = 256
context_right_cochlea = 256
Nenc_cochlea = 4 # number of layers in the encoder - check for the input size
# Load the cochlea model - keep the uncropped output
cochlea = load_connear_model(modeldir,json_name="/cochlea.json",weights_name="/cochlea.h5",name="cochlea_model",crop=0)
#cochlea.summary()
```
## IHC stage
The input dimensions for the CoNNear models are (b x L x Ncf), where b is the batch-size (for loading multiple stimuli simultaneously), L is the input length (including the context) and Ncf are the frequency channels.
The ihc.json file can be loaded for the full-channel model (201 channels) or the ihc_1cf.json file for the 1-channel model. At the first block of each individual section, the necessary model is loaded and the rest of the parameters are defined.
### IHC excitation patterns
Compare the simulated average IHC receptor potentials across CF for tone stimuli presented at levels between 0 and 90 dB SPL.
**You can change the `f_tone` variable to have tone stimuli of different frequencies, say 500Hz, 1kHz, 2kHz, etc..**
```
time_elapsed = time()
# Define the IHC model hyperparameters
context_left = 256
context_right = 256
Nenc = 3 # number of layers in the encoder - check for the input size
# Load the 201-channel IHC model to simulate for all CFs
N_cf = 201
ihc = load_connear_model(modeldir,json_name="/ihc.json",weights_name="/ihc.h5",name="ihc_model")
ihc.summary()
# Define the pure tone stimulus
f_tone = 1e3 # frequency of the pure tone
L = np.arange(0., 91., 10.) # levels from 0 to 90dB SPL
stim_dur = 102.4e-3 # duration of the stimulus - 102.4 ms correspond to 2048 samples (fs_connear= 20 kHz)
silence_left = 12.8e-3 # silence before the onset of the stimulus - 12.8 ms correspond to 256 samples (context)
silence_right = 12.8e-3 # silence after the stimulus - 256 samples
win_dur = 5.0e-3 # 5ms long hanning window for gradual onset
# make stimulus
t = np.arange(0., stim_dur, 1./fs_connear)
stim_sin = np.sin(2 * np.pi * f_tone * t) # generate the pure tone
# apply hanning window
winlength = int(2*win_dur * fs_connear)
win = sp_sig.windows.hann(winlength) # double-sided hanning window
stim_sin[:int(winlength/2)] = stim_sin[:int(winlength/2)] * win[:int(winlength/2)]
stim_sin[-int(winlength/2):] = stim_sin[-int(winlength/2):] * win[int(winlength/2):]
total_length = int(silence_left * fs_connear) + len(stim_sin) + int(silence_right * fs_connear)
stim = np.zeros((len(L), total_length))
stimrange = range(int(silence_left * fs_connear), int(silence_left * fs_connear) + len(stim_sin))
for i in range(len(L)):
stim[i, stimrange] = p0 * 10**(L[i]/20) * stim_sin / rms(stim_sin) # calibrate
############ CoNNear cochlea ########
stim = np.expand_dims(stim, axis=2) # make the stimulus 3D
# check the time dimension size
if stim.shape[1] % 2**Nenc_cochlea: # input size needs to be a multiple of 16
Npad = int(np.ceil(stim.shape[1]/(2**Nenc_cochlea)))*(2**Nenc_cochlea)-stim.shape[1]
stim = np.pad(stim,((0,0),(0,Npad),(0,0))) # zero-pad
# simulate the cochlear output
tl_connear = cochlea.predict(stim, verbose=1)
tl_target = tl_connear / cochlea_scaling # scaling for feeding to the reference IHC model
############ Verhulstetal ############
tl_target=sp_sig.resample_poly(tl_target, fs_tl, fs_connear, axis=1) # upsample to feed to the reference IHC model
ihc_target = np.zeros(tl_target.shape)
for i in range (tl_target.shape[0]):
ihc_target[i,:,:] = ihc_ref.inner_hair_cell_potential(tl_target[i,:,:]*magic_constant,fs_tl) # IHC output
ihc_target=sp_sig.resample_poly(ihc_target, fs_connear, fs_tl, axis=1) # downsample back to fs_connear
# remove context from the corresponding output
ihc_target = ihc_target[:, context_left:-context_right,:]
# compute the mean across CF for each level
ihc_target_mean = np.mean(ihc_target,axis=1)
############ CoNNear #################
# check the time dimension size
if tl_connear.shape[1] % 2**Nenc: # input size needs to be a multiple of 8
Npad = int(np.ceil(tl_connear.shape[1]/(2**Nenc)))*(2**Nenc)-tl_connear.shape[1]
tl_connear = np.pad(tl_connear,((0,0),(0,Npad),(0,0))) # zero-pad
# simulate
ihc_connear = ihc.predict(tl_connear)
ihc_connear = ihc_connear / ihc_scaling # scaling for the IHC output
# compute the mean across CF for each level
ihc_connear_mean = np.mean(ihc_connear,axis=1)
############ Plots ###################
CF_rep=np.tile(CF_connear, (len(L),1))
# Plot the mean Vihc patterns for the reference model
plt.semilogx(CF_rep.T/1e3, 1e3*ihc_target_mean.T)
plt.xlim(0.25,8.), plt.grid(which='both'),
plt.xticks(ticks=(0.25, 0.5, 1., 2., 4., 8.) , labels=(0.25, 0.5, 1., 2., 4., 8.))
#plt.ylim(-59, -49.5)
plt.xlabel('CF (kHz)')
plt.ylabel('Mean of V_m (mV)')
plt.title('IHC Target')
plt.legend(L.astype(int), frameon=False)
plt.show()
# Plot the mean Vihc patterns for the CoNNear model
plt.semilogx(CF_rep.T/1e3, 1e3*ihc_connear_mean.T)
plt.xlim(0.25,8.), plt.grid(which='both'),
plt.xticks(ticks=(0.25, 0.5, 1., 2., 4., 8.) , labels=(0.25, 0.5, 1., 2., 4., 8.))
#plt.ylim(-59, -49.5)
plt.xlabel('CF (kHz)')
plt.ylabel('Mean of V_m (mV)')
plt.title('CoNNear Predicted')
plt.legend(L.astype(int), frameon=False)
plt.show()
del ihc # remove the connear model variable to free-up some memory
```
### IHC AC-DC ratio
Compare the ratio of the AC and DC components of the IHC responses across CF.
```
# Define the IHC model hyperparameters
context_left = 256
context_right = 256
Nenc = 3 # number of layers in the encoder - check for the input size
# Load the 1-channel IHC model for this section - the results are computed for individual CFs
N_cf = 1
ihc = load_connear_model(modeldir,json_name="/ihc_1cf.json",weights_name="/ihc.h5",name="ihc_model")
#ihc.summary()
# Define the pure tone stimuli
N = 10 # number of frequencies to simulate between 0.15 and 8 kHz
f_tones = np.logspace(np.log10(150),np.log10(8000),num=N) # pick N frequencies in logarithmic spacing
# match the tone frequencies to the corresponding CFs
for j, f_tone in enumerate(f_tones):
fno, _ = min(enumerate(CF_connear), key=lambda x: abs( x [1]- f_tone))
f_tones[j] = CF_connear[int(fno)]
L = [80.] # 80 dB SPL
stim_dur = 80e-3 # duration of the stimulus
silence_left = 35.2e-3 # 35.2 ms of silence before the stimulus onset (includes the 12.8 ms / 256-sample left context)
silence_right = 12.8e-3 # silence after the stimulus - 256 samples
win_dur = 5.0e-3 # 5ms long ramp window for gradual onset
# indicate the time points of the response for computing the AC and DC components
t_ac_start = silence_left + 50e-3 # 50 ms after the stimulus onset
t_ac_dur = 20e-3 # 20 ms after t_ac_start
t_dc_start = 7.4e-3 # 7.4 ms after the left context (15 ms before the stimulus onset)
t_dc_dur = 10e-3 # 10 ms after t_dc_start
# region of the response for computing the AC component (50 - 70 ms after the stimulus onset)
ac_start = int(t_ac_start * fs_connear) - context_left
ac_end = ac_start + int(t_ac_dur * fs_connear)
ac_reg = np.arange(ac_start,ac_end,1)
# region of the response for computing the DC component (5 - 15 ms before the stimulus onset)
dc_start = int(t_dc_start * fs_connear)
dc_end = dc_start + int(t_dc_dur * fs_connear)
dc_reg = np.arange(dc_start,dc_end,1)
# make stimuli
t = np.arange(0., stim_dur, 1./fs_connear)
winlength = int(2*win_dur * fs_connear)
win = sp_sig.windows.bartlett(winlength) # double-sided ramp
total_length = int(silence_left * fs_connear) + len(t) + int(silence_right * fs_connear)
stim = np.zeros((len(f_tones), total_length))
for j, f_tone in enumerate(f_tones):
stim_sin = np.sin(2 * np.pi * f_tone * t) # generate the pure tone
# apply ramp window
stim_sin[:int(winlength/2)] = stim_sin[:int(winlength/2)] * win[:int(winlength/2)]
stim_sin[-int(winlength/2):] = stim_sin[-int(winlength/2):] * win[int(winlength/2):]
stimrange = range(int(silence_left * fs_connear), int(silence_left * fs_connear) + len(stim_sin))
stim[j, stimrange] = p0 * 10**(L[0]/20) * stim_sin / rms(stim_sin) # calibrate
############ CoNNear cochlea ########
stim = np.expand_dims(stim, axis=2) # make the stimulus 3D
# check the time dimension size
if stim.shape[1] % 2**Nenc_cochlea: # input size needs to be a multiple of 16
Npad = int(np.ceil(stim.shape[1]/(2**Nenc_cochlea)))*(2**Nenc_cochlea)-stim.shape[1]
stim = np.pad(stim,((0,0),(0,Npad),(0,0))) # zero-pad
# simulate the cochlear output
tl_connear = cochlea.predict(stim, verbose=1)
tl_target = tl_connear / cochlea_scaling # scaling for feeding to the reference IHC model
############ Verhulstetal ############
tl_target=sp_sig.resample_poly(tl_target, fs_tl, fs_connear, axis=1) # upsample to feed to the reference IHC model
# define a temporary array for keeping one channel
tl_connear_no = np.zeros((tl_connear.shape[0],tl_connear.shape[1],N_cf))
# define the ihc output array accordingly
ihc_target = np.zeros((tl_target.shape[0],tl_target.shape[1],N_cf))
for j, f_tone in enumerate(f_tones):
# find the CF closest to the stimulus frequency
No, _ = min(enumerate(CF_connear), key=lambda x: abs(x[1] - f_tone))
No = int(No)
# keep the outputs only for the specific CF
tl_connear_no[j,:,:] = tl_connear[j,:,[No]].T # transpose to get the same size
ihc_target[j,:,:] = ihc_ref.inner_hair_cell_potential(tl_target[j,:,[No]].T*magic_constant,fs_tl) # IHC output
ihc_target=sp_sig.resample_poly(ihc_target, fs_connear, fs_tl, axis=1) # downsample back to fs_connear
# keep the 1-channel output
tl_connear = tl_connear_no
# remove context from the corresponding output
ihc_target = ihc_target[:, context_left:-context_right,:]
# compute the AC component for each frequency
acm_target = (np.max(ihc_target[:,ac_reg],axis=1)-np.min(ihc_target[:,ac_reg],axis=1))/2
ac_target = acm_target/np.sqrt(2.)
# compute the DC component for each frequency
dcm_target = (np.max(ihc_target[:,dc_reg],axis=1)-np.min(ihc_target[:,dc_reg],axis=1))/2
dc_target = (np.min(ihc_target[:,ac_reg],axis=1)+acm_target)-(np.min(ihc_target[:,dc_reg],axis=1)+dcm_target)
############ CoNNear #################
# check the time dimension size
if tl_connear.shape[1] % 2**Nenc: # input size needs to be a multiple of 8
Npad = int(np.ceil(tl_connear.shape[1]/(2**Nenc)))*(2**Nenc)-tl_connear.shape[1]
tl_connear = np.pad(tl_connear,((0,0),(0,Npad),(0,0))) # zero-pad
# simulate
ihc_connear = ihc.predict(tl_connear, verbose=1)
ihc_connear = ihc_connear / ihc_scaling # scaling for the IHC output
# compute the AC component for each frequency
acm_connear = (np.max(ihc_connear[:,ac_reg],axis=1)-np.min(ihc_connear[:,ac_reg],axis=1))/2
ac_connear = acm_connear/np.sqrt(2.)
# compute the DC component for each frequency
dcm_connear = (np.max(ihc_connear[:,dc_reg],axis=1)-np.min(ihc_connear[:,dc_reg],axis=1))/2
dc_connear = (np.min(ihc_connear[:,ac_reg],axis=1)+acm_connear)-(np.min(ihc_connear[:,dc_reg],axis=1)+dcm_connear)
# Plot an example of how the AC and DC components are computed from the IHC response
fi = 500 # find closest tone to 500 Hz
i, _ = min(enumerate(f_tones), key=lambda x: abs(x[1] - fi))
i = int(i)
t_ds = np.arange(0., ihc_target.shape[1]/fs_connear, 1./fs_connear)
plt.plot(1e3*t_ds,ihc_target[i,:])
plt.plot(1e3*t_ds[dc_reg],ihc_target[i,dc_reg])
plt.plot(1e3*t_ds[ac_reg],ihc_target[i,ac_reg])
plt.xlabel('Time (ms)')
plt.ylabel('IHC receptor potential (V)')
plt.title('Pure-tone - ' + str(np.around(f_tones[i],decimals=2)) + ' Hz')
plt.legend(['IHC response','DC component','AC component'], frameon=False)
plt.show()
############ Plots ###################
# Plot the logarithmic decrease of the AC/DC ratio across frequency
plt.loglog(f_tones/1e3, abs(ac_target/dc_target),'o-')
plt.loglog(f_tones/1e3, abs(ac_connear/dc_connear),'o-')
plt.xlim(0.1,10.), plt.grid(which='both'),
plt.xticks(ticks=(0.1,1,10) , labels=(0.1,1,10))
plt.xlabel('CF (kHz)')
plt.ylabel('AC component / DC component')
plt.title('AC-DC ratio')
plt.legend(['IHC Target','CoNNear Predicted'], frameon=False)
plt.show()
del ihc # remove the connear model variable to free-up some memory
```
### IHC level growth
Compare the growth of the half-wave rectified IHC receptor potential as a function of sound level.
```
# Define the IHC model hyperparameters
context_left = 256
context_right = 256
Nenc = 3 # number of layers in the encoder - check for the input size
# Load the 1-channel IHC model
N_cf = 1
ihc = load_connear_model(modeldir,json_name="/ihc_1cf.json",weights_name="/ihc.h5",name="ihc_model")
#ihc.summary()
# Define the pure tone stimuli
f_tone = 4e3 # frequency of the pure tone
# match the tone frequency to the corresponding CF
fno, _ = min(enumerate(CF_connear), key=lambda x: abs( x [1]- f_tone))
f_tone = CF_connear[int(fno)]
L = np.arange(0.,101.,10.) # levels from 0 to 100dB SPL
stim_dur = 80e-3 # duration of the stimulus
silence_left = 35.2e-3 # 35.2 ms of silence before the stimulus onset (includes the 12.8 ms / 256-sample left context)
silence_right = 12.8e-3 # silence after the stimulus - 256 samples
win_dur = 5.0e-3 # 5ms long ramp window for gradual onset
# make stimulus
t = np.arange(0., stim_dur, 1./fs_connear)
stim_sin = np.sin(2 * np.pi * f_tone * t) # generate the pure tone
# apply hanning window
winlength = int(2*win_dur * fs_connear)
win = sp_sig.windows.bartlett(winlength) # double-sided ramp
stim_sin[:int(winlength/2)] = stim_sin[:int(winlength/2)] * win[:int(winlength/2)]
stim_sin[-int(winlength/2):] = stim_sin[-int(winlength/2):] * win[int(winlength/2):]
total_length = int(silence_left * fs_connear) + len(stim_sin) + int(silence_right * fs_connear)
stim = np.zeros((len(L), total_length))
stimrange = range(int(silence_left * fs_connear), int(silence_left * fs_connear) + len(stim_sin))
for i in range(len(L)):
stim[i, stimrange] = p0 * 10**(L[i]/20) * stim_sin / rms(stim_sin) # calibrate
############ CoNNear cochlea ########
stim = np.expand_dims(stim, axis=2) # make the stimulus 3D
# check the time dimension size
if stim.shape[1] % 2**Nenc_cochlea: # input size needs to be a multiple of 16
Npad = int(np.ceil(stim.shape[1]/(2**Nenc_cochlea)))*(2**Nenc_cochlea)-stim.shape[1]
stim = np.pad(stim,((0,0),(0,Npad),(0,0))) # zero-pad
# simulate the cochlear output
tl_connear = cochlea.predict(stim, verbose=1)
tl_target = tl_connear / cochlea_scaling # scaling for feeding to the reference IHC model
# find the CF closest to the stimulus frequency
No, _ = min(enumerate(CF_connear), key=lambda x: abs(x[1] - f_tone))
No = int(No)
# keep the outputs only for the specific CF
tl_connear = tl_connear[:, :, [No]]
tl_target = tl_target[:, :, [No]]
############ Verhulstetal ############
tl_target=sp_sig.resample_poly(tl_target, fs_tl, fs_connear, axis=1) # upsample to feed to the reference IHC model
ihc_target = np.zeros(tl_target.shape)
for i in range (tl_target.shape[0]):
ihc_target[i,:,:] = ihc_ref.inner_hair_cell_potential(tl_target[i,:,:]*magic_constant,fs_tl) # IHC output
ihc_target=sp_sig.resample_poly(ihc_target, fs_connear, fs_tl, axis=1) # downsample back to fs_connear
# remove context from the corresponding output
ihc_target = ihc_target[:, context_left:-context_right,:]
dcm_target = np.mean(ihc_target[:,dc_reg],axis=1) # compute the DC component; NOTE(review): dc_reg is defined in the previous (AC-DC ratio) cell — run that cell first
vihc_target = np.zeros((ihc_target.shape))
for i in range(len(L)):
vihc_target[i,:] = ihc_target[i,:] - dcm_target[i] # half-wave rectify the response by subtracting the DC component
vihc_target_rms = rms(vihc_target,axis=1)
############ CoNNear #################
# check the time dimension size
if tl_connear.shape[1] % 2**Nenc: # input size needs to be a multiple of 8
Npad = int(np.ceil(tl_connear.shape[1]/(2**Nenc)))*(2**Nenc)-tl_connear.shape[1]
tl_connear = np.pad(tl_connear,((0,0),(0,Npad),(0,0))) # zero-pad
# simulate
ihc_connear = ihc.predict(tl_connear, verbose=1)
ihc_connear = ihc_connear / ihc_scaling # scaling for the IHC output
dcm_connear = np.mean(ihc_connear[:,dc_reg],axis=1) # compute the DC component; NOTE(review): dc_reg comes from the previous (AC-DC ratio) cell
vihc_connear = np.zeros((ihc_connear.shape))
for i in range(len(L)):
vihc_connear[i,:] = ihc_connear[i,:] - dcm_connear[i] # half-wave rectify the response by subtracting the DC component
vihc_connear_rms = rms(vihc_connear,axis=1)
############ Plots ###################
# Plot the RMS of the half-wave rectified response across level
plt.plot(L, 1e3*vihc_target_rms,'o-')
plt.plot(L, 1e3*vihc_connear_rms,'o-')
plt.grid(which='both'),
plt.yticks(ticks=np.arange(0.,10.,1.))
plt.xlabel('Stimulus level (dB-SPL)')
plt.ylabel('Half-wave rectified $\mathregular{V_{IHC}}$ (mV)')
plt.title('RMS of half-wave rectified $\mathregular{V_{IHC}}$')
plt.legend(['IHC Target','CoNNear Predicted'], frameon=False)
plt.show()
del ihc # remove the connear model variable to free-up some memory
```
### Speech Input
Here, a sentence from the TIMIT dataset is read from a wavfile and is used as input to the reference model and the CoNNear IHC model. The `frame_dur` parameter is used to define a shorter segment of the sentence.
```
# # Define the IHC model hyperparameters
# context_left = 256
# context_right = 256
# Nenc = 3 # number of layers in the encoder - check for the input size
# # Load the 201-channel IHC model to simulate for all CFs
# N_cf = 201
# ihc = load_connear_model(modeldir,json_name="/ihc.json",weights_name="/ihc.h5",name="ihc_model")
# #ihc.summary()
# #load the wavfile
# wavfile = 'sx228.wav'
# L = [70.] #sound-level of 70 dB SPL
# frame_dur = 150e-3 #define segment duration
# onset_dur = 125e-3 # omit initial silence of the wavfile
# signal, fs_signal = wavfile_read(wavfile, fs_connear) # fs_tl as an argument resamples the signal to the given sampling frequency
# stim_full = np.zeros((len(L), signal.size))
# for j in range(len(L)):
# stim_full[j,:] = p0 * 10**(L[j]/20) * signal / rms(signal) # calibrate
# stim_length = int(fs_tl * frame_dur + context_left + context_right) # define the segment length (including context)
# stim = stim_full[:,int(fs_connear*onset_dur):int(fs_connear*onset_dur)+stim_length] # keep the segment
# total_length = stim.shape[1]
# ############ CoNNear cochlea ########
# stim = np.expand_dims(stim, axis=2) # make the stimulus 3D
# # check the time dimension size
# if stim.shape[1] % 2**Nenc_cochlea: # input size needs to be a multiple of 16
# Npad = int(np.ceil(stim.shape[1]/(2**Nenc_cochlea)))*(2**Nenc_cochlea)-stim.shape[1]
# stim = np.pad(stim,((0,0),(0,Npad),(0,0))) # zero-pad
# # simulate the cochlear output
# tl_connear = cochlea.predict(stim, verbose=1)
# tl_target = tl_connear / cochlea_scaling # scaling for feeding to the reference IHC model
# ############ Verhulstetal ############
# tl_target=sp_sig.resample_poly(tl_target, fs_tl, fs_connear, axis=1) # upsample to feed to the reference IHC model
# ihc_target = np.zeros(tl_target.shape)
# for i in range (tl_target.shape[0]):
# ihc_target[i,:,:] = ihc_ref.inner_hair_cell_potential(tl_target[i,:,:]*magic_constant,fs_tl) # IHC output
# ihc_target=sp_sig.resample_poly(ihc_target, fs_connear, fs_tl, axis=1) # dowsample again to fs_connear
# # remove context from the corresponding output
# ihc_target = ihc_target[:, context_left:-context_right,:]
# ############ CoNNear #################
# # check the time dimension size
# if tl_connear.shape[1] % 2**Nenc:# input size needs to be a multiple of 8
# Npad = int(np.ceil(tl_connear.shape[1]/(2**Nenc)))*(2**Nenc)-tl_connear.shape[1]
# tl_connear = np.pad(tl_connear,((0,0),(0,Npad),(0,0))) # zero-pad
# # simulate
# ihc_connear = ihc.predict(tl_connear)
# ihc_connear = ihc_connear / ihc_scaling # scaling for the IHC output
# ################ Plots ######################################
# stim = stim[:,:,0] # make stim 2D again
# fig, axarr = plt.subplots(3)
# #axarr[0].set_ylim(-0.35, 0.35)
# axarr[0].plot(stim[0,context_left:-context_right].T)
# axarr[0].set_title('Audio Input')
# cax1 = axarr[1].imshow(ihc_target[0,:,:].T, cmap='bwr',aspect='auto', vmin=-0.1, vmax=0)
# axarr[1].set_title('Output of reference IHC model')
# axarr[1].set(ylabel='CF')
# cax2 = axarr[2].imshow(ihc_connear[0,:,:].T, cmap='bwr',aspect='auto', vmin=-0.1, vmax=0)
# axarr[2].set_title('Output of CoNNear IHC model')
# axarr[2].set(ylabel='CF')
# plt.show()
# del ihc # remove the connear model variable to free-up some memory
# if 'time_elapsed' in locals() or 'time_elapsed' in globals():
# print("IHC stage - Time elapsed: {:.{}f} mins".format((time() - time_elapsed)/60, 2 ))
```
## ANF stage
The input dimensions for the CoNNear models are (b x L x Ncf), where b is the batch-size (for loading multiple stimuli simultaneously), L is the input length (including the context) and Ncf are the frequency channels.
The anfX.json file can be loaded for the full-channel model (201 channels) or the anfX_1cf.json file for the 1-channel model, where X corresponds to the AN fiber type (h for HSR, m for MSR or l for LSR ANF model). At the first block of each individual section, the necessary model is loaded and the rest of the parameters are defined.
### ANF firing rates
Compare the simulated ANF firing rates across time for tone stimuli presented at 70 dB SPL.
**You can change the `f_tone` variable to have tone stimuli of different frequencies, the `f_m` variable for generating amplitude-modulated tones or the `L` variable for having different levels.**
```
# --- ANF firing rates: reference (Verhulst et al.) model vs. CoNNear ---
# Generates a (SAM-modulated) tone, runs it through the CoNNear cochlea, then
# through both the reference IHC/ANF chain and the CoNNear ANF models, and
# plots the firing rates of the three fiber types (HSR/MSR/LSR).
# NOTE(review): relies on globals from earlier cells (load_connear_model,
# modeldir, CF_connear, fs_connear, fs_tl, cochlea, cochlea_scaling,
# ihc_scaling, an_scaling, magic_constant, p0, rms, ihc_ref, anf_ref,
# Nenc_cochlea) - confirm the cochlea/IHC sections were run first.
time_elapsed = time()
# Define the ANF model hyperparameters
context_left = 7936 # longer left-sided context for the ANF models
context_right = 256
Nenc = 14 # number of layers in the encoder - check for the input size
# Load the 1-channel ANF models
N_cf = 1
anfh = load_connear_model(modeldir,json_name="/anfh_1cf.json",weights_name="/anfh.h5",name="anfh_model")
anfh.summary()
anfm = load_connear_model(modeldir,json_name="/anfm_1cf.json",weights_name="/anfm.h5",name="anfm_model")
anfm.summary()
anfl = load_connear_model(modeldir,json_name="/anfl_1cf.json",weights_name="/anfl.h5",name="anfl_model")
anfl.summary()
# Define the pure tone stimulus
f_tone = 4e3 # frequency of the pure tone
L = [70.] # stimulus level
m = 1 # modulation percentage
f_m = 100 # modulation frequency - leave empty for pure-tone stimulus (no modulation applied)
#f_m = [] # uncomment for pure-tone stimulus
stim_dur = 400e-3 # duration of the stimulus - 409.6 ms correspond to 8192 samples (fs_connear= 20 kHz)
silence_left = 396.8e-3 + 5e-3 # silence before the onset of the stimulus - 396.8 ms correspond to 7936 samples (left context)
silence_right = 12.8e-3 # silence after the stimulus - 256 samples (right context)
win_dur = 7.8e-3 # 7.8 ms long ramp window for gradual onset
# match the tone frequency to the corresponding CF
fno, _ = min(enumerate(CF_connear), key=lambda x: abs( x [1]- f_tone))
f_tone = CF_connear[int(fno)]
# make stimulus
t = np.arange(0., stim_dur, 1./fs_connear)
if f_m: # if f_m is defined make a SAM tone
    stim_sin = (1 + m * np.cos(2 * np.pi * f_m * t + np.pi)) * np.sin(2 * np.pi * f_tone * t) # generate the SAM tone
else:
    stim_sin = np.sin(2 * np.pi * f_tone * t) # generate the pure tone
# apply ramp
winlength = int(2*win_dur * fs_connear)
win = sp_sig.windows.bartlett(winlength) # double-sided ramp window
stim_sin[:int(winlength/2)] = stim_sin[:int(winlength/2)] * win[:int(winlength/2)]
stim_sin[-int(winlength/2):] = stim_sin[-int(winlength/2):] * win[int(winlength/2):]
total_length = int(silence_left * fs_connear) + len(stim_sin) + int(silence_right * fs_connear)
stim = np.zeros((len(L), total_length))
stimrange = range(int(silence_left * fs_connear), int(silence_left * fs_connear) + len(stim_sin))
for i in range(len(L)):
    stim[i, stimrange] = p0 * 10**(L[i]/20) * stim_sin / rms(stim_sin) # calibrate
############ CoNNear cochlea ########
stim = np.expand_dims(stim, axis=2) # make the stimulus 3D
# check the time dimension size
if stim.shape[1] % 2**Nenc_cochlea: # input size needs to be a multiple of 16
    Npad = int(np.ceil(stim.shape[1]/(2**Nenc_cochlea)))*(2**Nenc_cochlea)-stim.shape[1]
    stim = np.pad(stim,((0,0),(0,Npad),(0,0))) # zero-pad
# simulate the cochlear output
tl_connear = cochlea.predict(stim, verbose=1)
tl_target = tl_connear / cochlea_scaling # scaling for feeding to the reference IHC model
# find the CF closest to the stimulus frequency
No, _ = min(enumerate(CF_connear), key=lambda x: abs(x[1] - f_tone))
No = int(No)
# keep the outputs only for the specific CF
tl_connear = tl_connear[:, :, [No]]
tl_target = tl_target[:, :, [No]]
############ Verhulstetal ############
tl_target=sp_sig.resample_poly(tl_target, fs_tl, fs_connear, axis=1) # upsample to feed to the reference IHC model
ihc_target = np.zeros(tl_target.shape)
anfh_target = np.zeros(tl_connear.shape)
anfm_target = np.zeros(tl_connear.shape)
anfl_target = np.zeros(tl_connear.shape)
for i in range (tl_target.shape[0]):
    ihc_target[i,:,:] = ihc_ref.inner_hair_cell_potential(tl_target[i,:,:]*magic_constant,fs_tl) # IHC output
ihc_target=sp_sig.resample_poly(ihc_target, fs_connear, fs_tl, axis=1) # downsample again to fs_connear
for i in range (tl_target.shape[0]):
    anfh_target[i,:,:] = anf_ref.auditory_nerve_fiber(ihc_target[i,:,:],fs_connear,2) * fs_connear # ANF HSR output
    anfm_target[i,:,:] = anf_ref.auditory_nerve_fiber(ihc_target[i,:,:],fs_connear,1) * fs_connear # ANF MSR output
    anfl_target[i,:,:] = anf_ref.auditory_nerve_fiber(ihc_target[i,:,:],fs_connear,0) * fs_connear # ANF LSR output
# remove context from the corresponding outputs (and the last dimension)
anfh_target = anfh_target[:, context_left:-context_right,0]
anfm_target = anfm_target[:, context_left:-context_right,0]
anfl_target = anfl_target[:, context_left:-context_right,0]
# apply proper scaling for feeding to the CoNNear
ihc_target = ihc_target * ihc_scaling # scaling for the cochlear input
############ CoNNear #################
# check the time dimension size
if ihc_target.shape[1] % 2**Nenc: # input size needs to be a multiple of 16384
    Npad = int(np.ceil(ihc_target.shape[1]/(2**Nenc)))*(2**Nenc)-ihc_target.shape[1]
    ihc_target = np.pad(ihc_target,((0,0),(0,Npad),(0,0))) # zero-pad
# simulate
anfh_connear = anfh.predict(ihc_target, verbose=1)
anfm_connear = anfm.predict(ihc_target, verbose=1)
anfl_connear = anfl.predict(ihc_target, verbose=1)
# remove last dimension
anfh_connear = anfh_connear[:, :, 0]
anfm_connear = anfm_connear[:, :, 0]
anfl_connear = anfl_connear[:, :, 0]
# scale back to the original ANF values
anfh_connear = anfh_connear / an_scaling
anfm_connear = anfm_connear / an_scaling
anfl_connear = anfl_connear / an_scaling
# crop the connear time dimensions to match the size of the reference responses
anfh_connear = anfh_connear[:,:anfh_target.shape[1]]
anfm_connear = anfm_connear[:,:anfm_target.shape[1]]
anfl_connear = anfl_connear[:,:anfl_target.shape[1]]
############ Plots ###################
t_ds = np.arange(0., anfh_target.shape[1]/fs_connear, 1./fs_connear) # time vector of the (downsampled) responses
# Plot the firing rate patterns for the reference model
plt.plot(1e3*t_ds,anfh_target.T,'r')
plt.plot(1e3*t_ds,anfm_target.T,'b')
plt.plot(1e3*t_ds,anfl_target.T,'c')
plt.xlim(0,80.), plt.grid(which='both'),
plt.xlabel('Time (ms)')
plt.ylabel('Firing rate (spikes/s)')
plt.title('ANF Target')
plt.legend(['HSR','MSR','LSR'], frameon=False)
plt.show()
# Plot the firing rate patterns for the CoNNear model
plt.plot(1e3*t_ds,anfh_connear.T,'r')
plt.plot(1e3*t_ds,anfm_connear.T,'b')
plt.plot(1e3*t_ds,anfl_connear.T,'c')
plt.xlim(0,80.), plt.grid(which='both'),
plt.xlabel('Time (ms)')
plt.ylabel('Firing rate (spikes/s)')
plt.title('CoNNear Predicted')
plt.legend(['HSR','MSR','LSR'], frameon=False)
plt.show()
del anfh, anfm, anfl # remove the connear models variables to free-up some memory
```
### AN rate-level curves
Compare the simulated AN rate-level curves for the three fiber models.
**You can change the `f_tone` variable to get the curves of 1 kHz or 4 kHz.**
```
# --- AN rate-level curves: mean ANF firing rate vs. stimulus level ---
# Presents the same tone at levels 0..100 dB SPL and compares the mean
# steady-state firing rate of the reference (Verhulst et al.) ANF models
# against the CoNNear ANF models for the three fiber types (HSR/MSR/LSR).
# NOTE(review): relies on globals from earlier cells (load_connear_model,
# modeldir, CF_connear, fs_connear, fs_tl, cochlea, the *_scaling factors,
# magic_constant, p0, rms, ihc_ref, anf_ref, Nenc_cochlea).
# Define the ANF model hyperparameters
context_left = 7936
context_right = 256
Nenc = 14 # number of layers in the encoder - check for the input size
# Load the 1-channel ANF models
N_cf = 1
anfh = load_connear_model(modeldir,json_name="/anfh_1cf.json",weights_name="/anfh.h5",name="anfh_model")
#anfh.summary()
anfm = load_connear_model(modeldir,json_name="/anfm_1cf.json",weights_name="/anfm.h5",name="anfm_model")
#anfm.summary()
anfl = load_connear_model(modeldir,json_name="/anfl_1cf.json",weights_name="/anfl.h5",name="anfl_model")
#anfl.summary()
# Define the pure tone stimulus
f_tone = 4e3 # frequency of the pure tone
L = np.arange(0.,101.,10.) # levels from 0 to 100 dB SPL
stim_dur = 50e-3 # duration of the stimulus (50 ms)
silence_left = 396.8e-3 + 5e-3 # silence before the onset of the stimulus - 396.8 ms correspond to 7936 samples (left context)
silence_right = 12.8e-3 # silence after the stimulus - 256 samples (right context)
win_dur = 2.5e-3 # 2.5ms long ramp window for gradual onset
# match the tone frequency to the corresponding CF
fno, _ = min(enumerate(CF_connear), key=lambda x: abs( x [1]- f_tone))
f_tone = CF_connear[int(fno)]
# indicate the region of the response for computing the mean - 10-40 ms after the stimulus onset
index_start = int((silence_left + 10e-3) * fs_connear) - context_left # omit silence + 10 ms after onset
index_end = index_start + int(30e-3 * fs_connear) # keep 30 ms of response after (30ms = 600 samples)
stim_reg = np.arange(index_start,index_end,1) # stimulus region
# make stimulus
t = np.arange(0., stim_dur, 1./fs_connear)
stim_sin = np.sin(2 * np.pi * f_tone * t) # generate the pure tone
# apply ramp
winlength = int(2*win_dur * fs_connear)
win = sp_sig.windows.bartlett(winlength) # double-sided ramp window
stim_sin[:int(winlength/2)] = stim_sin[:int(winlength/2)] * win[:int(winlength/2)]
stim_sin[-int(winlength/2):] = stim_sin[-int(winlength/2):] * win[int(winlength/2):]
total_length = int(silence_left * fs_connear) + len(stim_sin) + int(silence_right * fs_connear)
stim = np.zeros((len(L), total_length))
stimrange = range(int(silence_left * fs_connear), int(silence_left * fs_connear) + len(stim_sin))
for i in range(len(L)):
    stim[i, stimrange] = p0 * 10**(L[i]/20) * stim_sin / rms(stim_sin) # calibrate
############ CoNNear cochlea ########
stim = np.expand_dims(stim, axis=2) # make the stimulus 3D
# check the time dimension size
if stim.shape[1] % 2**Nenc_cochlea: # input size needs to be a multiple of 16
    Npad = int(np.ceil(stim.shape[1]/(2**Nenc_cochlea)))*(2**Nenc_cochlea)-stim.shape[1]
    stim = np.pad(stim,((0,0),(0,Npad),(0,0))) # zero-pad
# simulate the cochlear output
tl_connear = cochlea.predict(stim, verbose=1)
tl_target = tl_connear / cochlea_scaling # scaling for feeding to the reference IHC model
# find the CF closest to the stimulus frequency
No, _ = min(enumerate(CF_connear), key=lambda x: abs(x[1] - f_tone))
No = int(No)
# keep the outputs only for the specific CF
tl_connear = tl_connear[:, :, [No]]
tl_target = tl_target[:, :, [No]]
############ Verhulstetal ############
tl_target=sp_sig.resample_poly(tl_target, fs_tl, fs_connear, axis=1) # upsample to feed to the reference IHC model
ihc_target = np.zeros(tl_target.shape)
anfh_target = np.zeros(tl_connear.shape)
anfm_target = np.zeros(tl_connear.shape)
anfl_target = np.zeros(tl_connear.shape)
for i in range (tl_target.shape[0]):
    ihc_target[i,:,:] = ihc_ref.inner_hair_cell_potential(tl_target[i,:,:]*magic_constant,fs_tl) # IHC output
ihc_target=sp_sig.resample_poly(ihc_target, fs_connear, fs_tl, axis=1) # downsample again to fs_connear
for i in range (tl_target.shape[0]):
    anfh_target[i,:,:] = anf_ref.auditory_nerve_fiber(ihc_target[i,:,:],fs_connear,2) * fs_connear # ANF HSR output
    anfm_target[i,:,:] = anf_ref.auditory_nerve_fiber(ihc_target[i,:,:],fs_connear,1) * fs_connear # ANF MSR output
    anfl_target[i,:,:] = anf_ref.auditory_nerve_fiber(ihc_target[i,:,:],fs_connear,0) * fs_connear # ANF LSR output
# remove context from the corresponding outputs (and the last dimension)
anfh_target = anfh_target[:, context_left:-context_right,0]
anfm_target = anfm_target[:, context_left:-context_right,0]
anfl_target = anfl_target[:, context_left:-context_right,0]
# apply proper scaling for feeding to the CoNNear
ihc_target = ihc_target * ihc_scaling # scaling for the cochlear input
# compute the mean firing rate over the stimulus region
anfh_target_mean = np.mean(anfh_target[:,stim_reg],axis=1)
anfm_target_mean = np.mean(anfm_target[:,stim_reg],axis=1)
anfl_target_mean = np.mean(anfl_target[:,stim_reg],axis=1)
############ CoNNear #################
# check the time dimension size
if ihc_target.shape[1] % 2**Nenc: # input size needs to be a multiple of 16384
    Npad = int(np.ceil(ihc_target.shape[1]/(2**Nenc)))*(2**Nenc)-ihc_target.shape[1]
    ihc_target = np.pad(ihc_target,((0,0),(0,Npad),(0,0))) # zero-pad
# simulate
anfh_connear = anfh.predict(ihc_target, verbose=1)
anfm_connear = anfm.predict(ihc_target, verbose=1)
anfl_connear = anfl.predict(ihc_target, verbose=1)
# remove last dimension
anfh_connear = anfh_connear[:, :, 0]
anfm_connear = anfm_connear[:, :, 0]
anfl_connear = anfl_connear[:, :, 0]
# scale back to the original ANF values
anfh_connear = anfh_connear / an_scaling
anfm_connear = anfm_connear / an_scaling
anfl_connear = anfl_connear / an_scaling
# compute the mean firing rate over the stimulus region
anfh_connear_mean = np.mean(anfh_connear[:,stim_reg],axis=1)
anfm_connear_mean = np.mean(anfm_connear[:,stim_reg],axis=1)
anfl_connear_mean = np.mean(anfl_connear[:,stim_reg],axis=1)
############ Plots ###################
# Plot the rate-level curves for the reference model
plt.plot(anfh_target_mean.T,'ro-')
plt.plot(anfm_target_mean.T,'bo-')
plt.plot(anfl_target_mean.T,'co-')
plt.xlim(0,10), plt.grid(which='both'),
plt.xticks(ticks=L/10 , labels=L.astype(int)) # x-axis indexes the level vector
plt.xlabel('Stimulus level (dB SPL)')
plt.ylabel('Firing rate (spikes/s)')
plt.title('ANF Target')
plt.legend(['HSR','MSR','LSR'], frameon=False)
plt.show()
# Plot the rate-level curves for the CoNNear model
plt.plot(anfh_connear_mean.T,'ro-')
plt.plot(anfm_connear_mean.T,'bo-')
plt.plot(anfl_connear_mean.T,'co-')
plt.xlim(0,10), plt.grid(which='both'),
plt.xticks(ticks=L/10 , labels=L.astype(int))
plt.xlabel('Stimulus level (dB SPL)')
plt.ylabel('Firing rate (spikes/s)')
plt.title('CoNNear Predicted')
plt.legend(['HSR','MSR','LSR'], frameon=False)
plt.show()
del anfh, anfm, anfl # remove the connear models variables to free-up some memory
```
### ANF synchrony-level functions
Compare the simulated ANF synchrony-level functions for the three fiber models.
**You can change the `f_tone` variable to get the curves of 1 kHz or 4 kHz.**
```
# --- ANF synchrony-level functions: vector strength to f_m vs. level ---
# Presents a SAM tone at levels 0..100 dB SPL and compares the synchrony
# (modulation-frequency FFT bin divided by the DC bin) of the reference
# ANF models against the CoNNear ANF models for the three fiber types.
# NOTE(review): relies on globals from earlier cells (load_connear_model,
# modeldir, CF_connear, fs_connear, fs_tl, cochlea, the *_scaling factors,
# magic_constant, p0, rms, ihc_ref, anf_ref, Nenc_cochlea).
# Define the ANF model hyperparameters
context_left = 7936
context_right = 256
Nenc = 14 # number of layers in the encoder - check for the input size
# Load the 1-channel ANF models
N_cf = 1
anfh = load_connear_model(modeldir,json_name="/anfh_1cf.json",weights_name="/anfh.h5",name="anfh_model")
#anfh.summary()
anfm = load_connear_model(modeldir,json_name="/anfm_1cf.json",weights_name="/anfm.h5",name="anfm_model")
#anfm.summary()
anfl = load_connear_model(modeldir,json_name="/anfl_1cf.json",weights_name="/anfl.h5",name="anfl_model")
#anfl.summary()
# Define the pure tone stimulus
f_tone = 4e3 # frequency of the pure tone
L = np.arange(0.,101.,10.) # levels from 0 to 100 dB SPL with a step of 10 - in the paper the step is 5
f_m = 100 # modulation frequency
m = 1 # modulation percentage
stim_dur = 400e-3 # duration of the stimulus (400 ms)
silence_left = 396.8e-3 + 5e-3 # silence before the onset of the stimulus - 396.8 ms correspond to 7936 samples (left context)
silence_right = 12.8e-3 # silence after the stimulus - 256 samples (right context)
win_dur = 7.8e-3 # 7.8ms long ramp window for gradual onset
# match the tone frequency to the corresponding CF
fno, _ = min(enumerate(CF_connear), key=lambda x: abs( x [1]- f_tone))
f_tone = CF_connear[int(fno)]
# indicate the region of interest for computing the synchrony
index_start = int(silence_left * fs_connear) - context_left # omit the leading silence
index_end = index_start + int(stim_dur * fs_connear) # keep for the stimulus duration (400ms = 8000 samples)
stim_reg = np.arange(index_start,index_end,1) # stimulus region
# make stimulus
t = np.arange(0., stim_dur, 1./fs_connear)
stim_sin = (1 + m * np.cos(2 * np.pi * f_m * t + np.pi)) * np.sin(2 * np.pi * f_tone * t) # generate the SAM tone
# apply ramp
winlength = int(2*win_dur * fs_connear)
win = sp_sig.windows.bartlett(winlength) # double-sided ramp window
stim_sin[:int(winlength/2)] = stim_sin[:int(winlength/2)] * win[:int(winlength/2)]
stim_sin[-int(winlength/2):] = stim_sin[-int(winlength/2):] * win[int(winlength/2):]
total_length = int(silence_left * fs_connear) + len(stim_sin) + int(silence_right * fs_connear)
stim = np.zeros((len(L), total_length))
stimrange = range(int(silence_left * fs_connear), int(silence_left * fs_connear) + len(stim_sin))
for i in range(len(L)):
    stim[i, stimrange] = p0 * 10**(L[i]/20) * stim_sin / rms(stim_sin) # calibrate
############ CoNNear cochlea ########
stim = np.expand_dims(stim, axis=2) # make the stimulus 3D
# check the time dimension size
if stim.shape[1] % 2**Nenc_cochlea: # input size needs to be a multiple of 16
    Npad = int(np.ceil(stim.shape[1]/(2**Nenc_cochlea)))*(2**Nenc_cochlea)-stim.shape[1]
    stim = np.pad(stim,((0,0),(0,Npad),(0,0))) # zero-pad
# simulate the cochlear output
tl_connear = cochlea.predict(stim, verbose=1)
tl_target = tl_connear / cochlea_scaling # scaling for feeding to the reference IHC model
# find the CF closest to the stimulus frequency
No, _ = min(enumerate(CF_connear), key=lambda x: abs(x[1] - f_tone))
No = int(No)
# keep the outputs only for the specific CF
tl_connear = tl_connear[:, :, [No]]
tl_target = tl_target[:, :, [No]]
############ Verhulstetal ############
tl_target=sp_sig.resample_poly(tl_target, fs_tl, fs_connear, axis=1) # upsample to feed to the reference IHC model
ihc_target = np.zeros(tl_target.shape)
anfh_target = np.zeros(tl_connear.shape)
anfm_target = np.zeros(tl_connear.shape)
anfl_target = np.zeros(tl_connear.shape)
for i in range (tl_target.shape[0]):
    ihc_target[i,:,:] = ihc_ref.inner_hair_cell_potential(tl_target[i,:,:]*magic_constant,fs_tl) # IHC output
ihc_target=sp_sig.resample_poly(ihc_target, fs_connear, fs_tl, axis=1) # downsample again to fs_connear
for i in range (tl_target.shape[0]):
    anfh_target[i,:,:] = anf_ref.auditory_nerve_fiber(ihc_target[i,:,:],fs_connear,2) * fs_connear # ANF HSR output
    anfm_target[i,:,:] = anf_ref.auditory_nerve_fiber(ihc_target[i,:,:],fs_connear,1) * fs_connear # ANF MSR output
    anfl_target[i,:,:] = anf_ref.auditory_nerve_fiber(ihc_target[i,:,:],fs_connear,0) * fs_connear # ANF LSR output
# remove context from the corresponding outputs (and the last dimension)
anfh_target = anfh_target[:, context_left:-context_right,0]
anfm_target = anfm_target[:, context_left:-context_right,0]
anfl_target = anfl_target[:, context_left:-context_right,0]
# apply proper scaling for feeding to the CoNNear
ihc_target = ihc_target * ihc_scaling # scaling for the cochlear input
# compute the fft of the response
N = stim_reg.size
anfh_target_fft = (1/N)*(np.abs(np.fft.fft(anfh_target[:,stim_reg])))
anfm_target_fft = (1/N)*(np.abs(np.fft.fft(anfm_target[:,stim_reg])))
anfl_target_fft = (1/N)*(np.abs(np.fft.fft(anfl_target[:,stim_reg])))
# divide the modulation frequency component of the Fourier transform by the DC component (1st bin) to get the vector strength
anfh_target_vs = anfh_target_fft[:,int(np.ceil(f_m/fs_connear*N))] / anfh_target_fft[:,0]
anfm_target_vs = anfm_target_fft[:,int(np.ceil(f_m/fs_connear*N))] / anfm_target_fft[:,0]
anfl_target_vs = anfl_target_fft[:,int(np.ceil(f_m/fs_connear*N))] / anfl_target_fft[:,0]
############ CoNNear #################
# check the time dimension size
if ihc_target.shape[1] % 2**Nenc: # input size needs to be a multiple of 16384
    Npad = int(np.ceil(ihc_target.shape[1]/(2**Nenc)))*(2**Nenc)-ihc_target.shape[1]
    ihc_target = np.pad(ihc_target,((0,0),(0,Npad),(0,0))) # zero-pad
# simulate
anfh_connear = anfh.predict(ihc_target, verbose=1)
anfm_connear = anfm.predict(ihc_target, verbose=1)
anfl_connear = anfl.predict(ihc_target, verbose=1)
# remove last dimension
anfh_connear = anfh_connear[:, :, 0]
anfm_connear = anfm_connear[:, :, 0]
anfl_connear = anfl_connear[:, :, 0]
# scale back to the original ANF values
anfh_connear = anfh_connear / an_scaling
anfm_connear = anfm_connear / an_scaling
anfl_connear = anfl_connear / an_scaling
# compute the fft of the response
N = stim_reg.size
anfh_connear_fft = (1/N)*(np.abs(np.fft.fft(anfh_connear[:,stim_reg])))
anfm_connear_fft = (1/N)*(np.abs(np.fft.fft(anfm_connear[:,stim_reg])))
anfl_connear_fft = (1/N)*(np.abs(np.fft.fft(anfl_connear[:,stim_reg])))
# divide the modulation frequency component of the Fourier transform by the DC component (1st bin) to get the vector strength
anfh_connear_vs = anfh_connear_fft[:,int(np.ceil(f_m/fs_connear*N))] / anfh_connear_fft[:,0]
anfm_connear_vs = anfm_connear_fft[:,int(np.ceil(f_m/fs_connear*N))] / anfm_connear_fft[:,0]
anfl_connear_vs = anfl_connear_fft[:,int(np.ceil(f_m/fs_connear*N))] / anfl_connear_fft[:,0]
############ Plots ###################
# Plot the synchrony-level functions for the reference model
plt.plot(anfh_target_vs.T,'ro-')
plt.plot(anfm_target_vs.T,'bo-')
plt.plot(anfl_target_vs.T,'co-')
plt.xlim(0,10), plt.grid(which='both'),
plt.ylim(0,1)
plt.xticks(ticks=L/10 , labels=L.astype(int))
plt.xlabel('Stimulus level (dB SPL)')
plt.ylabel('Synchrony to fm')
plt.title('ANF Target')
plt.legend(['HSR','MSR','LSR'], frameon=False)
plt.show()
# Plot the synchrony-level functions for the CoNNear model
plt.plot(anfh_connear_vs.T,'ro-')
plt.plot(anfm_connear_vs.T,'bo-')
plt.plot(anfl_connear_vs.T,'co-')
plt.xlim(0,10), plt.grid(which='both'),
plt.ylim(0,1)
plt.xticks(ticks=L/10 , labels=L.astype(int))
plt.xlabel('Stimulus level (dB SPL)')
plt.ylabel('Synchrony to fm')
plt.title('CoNNear Predicted')
plt.legend(['HSR','MSR','LSR'], frameon=False)
plt.show()
del anfh, anfm, anfl # remove the connear models variables to free-up some memory
```
### Speech Input
Here, a sentence from the TIMIT dataset is read from a wavfile and is used as input to the reference model and the CoNNear ANF models. The `frame_dur` parameter is used to define a shorter fragment of the sentence.
```
# # Define the ANF model hyperparameters
# context_left = 7936
# context_right = 256
# Nenc = 14 # number of layers in the encoder - check for the input size
# # Load the 201-channel IHC model to simulate for all CFs
# N_cf = 201
# anfh = load_connear_model(modeldir,json_name="/anfh.json",weights_name="/anfh.h5",name="anfh_model")
# #anfh.summary()
# anfm = load_connear_model(modeldir,json_name="/anfm.json",weights_name="/anfm.h5",name="anfm_model")
# #anfm.summary()
# anfl = load_connear_model(modeldir,json_name="/anfl.json",weights_name="/anfl.h5",name="anfl_model")
# #anfl.summary()
# #load the wavfile
# wavfile = 'sx228.wav'
# L = [70.] #sound-level of 70 dB SPL
# frame_dur = 409.6e-3 #define fragment duration
# onset_dur = 125e-3 # omit initial silence of the wavfile
# signal, fs_signal = wavfile_read(wavfile, fs_connear) # fs_tl as an argument resamples the signal to the given sampling frequency
# stim_full = np.zeros((len(L), signal.size))
# for j in range(len(L)):
# stim_full[j,:] = p0 * 10**(L[j]/20) * signal / rms(signal) # calibrate
# stim_length = int(fs_connear* frame_dur + context_left + context_right) # define the segment length (including context)
# stim = stim_full[:,int(fs_connear*onset_dur):int(fs_connear*onset_dur)+stim_length] # keep the segment
# total_length = stim.shape[1]
# ############ CoNNear cochlea ########
# stim = np.expand_dims(stim, axis=2) # make the stimulus 3D
# # check the time dimension size
# if stim.shape[1] % 2**Nenc_cochlea: # input size needs to be a multiple of 16
# Npad = int(np.ceil(stim.shape[1]/(2**Nenc_cochlea)))*(2**Nenc_cochlea)-stim.shape[1]
# stim = np.pad(stim,((0,0),(0,Npad),(0,0))) # zero-pad
# # simulate the cochlear output
# tl_connear = cochlea.predict(stim, verbose=1)
# tl_target = tl_connear / cochlea_scaling # scaling for feeding to the reference IHC model
# ############ Verhulstetal ############
# tl_target=sp_sig.resample_poly(tl_target, fs_tl, fs_connear, axis=1) # upsample to feed to the reference IHC model
# ihc_target = np.zeros(tl_target.shape)
# anfh_target = np.zeros(tl_connear.shape)
# anfm_target = np.zeros(tl_connear.shape)
# anfl_target = np.zeros(tl_connear.shape)
# for i in range (tl_target.shape[0]):
# ihc_target[i,:,:] = ihc_ref.inner_hair_cell_potential(tl_target[i,:,:]*magic_constant,fs_tl) # IHC output
# ihc_target=sp_sig.resample_poly(ihc_target, fs_connear, fs_tl, axis=1) # dowsample again to fs_connear
# for i in range (tl_target.shape[0]):
# anfh_target[i,:,:] = anf_ref.auditory_nerve_fiber(ihc_target[i,:,:],fs_connear,2) * fs_connear # ANF HSR output
# anfm_target[i,:,:] = anf_ref.auditory_nerve_fiber(ihc_target[i,:,:],fs_connear,1) * fs_connear # ANF MSR output
# anfl_target[i,:,:] = anf_ref.auditory_nerve_fiber(ihc_target[i,:,:],fs_connear,0) * fs_connear # ANF LSR output
# # remove context from the corresponding outputs
# anfh_target = anfh_target[:, context_left:-context_right,:]
# anfm_target = anfm_target[:, context_left:-context_right,:]
# anfl_target = anfl_target[:, context_left:-context_right,:]
# # apply proper scaling for feeding to the CoNNear
# ihc_target = ihc_target * ihc_scaling # scaling for the cochlear input
# ############ CoNNear #################
# # check the time dimension size
# if ihc_target.shape[1] % 2**Nenc:
# Npad = int(np.ceil(ihc_target.shape[1]/(2**Nenc)))*(2**Nenc)-ihc_target.shape[1]
# ihc_target = np.pad(ihc_target,((0,0),(0,Npad),(0,0))) # zero-pad
# # simulate
# anfh_connear = anfh.predict(ihc_target)
# anfm_connear = anfm.predict(ihc_target)
# anfl_connear = anfl.predict(ihc_target)
# # scale back to the original ANF values
# anfh_connear = anfh_connear / an_scaling
# anfm_connear = anfm_connear / an_scaling
# anfl_connear = anfl_connear / an_scaling
```
Here, the output of the HSR ANF models is plotted. Change the `anfh_target` and `anfh_connear` variables to get the outputs of the other two fibers (i.e. `anfm_target` and `anfm_connear` or `anfl_target` and `anfl_connear`).
```
# ################ Plots ######################################
# stim = stim[:,:,0] # make stim 2D again
# fig, axarr = plt.subplots(3)
# #axarr[0].set_ylim(-0.35, 0.35)
# axarr[0].plot(stim[0,context_left:-context_right].T)
# axarr[0].set_title('Audio Input')
# cax1 = axarr[1].imshow(anfh_target[0,:,:].T, cmap='bwr',aspect='auto', vmin=-0, vmax=1000)
# axarr[1].set_title('Output of reference ANF HSR model')
# axarr[1].set(ylabel='CF')
# cax2 = axarr[2].imshow(anfh_connear[0,:,:].T, cmap='bwr',aspect='auto', vmin=0, vmax=1000)
# axarr[2].set_title('Output of CoNNear ANfH model')
# axarr[2].set(ylabel='CF')
# plt.show()
# del anfh, anfm, anfl # remove the connear models variables to free-up some memory
# if 'time_elapsed' in locals() or 'time_elapsed' in globals():
# print("AN stage - Time elapsed: {:.{}f} mins".format((time() - time_elapsed)/60, 2 ))
```
| github_jupyter |
##### Copyright 2020 The OpenFermion Developers
```
#@title Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# https://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
```
# Hamiltonian Time Evolution and Expectation Value Computation
<table class="tfo-notebook-buttons" align="left">
<td>
<a target="_blank" href="https://quantumai.google/openfermion/fqe/tutorials/hamiltonian_time_evolution_and_expectation_estimation"><img src="https://quantumai.google/site-assets/images/buttons/quantumai_logo_1x.png" />View on QuantumAI</a>
</td>
<td>
<a target="_blank" href="https://colab.research.google.com/github/quantumlib/OpenFermion/blob/master/docs/fqe/tutorials/hamiltonian_time_evolution_and_expectation_estimation.ipynb"><img src="https://quantumai.google/site-assets/images/buttons/colab_logo_1x.png" />Run in Google Colab</a>
</td>
<td>
<a target="_blank" href="https://github.com/quantumlib/OpenFermion/blob/master/docs/fqe/tutorials/hamiltonian_time_evolution_and_expectation_estimation.ipynb"><img src="https://quantumai.google/site-assets/images/buttons/github_logo_1x.png" />View source on GitHub</a>
</td>
<td>
<a href="https://storage.googleapis.com/tensorflow_docs/OpenFermion/docs/fqe/tutorials/hamiltonian_time_evolution_and_expectation_estimation.ipynb"><img src="https://quantumai.google/site-assets/images/buttons/download_icon_1x.png" />Download notebook</a>
</td>
</table>
This tutorial describes the FQE's capabilities for Hamiltonian time evolution and expectation value estimation.
Where possible, LiH will be used as an example molecule for the API.
```
# Install FQE on first run (e.g. in Colab); uses the IPython "!" shell magic,
# so this cell only runs inside a notebook kernel.
try:
    import fqe
except ImportError:
    !pip install fqe --quiet
# Global switch controlling whether intermediate wavefunctions are printed.
Print = True
from openfermion import FermionOperator, MolecularData
from openfermion.utils import hermitian_conjugated
import numpy
import fqe
from fqe.unittest_data import build_lih_data, build_hamiltonian
numpy.set_printoptions(floatmode='fixed', precision=6, linewidth=80, suppress=True)
numpy.random.seed(seed=409)
# Load precomputed LiH one- and two-body integrals and a reference wavefunction.
h1e, h2e, wfn = build_lih_data.build_lih_data('energy')
lih_hamiltonian = fqe.get_restricted_hamiltonian(([h1e, h2e]))
# Sector [[4, 0, 6]] - presumably 4 electrons, Sz = 0, 6 spatial orbitals; verify
# against the fqe.Wavefunction documentation.
lihwfn = fqe.Wavefunction([[4, 0, 6]])
lihwfn.set_wfn(strategy='from_data', raw_data={(4, 0): wfn})
if Print:
    lihwfn.print_wfn()
```
## Application of one- and two-body fermionic gates
The API for time propagation can be invoked through the fqe namespace or the wavefunction object.
```
# dummy geometry
# Build a dense matrix representation of the same LiH Hamiltonian via
# Jordan-Wigner so that FQE evolution can be cross-checked against direct
# matrix exponentiation.
from openfermion.chem.molecular_data import spinorb_from_spatial
from openfermion import jordan_wigner, get_sparse_operator, InteractionOperator, get_fermion_operator
# einsum with no output spec reorders h2e axes to match the spin-orbital
# integral convention expected by InteractionOperator.
h1s, h2s = spinorb_from_spatial(h1e, numpy.einsum("ijlk", -2 * h2e) * 0.5)
mol = InteractionOperator(0, h1s, h2s)
ham_fop = get_fermion_operator(mol)
ham_mat = get_sparse_operator(jordan_wigner(ham_fop)).toarray()
from scipy.linalg import expm
time = 0.01  # evolution time (NOTE: shadows any earlier `time` name)
# Evolution through the Wavefunction method ...
evolved1 = lihwfn.time_evolve(time, lih_hamiltonian)
if Print:
    evolved1.print_wfn()
# ... and through the equivalent module-level API; the two must agree.
evolved2 = fqe.time_evolve(lihwfn, time, lih_hamiltonian)
if Print:
    evolved2.print_wfn()
assert numpy.isclose(fqe.vdot(evolved1, evolved2), 1)
# Cross-check against exact matrix-exponential evolution in the cirq ordering.
cirq_wf = fqe.to_cirq_ncr(lihwfn)
evolve_cirq = expm(-1j * time * ham_mat) @ cirq_wf
test_evolve = fqe.from_cirq(evolve_cirq, thresh=1.0E-12)
assert numpy.isclose(fqe.vdot(test_evolve, evolved1), 1)
```
## Exact evolution implementation of quadratic Hamiltonians
Listed here are examples of evolving the special Hamiltonians.
Diagonal Hamiltonian evolution is supported.
```
# Evolve a random wavefunction in the [[4, 2, 4]] sector under a purely
# diagonal Hamiltonian built from number operators n_p = p^ p.
wfn = fqe.Wavefunction([[4, 2, 4]])
wfn.set_wfn(strategy='random')
if Print:
    wfn.print_wfn()
# One energy per spin-orbital 0..7; summing the corresponding number
# operators reproduces the original eight-term diagonal operator.
site_energies = [-2.0, -1.7, -0.7, -0.55, -0.1, -0.06, 0.5, 0.3]
diagonal = FermionOperator()
for orbital, energy in enumerate(site_energies):
    diagonal += FermionOperator(str(orbital) + '^ ' + str(orbital), energy)
if Print:
    print(diagonal)
evolved = wfn.time_evolve(time, diagonal)
if Print:
    evolved.print_wfn()
```
Exact evolution of dense quadratic Hamiltonians is supported. Here is an evolution example using a spin-restricted Hamiltonian on a number- and spin-conserving wavefunction.
```
# Exact evolution under a dense spin-restricted quadratic Hamiltonian on a
# number- and spin-conserving wavefunction. Evolution under `hamil` itself is
# unitary, so the printed initial and final energies should agree.
norb = 4
h1e = numpy.zeros((norb, norb), dtype=numpy.complex128)
for i in range(norb):
    for j in range(norb):
        h1e[i, j] += (i+j) * 0.02
    # NOTE(review): indentation reconstructed from a flattened export - the
    # diagonal boost is assumed to apply once per row i; confirm.
    h1e[i, i] += i * 2.0
hamil = fqe.get_restricted_hamiltonian((h1e,))
wfn = fqe.Wavefunction([[4, 0, norb]])
wfn.set_wfn(strategy='random')
initial_energy = wfn.expectationValue(hamil)
print('Initial Energy: {}'.format(initial_energy))
evolved = wfn.time_evolve(time, hamil)
final_energy = evolved.expectationValue(hamil)
print('Final Energy: {}'.format(final_energy))
```
The GSO Hamiltonian is for evolution of quadratic hamiltonians that are spin broken and number conserving.
```
# Quadratic Hamiltonian over all 2*norb spin-orbitals (GSO): spin-broken but
# number-conserving evolution; energy should again be conserved.
norb = 4
h1e = numpy.zeros((2*norb, 2*norb), dtype=numpy.complex128)
for i in range(2*norb):
    for j in range(2*norb):
        h1e[i, j] += (i+j) * 0.02
    h1e[i, i] += i * 2.0
hamil = fqe.get_gso_hamiltonian((h1e,))
# Wavefunction spanning all Sz sectors with 4 electrons in norb orbitals.
wfn = fqe.get_number_conserving_wavefunction(4, norb)
wfn.set_wfn(strategy='random')
initial_energy = wfn.expectationValue(hamil)
print('Initial Energy: {}'.format(initial_energy))
evolved = wfn.time_evolve(time, hamil)
final_energy = evolved.expectationValue(hamil)
print('Final Energy: {}'.format(final_energy))
```
The BCS Hamiltonian evolves spin-conserving, number-broken wavefunctions.
```
# BCS-type Hamiltonian: pair creation/annihilation terms conserve spin but
# break particle number, so a spin-conserving (number-broken) wavefunction
# is used. Presumably even indices are alpha and odd are beta spin-orbitals
# (OpenFermion convention) - confirm.
norb = 4
time = 0.001
wfn_spin = fqe.get_spin_conserving_wavefunction(2, norb)
hamil = FermionOperator('', 6.0)  # constant (identity) term
for i in range(0, 2*norb, 2):
    for j in range(0, 2*norb, 2):
        # pair-annihilation term on orbitals (i, j+1)
        opstring = str(i) + ' ' + str(j + 1)
        hamil += FermionOperator(opstring, (i+1 + j*2)*0.1 - (i+1 + 2*(j + 1))*0.1j)
        # matching pair-creation term
        opstring = str(i) + '^ ' + str(j + 1) + '^ '
        hamil += FermionOperator(opstring, (i+1 + j)*0.1 + (i+1 + j)*0.1j)
# symmetrize to obtain a Hermitian operator before evolving
h_noncon = (hamil + hermitian_conjugated(hamil))/2.0
if Print:
    print(h_noncon)
wfn_spin.set_wfn(strategy='random')
if Print:
    wfn_spin.print_wfn()
spin_evolved = wfn_spin.time_evolve(time, h_noncon)
if Print:
    spin_evolved.print_wfn()
```
Exact Evolution Implementation of Diagonal Coulomb terms
```
# Exact evolution under a diagonal-Coulomb (density-density) Hamiltonian.
norb = 4
wfn = fqe.Wavefunction([[5, 1, norb]])
vij = numpy.zeros((norb, norb, norb, norb), dtype=numpy.complex128)
for i in range(norb):
    for j in range(norb):
        # NOTE(review): the 2-index assignment broadcasts over the trailing
        # two axes, filling the whole vij[i, j, :, :] slab with one constant -
        # confirm this is the intended diagonal-Coulomb layout.
        vij[i, j] += 4*(i % norb + 1)*(j % norb + 1)*0.21
wfn.set_wfn(strategy='random')
if Print:
    wfn.print_wfn()
hamil = fqe.get_diagonalcoulomb_hamiltonian(vij)
evolved = wfn.time_evolve(time, hamil)
if Print:
    evolved.print_wfn()
```
Exact evolution of individual n-body anti-Hermitian generators
```
# Exact evolution under a single dense two-body operator. The second term is
# the Hermitian conjugate of the first, so `ops` as a whole is Hermitian;
# time_evolve exponentiates -i * time * ops.
norb = 3
nele = 4
ops = FermionOperator('5^ 1^ 2 0', 3.0 - 1.j)
ops += FermionOperator('0^ 2^ 1 5', 3.0 + 1.j)
wfn = fqe.get_number_conserving_wavefunction(nele, norb)
wfn.set_wfn(strategy='random')
wfn.normalize()
if Print:
    wfn.print_wfn()
evolved = wfn.time_evolve(time, ops)
if Print:
    evolved.print_wfn()
```
Approximate evolution of sums of n-body generators
Approximate evolution can be done for dense operators.
```
# Approximate (Taylor-expansion) evolution of the LiH wavefunction under
# its dense Hamiltonian; lihwfn / lih_hamiltonian come from earlier cells.
lih_evolved = lihwfn.apply_generated_unitary(time, 'taylor', lih_hamiltonian, accuracy=1.e-8)
if Print:
    lih_evolved.print_wfn()

# Chebyshev-expansion evolution on a small 2-orbital system.
norb = 2
nalpha = 1
nbeta = 1
nele = nalpha + nbeta
time = 0.05
h1e = numpy.zeros((norb*2, norb*2), dtype=numpy.complex128)
for i in range(2*norb):
    for j in range(2*norb):
        h1e[i, j] += (i+j) * 0.02
    h1e[i, i] += i * 2.0
hamil = fqe.get_general_hamiltonian((h1e,))
# Bounds on the Hamiltonian spectrum, required by the Chebyshev method.
spec_lim = [-1.13199078e-03, 6.12720338e+00]
wfn = fqe.Wavefunction([[nele, nalpha - nbeta, norb]])
wfn.set_wfn(strategy='random')
if Print:
    wfn.print_wfn()
evol_wfn = wfn.apply_generated_unitary(time, 'chebyshev', hamil, spec_lim=spec_lim)
if Print:
    evol_wfn.print_wfn()
```
API for determining desired expectation values
```
# One-particle RDM via the string API: all <i^ j> elements at once.
rdm1 = lihwfn.expectationValue('i^ j')
if Print:
    print(rdm1)
# A single matrix element, <5^ 3>.
val = lihwfn.expectationValue('5^ 3')
if Print:
    print(2.*val)
# Transition density matrix between the evolved and original states.
trdm1 = fqe.expectationValue(lih_evolved, 'i j^', lihwfn)
if Print:
    print(trdm1)
val = fqe.expectationValue(lih_evolved, '5 3^', lihwfn)
if Print:
    print(2*val)
```
2.B.1 RDMs
In addition to the above API, higher-order density matrices and hole densities can be calculated.
```
# Mixed particle-hole two-body density matrix <i^ j k l^>.
rdm2 = lihwfn.expectationValue('i^ j k l^')
if Print:
    print(rdm2)
# Conventional two-particle RDM <i^ j^ k l> via the module-level helper.
rdm2 = fqe.expectationValue(lihwfn, 'i^ j^ k l', lihwfn)
if Print:
    print(rdm2)
```
2.B.2 Hamiltonian expectations (or any expectation values)
```
# Energy <psi|H|psi> via the wavefunction method ...
li_h_energy = lihwfn.expectationValue(lih_hamiltonian)
if Print:
    print(li_h_energy)
# ... and equivalently via the module-level helper (same value expected).
li_h_energy = fqe.expectationValue(lihwfn, lih_hamiltonian, lihwfn)
if Print:
    print(li_h_energy)
```
2.B.3 Symmetry operations
```
# Expectation values of symmetry operators: S^2, S_z, time reversal, and
# the particle-number operator.
op = fqe.get_s2_operator()
print(lihwfn.expectationValue(op))
op = fqe.get_sz_operator()
print(lihwfn.expectationValue(op))
op = fqe.get_time_reversal_operator()
print(lihwfn.expectationValue(op))
op = fqe.get_number_operator()
print(lihwfn.expectationValue(op))
```
| github_jupyter |
# Dimensionality Reduction
In machine learning, we are often dealing with very large datasets, not only in terms of the number of rows, but also in the number of columns (*i.e.* features or predictors). This presents a challenge in choosing which variables ought to be included in a particular analysis. Inevitably, some features will be correlated with other features, implying that they are partially redundant in terms of explaining part of the variability in the outcome variable.
To deal with this, we can apply one of several dimensionality reduction techniques, which aim to identify latent variables that are associated with both the features and the outcomes, but are complementary with one another in terms of the variability that they explain.
## Principal Component Analysis
The first **unsupervised learning** method that we will look at is Principal Component Analysis (PCA).
It is a technique to reduce the dimensionality of the data, by creating a linear projection.
That is, we find new features to represent the data that are a linear combination of the old data (i.e. we rotate it). Thus, we can think of PCA as a projection of our data onto a *new* feature space.
The way PCA finds these new directions is by looking for the directions of maximum variance.
Usually only a few components that explain most of the variance in the data are kept. Here, the premise is to reduce the size (dimensionality) of a dataset while capturing most of its information. There are many reasons why dimensionality reduction can be useful: It can reduce the computational cost when running learning algorithms, decrease the storage space, and may help with the so-called "curse of dimensionality," which we will discuss in greater detail later.
Here is an illustration using the iris dataset we've seen previously.
```
from sklearn.datasets import load_iris

iris = load_iris()

%matplotlib inline
import matplotlib.pyplot as plt
import seaborn as sns
import pandas as pd
import numpy as np

# 150 samples x 4 numeric features, plus a readable species column derived
# from the integer target codes.
iris_df = (pd.DataFrame(iris.data, columns=iris.feature_names)
           .assign(species=iris.target_names[iris.target]))
iris.feature_names
```
It's hard to visualize a 4-dimensional dataset simultaneously, but we can plot the data pairwise to get an idea of how the output (species labels) can be discriminated on the basis of each variable relative to another.
```
from itertools import combinations

# Scatter every pair of features, colored by species, to eyeball how well
# the classes separate along each pair of axes.
# Fix: seaborn 0.12+ removed positional x/y from lmplot — pass keywords.
for x, y in combinations(iris.feature_names, 2):
    sns.lmplot(x=x, y=y,
               data=iris_df,
               fit_reg=False,
               hue="species");
```
We can see, for example, that the petal variables appear to be redundant with respect to one another.
What PCA will do is formulate a set of **orthogonal** variables, where the number of orthogonal axes is smaller than the number of original variables. It then **projects** the original data onto these axes to obtain transformed variables.
The key concept is that each set of axes constructed maximizes the amount of residual variability explained.
We can then fit models to the subset of orthogonal variables that accounts for most of the variability.
Let's do a PCA by hand first, before using scikit-learn:
### Standardization
As we saw in the previous unit, an important first step for many datasets is to **standardize** the original data. It's important for all variables to be on the same scale because the algorithm will be seeking to maximize variance along each axis. If one variable is numerically larger than another variable, it will tend to have larger variance, and will therefore garner undue attention from the algorithm.
This dataset is approximately on the same scale, though there are differences, particularly in the fourth variable (petal width):
```
iris.data[:5]
```
Let's apply a standardization transformation from scikit-learn:
```
from sklearn.preprocessing import StandardScaler

# Zero-mean, unit-variance scaling of each feature column.
X_std = StandardScaler().fit_transform(iris.data)
X_std[:5]
```
### Eigendecomposition
The PCA algorithm is driven by the eigenvalues and eigenvectors of the original dataset.
- The eigenvectors determine the direction of each component
- The eigenvalues determine the length (magnitude) of the component
The eigendecomposition is performed on the covariance matrix of the data, which we can derive here using NumPy.
```
# Covariance matrix of the standardized data (np.cov expects features in rows,
# hence the transpose), followed by its eigendecomposition.
Σ = np.cov(X_std.T)
evals, evecs = np.linalg.eig(Σ)
evals
from mpl_toolkits.mplot3d import Axes3D
from mpl_toolkits.mplot3d import proj3d
from matplotlib.patches import FancyArrowPatch

# Strip the unit suffix, e.g. "sepal length (cm)" -> "sepal length".
variables = [name[:name.find(' (')] for name in iris.feature_names]

class Arrow3D(FancyArrowPatch):
    """A FancyArrowPatch whose 2-D endpoints are projected from 3-D coords."""

    def __init__(self, xs, ys, zs, *args, **kwargs):
        FancyArrowPatch.__init__(self, (0, 0), (0, 0), *args, **kwargs)
        self._verts3d = xs, ys, zs

    def draw(self, renderer):
        # NOTE(review): matplotlib >= 3.5 expects a do_3d_projection()
        # method rather than this draw() override — confirm against the
        # installed matplotlib version.
        xs3d, ys3d, zs3d = self._verts3d
        xs, ys, zs = proj3d.proj_transform(xs3d, ys3d, zs3d, renderer.M)
        self.set_positions((xs[0], ys[0]), (xs[1], ys[1]))
        FancyArrowPatch.draw(self, renderer)

fig = plt.figure(figsize=(7, 7))
ax = fig.add_subplot(111, projection='3d')
ax.plot(X_std[:, 0], X_std[:, 1], X_std[:, 2], 'o', markersize=8,
        color='green',
        alpha=0.2)
# Means of the first three standardized features (the 3-D plot shows only 3
# of the 4 dimensions).
mean_x, mean_y, mean_z = X_std.mean(0)[:-1]
ax.plot([mean_x], [mean_y], [mean_z], 'o', markersize=10, color='red', alpha=0.5)
# BUG FIX: np.linalg.eig returns eigenvectors as the *columns* of `evecs`;
# the original iterated the rows, drawing arrows that were not eigenvectors.
for v in evecs.T:
    a = Arrow3D([mean_x, v[0]], [mean_y, v[1]], [mean_z, v[2]], mutation_scale=20, lw=3, arrowstyle="-|>", color="r")
    ax.add_artist(a)
ax.set_xlabel(variables[0])
ax.set_ylabel(variables[1])
ax.set_zlabel(variables[2])
plt.title('Eigenvectors')
```
## Selecting components
The eigenvectors are the principal components, which are normalized linear combinations of the original features. They are ordered, in terms of the amount of variation in the dataset that they account for.
```
fig, axes = plt.subplots(2, 1)
total = evals.sum()
# Per-component share of the total variance, sorted largest first.
variance_explained = 100* np.sort(evals)[::-1]/total
axes[0].bar(range(4), variance_explained)
axes[0].set_xticks(range(4));
axes[0].set_xticklabels(['Component ' + str(i+1) for i in range(4)])
# Cumulative variance curve, prepended with 0 so it starts at the origin.
axes[1].plot(range(5), np.r_[0, variance_explained.cumsum()])
axes[1].set_xticks(range(5));
```
## Projecting the data
The next step is to **project** the original data onto the orthogonal axes.
Let's extract the first two eigenvectors and use them as the projection matrix for the original (standardized) variables.
```
# Project onto the top-2 principal components.
# Fix: np.linalg.eig does not guarantee any eigenvalue ordering, so sort by
# eigenvalue (descending) before taking the first two columns.
order = np.argsort(evals)[::-1]
W = evecs[:, order[:2]]
Y = X_std @ W
df_proj = pd.DataFrame(np.hstack((Y, iris.target.astype(int).reshape(-1, 1))),
                       columns=['Component 1', 'Component 2', 'Species'])
# seaborn 0.12+ removed positional x/y from lmplot — pass keywords.
sns.lmplot(x='Component 1', y='Component 2',
           data=df_proj,
           fit_reg=False,
           hue='Species')
```
## PCA in scikit-learn
`scikit-learn` provides a PCA transformation in its `decomposition` module.
```
from sklearn.decomposition import PCA
# PCA via scikit-learn; whiten=True rescales components to unit variance.
pca = PCA(n_components=3, whiten=True).fit(iris.data)
X_pca = pca.transform(iris.data)
iris_df['First Component'] = X_pca[:, 0]
iris_df['Second Component'] = X_pca[:, 1]
iris_df['Third Component'] = X_pca[:, 2]
# Fix: seaborn 0.12+ removed positional x/y from lmplot — pass keywords.
sns.lmplot(x='First Component', y='Second Component',
           data=iris_df,
           fit_reg=False,
           hue="species");
sns.lmplot(x='Second Component', y='Third Component',
           data=iris_df,
           fit_reg=False,
           hue="species");
```
## Exercise
Import the wine dataset and perform PCA on the predictor variables, and decide how many principal components would you select.
```
# Fix: use a raw string for the regex separator — '\s' in a plain string is
# an invalid escape sequence (SyntaxWarning on modern Python); the value is
# unchanged.
wine = pd.read_table('../data/wine.dat', sep=r'\s+')
wine.head()
# Write your answer here
```
| github_jupyter |
In the [reverse probability page]({{ site.baseurl }}/chapters/10/first_bayes)
we played a game with two boxes, with different proportions of red and green
balls.
The [Bayes bars page]({{ site.baseurl }}/chapters/10/bayes_bars) has a way of
thinking of our calculations for this game, using the height of bars to
express our probabilities.
Now we extend the game to more options.
Now I have five boxes:
* BOX1 has one red ball and four green balls.
* BOX2 has two red balls and three green balls.
* BOX3 has three red balls and two green balls.
* BOX4 has four red balls and one green ball.
* BOX5 has five red balls.
The game proceeds as before:
* I offer you one of these five boxes at random, without telling you which.
* You draw a ball at random from the box, and you get a red ball.
* Now, what is the probability that I gave you BOX4?
First we will solve this by simulation. Then we will solve it by expressing
the calculation we illustrated with the bars from the Bayes bars page.
```
import numpy as np
import pandas as pd
import matplotlib.pyplot as plt
%matplotlib inline
```
Here is the simulation. It is a small modification from the simulation in the reverse probability page.
```
n_iters = 10000
# Pre-allocate result arrays; every element is overwritten in the loop.
box_nos = np.repeat([1], n_iters)
ball_colors = np.repeat(['green'], n_iters)
for i in np.arange(n_iters):
    # Choose a box number randomly.
    box_no = np.random.choice([1, 2, 3, 4, 5])
    # Create the box with the correct numbers of red and green.
    box = np.repeat(['red', 'green'], [box_no, 5-box_no])
    # Draw a ball from the box
    ball_color = np.random.choice(box)
    # Store the results.
    box_nos[i] = box_no
    ball_colors[i] = ball_color
# Make the results into a data frame.
trial_results = pd.DataFrame()
trial_results['box no'] = box_nos
trial_results['ball color'] = ball_colors
trial_results.head()
```
Calculate the proportion of "red" trials that came from BOX4:
```
# Of the trials giving a red ball, what proportion came from box 4?
# This is a Monte-Carlo estimate of the posterior P(BOX4 | red).
red_ball_trials = trial_results[trial_results['ball color'] == 'red']
p_box4 = np.count_nonzero(red_ball_trials['box no'] == 4) / len(red_ball_trials)
p_box4
```
Here are the initial probabilities of the boxes.
```
# Uniform prior: each of the five boxes is equally likely.
box_probabilities = np.repeat(1 / 5, 5)
x_locations = np.arange(5)
box_labels = ['BOX1', 'BOX2', 'BOX3', 'BOX4', 'BOX5']
plt.bar(x_locations, box_probabilities)
plt.xticks(x_locations, box_labels)
plt.ylabel("Probability of getting box")
plt.title('Initial probability of boxes');
```
We display the probabilities of getting a red ball from boxes 1 through 5:
```
# Likelihood P(red | box): box k holds k red balls out of 5.
red_probabilities = np.array([1, 2, 3, 4, 5]) / 5
plt.bar(x_locations, red_probabilities)
plt.xticks(x_locations, box_labels)
plt.ylabel("Probability of getting red ball")
plt.title('Probability of getting red for each box');
```
Next we combine the two steps, of getting a box, and then drawing a ball.
As before, we do this by multiplying the values expressed by heights in the
first bar graph, by the values expressed by heights in the second bar graph.
```
# Joint probability P(box AND red) = prior * likelihood, element-wise.
box_and_red_probs = box_probabilities * red_probabilities
plt.bar(x_locations, box_and_red_probs)
plt.xticks(x_locations, box_labels)
plt.ylabel("Probability of getting red ball and the given box")
plt.title('Probability of getting red and each box');
```
In this case, as before, where all the initial probabilities of the boxes are
the same, this last bar graph is just the second bar graph scaled down by 1/5.
Lastly, remember we are interested in the probability that we started with
BOX4, given we have a red ball.
To get this probability, we first take the probability of getting a red ball
*and* BOX4. This is the fourth bar on the graph above - 0.2 * 0.8 = 0.16.
Next we divide by the overall probability of getting a red ball, which is the
sum of the heights of the five bars above.
We can do this calculation by dividing the heights of the bars above by the
sum of the heights, so the heights of the bars now sum to 1.
```
# Overall probability of getting a red ball.
# (The normalizing constant in Bayes' rule: sum over all boxes.)
np.sum(box_and_red_probs)
```
Here are the bars divided by this sum:
```
# Posterior P(box | red): normalize the joint so the heights sum to 1.
box_given_red_probs = box_and_red_probs / np.sum(box_and_red_probs)
plt.bar(x_locations, box_given_red_probs)
plt.xticks(x_locations, box_labels)
plt.ylabel("Probability of box given a red ball")
plt.title('Probability of initial box given red ball drawn');
```
The probability we want is the height of the fourth bar:
```
box_given_red_probs[3]
```
The probability that we drew from BOX5 (given we have a red ball) is the height
of the fifth bar:
```
box_given_red_probs[4]
```
We can estimate this from the simulation as well:
```
# Of the trials giving a red ball, what proportion came from box5?
red_ball_trials = trial_results[trial_results['ball color'] == 'red']
# Fix: renamed from the misleading `p_box4` — this estimates the posterior
# probability of BOX5, not BOX4 (the name is not used elsewhere).
p_box5 = np.count_nonzero(red_ball_trials['box no'] == 5) / len(red_ball_trials)
p_box5
```
As we saw in the [Bayes bars]({{ site.baseurl }}/chapters/10/bayes_bars) page,
because all the boxes have equal probability, we can skip the calculation step
that scales by the box probability, and take the following shortcut:
```
# Skipping scaling by box probabilities, when probabilities are equal.
# With a uniform prior, the prior cancels in the normalization.
box_given_red_probs = red_probabilities / np.sum(red_probabilities)
# We get the same result as before.
box_given_red_probs
```
## Towards confidence
Now let me ask a different question.
What is the probability that the red ball came from *any* of boxes 2 through
5?
This is just the sum of the adjusted probabilities above, for boxes 2 through
5. Remember the sum of all the adjusted probabilities is 1.
```
# Sanity check: the posterior probabilities sum to 1.
np.sum(box_given_red_probs)
# Probability that red ball came from any of boxes 2 through 5.
red_from_box_2_5 = np.sum(box_given_red_probs[1:])
red_from_box_2_5
```
Put another way:
If I draw a red ball, there is a 93% chance that the box I drew from was one of
BOX2 through BOX5.
Put yet another way:
If I draw a red ball, I have 93% confidence that I have drawn from one of BOX2
through BOX5.
This is the logic for *Bayesian confidence intervals*. These are sometimes
called [credible intervals](https://en.wikipedia.org/wiki/Credible_interval).
We can reason about plausible states of the world that led to our results. In
our case we can reason about which box we have drawn from (state of the
world), given we have seen a red ball (the result).
We will soon see that we can apply this logic when we want to reason about ---
for example --- the relationship of the mean of a sample (the result) to the
mean for a whole population (the state of the world).
| github_jupyter |
# Anomaly Detection in Practice
```
import pandas as pd
import numpy as np
from collections import Counter
from sklearn.model_selection import train_test_split
from sklearn.tree import DecisionTreeClassifier
from sklearn.metrics import f1_score, roc_auc_score
from sklearn.compose import make_column_selector
from sklearn.preprocessing import LabelEncoder, StandardScaler

np.random.seed(42)

# import the outlier detection toolkit
# install it with
# ! pip install --upgrade pyod
import pyod

# load the data set
# This is a 10% stratified subsample of the data from the 1999 ACM KDD Cup
# For more info, please see https://www.openml.org/d/1113
url = 'https://datahub.io/machine-learning/kddcup99/r/kddcup99.csv'
kdd = pd.read_csv(url)
# Keep only SMTP traffic, shuffled (sample(frac=1)) with a fresh index.
ds = kdd[kdd.service == 'smtp'].sample(frac=1).reset_index(drop=True)
# Binary target: 0 = normal traffic, 1 = attack.
# NOTE(review): this raises KeyError for any label not listed below —
# confirm the smtp subset contains only these five classes.
label_dict = {
    'normal': 0,
    'neptune': 1,
    'satan': 1,
    'portsweep': 1,
    'ipsweep': 1
}
ds['label'] = [label_dict[item] for item in ds['label']]
X, y = ds.drop('label', axis = 1), ds.label
# summarize class distribution
counter = Counter(y)
print(counter)

# Split columns by dtype: numeric columns get standardized, object
# (string) columns get integer-label encoded.
numerical_columns_selector = make_column_selector(dtype_exclude=object)
num_features = numerical_columns_selector(X)
categorical_columns_selector = make_column_selector(dtype_include=object)
cat_features = categorical_columns_selector(X)
for feat in num_features:
    scaler = StandardScaler()
    X[feat] = scaler.fit_transform(np.array(X[feat]).reshape(-1, 1))
for feat in cat_features:
    encoder = LabelEncoder()
    X[feat] = encoder.fit_transform(np.array(X[feat]).reshape(-1, 1))

# split into train and test sets
X_train, X_test, y_train, y_test = train_test_split(X, y, test_size=0.33, random_state=42)
# summarize the shape of the training dataset
print(X_train.shape, y_train.shape)

# Baseline classifier trained on the *unfiltered* training data, for
# comparison against the outlier-filtered variants below.
# fit the model
model = DecisionTreeClassifier()
model.fit(X_train, y_train)
# evaluate the model
yhat = model.predict(X_test)
# evaluate predictions
f1 = f1_score(y_test, yhat)
auc = roc_auc_score(y_test, yhat)
print('F-1: {}\nROC_AUC: {}'.format(f1, auc))
```
# Histogram-based Outlier Detection (HBOS)
*(from pyod)*
```
from pyod.models.hbos import HBOS

contamination = 0.1
hbos = HBOS(contamination=contamination)
# fit the data to HBOS
hbos.fit(X_train)
y_hat = hbos.predict(X_train)
# pyod's predict() returns 0 for inliers and 1 for outliers.
# BUG FIX: keep the inliers (y_hat == 0). The original mask `y_hat != 0`
# kept only the detected outliers, contradicting the message printed below.
mask = y_hat == 0
out_hbos, in_hbos = Counter(mask)[0],Counter(mask)[1]
print('Removed {} outliers, kept {} inliers'.format(out_hbos, in_hbos))
X_masked, y_masked = X_train[mask], y_train[mask]
model = DecisionTreeClassifier()
# fit the model
model.fit(X_masked, y_masked)
# evaluate the model
y_pred = model.predict(X_test)
# evaluate predictions
f1_hbos = f1_score(y_test, y_pred)
auc_hbos = roc_auc_score(y_test, y_pred)
print('F-1: {}\nROC_AUC: {}'.format(f1_hbos, auc_hbos))
```
# Density-Based Spatial Clustering of Applications with Noise(DBSCAN)
*(from sklearn)*
```
from sklearn.cluster import DBSCAN

dbscan = DBSCAN(eps=0.1, min_samples=2, metric='cosine')
# fit the data to DBSCAN (fit_predict ignores the y argument)
y_hat = dbscan.fit_predict(X_train, y_train)
# filter out predictions values = -1
# as they are considered as anomalies (DBSCAN labels noise points -1)
mask = y_hat != -1
out_dbscan, in_dbscan = Counter(mask)[0],Counter(mask)[1]
print('Removed {} outliers, kept {} inliers'.format(out_dbscan, in_dbscan))
X_masked, y_masked = X_train[mask], y_train[mask]
model = DecisionTreeClassifier()
# fit the model
model.fit(X_masked, y_masked)
# evaluate the model
y_pred = model.predict(X_test)
# evaluate predictions
f1_dbscan = f1_score(y_test, y_pred)
auc_dbscan = roc_auc_score(y_test, y_pred)
print('F-1: {}\nROC_AUC: {}'.format(f1_dbscan, auc_dbscan))
```
# One-Class Support Vector Machine (OCSVM)
*(from scikit-learn)*
```
from sklearn.svm import OneClassSVM as OCSVM

ocsvm = OCSVM(gamma='auto', kernel='linear')
# fit the data to OCSVM (sklearn convention: -1 = outlier, +1 = inlier)
y_hat = ocsvm.fit_predict(X_train, y_train)
# filter out predictions values = -1
# as they are considered as anomalies
mask = y_hat != -1
out_ocsvm, in_ocsvm = Counter(mask)[0],Counter(mask)[1]
print('Removed {} outliers, kept {} inliers'.format(out_ocsvm, in_ocsvm))
X_masked, y_masked = X_train[mask], y_train[mask]
model = DecisionTreeClassifier()
# fit the model
model.fit(X_masked, y_masked)
# evaluate the model
y_pred = model.predict(X_test)
# evaluate predictions
f1_ocsvm = f1_score(y_test, y_pred)
auc_ocsvm = roc_auc_score(y_test, y_pred)
print('F-1: {}\nROC_AUC: {}'.format(f1_ocsvm, auc_ocsvm))
```
# IsolationForest Outlier Detector
*(from pyod also on scikit-learn)*
```
from pyod.models.iforest import IForest

random_state = np.random.RandomState(42)
contamination = 0.1
iso = IForest(contamination=contamination, random_state=random_state)
# fit the data to IF
iso.fit(X_train)
y_hat = iso.predict(X_train)
# pyod's predict() returns 0 for inliers and 1 for outliers.
# BUG FIX: keep the inliers (y_hat == 0); the original mask kept outliers.
mask = y_hat == 0
out_iso, in_iso = Counter(mask)[0],Counter(mask)[1]
print('Removed {} outliers, kept {} inliers'.format(out_iso, in_iso))
X_masked, y_masked = X_train[mask], y_train[mask]
model = DecisionTreeClassifier()
# fit the model
model.fit(X_masked, y_masked)
# evaluate the model
y_pred = model.predict(X_test)
# evaluate predictions
f1_iso = f1_score(y_test, y_pred)
auc_iso = roc_auc_score(y_test, y_pred)
print('F-1: {}\nROC_AUC: {}'.format(f1_iso, auc_iso))
```
# Local Outlier Factor (LOF)
*(from pyod also on scikit-learn)*
```
from pyod.models.lof import LOF

contamination = 0.1
lof = LOF(n_neighbors=20, algorithm='auto', leaf_size=30, metric='minkowski', contamination = contamination)
# fit the data to LOF
lof.fit(X_train)
y_hat = lof.predict(X_train)
# pyod's predict() returns 0 for inliers and 1 for outliers.
# BUG FIX: keep the inliers (y_hat == 0); the original mask kept outliers.
mask = y_hat == 0
out_lof, in_lof = Counter(mask)[0],Counter(mask)[1]
print('Removed {} outliers, kept {} inliers'.format(out_lof, in_lof))
X_masked, y_masked = X_train[mask], y_train[mask]
model = DecisionTreeClassifier()
# fit the model
model.fit(X_masked, y_masked)
# evaluate the model
y_pred = model.predict(X_test)
# evaluate predictions
f1_lof = f1_score(y_test, y_pred)
auc_lof = roc_auc_score(y_test, y_pred)
print('F-1: {}\nROC_AUC: {}'.format(f1_lof, auc_lof))
```
# Clustering Based Local Outlier Factor (CBLOF)
*(from pyod)*
```
from pyod.models.cblof import CBLOF

random_state = np.random.RandomState(42)
contamination = 0.1
cblof = CBLOF(contamination=contamination, check_estimator=False, random_state=random_state)
# fit the data to CBLOF
cblof.fit(X_train)
y_hat = cblof.predict(X_train)
# pyod's predict() returns 0 for inliers and 1 for outliers.
# BUG FIX: keep the inliers (y_hat == 0); the original mask kept outliers.
mask = y_hat == 0
out_cblof, in_cblof = Counter(mask)[0],Counter(mask)[1]
print('Removed {} outliers, kept {} inliers'.format(out_cblof, in_cblof))
X_masked, y_masked = X_train[mask], y_train[mask]
model = DecisionTreeClassifier()
# fit the model
model.fit(X_masked, y_masked)
# evaluate the model
y_pred = model.predict(X_test)
# evaluate predictions
f1_cblof = f1_score(y_test, y_pred)
auc_cblof = roc_auc_score(y_test, y_pred)
print('F-1: {}\nROC_AUC: {}'.format(f1_cblof, auc_cblof))
```
# ABOD
*(from pyod)*
```
from pyod.models.abod import ABOD

contamination = 0.1
abod = ABOD(contamination=contamination)
# fit the data to ABOD
abod.fit(X_train)
y_hat = abod.predict(X_train)
# pyod's predict() returns 0 for inliers and 1 for outliers.
# BUG FIX: keep the inliers (y_hat == 0); the original mask kept outliers.
mask = y_hat == 0
out_abod, in_abod = Counter(mask)[0],Counter(mask)[1]
print('Removed {} outliers, kept {} inliers'.format(out_abod, in_abod))
X_masked, y_masked = X_train[mask], y_train[mask]
model = DecisionTreeClassifier()
# fit the model
model.fit(X_masked, y_masked)
# evaluate the model
y_pred = model.predict(X_test)
# evaluate predictions
f1_abod = f1_score(y_test, y_pred)
auc_abod = roc_auc_score(y_test, y_pred)
print('F-1: {}\nROC_AUC: {}'.format(f1_abod, auc_abod))
```
# Feature Bagging
*(from pyod)*
```
from pyod.models.feature_bagging import FeatureBagging

random_state = np.random.RandomState(42)
contamination = 0.1
fbd = FeatureBagging(LOF(n_neighbors=20),contamination=contamination,
                     check_estimator=False,random_state=random_state)
# fit the data to FB
fbd.fit(X_train)
y_hat = fbd.predict(X_train)
# pyod's predict() returns 0 for inliers and 1 for outliers.
# BUG FIX: keep the inliers (y_hat == 0); the original mask kept outliers.
mask = y_hat == 0
out_fb, in_fb = Counter(mask)[0],Counter(mask)[1]
print('Removed {} outliers, kept {} inliers'.format(out_fb, in_fb))
X_masked, y_masked = X_train[mask], y_train[mask]
model = DecisionTreeClassifier()
# fit the model
model.fit(X_masked, y_masked)
# evaluate the model
y_pred = model.predict(X_test)
# evaluate predictions
f1_fb = f1_score(y_test, y_pred)
auc_fb = roc_auc_score(y_test, y_pred)
print('F-1: {}\nROC_AUC: {}'.format(f1_fb, auc_fb))
```
# Summarize results
```
# Collect every detector's outlier counts and test-set scores in one table.
cols = ['Detector', 'Outliers', 'Inliers', 'F1', 'ROC_AUC']
df = pd.DataFrame(columns=cols)
detectors = ['None', 'OCSVM', 'ABOD', 'CBLOF', 'DBSCAN', 'FB', 'IF', 'HBOS', 'LOF']
aucs = [auc, auc_ocsvm, auc_abod, auc_cblof, auc_dbscan, auc_fb, auc_iso, auc_hbos, auc_lof]
f1s = [f1, f1_ocsvm, f1_abod, f1_cblof, f1_dbscan, f1_fb, f1_iso, f1_hbos, f1_lof]
# Fix: np.nan — the np.NaN alias was removed in NumPy 2.0.
inliers = [np.nan, in_ocsvm, in_abod, in_cblof, in_dbscan, in_fb, in_iso, in_hbos, in_lof]
outliers = [np.nan, out_ocsvm, out_abod, out_cblof, out_dbscan, out_fb, out_iso, out_hbos, out_lof]
df.Detector = detectors
df.Outliers = outliers
df.Inliers = inliers
df.F1 = f1s
df.ROC_AUC = aucs
df
```
| github_jupyter |
# LinearSVC with Scale & Polynomial Features
This Code template is for the Classification task using a simple Linear Support Vector Classifier (LinearSVC) based on the Support Vector Machine algorithm with feature rescaling technique Normalize and feature transformation technique Polynomial Features in a pipeline and separately as well.
### Required Packages
```
!pip install imblearn
import warnings
import numpy as np
import pandas as pd
import seaborn as se
import matplotlib.pyplot as plt
from sklearn.pipeline import make_pipeline
from sklearn.model_selection import train_test_split
from sklearn.svm import LinearSVC
from imblearn.over_sampling import RandomOverSampler
from sklearn.preprocessing import LabelEncoder, Normalizer, PolynomialFeatures
from sklearn.metrics import classification_report,plot_confusion_matrix
warnings.filterwarnings('ignore')
```
### Initialization
Filepath of CSV file
```
#filepath
# Path to the input CSV dataset; fill in before running.
file_path= ''
```
List of features which are required for model training .
```
#x_values
# Column names to use as model inputs; fill in before running.
features=[]
```
Target feature for prediction.
```
#y_value
# Name of the column to predict; fill in before running.
target=''
```
### Data Fetching
Pandas is an open-source, BSD-licensed library providing high-performance, easy-to-use data manipulation and data analysis tools.
We will use panda's library to read the CSV file using its storage path.And we use the head function to display the initial row or entry.
```
# Load the dataset and preview the first rows.
df=pd.read_csv(file_path)
df.head()
```
### Feature Selections
It is the process of reducing the number of input variables when developing a predictive model. Used to reduce the number of input variables to both reduce the computational cost of modelling and, in some cases, to improve the performance of the model.
We will assign all the required input features to X and target/outcome to Y.
```
# Split the frame into the input features X and the target Y.
X=df[features]
Y=df[target]
```
### Data Preprocessing
Since the majority of the machine learning models in the Sklearn library doesn't handle string category data and Null value, we have to explicitly remove or replace null values. The below snippet have functions, which removes the null value if any exists. And convert the string classes data in the datasets by encoding them to integer classes.
```
def NullClearner(df):
    """Impute missing values in a Series: mean for float64/int64, mode otherwise.

    The fill happens in place on the Series; non-Series inputs are returned
    untouched.
    """
    if not isinstance(df, pd.Series):
        return df
    if df.dtype in ["float64", "int64"]:
        fill_value = df.mean()
    else:
        fill_value = df.mode()[0]
    df.fillna(fill_value, inplace=True)
    return df

def EncodeX(df):
    """One-hot encode categorical columns via pandas dummies."""
    return pd.get_dummies(df)

def EncodeY(df):
    """Label-encode the target when it has more than two classes.

    Binary (or constant) targets are returned unchanged; otherwise the
    original class labels and their integer codes are printed.
    """
    if len(df.unique()) <= 2:
        return df
    un_EncodedT = np.sort(pd.unique(df), axis=-1, kind='mergesort')
    df = LabelEncoder().fit_transform(df)
    EncodedT = list(range(len(un_EncodedT)))
    print("Encoded Target: {} to {}".format(un_EncodedT, EncodedT))
    return df
```
Calling preprocessing functions on the feature and target set.
```
# Impute every feature column, one-hot encode X, and impute the target.
x=X.columns.to_list()
for i in x:
    X[i]=NullClearner(X[i])
X=EncodeX(X)
Y=NullClearner(Y)
X.head()
```
### Correlation Map
In order to check the correlation between the features, we will plot a correlation matrix. It is effective in summarizing a large amount of data where the goal is to see patterns
```
f,ax = plt.subplots(figsize=(18, 18))
# Upper-triangle mask so each pair is shown only once.
matrix = np.triu(X.corr())
se.heatmap(X.corr(), annot=True, linewidths=.5, fmt= '.1f',ax=ax, mask=matrix)
plt.show()
```
### Data Splitting
The train-test split is a procedure for evaluating the performance of an algorithm. The procedure involves taking a dataset and dividing it into two subsets. The first subset is utilized to fit/train the model. The second subset is used for prediction. The main motive is to estimate the performance of the model on new data.
```
x_train,x_test,y_train,y_test=train_test_split(X,Y,test_size=0.2,random_state=123)
```
### Handling Target Imbalance
The challenge of working with imbalanced datasets is that most machine learning techniques will ignore, and in turn have poor performance on, the minority class, although typically it is performance on the minority class that is most important.
One approach to addressing imbalanced datasets is to oversample the minority class. The simplest approach involves duplicating examples in the minority class. We will perform oversampling using the imblearn library.
```
x_train,y_train = RandomOverSampler(random_state=123).fit_resample(x_train, y_train)
```
### Data Rescaling
<Code>Normalizer</Code> normalizes samples (rows) individually to unit norm.
Each sample with at least one non zero component is rescaled independently of other samples so that its norm (l1, l2 or inf) equals one.
We will fit an object of Normalizer to train data then transform the same data via <Code>fit_transform(X_train)</Code> method, following which we will transform test data via <Code>transform(X_test)</Code> method.
```
# Rescale each sample (row) to unit norm; fit on train, transform test.
normalizer = Normalizer()
x_train = normalizer.fit_transform(x_train)
x_test = normalizer.transform(x_test)
```
### Model
Support vector machines (SVMs) are a set of supervised learning methods used for classification, regression and outliers detection.
A Support Vector Machine is a discriminative classifier formally defined by a separating hyperplane. In other terms, for a given known/labelled data points, the SVM outputs an appropriate hyperplane that classifies the inputted new cases based on the hyperplane. In 2-Dimensional space, this hyperplane is a line separating a plane into two segments where each class or group occupied on either side.
LinearSVC is similar to SVC with kernel=’linear’. It has more flexibility in the choice of tuning parameters and is suited for large samples.
Model Tuning Parameters:
- penalty -> Specifies the norm used in the penalization. The ‘l2’ penalty is the standard used in SVC. The ‘l1’ leads to coef_ vectors that are sparse.
- Loss -> Specifies the loss function. ‘hinge’ is the standard SVM loss (used e.g. by the SVC class) while ‘squared_hinge’ is the square of the hinge loss. The combination of penalty='l1' and loss='hinge' is not supported.
- C -> Regularization parameter. The strength of the regularization is inversely proportional to C. Must be strictly positive.
- tolerance -> Tolerance for stopping criteria.
### Feature Transformation
Polynomial Features is a technique to generate polynomial and interaction features.
Polynomial features are features created by raising existing features to an exponent. **PolynomialFeatures** function generates a new feature matrix consisting of all polynomial combinations of the features with degree less than or equal to the specified degree. For example, if an input sample is two dimensional and of the form [a, b], the degree-2 polynomial features are [1, a, b, a^2, ab, b^2].
For more information on PolynomialFeatures [ click here](https://scikit-learn.org/stable/modules/generated/sklearn.preprocessing.PolynomialFeatures.html)
```
# Pipeline: polynomial feature expansion followed by a linear SVM.
model=make_pipeline(PolynomialFeatures(), LinearSVC(random_state=123))
model.fit(x_train,y_train)
```
### Model Accuracy
We will use the trained model to make a prediction on the test set.Then use the predicted value for measuring the accuracy of our model.
score: The score function returns the coefficient of determination R2 of the prediction.
```
print("Accuracy score {:.2f} %\n".format(model.score(x_test,y_test)*100))
```
r2_score: The r2_score function computes the percentage variability explained by our model, either the fraction or the count of correct predictions.
mae: The mean absolute error function calculates the amount of total error(absolute average distance between the real data and the predicted data) by our model.
mse: The mean squared error function squares the error(penalizes the model for large errors) by our model.
### Confusion Matrix
A confusion matrix is utilized to understand the performance of the classification model or algorithm in machine learning for a given test set where results are known.
```
plot_confusion_matrix(model,x_test,y_test,cmap=plt.cm.Blues)
```
### Classification Report
A Classification report is used to measure the quality of predictions from a classification algorithm. How many predictions are True, how many are False.
where:
Precision:- Accuracy of positive predictions.
Recall:- Fraction of positives that were correctly identified.
f1-score:- percent of positive predictions were correct
support:- Support is the number of actual occurrences of the class in the specified dataset.
```
print(classification_report(y_test,model.predict(x_test)))
```
#### Creator: Viraj Jayant, Github: [Profile](https://github.com/Viraj-Jayant/)
| github_jupyter |
This file is part of the [test suite](./tests) and will be moved there when [nbval#116](https://github.com/computationalmodelling/nbval/issues/116#issuecomment-793148404) is fixed.
See [DEMO.ipynb](DEMO.ipynb) instead for notebook examples.
```
from functools import partial
from time import sleep
from tqdm.notebook import tqdm_notebook
from tqdm.notebook import tnrange
# avoid displaying widgets by default (pollutes output cells)
tqdm = partial(tqdm_notebook, display=False)
trange = partial(tnrange, display=False)
help(tqdm_notebook.display)
# NBVAL_TEST_NAME: basic use
with tqdm_notebook(range(9)) as t:
for i in t:
print(i)
assert t.container.children[1].bar_style == 'success'
t = tqdm_notebook(total=9)
t.update()
t.refresh()
# NBVAL_TEST_NAME: reset
print(t)
t.reset(total=5)
t.update(1)
print(t)
# NBVAL_TEST_NAME: bar_style
assert t.container.children[1].bar_style != 'danger'
t.close()
assert t.container.children[1].bar_style == 'danger'
# NBVAL_TEST_NAME: repr
with trange(1, 9) as t:
print(t)
print(t.container)
it = iter(t)
print(next(it))
print(t)
print(t.container)
t = trange(9)
# NBVAL_TEST_NAME: display pre
print(t)
print(t.container)
for i in t:
pass
# NBVAL_TEST_NAME: display post
print(t)
print(t.container)
# NBVAL_TEST_NAME: no total
with tqdm(desc="no total") as t:
print(t)
t.update()
print(t)
# NBVAL_TEST_NAME: ncols
with trange(9, ncols=66) as t:
print(t)
for i in t:
if i == 1:
break
print(t)
# NBVAL_TEST_NAME: leave
assert (False, None) != (getattr(t.container, "visible", False), getattr(t.container, "_ipython_display_", None))
for total in (1, 9):
with tqdm(total=total, leave=False) as t:
print(t)
t.update()
print(t)
assert total != 1 or (False, None) == (
getattr(t.container, "visible", False), getattr(t.container, "_ipython_display_", None)
)
# NBVAL_TEST_NAME: no total
with tqdm() as t:
print(t)
t.update()
print(t)
# NBVAL_TEST_NAME: reset and disable
for disable in (None, True):
print("disable:", disable)
with tqdm(total=1, disable=disable) as t:
print(t)
t.update()
print(t)
t.reset(total=9)
print(t)
t.update()
print(t)
with tqdm(disable=disable) as t:
print(t)
t.update()
print(t)
t.reset(total=1)
print(t)
t.update()
print(t)
# NBVAL_TEST_NAME: bar_format
with tqdm(total=1, bar_format='{l_bar}{r_bar}') as t:
print(t)
t.update()
print(t)
with tqdm(total=1, bar_format='{l_bar}{bar}') as t:
print(t)
t.update()
print(t)
# NBVAL_TEST_NAME: colour
assert t.colour != 'yellow'
with tqdm(total=1, colour='yellow') as t:
print(t)
t.update()
print(t)
assert t.colour == 'yellow'
# NBVAL_TEST_NAME: delay no trigger
with tqdm_notebook(total=1, delay=10) as t:
t.update()
# NBVAL_TEST_NAME: delay trigger
with tqdm_notebook(total=1, delay=0.1) as t:
sleep(0.1)
t.update()
```
| github_jupyter |
# New York City Signature Locations
---
Timothy Helton
---
<br>
<font color="red">
NOTE:
<br>
This notebook uses code found in the
<a href="https://github.com/TimothyHelton/k2datascience/blob/master/nyc_signature">
<strong>nyc_signature</strong></a> package.
To execute all the cells do one of the following items:
<ul>
<li>Install the nyc_signature package to the active Python interpreter.</li>
<li>Add nyc_signature/nyc_signature to the PYTHON_PATH system variable.</li>
</font>
---
## Imports
```
import sys
from nyc_signature import demographics
from nyc_signature import subway
from nyc_signature import locations
from IPython.core.interactiveshell import InteractiveShell
InteractiveShell.ast_node_interactivity = "all"
%matplotlib inline
```
---
## Python Version
```
print(f'Python Version: {sys.version}')
```
---
## Voter Demographics
### US Voters By Age and Gender
#### Load Data
```
us_age = demographics.Age()
us_age.data.info()
us_age.data.head()
us_age.age_vote_plot()
```
#### Finding
1. Half the people 37 years of age or older in the US voted during the last election.
1. The age groups in the 70th percentile or higher for voter population are 45, 46 and 49 through 65.
1. A **Target Age Range** of **45-65 years old** will be chosen to maximize the opportunity to engage an individual who voted in the last election and therefore will be open to signing a petition.
1. Gender does not play a significant role.
1. Preference due to gender is weak, but if multiple candidates are available **choose to pursue the women**.
### New York State Voters
#### Load Data
```
ny = demographics.NewYork()
ny.data.info()
ny.data.loc['new york', :]
ny.ethnicity_plot()
```
#### Findings
1. Pursue **Black or White** individuals first, since they are more likely to be voters.
1. The next most likely race to be a voter is Hispanic.
1. Asians are the least likely race to have voted in New York state.
---
## Hospital Locations
### Locations
#### Load Data
```
hosp = locations.Hospitals()
hosp.hospitals
```
---
## Subway Stations
### Locations
#### Load Data
```
stations = subway.Stations()
stations.data.info()
stations.data.head()
stations.trains.info()
stations.trains.head()
stations.hospital_distances()
stations.hosp_dist.head(10)
stations.hosp_prox.head(10)
stations.train_plot()
```
---
## Subway Turnstile Data
```
ts = subway.Turnstile()
ts.get_data()
ts.data.info()
ts.data.head()
ts.get_targets()
ts.targets
ts.target_data.info()
ts.target_data.head()
ts.targets_entry_plot()
ts.get_top_stations()
ts.top_stations
ts.top_entry_bar_plot()
ts.daily_use_plot('FULTON ST')
ts.daily_use_plot('DEKALB AV')
ts.daily_use_plot('7 AV')
```
| github_jupyter |
# 作業 : (Kaggle)鐵達尼生存預測
https://www.kaggle.com/c/titanic
# [作業目標]
- 試著模仿範例寫法, 在鐵達尼生存預測中, 觀察計數編碼與特徵雜湊的效果
# [作業重點]
- 仿造範例, 完成計數編碼以及搭配邏輯斯迴歸的預測 (In[4], Out[4], In[5], Out[5])
- 仿造範例, 完成雜湊編碼, 以及計數編碼+雜湊編碼 搭配邏輯斯迴歸的預測 (In[6], Out[6], In[7], Out[7])
- 試著回答上述執行結果的觀察
# 作業1
* 參考範例,將鐵達尼的艙位代碼( 'Cabin' )欄位使用特徵雜湊 / 標籤編碼 / 計數編碼三種轉換後,
與其他類別型欄位一起預估生存機率
```
# All preparation before feature engineering (same as the previous example).
import pandas as pd
import numpy as np
import copy, time
from sklearn.preprocessing import LabelEncoder
from sklearn.model_selection import cross_val_score
from sklearn.linear_model import LogisticRegression
data_path = '../data/'
df_train = pd.read_csv(data_path + 'titanic_train.csv')
df_test = pd.read_csv(data_path + 'titanic_test.csv')
# Split off the target and passenger ids, then stack train/test so the
# categorical encodings below are fitted on the combined value set.
train_Y = df_train['Survived']
ids = df_test['PassengerId']
df_train = df_train.drop(['PassengerId', 'Survived'] , axis=1)
df_test = df_test.drop(['PassengerId'] , axis=1)
df = pd.concat([df_train,df_test])
df.head()
# Keep only the categorical (object-dtype) columns, collected in object_features.
object_features = []
for dtype, feature in zip(df.dtypes, df.columns):
    if dtype == 'object':
        object_features.append(feature)
print(f'{len(object_features)} Object Features : {object_features}\n')
# Retain only the categorical columns and fill missing values with 'None'.
df = df[object_features]
df = df.fillna('None')
train_num = train_Y.shape[0]  # row count of the training portion
df.head()
```
# 作業2
* 承上題,三者比較效果何者最好?
```
# Baseline: label encoding + logistic regression.
df_temp = pd.DataFrame()
for c in df.columns:
    df_temp[c] = LabelEncoder().fit_transform(df[c])
train_X = df_temp[:train_num]
estimator = LogisticRegression()
print(cross_val_score(estimator, train_X, train_Y, cv=5).mean())
df_temp.head()
# Add a count encoding of the 'Cabin' column (group size per cabin value).
count_df = df.groupby(['Cabin'])['Name'].agg('size').reset_index().rename(columns={'Name':'Cabin_Count'})
df = pd.merge(df, count_df, on=['Cabin'], how='left')
count_df.sort_values(by=['Cabin_Count'], ascending=False).head(10)
df
# 'Cabin' count encoding + logistic regression.
df_temp = pd.DataFrame()
for c in object_features:
    df_temp[c] = LabelEncoder().fit_transform(df[c])
df_temp['Cabin_Count'] = df['Cabin_Count']
train_X = df_temp[:train_num]
estimator = LogisticRegression()
print(cross_val_score(estimator, train_X, train_Y, cv=5).mean())
# 'Cabin' feature hashing (hash modulo 10 buckets) + logistic regression.
df_temp = pd.DataFrame()
for c in object_features:
    df_temp[c] = LabelEncoder().fit_transform(df[c])
df_temp['Cabin_Hash'] = df['Cabin'].map(lambda x:hash(x) % 10)
train_X = df_temp[:train_num]
estimator = LogisticRegression()
print(cross_val_score(estimator, train_X, train_Y, cv=5).mean())
df_temp.head()
# 'Cabin' count encoding + 'Cabin' feature hashing + logistic regression.
df_temp = pd.DataFrame()
for c in object_features:
    df_temp[c] = LabelEncoder().fit_transform(df[c])
df_temp['Cabin_Hash'] = df['Cabin'].map(lambda x:hash(x) % 10)
df_temp['Cabin_Count'] = df['Cabin_Count']
train_X = df_temp[:train_num]
estimator = LogisticRegression()
print(cross_val_score(estimator, train_X, train_Y, cv=5).mean())
df_temp.head()
```
| github_jupyter |

Copyright (c) Microsoft Corporation. All rights reserved.
Licensed under the MIT License.
# Logging
_**This notebook showcases various ways to use the Azure Machine Learning service run logging APIs, and view the results in the Azure portal.**_
---
---
## Table of Contents
1. [Introduction](#Introduction)
1. [Setup](#Setup)
1. Validate Azure ML SDK installation
1. Initialize workspace
1. Set experiment
1. [Logging](#Logging)
1. Starting a run
1. Viewing a run in the portal
1. Viewing the experiment in the portal
1. Logging metrics
1. Logging string metrics
1. Logging numeric metrics
1. Logging vectors
1. Logging tables
1. Uploading files
1. [Analyzing results](#Analyzing-results)
1. Tagging a run
1. [Next steps](#Next-steps)
## Introduction
Logging metrics from runs in your experiments allows you to track results from one run to another, determining trends in your outputs and understand how your inputs correspond to your model and script performance. Azure Machine Learning services (AzureML) allows you to track various types of metrics including images and arbitrary files in order to understand, analyze, and audit your experimental progress.
Typically you should log all parameters for your experiment and all numerical and string outputs of your experiment. This will allow you to analyze the performance of your experiments across multiple runs, correlate inputs to outputs, and filter runs based on interesting criteria.
The experiment's Run History report page automatically creates a report that can be customized to show the KPI's, charts, and column sets that are interesting to you.
|  |  |
|:--:|:--:|
| *Run Details* | *Run History* |
---
## Setup
If you are using an Azure Machine Learning Notebook VM, you are all set. Otherwise, go through the [configuration](../../../configuration.ipynb) Notebook first if you haven't already to establish your connection to the AzureML Workspace. Also make sure you have tqdm and matplotlib installed in the current kernel.
```
(myenv) $ conda install -y tqdm matplotlib
```
### Validate Azure ML SDK installation and get version number for debugging purposes
```
from azureml.core import Experiment, Workspace, Run
import azureml.core
import numpy as np
from tqdm import tqdm
# Check core SDK version number
print("This notebook was created using SDK version 1.33.0, you are currently running version", azureml.core.VERSION)
```
### Initialize workspace
Initialize a workspace object from persisted configuration.
```
ws = Workspace.from_config()
print('Workspace name: ' + ws.name,
'Azure region: ' + ws.location,
'Subscription id: ' + ws.subscription_id,
'Resource group: ' + ws.resource_group, sep='\n')
```
### Set experiment
Create a new experiment (or get the one with the specified name). An *experiment* is a container for an arbitrary set of *runs*.
```
experiment = Experiment(workspace=ws, name='logging-api-test')
```
---
## Logging
In this section we will explore the various logging mechanisms.
### Starting a run
A *run* is a singular experimental trial. In this notebook we will create a run directly on the experiment by calling `run = exp.start_logging()`. If you were experimenting by submitting a script file as an experiment using ``experiment.submit()``, you would call `run = Run.get_context()` in your script to access the run context of your code. In either case, the logging methods on the returned run object work the same.
This cell also stores the run id for use later in this notebook. The run_id is not necessary for logging.
```
# start logging for the run
run = experiment.start_logging()
# access the run id for use later
run_id = run.id
# change the scale factor on different runs to see how you can compare multiple runs
scale_factor = 2
# change the category on different runs to see how to organize data in reports
category = 'Red'
```
#### Viewing a run in the Portal
Once a run is started you can see the run in the portal by simply typing ``run``. Clicking on the "Link to Portal" link will take you to the Run Details page that shows the metrics you have logged and other run properties. You can refresh this page after each logging statement to see the updated results.
```
run
```
### Viewing an experiment in the portal
You can also view an experiment similarly by typing `experiment`. The portal link will take you to the experiment's Run History page that shows all runs and allows you to analyze trends across multiple runs.
```
experiment
```
## Logging metrics
Metrics are visible in the run details page in the AzureML portal and also can be analyzed in experiment reports. The run details page looks as below and contains tabs for Details, Outputs, Logs, and Snapshot.
* The Details page displays attributes about the run, plus logged metrics and images. Metrics that are vectors appear as charts.
* The Outputs page contains any files, such as models, you uploaded into the "outputs" directory from your run into storage. If you place files in the "outputs" directory locally, the files are automatically uploaded on your behalf when the run is completed.
* The Logs page allows you to view any log files created by your run. Logging runs created in notebooks typically do not generate log files.
* The Snapshot page contains a snapshot of the directory specified in the ''start_logging'' statement, plus the notebook at the time of the ''start_logging'' call. This snapshot and notebook can be downloaded from the Run Details page to continue or reproduce an experiment.
### Logging string metrics
The following cell logs a string metric. A string metric is simply a string value associated with a name. String metrics are useful for labelling runs and to organize your data. Typically you should log all string parameters as metrics for later analysis - even information such as paths can help to understand how individual experiments perform differently.
String metrics can be used in the following ways:
* Plot in histograms
* Group by indicators for numerical plots
* Filtering runs
String metrics appear in the **Tracked Metrics** section of the Run Details page and can be added as a column in Run History reports.
```
# log a string metric
run.log(name='Category', value=category)
```
### Logging numerical metrics
The following cell logs some numerical metrics. Numerical metrics can include metrics such as AUC or MSE. You should log any parameter or significant output measure in order to understand trends across multiple experiments. Numerical metrics appear in the **Tracked Metrics** section of the Run Details page, and can be used in charts or KPI's in experiment Run History reports.
```
# log numerical values
run.log(name="scale factor", value = scale_factor)
run.log(name='Magic Number', value=42 * scale_factor)
```
### Logging vectors
Vectors are good for recording information such as loss curves. You can log a vector by creating a list of numbers, calling ``log_list()`` and supplying a name and the list, or by repeatedly logging a value using the same name.
Vectors are presented in Run Details as a chart, and are directly comparable in experiment reports when placed in a chart.
**Note:** vectors logged into the run are expected to be relatively small. Logging very large vectors into Azure ML can result in reduced performance. If you need to store large amounts of data associated with the run, you can write the data to file that will be uploaded.
```
fibonacci_values = [0, 1, 1, 2, 3, 5, 8, 13, 21, 34, 55, 89]
scaled_values = (i * scale_factor for i in fibonacci_values)
# Log a list of values. Note this will generate a single-variable line chart.
run.log_list(name='Fibonacci', value=scaled_values)
for i in tqdm(range(-10, 10)):
# log a metric value repeatedly, this will generate a single-variable line chart.
run.log(name='Sigmoid', value=1 / (1 + np.exp(-i)))
```
### Logging tables
Tables are good for recording related sets of information such as accuracy tables, confusion matrices, etc.
You can log a table in two ways:
* Create a dictionary of lists where each list represents a column in the table and call ``log_table()``
* Repeatedly call ``log_row()`` providing the same table name with a consistent set of named args as the column values
Tables are presented in Run Details as a chart using the first two columns of the table
**Note:** tables logged into the run are expected to be relatively small. Logging very large tables into Azure ML can result in reduced performance. If you need to store large amounts of data associated with the run, you can write the data to file that will be uploaded.
```
# create a dictionary to hold a table of values
sines = {}
sines['angle'] = []
sines['sine'] = []
for i in tqdm(range(-10, 10)):
angle = i / 2.0 * scale_factor
# log a 2 (or more) values as a metric repeatedly. This will generate a 2-variable line chart if you have 2 numerical columns.
run.log_row(name='Cosine Wave', angle=angle, cos=np.cos(angle))
sines['angle'].append(angle)
sines['sine'].append(np.sin(angle))
# log a dictionary as a table, this will generate a 2-variable chart if you have 2 numerical columns
run.log_table(name='Sine Wave', value=sines)
```
### Logging images
You can directly log _matplotlib_ plots and arbitrary images to your run record. This code logs a _matplotlib_ pyplot object. Images show up in the run details page in the Azure ML Portal.
```
%matplotlib inline
# Create a plot
import matplotlib.pyplot as plt
angle = np.linspace(-3, 3, 50) * scale_factor
plt.plot(angle,np.tanh(angle), label='tanh')
plt.legend(fontsize=12)
plt.title('Hyperbolic Tangent', fontsize=16)
plt.grid(True)
# Log the plot to the run. To log an arbitrary image, use the form run.log_image(name, path='./image_path.png')
run.log_image(name='Hyperbolic Tangent', plot=plt)
```
### Uploading files
Files can also be uploaded explicitly and stored as artifacts along with the run record. These files are also visible in the *Outputs* tab of the Run Details page.
```
import os
directory = 'logging-api'
if not os.path.exists(directory):
os.mkdir(directory)
file_name = os.path.join(directory, "myfile.txt")
with open(file_name, "w") as f:
f.write('This is an output file that will be uploaded.\n')
# Upload the file explicitly into artifacts
run.upload_file(name = file_name, path_or_stream = file_name)
```
### Completing the run
Calling `run.complete()` marks the run as completed and triggers the output file collection. If for any reason you need to indicate the run failed or simply need to cancel the run you can call `run.fail()` or `run.cancel()`.
```
run.complete()
```
---
## Analyzing results
You can refresh the run in the Azure portal to see all of your results. In many cases you will want to analyze runs that were performed previously to inspect the contents or compare results. Runs can be fetched from their parent Experiment object using the ``Run()`` constructor or the ``experiment.get_runs()`` method.
```
fetched_run = Run(experiment, run_id)
fetched_run
```
Call ``run.get_metrics()`` to retrieve all the metrics from a run.
```
fetched_run.get_metrics()
```
Call ``run.get_metrics(name = <metric name>)`` to retrieve a metric value by name. Retrieving a single metric can be faster, especially if the run contains many metrics.
```
fetched_run.get_metrics(name = "scale factor")
```
See the files uploaded for this run by calling ``run.get_file_names()``
```
fetched_run.get_file_names()
```
Once you know the file names in a run, you can download the files using the ``run.download_file()`` method
```
import os
os.makedirs('files', exist_ok=True)
for f in run.get_file_names():
dest = os.path.join('files', f.split('/')[-1])
print('Downloading file {} to {}...'.format(f, dest))
fetched_run.download_file(f, dest)
```
### Tagging a run
Often when you analyze the results of a run, you may need to tag that run with important personal or external information. You can add a tag to a run using the ``run.tag()`` method. AzureML supports valueless and valued tags.
```
fetched_run.tag("My Favorite Run")
fetched_run.tag("Competition Rank", 1)
fetched_run.get_tags()
```
## Next steps
To experiment more with logging and to understand how metrics can be visualized, go back to the *Start a run* section, try changing the category and scale_factor values and going through the notebook several times. Play with the KPI, charting, and column selection options on the experiment's Run History reports page to see how the various metrics can be combined and visualized.
After learning about all of the logging options, go to the [train on remote vm](..\train-on-remote-vm\train-on-remote-vm.ipynb) notebook and experiment with logging from remote compute contexts.
| github_jupyter |
```
%matplotlib inline
%config InlineBackend.figure_format='retina'
# %load_ext autoreload
# the "1" means: always reload modules marked with "%aimport"
# %autoreload 1
from __future__ import absolute_import, division, print_function
import matplotlib as mpl
from matplotlib import pyplot as plt
from matplotlib.pyplot import GridSpec
import seaborn as sns
import mpld3
import numpy as np
import pandas as pd
import os, sys
import warnings
!conda install matplotlib-venn -y
from matplotlib_venn import venn2, venn2_circles, venn3, venn3_circles
from itertools import combinations, islice, takewhile
from numpy.random import randint, shuffle, choice
num = 100
names = [''.join(x) for x in islice(combinations('abcdefghijklmnopqrstuvwxyz', 2), num)]
size_1 = 50
size_2 = 75
df1 = pd.DataFrame({'names':choice(names, size_1, replace=False),
'random':np.random.rand(size_1),
})
df2 = pd.DataFrame({'names':choice(names, size_2, replace=False),
'random':np.random.rand(size_2),
})
merged = pd.merge(df1, df2, how='inner', on="names")
len(df1), len(merged), len(df2)
def merge_sizes(left_df, merged_df, right_df):
    """Return (left-only, overlap, right-only) sizes for a two-set Venn diagram.

    The key step is subtracting the overlap (inner-merge) size from each
    joining table's total, so the three returned regions are disjoint.
    """
    overlap = len(merged_df)
    return len(left_df) - overlap, overlap, len(right_df) - overlap
def plot_join_results(left=3, overlap=1, right=2,
                      left_name="A", right_name="B",
                      left_color='r', right_color='g',
                      title="",
                      ):
    """Draw a two-set Venn diagram of join sizes on a fresh 8x8 figure.

    ``left``/``right`` are the exclusive region sizes and ``overlap`` the
    shared region, as produced by ``merge_sizes``. ``title`` is only set
    when non-empty.
    """
    _, axis = plt.subplots(figsize=(8, 8))
    venn2(subsets=(left, right, overlap),
          set_labels=(left_name, right_name),
          set_colors=(left_color, right_color),
          ax=axis)
    if title:
        axis.set_title(title)
hex_to_name = {
'#D34100': 'leafblower red',
'#707071': 'medium gray',
'#ECEFF0': 'oyster gray',
'#2C3E4F': 'tardis blue',
'#F15E24': 'dark orange',
'#FF9700': 'orange',
'#0055A7': 'blue',
'#091D32': 'midnight tardis',
'#26C5ED': 'sully blue',
'#00CC66': 'leaf green',
}
svds_color_dict = {v: k for k, v in hex_to_name.items()}
plot_join_results(*merge_sizes(df1, merged, df2), title='This is a remarkable title',
left_color=svds_color_dict['leafblower red'], right_color=svds_color_dict['sully blue'])
```
# From [the docs](https://pypi.python.org/pypi/matplotlib-venn)
The following show some more complicated things. The set one on the bottom doesn't correspond to what I'd expect to happen, so beware!
```
fig, ax = plt.subplots(figsize=(4,4))
v = venn3(subsets=(1, 2, 3, 4, 5, 6, 7), set_labels = ('A', 'B', 'C'), ax=ax)
v.get_patch_by_id('100').set_alpha(1.0)
v.get_patch_by_id('100').set_color('white')
v.get_label_by_id('100').set_text('Unknown')
v.get_label_by_id('A').set_text('Set "A"')
c = venn3_circles(subsets=(1, 2, 3, 4, 5, 6, 7))
# , linestyle='dashed')
c[0].set_lw(1.0)
c[0].set_ls('dotted')
ax.set_title("Sample Venn diagram")
ax.annotate('Unknown set', xy=v.get_label_by_id('100').get_position() - np.array([0, 0.05]), xytext=(-70,-70),
ha='center', textcoords='offset points', bbox=dict(boxstyle='round,pad=0.5', fc='gray', alpha=0.1),
arrowprops=dict(arrowstyle='->', connectionstyle='arc3,rad=0.5',color='gray'));
figure, axes = plt.subplots(2, 2)
venn2(subsets={'10': 1, '01': 1, '11': 1}, set_labels = ('A', 'B'), ax=axes[0][0])
venn2_circles((1, 2, 3), ax=axes[0][1])
venn3(subsets=(1, 1, 1, 1, 1, 1, 1), set_labels = ('A', 'B', 'C'), ax=axes[1][0])
venn3_circles({'001': 10, '100': 20, '010': 21, '110': 13, '011': 14}, ax=axes[1][1])
# plt.show()
set1 = set(['A', 'B', 'C', 'D'])
set2 = set([ 'B', 'C', 'D', 'E'])
set3 = set([ 'C', 'D',' E', 'F', 'G'])
venn3([set1, set2, set3], ('Set1', 'Set2', 'Set3'))
plt.show()
```
| github_jupyter |
```
%matplotlib inline
import os
import struct
import torch
from PIL import Image
import matplotlib.pyplot as plt
import numpy as np
import cv2
def fftImg(Img):
    """Return (log-magnitude, phase) of the centered 2-D FFT of an image.

    The zero-frequency component is shifted to the center of the spectrum
    before taking log|F| and angle(F).
    """
    arr = np.asarray(Img)
    spectrum = np.fft.fftshift(np.fft.fft2(arr))
    log_magnitude = np.log(np.abs(spectrum))
    phase = np.angle(spectrum)
    return log_magnitude, phase
def cropimg(Img):
    """Crop the central 224x224 patch of an HxWxC image and return it CHW.

    The original ``w/2 - 112`` used true division, producing float slice
    bounds that raise ``TypeError`` under Python 3; integer division (``//``)
    fixes that while keeping the same crop for even and odd sizes.
    """
    w, h, c = Img.shape  # NOTE: w is the row count (height), h the column count
    cw, ch = w // 2, h // 2
    return Img[cw - 112:cw + 112, ch - 112:ch + 112].transpose(2, 0, 1)
def gammaImg(image, gamma):
    """Apply gamma correction to a uint8 image via a 256-entry lookup table.

    Each level i maps to ((i / 255) ** (1 / gamma)) * 255, truncated to uint8,
    and the table is applied per-pixel with cv2.LUT.
    """
    exponent = 1.0 / gamma
    levels = np.arange(0, 256) / 255.0
    table = ((levels ** exponent) * 255).astype("uint8")
    return cv2.LUT(image, table)
def compressImg(img,q):
    """Round-trip *img* through in-memory JPEG compression at quality *q*.

    Used as a data-augmentation step: returns the decoded (lossy) BGR image.

    Raises:
        ValueError: if JPEG encoding fails. The original ignored the
        success flag returned by cv2.imencode, so a failed encode would
        silently hand an invalid buffer to cv2.imdecode.
    """
    encode_param = [int(cv2.IMWRITE_JPEG_QUALITY), q]
    ok, encimg = cv2.imencode('.jpg', img, encode_param)
    if not ok:
        raise ValueError("JPEG encoding failed for quality=%r" % (q,))
    decimg = cv2.imdecode(encimg, 1)
    return decimg
def interpolImg(Img, factor):
    """Rescale an image by *factor* with bicubic interpolation, but leave an
    axis unscaled whenever scaling would shrink it below the 224-pixel crop
    size used downstream by cropimg."""
    # NOTE(review): Img.shape is (rows, cols, channels), so w is actually the
    # height and h the width -- the names are swapped. The pairing below is
    # still internally consistent: cv2.resize's fx scales the column count
    # (h here) and fy scales the row count (w here).
    w,h,c = Img.shape
    factorx = factor  # horizontal scale, passed as fx
    factory = factor  # vertical scale, passed as fy
    if factor*h<224:
        factorx = 1
    if factor*w<224:
        factory = 1
    return cv2.resize(Img,None,fx=factorx,fy=factory, interpolation = cv2.INTER_CUBIC)
# Camera-model class names; the list index of each name is its class label.
listing = ['Sony-NEX-7',
           'Motorola-X',
           'HTC-1-M7',
           'Samsung-Galaxy-Note3',
           'Motorola-Droid-Maxx',
           'iPhone-4s',
           'iPhone-6',
           'LG-Nexus-5x',
           'Samsung-Galaxy-S4',
           'Motorola-Nexus-6']
Datapath = 'train/'
# listImg[i] holds the file names of all images belonging to class i.
listImg=[]
for i in range(len(listing)):
    listImg.append(os.listdir(Datapath+listing[i]))
print(listing)
# NOTE(review): the fixed stride of 275 assumes every class directory holds
# exactly 275 images -- confirm against the dataset layout before reuse.
# TrainImg/TrainL: one center crop per original image (10 classes x 275).
TrainImg = torch.ByteTensor(275*10, 3, 224, 224)
TrainL = torch.Tensor(275*10)
# TrainImgs/TrainLs: 8 augmented variants per image (10 x 8 x 275 = 275*80).
TrainImgs = torch.ByteTensor(275*80, 3, 224, 224)
TrainLs = torch.Tensor(275*80)
for i in range(len(listImg)):
    print(i)
    for j in range(len(listImg[i])):
        imgfilepath = Datapath+listing[i]+'/'+listImg[i][j]
        imgfile = cv2.imread(imgfilepath)
        # Unmodified image: central 224x224 crop, channels-first.
        TrainImg[i*275+j] = torch.from_numpy(cropimg(imgfile))
        TrainL[i*275+j] = i
        # Augmentations are laid out in 8 consecutive bands of 275 per class:
        # JPEG q70, JPEG q90, rescale x0.5/x0.8/x1.5/x2.0, gamma 0.8/1.2.
        TrainImgs[i*275*8+j] = torch.from_numpy(cropimg(compressImg(imgfile,70)))
        TrainLs[i*275*8+j] = i
        TrainImgs[i*275*8+275+j] = torch.from_numpy(cropimg(compressImg(imgfile,90)))
        TrainLs[i*275*8+275+j] = i
        TrainImgs[i*275*8+275*2+j] = torch.from_numpy(cropimg(interpolImg(imgfile,0.5)))
        TrainLs[i*275*8+275*2+j] = i
        TrainImgs[i*275*8+275*3+j] = torch.from_numpy(cropimg(interpolImg(imgfile,0.8)))
        TrainLs[i*275*8+275*3+j] = i
        TrainImgs[i*275*8+275*4+j] = torch.from_numpy(cropimg(interpolImg(imgfile,1.5)))
        TrainLs[i*275*8+275*4+j] = i
        TrainImgs[i*275*8+275*5+j] = torch.from_numpy(cropimg(interpolImg(imgfile,2.0)))
        TrainLs[i*275*8+275*5+j] = i
        TrainImgs[i*275*8+275*6+j] = torch.from_numpy(cropimg(gammaImg(imgfile,0.8)))
        TrainLs[i*275*8+275*6+j] = i
        TrainImgs[i*275*8+275*7+j] = torch.from_numpy(cropimg(gammaImg(imgfile,1.2)))
        TrainLs[i*275*8+275*7+j] = i
# Persist the tensors for later training runs.
torch.save(TrainImg, 'data/TrainImg.pth')
torch.save(TrainL, 'data/TrainLabel.pth')
torch.save(TrainImgs, 'data/TrainImgs.pth')
torch.save(TrainLs, 'data/TrainLabels.pth')
```
| github_jupyter |
# RAMLfications
### Lynn Root
### Backend Engineer @ Spotify
### @roguelynn
# What Is RAML?
RAML stands for RESTful API Modeling Language.
Very similar to Swagger and Blueprint, it's used to describe REST APIs.
What it looks like:
```yaml
#%RAML 0.8
title: Spotify Web API
version: v1
baseUri: https://api.spotify.com/{version}
mediaType: application/json
documentation:
- title: Spotify Web API Docs
content: |
Welcome to the _Spotify Web API_ specification. For more information about
how to use the API, check out [developer site](https://developer.spotify.com/web-api/).
/albums:
displayName: several-albums
get:
description: |
[Get Several Albums](https://developer.spotify.com/web-api/get-several-albums/)
queryParameters:
ids:
displayName: Spotify Album IDs
type: string
description: A comma-separated list of IDs
required: true
example: "382ObEPsp2rxGrnsizN5TX,1A2GTWGtFfWp7KSQTwWOyo,2noRn2Aes5aoNVsU6iWThc"
market:
displayName: Market
description: The market (an ISO 3166-1 alpha-2 country code)
type: string
example: ES
required: false
responses:
200:
body:
application/json:
example: !include example/get-albums-example.json
```
# Why?
## Why a description language for your API?
* Have a single source-of-truth reference for your API
* Machine & human-ish readable
* Clear definition of own versioned API specification
## Why RAML?
* not limited to describing in JSON (Swagger only supports JSON Schema)
* Can support different API versioning (Blueprint can as well)
* Include sample representations (Blueprint can as well)
* Allows for including of external files from either local filesystem or via an HTTP request - no need for one massive `.raml` file (Neither Swagger nor Blueprints supports this)
* It's not WADL
## Why did Spotify choose RAML
* "new kid on the block" & very active development - also means it's very young
* more readable from our PoV
* actual syntax & file structure
* external file inclusions
* open-source spec
# Setting the scene
### I hated answering repeated questions
### So I built a thing
### It takes our RAML file
### And makes an interactive console
```
from IPython.display import Image
Image(filename='api-console-snapshot.png')
Image(filename='api-console-snapshot-resp.png')
```
# enter: RAMLfications
### TL;DR: Python reference implementation for RAML
Let's play:
```bash
$ pip install ramlfications
```
```
!pip install ramlfications
```
# using RAMLfications within your library
```
from ramlfications import parse
RAML_FILE = "spotify-web-api.raml"
api = parse(RAML_FILE)
api
#metadata
api.title
api.version
api.base_uri
api.protocols
# security schemes
api.security_schemes
oauth = api.security_schemes[0]
oauth.settings.get("scopes")
# API endpoints
res = api.resources
res
get_an_album = res[1]
get_an_album.uri_params
get_an_album.method
get_an_album.description
get_an_album.description.html
get_an_album.display_name
get_an_album.path
get_an_album.absolute_uri
# parameters
uri_param = get_an_album.uri_params[0]
uri_param.name
uri_param.required
uri_param.example
get_an_album.parent
# API traits
api.traits
paged = api.traits[1]
paged.query_params
query_param = _[0]
query_param.name
query_param.raw
```
# fun from the command line
### validate your RAML file
```
!ramlfications validate spotify-web-api.raml
!ramlfications validate invalid.raml
```
## Your API does in fact support FTP?
Just add it to your config file!
```bash
$ cat raml_config.ini
```
```ini
[custom]
protocols = FTP
```
```
!ramlfications validate --config raml_config.ini invalid.raml
```
### visualize your RAML file
```
!ramlfications tree spotify-web-api.raml
# MOAR!
!ramlfications tree spotify-web-api.raml -v
# I want MOAR
!ramlfications tree spotify-web-api.raml -vv
# I WANT MOOOOARRRRR
!ramlfications tree spotify-web-api.raml -vvv
```
# What's next?
Coming soon:
* Documentation generator based off of RAML
* API console
# FIN
### Docs: ramlfications.readthedocs.org
### Code: github.com/spotify/ramlfications
### Slides: rogue.ly/ramlfications
### Thanks!
| github_jupyter |
##### Copyright 2020 The TensorFlow Authors.
```
#@title Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# https://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
```
# Introduction to graphs and tf.function
<table class="tfo-notebook-buttons" align="left">
<td>
<a target="_blank" href="https://www.tensorflow.org/guide/intro_to_graphs"><img src="https://www.tensorflow.org/images/tf_logo_32px.png" />View on TensorFlow.org</a>
</td>
<td>
<a target="_blank" href="https://colab.research.google.com/github/tensorflow/docs/blob/master/site/en/guide/intro_to_graphs.ipynb"><img src="https://www.tensorflow.org/images/colab_logo_32px.png" />Run in Google Colab</a>
</td>
<td>
<a target="_blank" href="https://github.com/tensorflow/docs/blob/master/site/en/guide/intro_to_graphs.ipynb"><img src="https://www.tensorflow.org/images/GitHub-Mark-32px.png" />View source on GitHub</a>
</td>
<td>
<a href="https://storage.googleapis.com/tensorflow_docs/docs/site/en/guide/intro_to_graphs.ipynb"><img src="https://www.tensorflow.org/images/download_logo_32px.png" />Download notebook</a>
</td>
</table>
## Overview
This guide goes beneath the surface of TensorFlow and Keras to see how TensorFlow works. If you instead want to immediately get started with Keras, please see [our collection of Keras guides](keras/).
In this guide you'll see the core of how TensorFlow allows you to make simple changes to your code to get graphs, how graphs are stored and represented, and how you can use them to accelerate your models.
Note: For those of you who are only familiar with TensorFlow 1.x, this guide demonstrates a very different view of graphs.
**This is a big-picture overview that covers how `tf.function` allows you to switch from eager execution to graph execution.** For a more complete specification of `tf.function`, see [the `tf.function` guide](function).
### What are graphs?
In the previous three guides, you have seen TensorFlow running **eagerly**. This means TensorFlow operations are executed by Python, operation by operation, and returning results back to Python.
While eager execution has several unique advantages, graph execution enables portability outside Python and tends to offer better performance. **Graph execution** means that tensor computations are executed as a *TensorFlow graph*, sometimes referred to as a `tf.Graph` or simply a "graph."
**Graphs are data structures that contain a set of `tf.Operation` objects, which represent units of computation; and `tf.Tensor` objects, which represent the units of data that flow between operations.** They are defined in a `tf.Graph` context. Since these graphs are data structures, they can be saved, run, and restored all without the original Python code.
This is what a TensorFlow graph representing a two-layer neural network looks like when visualized in TensorBoard.

### The benefits of graphs
With a graph, you have a great deal of flexibility. You can use your TensorFlow graph in environments that don't have a Python interpreter, like mobile applications, embedded devices, and backend servers. TensorFlow uses graphs as the format for [saved models](saved_model) when it exports them from Python.
Graphs are also easily optimized, allowing the compiler to do transformations like:
* Statically infer the value of tensors by folding constant nodes in your computation *("constant folding")*.
* Separate sub-parts of a computation that are independent and split them between threads or devices.
* Simplify arithmetic operations by eliminating common subexpressions.
There is an entire optimization system, [Grappler](./graph_optimization.ipynb), to perform this and other speedups.
In short, graphs are extremely useful and let your TensorFlow run **fast**, run **in parallel**, and run efficiently **on multiple devices**.
However, you still want to define our machine learning models (or other computations) in Python for convenience, and then automatically construct graphs when you need them.
## Taking advantage of graphs
You create and run a graph in TensorFlow by using `tf.function`, either as a direct call or as a decorator. `tf.function` takes a regular function as input and returns a `Function`. **A `Function` is a Python callable that builds TensorFlow graphs from the Python function. You use a `Function` in the same way as its Python equivalent.**
```
import tensorflow as tf
import timeit
from datetime import datetime
# Define a plain Python function that mixes TF ops with Python code.
def a_regular_function(x, y, b):
    """Compute x @ y + b with TensorFlow ops."""
    return tf.matmul(x, y) + b

# Wrapping it in `tf.function` yields a graph-building `Function`.
a_function_that_uses_a_graph = tf.function(a_regular_function)

# Build a few example tensors.
x1 = tf.constant([[1.0, 2.0]])
y1 = tf.constant([[2.0], [3.0]])
b1 = tf.constant(4.0)

# The eager call and the graph call must produce the same value.
orig_value = a_regular_function(x1, y1, b1).numpy()
tf_function_value = a_function_that_uses_a_graph(x1, y1, b1).numpy()
assert orig_value == tf_function_value
```
On the outside, a `Function` looks like a regular function you write using TensorFlow operations. [Underneath](https://github.com/tensorflow/tensorflow/blob/master/tensorflow/python/eager/def_function.py), however, it is *very different*. A `Function` **encapsulates [several `tf.Graph`s behind one API](#polymorphism_one_function_many_graphs).** That is how `Function` is able to give you the [benefits of graph execution](#the_benefits_of_graphs), like speed and deployability.
`tf.function` applies to a function *and all other functions it calls*:
```
def inner_function(x, y, b):
    """Return x @ y + b; traced into the caller's graph."""
    return tf.matmul(x, y) + b

# The decorator turns `outer_function` into a `Function`.
@tf.function
def outer_function(x):
    """Apply `inner_function` with fixed weights and bias."""
    y = tf.constant([[2.0], [3.0]])
    b = tf.constant(4.0)
    return inner_function(x, y, b)

# A single traced graph includes both `outer_function` and
# the `inner_function` call it makes.
outer_function(tf.constant([[1.0, 2.0]])).numpy()
```
If you have used TensorFlow 1.x, you will notice that at no time did you need to define a `Placeholder` or `tf.Session`.
### Converting Python functions to graphs
Any function you write with TensorFlow will contain a mixture of native TF operations and Python logic, such as `if-then` clauses, loops, `break`, `return`, `continue`, and more. While TensorFlow operations are easily captured by a `tf.Graph`, Python-specific logic needs to undergo an extra step in order to become part of the graph. `tf.function` uses a library called AutoGraph (`tf.autograph`) to convert Python code into graph-generating code.
```
# ReLU written with Python control flow; AutoGraph converts the
# data-dependent `if` statement so it can be captured in the graph.
def simple_relu(x):
    if tf.greater(x, 0):
        return x
    else:
        return 0
# `tf_simple_relu` is a TensorFlow `Function` that wraps `simple_relu`.
tf_simple_relu = tf.function(simple_relu)
print("First branch, with graph:", tf_simple_relu(tf.constant(1)).numpy())
print("Second branch, with graph:", tf_simple_relu(tf.constant(-1)).numpy())
```
Though it is unlikely that you will need to view graphs directly, you can inspect the outputs to see the exact results. These are not easy to read, so no need to look too carefully!
```
# This is the graph-generating output of AutoGraph.
print(tf.autograph.to_code(simple_relu))
# This is the graph itself (a verbose, low-level GraphDef dump).
print(tf_simple_relu.get_concrete_function(tf.constant(1)).graph.as_graph_def())
```
Most of the time, `tf.function` will work without special considerations. However, there are some caveats, and the [tf.function guide](./function.ipynb) can help here, as well as the [complete AutoGraph reference](https://github.com/tensorflow/tensorflow/blob/master/tensorflow/python/autograph/g3doc/reference/index.md)
### Polymorphism: one `Function`, many graphs
A `tf.Graph` is specialized to a specific type of inputs (for example, tensors with a specific [`dtype`](https://www.tensorflow.org/api_docs/python/tf/dtypes/DType) or objects with the same [`id()`](https://docs.python.org/3/library/functions.html#id)).
Each time you invoke a `Function` with new `dtypes` and shapes in its arguments, `Function` creates a new `tf.Graph` for the new arguments. The `dtypes` and shapes of a `tf.Graph`'s inputs are known as an **input signature** or just a **signature**.
The `Function` stores the `tf.Graph` corresponding to that signature in a `ConcreteFunction`. **A `ConcreteFunction` is a wrapper around a `tf.Graph`.**
```
@tf.function
def my_relu(x):
    """Element-wise ReLU; each new input signature traces a new graph."""
    return tf.maximum(x, 0.)

# Three different signatures -> three traced graphs.
print(my_relu(tf.constant(5.5)))
print(my_relu([1, -1]))
print(my_relu(tf.constant([3., -3.])))
```
If the `Function` has already been called with that signature, `Function` does not create a new `tf.Graph`.
```
# These two calls do *not* create new graphs:
# each input matches a previously traced signature.
print(my_relu(tf.constant(-2.5))) # Signature matches `tf.constant(5.5)`.
print(my_relu(tf.constant([-1., 1.]))) # Signature matches `tf.constant([3., -3.])`.
```
Because it's backed by multiple graphs, we can say a `Function` is **polymorphic**. That enables it to support more input types than a single `tf.Graph` could represent, as well as to optimize each `tf.Graph` for better performance.
```
# There are three `ConcreteFunction`s (one for each graph) in `my_relu`.
# The `ConcreteFunction` also knows the return type and shape!
# Inspect the signatures traced so far.
print(my_relu.pretty_printed_concrete_signatures())
```
## Using `tf.function`
So far, you've seen how you can convert a Python function into a graph simply by using `tf.function` as a decorator or wrapper. But in practice, getting `tf.function` to work correctly can be tricky! In the following sections, you'll learn how you can make your code work as expected with `tf.function`.
### Graph execution vs. eager execution
The code in a `Function` can be executed both eagerly and as a graph. By default, `Function` executes its code as a graph:
```
@tf.function
def get_MSE(y_true, y_pred):
    """Mean squared error, executed as a traced graph by default."""
    return tf.reduce_mean(tf.pow(y_true - y_pred, 2))

# Random integer targets and predictions in [0, 10).
y_true = tf.random.uniform([5], maxval=10, dtype=tf.int32)
y_pred = tf.random.uniform([5], maxval=10, dtype=tf.int32)
print(y_true)
print(y_pred)
get_MSE(y_true, y_pred)
```
To verify that your `Function`'s graph is doing the same computation as its equivalent Python function, we can make it execute eagerly with `tf.config.run_functions_eagerly(True)`. This is a switch that **turns off `Function`'s ability to create and run graphs**, instead executing the code normally.
```
# Force eager execution so the Function runs as plain Python.
tf.config.run_functions_eagerly(True)
get_MSE(y_true, y_pred)
# Don't forget to set it back when you are done.
tf.config.run_functions_eagerly(False)
```
However, `Function` can behave differently under graph and eager execution. The Python [`print`](https://docs.python.org/3/library/functions.html#print) function is one example of how these two modes differ. Let's see what happens when we insert a `print` statement to our function and call it repeatedly.
```
@tf.function
def get_MSE(y_true, y_pred):
    """MSE with a Python-side print, which only fires while tracing."""
    print("Calculating MSE!")
    return tf.reduce_mean(tf.pow(y_true - y_pred, 2))
```
Observe what is printed:
```
# The Python `print` runs only during tracing, so it appears once
# even though the Function is called three times.
error = get_MSE(y_true, y_pred)
error = get_MSE(y_true, y_pred)
error = get_MSE(y_true, y_pred)
```
Is the output surprising? **`get_MSE` only printed once even though it was called *three* times.**
To explain, the `print` statement is executed when `Function` runs the original code in order to create the graph in a process known as ["tracing"](function.ipynb#tracing). **Tracing captures the TensorFlow operations into a graph, and `print` is not captured in the graph.** That graph is then executed for all three calls **without ever running the Python code again**.
As a sanity check, let's turn off graph execution to compare:
```
# Now, globally set everything to run eagerly to force eager execution.
tf.config.run_functions_eagerly(True)
# Observe what is printed below: in eager mode the Python body
# (including `print`) runs on every call.
error = get_MSE(y_true, y_pred)
error = get_MSE(y_true, y_pred)
error = get_MSE(y_true, y_pred)
tf.config.run_functions_eagerly(False)
```
`print` is a *Python side effect*, and there are [other differences](function#limitations) that you should be aware of when converting a function into a `Function`.
Note: If you would like to print values in both eager and graph execution, use `tf.print` instead.
### `tf.function` best practices
It may take some time to get used to the behavior of `Function`. To get started quickly, first-time users should play around with decorating toy functions with `@tf.function` to get experience with going from eager to graph execution.
*Designing for `tf.function`* may be your best bet for writing graph-compatible TensorFlow programs. Here are some tips:
- Toggle between eager and graph execution early and often with `tf.config.run_functions_eagerly` to pinpoint if/when the two modes diverge.
- Create `tf.Variable`s
outside the Python function and modify them on the inside. The same goes for objects that use `tf.Variable`, like `keras.layers`, `keras.Model`s and `tf.optimizers`.
- Avoid writing functions that [depend on outer Python variables](function#depending_on_python_global_and_free_variables), excluding `tf.Variables` and Keras objects.
- Prefer to write functions which take tensors and other TensorFlow types as input. You can pass in other object types but [be careful](function#depending_on_python_objects)!
- Include as much computation as possible under a `tf.function` to maximize the performance gain. For example, decorate a whole training step or the entire training loop.
## Seeing the speed-up
`tf.function` usually improves the performance of your code, but the amount of speed-up depends on the kind of computation you run. Small computations can be dominated by the overhead of calling a graph. You can measure the difference in performance like so:
```
x = tf.random.uniform(shape=[10, 10], minval=-1, maxval=2, dtype=tf.dtypes.int32)

def power(x, y):
    """Return x**y by repeated matmul, starting from the identity."""
    acc = tf.eye(10, dtype=tf.dtypes.int32)
    for _ in range(y):
        acc = tf.matmul(x, acc)
    return acc

# Time the same computation eagerly and as a traced graph.
print("Eager execution:", timeit.timeit(lambda: power(x, 100), number=1000))
power_as_graph = tf.function(power)
print("Graph execution:", timeit.timeit(lambda: power_as_graph(x, 100), number=1000))
```
`tf.function` is commonly used to speed up training loops, as you can see [here](keras/writing_a_training_loop_from_scratch#speeding-up_your_training_step_with_tffunction) with Keras.
Note: You can also try [`tf.function(jit_compile=True)`](https://www.tensorflow.org/xla#explicit_compilation_with_tffunctionjit_compiletrue) for a more significant performance boost, especially if your code is heavy on TF control flow and uses many small tensors.
### Performance and trade-offs
Graphs can speed up your code, but the process of creating them has some overhead. For some functions, the creation of the graph takes more time than the execution of the graph. **This investment is usually quickly paid back with the performance boost of subsequent executions, but it's important to be aware that the first few steps of any large model training can be slower due to tracing.**
No matter how large your model, you want to avoid tracing frequently. The `tf.function` guide discusses [how to set input specifications and use tensor arguments](function#controlling_retracing) to avoid retracing. If you find you are getting unusually poor performance, it's a good idea to check if you are retracing accidentally.
## When is a `Function` tracing?
To figure out when your `Function` is tracing, add a `print` statement to its code. As a rule of thumb, `Function` will execute the `print` statement every time it traces.
```
@tf.function
def a_function_with_python_side_effect(x):
    """x*x + 2 with a Python print that fires only during tracing."""
    print("Tracing!") # An eager-only side effect.
    return x * x + tf.constant(2)

# First call with a tensor argument: traces once.
print(a_function_with_python_side_effect(tf.constant(2)))
# Same signature: the graph is reused, so no side effect.
print(a_function_with_python_side_effect(tf.constant(3)))
# Python scalar arguments retrace on every new value — each value is
# its own signature (e.g. an epoch count or other hyperparameter).
print(a_function_with_python_side_effect(2))
print(a_function_with_python_side_effect(3))
```
Here, you see extra tracing because new Python arguments always trigger the creation of a new graph.
## Next steps
You can read a more in-depth discussion at both the `tf.function` API reference page and at the [guide](function).
| github_jupyter |
```
import re
from tqdm import tqdm
import pickle
from functools import reduce
import mafan
from mafan import text
import itertools
import sys
import os
bos = " <bos> "
eos = " <eos> "
```
# Tokenizer Functions
## Sentence Tokenizer
```
def zng(paragraph):
    """Yield sentence-like chunks of `paragraph`, split on CJK/ASCII
    sentence terminators (!, ?, 。, .) which stay attached to the chunk."""
    yield from re.findall(u'[^!?。\.\!\?]+[!?。\.\!\?]?', paragraph, flags=re.U)
```
## Simplified Chinese Tokenizer
Below is the code for simplified to traditional mapping dictionary.
We have a large dictionary *conversions.txt* that includes words, characters, common phrases, locations and idioms. Each entry contains the traditional chinese word and simplified chinese word.
```
# Build the simplified -> traditional mapping table.
# Each line of conversions.txt holds: <traditional> <simplified>.
# One simplified key can map to several traditional candidates.
s2t_dict = dict()
with open("conversions.txt", "r+", encoding="utf-8") as infile:  # closed reliably
    for line in infile:
        arr = line.rstrip().split()
        if len(arr) < 2:
            # Skip blank or malformed lines instead of raising IndexError.
            continue
        trad, sim = arr[0], arr[1]
        s2t_dict.setdefault(sim, []).append(trad)
# The placeholder used for masked non-Chinese characters maps to itself.
s2t_dict['-'] = ['-']
```
Tokeniser is used for identifying dictionary words and phrases in the input sentence. We always prefer longer phrases because it gives more meaning and less translation mappings. Hence we use Byte Pair Encoding (BPE) for identifying words, while BPE candidates are constrained by the defined list of vocabs in the dictionary. Since the longest phrase in the dictionary has 8 characters we start with 8-character phrases and do it backwards.
```
def tokenizer(sentence, n = 8):
    '''
    Tokenize a sentence into dictionary words, longest phrases first.

    Input: a sentence or paragraph.
    Output: a list of tokens in original text order; a list of the
    non-Chinese characters masked out by `prepare`.
    '''
    text, charList = prepare(sentence)
    token_list = []
    input_text = text
    # Longest-first greedy matching; the longest dictionary entry has
    # n (= 8) characters.
    for k in range(n, 0, -1):
        candidates = [input_text[i:i + k] for i in range(len(input_text) - k + 1)]
        for candidate in candidates:
            if candidate in s2t_dict:
                token_list.append(candidate)
                # BUG FIX: escape the token so dictionary entries are
                # matched literally rather than as regex patterns.
                input_text = re.sub(re.escape(candidate), '', input_text)
    # Re-order the found tokens by their position in the original text.
    final = sequencer(token_list, text)
    return final, charList
def output_list(sentence_list, char_list):
    """Replace each '-' placeholder entry with the saved non-Chinese
    characters from `char_list`, keeping all other entries unchanged."""
    restored = []
    next_char = 0
    for word in sentence_list:
        if "-" in word:
            restored.append(list(char_list[next_char]))
            next_char += 1
        else:
            restored.append(word)
    return restored
def output(sentence, char_list):
    """Rebuild the display string: each '-' placeholder in `sentence` is
    replaced by the corresponding saved non-Chinese character."""
    pieces = []
    next_char = 0
    for ch in sentence:
        if ch == "-":
            pieces.append(char_list[next_char])  # restore non-Chinese char
            next_char += 1
        else:
            pieces.append(ch)  # Chinese character passes through
    return "".join(pieces)
def prepare(sentence):
    """Mask every non-Chinese character of `sentence` with '-' and record
    the masked characters, in order, so they can be restored later."""
    masked = ""
    char_list = []
    for ch in sentence:
        if text.identify(ch) is mafan.NEITHER:
            masked += "-"  # placeholder for a non-Chinese character
            char_list.append(ch)
        else:
            masked += ch
    return masked, char_list
def sequencer(tokens, example):
    """Order `tokens` by their first non-overlapping position in `example`.

    Each character of `example` may be claimed by at most one token; the
    claimed spans are tracked in `flags` (1 = free, 0 = taken).
    Returns the tokens sorted by their start offset.
    """
    flags = [1] * len(example)
    sequence = []
    for token in tokens:
        # BUG FIX: escape the token so dictionary entries containing regex
        # metacharacters (., +, ?, ...) are matched literally.
        for match in re.finditer(re.escape(token), example):
            start, end = match.span()
            # The span is usable only if every covered position is free.
            if reduce(lambda x, y: x * y, flags[start:end]):
                sequence.append((token, start, end))
                for i in range(start, end):
                    flags[i] = 0
    sequence.sort(key=lambda item: item[1])
    return [tok for tok, _, _ in sequence]
```
## Corpus Preparation
First, we need to prepare our corpus.
1. We will add paddings (sentinels) to our sentences.
2. Take one sentence at a time.
3. Change non-chinese words to FW to avoid data explosion.
4. Slice the n-grams and add them to dictionary
```
def add_stuff(order):
    '''
    Shingle the corpus into n-grams and accumulate their counts.

    Input: order of the n-gram (e.g. 2 for bigrams).
    Output: none (updates the module-level `corpus` dict in place).
    '''
    start_padding = bos * order  # <bos> sentinels prepended to each sentence
    end_padding = eos * order    # <eos> sentinels appended
    # FIX: context manager guarantees the corpus file is closed.
    with open("hk-zh.txt", "r+") as infile:
        for line in tqdm(infile, total=1314726):
            line = line.rstrip()
            for sentence in zng(line):  # sentence-level split
                candidate = start_padding + sentence + end_padding
                word_list = []
                for word in candidate.split():
                    if not(bool(re.match('^[a-zA-Z0-9]+$', word))):
                        word_list.append(word)  # keep Chinese tokens / punctuation
                    else:
                        word_list.append("FW")  # collapse alphanumerics to FW
                # NOTE(review): slicing runs from i=1 to len(word_list)-order-1,
                # skipping the first and last shingles; `backoff` uses the same
                # convention, so it is preserved here — confirm it is intended.
                ordered = [word_list[i:i + order] for i in range(1, len(word_list) - order)]
                for ngram in ordered:
                    ngram = tuple(ngram)
                    if ngram not in corpus:
                        corpus[ngram] = 1
                    else:
                        corpus[ngram] += 1
```
Let's say you want to extract till trigrams.
We want to do 3 iterations, for trigram, bi-gram and then unigram. Each iteration takes 2 minutes. This is the only time-consuming part of this code. Once you prep the dictionary, you don't need to do this again.
```
# Global n-gram frequency table shared by add_stuff, prob and backoff.
corpus = dict()
start_order = 3
# Count trigrams, then bigrams, then unigrams.
for i in range(start_order, 0, -1):
    add_stuff(i)
```
Once you made the dictionary, dump it into a pickle.
```
import pickle  # already imported in the setup cell; harmless re-import
# Persist the n-gram counts so corpus preparation can be skipped next time.
with open('corpus.pkl', 'wb') as handle:
    pickle.dump(corpus, handle)
```
Here's a way to load a pickle so you don't need to process data everytime.
```
# Reload previously pickled n-gram counts.
with open('corpus.pkl', 'rb') as fp:
    corpus = pickle.load(fp)
```
# Making Candidate Lists
1. Tokenize the input.
2. Check the mappings of each input.
3. Add all possible mappings to candidate list.
```
def convert(sentence):
    '''
    Build the candidate lists for a simplified-Chinese sentence.

    Input: simplified Chinese sentence.
    Output: list of lists; each inner list holds the possible traditional
    forms for one token (non-Chinese characters are restored verbatim).
    '''
    tokens, char_list = tokenizer(sentence)
    candidates = [s2t_dict[token] for token in tokens]
    return output_list(candidates, char_list)
```
# Maximum log-likelihood calculations
Compute the log likelihood of a sentence with different \\(\alpha\\) penalties for unigram, bigram and trigrams.
```
num_tokens = 4526000 # total number of tokens in corpus
def prob(word_list, alpha_0 = 0.25, alpha_1 = 0.5, alpha_2 = 1.0):
    '''
    Stupid-Backoff-style log score of an n-gram.

    Input: a sequence of words as a list; alpha_0/alpha_1/alpha_2 are the
    penalty factors applied for 1/2/3-word histories.
    Output: a log-probability-like score (not normalized).
    '''
    word_list = tuple(word_list) # tuples are hashable dict keys
    if word_list in corpus:
        # n-gram seen in the corpus
        numerator = corpus[word_list] # frequency of the full n-gram
        denominator = num_tokens # fallback denominator: corpus size
        # cut off the last word and look the history up in the corpus
        if len(word_list[:-1]) > 1 and word_list[:-1] in corpus:
            denom_list = word_list[:-1]
            denominator = corpus[denom_list]
        # NOTE(review): for a length-1 history the `> 1` guard above keeps
        # `denominator` at num_tokens even when the unigram IS in corpus —
        # confirm this is intended.
        if len(word_list[:-1]) == 1 and word_list[:-1] in corpus:
            return alpha_0 * log(numerator / denominator) # alpha-weighted log prob
        elif len(word_list[:-1]) == 2 and word_list[:-1] in corpus:
            return alpha_1 * log(numerator / denominator)
        elif len(word_list[:-1]) == 3 and word_list[:-1] in corpus:
            return alpha_2 * log(numerator / denominator)
        else:
            return log(numerator/denominator)
    else:
        word_list = list(word_list) # convert it back to list
        k = len(word_list) - 1 # back off: shorten the n-gram by one
        if k > 0:
            # recursive: score every k-length sub-gram and sum the logs.
            # NOTE(review): the recursive call uses the *default* alphas,
            # not the ones passed in — confirm this is intended.
            probs = [prob(word_list[i:i + k]) for i in range(len(word_list) - k + 1)]
            return sum(probs)
        else:
            # unseen single word
            if not(bool(re.match('^[a-zA-Z0-9]+$', word_list[0]))):
                return log(1 / num_tokens) # smoothed small probability
            else:
                return prob(["FW"]) # non-Chinese word: score the FW bucket
```
# Language Model
Generative n-gram language model that estimates the conditional probability of a word given its history in the n-gram. It's calculated by backing off through progressively shorter history models.
Stupid Backoff does not generate normalized probabilities. The main difference is that we don’t apply any discounting and instead directly use the relative frequencies (S is used instead of P to emphasize that these are not probabilities but scores).
\\(S(w^i|w^{i−1}_{i−k+1}) =
\begin{cases}
\frac{f(w^{i}_{i−k+1})}{f(w^{i-1}_{i−k+1})} & \text{if } f(w^{i}_{i−k+1})> 0\\
\\\alpha S(w^i|w^{i−1}_{i−k+2}), & \text{otherwise}
\end{cases}
\\)
Where \\(\alpha\\) is the backoff factor.
Stupid Backoff is inexpensive to calculate in a distributed environment while approaching the quality
of Kneser-Ney smoothing for large amounts of data.
```
from math import log
def backoff(sentence, order, alpha_0 = 0.25, alpha_1 = 0.5, alpha_2 = 1.0):
    '''
    Calculate the log-likelihood of a sentence under the backoff model.

    Input: sentence text and the order of the n-gram; alpha_* are the
    backoff penalties forwarded to `prob`.
    Output: summed log score of the sentence.
    '''
    score = 0
    sentences = list(zng(sentence)) # sentence tokenizer
    for sentence in sentences:
        start_padding = bos * order # beginning padding
        end_padding = eos * order # ending padding
        candidate = start_padding + sentence + end_padding # add paddings
        word_list = candidate.split()
        word_list_tokens = []
        for word in word_list:
            # keep Chinese tokens; collapse alphanumeric runs to FW
            if not(bool(re.match('^[a-zA-Z0-9]+$', word))):
                word_list_tokens.append(word)
            else:
                word_list_tokens.append("FW")
        word_list = word_list_tokens
        # NOTE(review): same shingle range as add_stuff (starts at 1, stops
        # at len-order-1) so training and scoring stay consistent.
        ordered = [word_list[i:i + order] for i in range(1, len(word_list) - order)] # shingle into n-grams
        probs = [prob(x, alpha_0, alpha_1, alpha_2) for x in ordered] # score each n-gram
        score += sum(probs) # accumulate per-sentence scores
    return score
```
# Translator
Take simplified sentence as input, generate candidate list for sentence.
For words with many to one mappings add the candidate to a temporary sentence, calculate perplexity and choose the option which gives the lowest perplexity.
Call function to add back spaces at the end and output the final sentence.
```
def translate(sentence, alpha_0 = 0.25, alpha_1 = 0.5, alpha_2 = 1.0):
    '''
    Translate a simplified-Chinese sentence to traditional Chinese.

    Input: simplified sentence; alpha_* are forwarded to the LM scorer.
    Output: traditional sentence with the original spacing restored.
    '''
    candidates = convert(sentence) # candidate lists, one per token
    final_sent = ""
    for words in candidates:
        if len(words) > 1:
            # one-to-many mapping: score each candidate in left context
            score = -50000.0 # sentinel: below any realistic log score
            likely = ""
            for candidate in words:
                temp = final_sent
                temp = temp + " " + candidate # extend the partial sentence
                current_score = backoff(temp, 3, alpha_0, alpha_1, alpha_2) # LM score
                if current_score > score:
                    # best-scoring candidate so far
                    score = current_score
                    likely = candidate
            final_sent = final_sent + " " + likely
        else:
            # unambiguous mapping: take the only candidate
            final_sent = final_sent + " " + words[0]
    final_sent = final_sent.replace(" ", "") # drop the scoring separators
    final_sent = add_back_spaces(sentence, final_sent) # restore original spacing
    return final_sent
```
Add spaces back by enumerating through the original and the appended list.
```
def add_back_spaces(original, current):
    """Re-insert the spaces of `original` into `current` (whose spaces were
    stripped during translation), preserving character alignment."""
    chars = list(current)
    seen_spaces = 1
    for idx, ch in enumerate(original):
        if ch == " ":
            # each earlier space shifts the target index left by one
            chars[idx - seen_spaces] += " "
            seen_spaces += 1
    return "".join(chars)
```
Test sentence for translate.
```
sentence = "早在23岁,伍兹就参与了世界上首个核反应堆Chicago Pile-1的建设,她是导师费米领导的项目团队中最年轻的一员。此外,伍兹在建立和使用实验所需的盖革计数器上起到关键作用。反应堆成功运转并达到自持状态时,她也是唯一在场的女性。曼哈顿计划中,她与费米合作;同时,她曾与第一任丈夫约翰·马歇尔(John Marshall)一同解决了汉福德区钚生产厂氙中毒的问题,并负责监督钚生产反应炉的建造和运行。"
a = translate(sentence)
print(a)
```
# Joint Probability Based Tokenizer
Greedy tokenizer would generally work for most of the cases, however, it could lead to an undesirable segmentation, due to the preference towards longer chunks.
We propose a joint consideration for sub-word segmentation by considering both source and target sentences.
A translator needs a source sentence $\mathbf{S}$ consisting of segmentations where $\mathbf{S} = s_0 s_1 \dots s_n$ and a target sentence $\mathbf{T}$ consisting of segmentations where $\mathbf{T} = t_0 t_1 \dots t_m$.
We want to find optimal arrangement of $\mathbf{S}$ which is $\mathbf{S}^*$ and optimal arrangement of $\mathbf{T}$ which is $\mathbf{T}^*$. Mathematically:
\begin{align}
\label{eq1}
\mathbf{S}^*, \mathbf{T}^* = \underset{{s_i \in \mathbf{S}, t_j \in \mathbf{T}}}{\operatorname{argmax}} P(\mathbf{S}, \mathbf{T})
\end{align}
where $P(\mathbf{S}, \mathbf{T})$ is the joint probability of sequences.
We assume that the prior probabilities, which are $P(\mathbf{S})$ and $P(\mathbf{T})$, are language model based probabilities.
```
def dp_tokenizer(sentence):
    """Segment `sentence` with the memoized Viterbi `segment`.

    Side effect: publishes the sentence length as the module-level
    `orig_len`, which `Pwords` reads to scale its OOV penalty.
    """
    s = sentence
    global orig_len
    orig_len = len(s)
    return segment(s)
import kenlm  # FIX: kenlm was never imported in the setup cell

# Language model trained on the simplified-Chinese corpus.
model = kenlm.Model("sim_train.klm")
def memo(f):
    """Memoize function f, whose args must all be hashable.

    The cache is exposed as `fmemo.cache` so callers can inspect or
    clear it. `functools.wraps` preserves f's name and docstring.
    """
    from functools import wraps  # local import: file has no functools import
    cache = {}
    @wraps(f)
    def fmemo(*args):
        if args not in cache:
            cache[args] = f(*args)
        return cache[args]
    fmemo.cache = cache
    return fmemo
def splits(text, start=0, L=20):
    "Return every (first, rest) split of text with start <= len(first) <= L."
    limit = min(len(text), L)
    return [(text[:i], text[i:]) for i in range(start, limit + 1)]
```
We chose Viterbi for segmenting the given sentence. The scoring function is obtained from the constructed language models.
Optimal segmentation depends on the following:
<ol>
<li>language model score of source sentence of a candidate segment.</li>
<li>language model score of target sentence of a candidate segment.</li>
<li>item mapping conversions from source segment to target segment</li>
</ol>
```
@memo
def segment(text):
    "Return a list of words that is the most probable segmentation of text."
    # Viterbi-style recursion: try every first-word split (length >= 1)
    # and keep the candidate that maximizes the LM score `Pwords`.
    # Memoization via @memo avoids re-segmenting shared suffixes.
    if not text:
        return []
    else:
        candidates = ([first] + segment(rest)
                      for (first, rest) in splits(text, 1))
        return max(candidates, key=Pwords)
```
To avoid OOVs as output segmentations, we imposed a penalty on OOV outputs, which is given by: $\alpha \times \frac{\texttt{len(segment)}}{\texttt{len(sentence)}}$.
```
penalty_constant = 15.0  # weight of the OOV penalty in the LM score
def Pwords(words):
    "Language-model log score of words, with a length-scaled OOV penalty."
    sentence = " ".join(words)
    score = 0
    # Mirror the model's implicit sentence markers so indices line up
    # with the (prob, length, oov) tuples yielded by full_scores.
    words_ = ['<s>'] + sentence.split() + ['</s>']
    for i, (prob, length, oov) in enumerate(model.full_scores(sentence)):
        if oov:
            # Penalize OOV segments proportionally to their share of the
            # sentence; `orig_len` is a global set by dp_tokenizer.
            penalty = len(words_[i+1]) / orig_len
            score += penalty_constant * prob * penalty
        else:
            score += prob
    return score
import string
# Characters that should NOT be fed to the Viterbi segmenter:
alphanumerics = 'a-zA-Z0-9'  # genuine regex ranges, used verbatim
known_stops = u'。。…!?'
known_punctuation = u'/()、,。:「」…。『』!?《》“”;’ ‘【】·〔〕'
eng_punct = string.punctuation
# FIX: escape the literal punctuation sets so metacharacters such as
# ']', '\' and '-' cannot corrupt the character class (previously the
# backslash in string.punctuation was consumed as an escape and never
# matched). `alphanumerics` stays unescaped because it encodes ranges.
avoid = re.compile("([%s%s%s%s]+)" % (alphanumerics,
                                      re.escape(known_stops),
                                      re.escape(known_punctuation),
                                      re.escape(eng_punct)))
```
# Tokenize Sentence
Tokenize sentence and output the tokens.
```
def tokenize_sentence(sentence):
    """Split `sentence` on punctuation/alphanumeric runs, Viterbi-segment
    the Chinese spans, and return a flat list of tokens.

    Punctuation/alphanumeric spans are emitted as single characters.
    """
    split_words = re.split(avoid, sentence)
    split_words_values = [(w, bool(re.search(avoid, w))) for w in split_words]
    answer = []
    for (word, is_avoided) in split_words_values:
        if not is_avoided:
            # Chinese span: dp_tokenizer publishes its length globally.
            # (FIX: removed the dead local `orig_len` assignment, which
            # never reached Pwords.)
            segmented_text = dp_tokenizer(word)
        else:
            # Punctuation / alphanumerics: keep as single characters.
            segmented_text = list(word)
        answer.extend(segmented_text)
    return answer
sentence = "姚松炎、周庭势被「DQ」? 泛民质疑,政府再取消参选人资格涉政治筛选,要求律政司司长郑若骅解释法律理据。 有报道指,据全国人大常委会就《基本法》第一百零四条进行的释法,代表泛民参选立法会港岛及九龙西补选的香港众志周庭和被「DQ」前议员姚松炎,势被取消参选资格。律政司表示,法律政策专员黄惠冲将于稍后时间与泛民议员会面,确实时间待定。 民主派议员前晚在律政中心外静坐要求与律政司司长郑若骅会面不果后,昨在立法会召开记者招待会,要求郑就撤销参选人资格的理据,及其给予选举主任的法律意见作出详细交代。公民党议员郭荣铿批评,郑不向公众交代的做法是「冇承担,冇责任」的表现,不能只把责任交托予公务员。"
a = tokenize_sentence(sentence)
```
# Evaluation
Evaluating the accuracy on 100 lines on sample test data.
```
# Test-set file names.
sim_filename = "simplified100.txt"
tra_filename = "traditional100.txt"
# Traditional characters that come from ambiguous (one-to-many) mappings;
# eval() uses this to compute the micro-average accuracy.
checklist = [t
             for key in s2t_dict
             if len(s2t_dict[key]) > 1
             for t in s2t_dict[key]]
```
The translated characters are matched with the original traditional corpus during evaluation. The mismatched characters include wrongly converted characters and variant characters. Variant characters are characters that are homophones and synonyms. In some cases, simplified characters can have multiple traditional variant characters mapped to them, which gives the same meaning and context. Thus a mismatch in this case does not necessarily mean an incorrect conversion. However, this is not a common case and does not affect the evaluation result substantially.
```
def eval(sim_filename, tra_filename, alpha_0 = 0.25, alpha_1 = 0.5, alpha_2 = 1.0):
    """Character-level accuracy of translate() against a reference corpus.

    Lines whose translated length differs from the reference are skipped,
    as before. Returns a list of (label, value) pairs, including the
    micro-average over the ambiguous characters in `checklist`.
    NOTE: the name shadows the builtin `eval`; kept for existing callers.
    """
    total = correct = wrong = 0
    micro_total = micro_correct = 0
    # FIX: context managers guarantee both files are closed.
    with open(sim_filename, "r+", encoding="utf-8") as sim_file, \
         open(tra_filename, "r+", encoding="utf-8") as tra_file:
        tra_lines = tra_file.readlines()
        for line_count, line in enumerate(sim_file):
            line = translate(line.rstrip(), alpha_0, alpha_1, alpha_2)
            tra_line = tra_lines[line_count].rstrip()
            if len(line) != len(tra_line):
                continue  # skip misaligned lines (original behavior)
            for c, ref in zip(line, tra_line):
                total += 1
                if c == ref:
                    correct += 1
                else:
                    wrong += 1
                if ref in checklist:
                    # character with an ambiguous mapping
                    micro_total += 1
                    if c == ref:
                        micro_correct += 1
    results = []
    results.append(('Total', (total)))
    results.append(('Correct' , (correct)))
    results.append(('Wrong' , (wrong)))
    results.append(('Percentage' , (correct/total*100)))
    results.append(('Micro Total' , (micro_total)))
    results.append(('Micro Correct' , (micro_correct)))
    results.append(('Micro Percentage' , (micro_correct/micro_total*100)))
    return results
```
Overall Accuracy is defined as (no. of correctly converted characters) / (no. of converted characters). We also calculate the Micro-average accuracy to evaluate the performance for one-to-many character conversions only.
```
print(eval(sim_filename, tra_filename, 0.7, 0.5, 1))
```
# Hyper-Parameter Tuning
Testing the evaluation function on the test set with multiple hyperparameter values in order to determine the optimal values for the hyperparameter.
```
# Grid-search the backoff penalties alpha_0/alpha_1 (alpha_2 fixed at 1.0)
# and log every evaluation result to a file.
# NOTE(review): `max` is never read again in this notebook and shadows the
# builtin; kept only in case a later cell relies on it — confirm.
max = [99.5, 0.25, 0.5, 1]
# FIX: context manager so the log file is flushed and closed.
with open('alpha_tuning.txt', 'w+') as outfile:
    for a_0 in range(100, 10, -10):
        for a_1 in range(100, 10, -10):
            outfile.write(str(eval(sim_filename, tra_filename, alpha_0 = float(a_0)/100.0, alpha_1 = float(a_1)/100.0, alpha_2 = 1.0)) + "a0: " + str(a_0/100.0) + " " + "a1: " + str(a_1/100.0) + '\n')
```
| github_jupyter |
```
# import plotly.plotly as py
import plotly.graph_objs as go
import numpy as np
import pandas as pd
from math import exp, pi, sqrt
# Shared Plotly layout for all the surface figures below.
layout = go.Layout(
    title='Node data',
    autosize=False,
    width=600,
    height=600,
    margin=dict(
        l=65,
        r=50,
        b=65,
        t=90
    )
)
def multivariate_gaussian(pos, mu, Sigma):
    """Evaluate the multivariate normal density N(mu, Sigma) on `pos`.

    `pos` packs the meshed coordinate arrays x_1..x_k into its last
    dimension; the returned array has that last axis reduced away.
    """
    dim = mu.shape[0]
    det = np.linalg.det(Sigma)
    inv = np.linalg.inv(Sigma)
    normalizer = np.sqrt((2 * np.pi) ** dim * det)
    diff = pos - mu
    # (x-mu)^T Sigma^{-1} (x-mu), vectorized over all leading axes.
    mahalanobis = np.einsum('...k,kl,...l->...', diff, inv, diff)
    return np.exp(-mahalanobis / 2) / normalizer
def norm(s):
    """Rescale array `s` so that its maximum value becomes 1."""
    peak = s.max()
    return s / peak
# Resolution of the evaluation grid over the unit square [0,1] x [0,1].
N = 50
x = np.linspace(0, 1, N)
y = np.linspace(0, 1, N)
def evaluate(mu, sigma):
    """Evaluate the N(mu, sigma) density on the module-level (x, y) grid."""
    mx, my = np.meshgrid(x, y)
    pos = np.stack((mx, my), axis=-1)  # pack coordinates into the last axis
    return multivariate_gaussian(pos, mu, sigma)
def evaluate_f(f):
    """Evaluate an arbitrary density function `f` on the (x, y) grid."""
    mx, my = np.meshgrid(x, y)
    pos = np.stack((mx, my), axis=-1)  # pack coordinates into the last axis
    return f(pos)
def surface(z):
    # Wrap a height field in a Plotly surface trace over the shared grid.
    return go.Surface(x=x, y=y, z=z)
def draw(s):
    # Render a list of traces with the shared layout.
    return go.FigureWidget(data=s, layout=layout)
# Four normalized Gaussian bumps, weighted and combined into one surface.
sw = norm(evaluate(np.array([0.25, 0.85]), np.array([[0.1, 0], [0, 0.2]])))
sn = norm(evaluate(np.array([0.75, 0.25]), np.array([[0.01, 0], [0, 0.01]])))
sx = norm(evaluate(np.array([0.25, 0.25]), np.array([[0.10, 0], [0, 0.01]])))
sy = norm(evaluate(np.array([0.75, 0.65]), np.array([[0.02, 0], [0, 0.15]])))
s = norm(sw*1.4 + sx*0.8 + sy + sn*0.5)
draw([surface(s)])
# Same bumps, combined without per-bump normalization.
sw1 = evaluate(np.array([0.25, 0.85]), np.array([[0.1, 0], [0, 0.2]]))
sn1 = evaluate(np.array([0.75, 0.25]), np.array([[0.01, 0], [0, 0.01]]))
sx1 = evaluate(np.array([0.25, 0.25]), np.array([[0.10, 0], [0, 0.01]]))
sy1 = evaluate(np.array([0.75, 0.65]), np.array([[0.02, 0], [0, 0.01]]))
s1 = sw1*1.45 + sx1*0.7 + sy1*1.1 + sn1*0.6
draw([surface(s1)])
# Functional variant: compose the densities, evaluate once on the grid.
fw = lambda pos: multivariate_gaussian(pos, np.array([0.25, 0.85]), np.array([[0.3, 0], [0, 0.2]]))
fn = lambda pos: multivariate_gaussian(pos, np.array([0.75, 0.25]), np.array([[0.01, 0], [0, 0.01]]))
fx = lambda pos: multivariate_gaussian(pos, np.array([0.25, 0.25]), np.array([[0.10, 0], [0, 0.1]]))
fy = lambda pos: multivariate_gaussian(pos, np.array([0.75, 0.65]), np.array([[0.01, 0], [0, 0.15]]))
f = lambda pos: fw(pos)*2.5 + fx(pos)/1 + fy(pos)/5 + fn(pos)/32
draw([surface(evaluate_f(f))])
```
## test 2
```
# Two-bump test surface.
# NOTE(review): the first covariance matrix [[0.20, 0.1], [0, 0.1]] is not
# symmetric — a valid covariance would mirror the 0.1 into the lower-left
# entry. Likely a typo; confirm intended matrix before reuse.
t1 = norm(evaluate(np.array([0.25, 0.5]), np.array([[0.20, 0.1], [0, 0.1]])))
t2 = norm(evaluate(np.array([0.75, 0.5]), np.array([[0.05, 0], [0, 0.1]])))
tx = norm(t1+t2/2)
draw([surface(tx)])
```
| github_jupyter |
> This is one of the 100 recipes of the [IPython Cookbook](http://ipython-books.github.io/), the definitive guide to high-performance scientific computing and data science in Python.
# 8.4. Learning from text: Naive Bayes for Natural Language Processing
In this recipe, we show how to handle text data with scikit-learn. Working with text requires careful preprocessing and feature extraction. It is also quite common to deal with highly sparse matrices.
We will learn to recognize whether a comment posted during a public discussion is considered insulting to one of the participants. We will use a labeled dataset from [Impermium](https://impermium.com), released during a [Kaggle competition](https://www.kaggle.com/c/detecting-insults-in-social-commentary).
You need to download the *troll* dataset on the book's website. (https://ipython-books.github.io)
1. Let's import our libraries.
```
import numpy as np
import pandas as pd
import sklearn
import sklearn.model_selection as ms
import sklearn.feature_extraction.text as text
import sklearn.naive_bayes as nb
import matplotlib.pyplot as plt
%matplotlib inline
```
2. Let's open the csv file with Pandas.
```
# Load the Impermium troll dataset; each row is one labeled comment.
df = pd.read_csv("data/troll.csv")
```
3. Each row is a comment. There are three columns: whether the comment is insulting (1) or not (0), the date, and the unicode-encoded contents of the comment.
```
# Peek at the label and text columns of the last few rows.
df[['Insult', 'Comment']].tail()
```
4. Now, we are going to define the feature matrix $\mathbf{X}$ and the labels $\mathbf{y}$.
```
# Labels: 1 = insulting comment, 0 = not insulting.
y = df['Insult']
```
Obtaining the feature matrix from the text is not trivial. Scikit-learn can only work with numerical matrices. How to convert text into a matrix of numbers? A classical solution is to first extract a **vocabulary**: a list of words used throughout the corpus. Then, we can count, for each sample, the frequency of each word. We end up with a **sparse matrix**: a huge matrix containing mostly zeros. Here, we do this in two lines. We will give more explanations in *How it works...*.
```
# Build the sparse tf-idf feature matrix directly from the raw comment text:
# one row per comment, one column per vocabulary word.
tf = text.TfidfVectorizer()
X = tf.fit_transform(df['Comment'])
print(X.shape)
```
5. There are 3947 comments and 16469 different words. Let's estimate the sparsity of this feature matrix.
```
# Fraction of non-zero entries (X.nnz) over the full matrix size —
# tf-idf matrices are extremely sparse.
print("Each sample has ~{0:.2f}% non-zero features.".format(
    100 * X.nnz / float(X.shape[0] * X.shape[1])))
```
6. Now, we are going to train a classifier as usual. We first split the data into a train and test set.
```
# Hold out 20% of the samples for final evaluation.
(X_train, X_test,
 y_train, y_test) = ms.train_test_split(X, y,
                                        test_size=.2)
```
7. We use a **Bernoulli Naive Bayes classifier** with a grid search on the parameter $\alpha$.
```
# Grid-search the Laplace-smoothing parameter alpha over 50 log-spaced
# values in [0.01, 100] using GridSearchCV's default cross-validation.
bnb = ms.GridSearchCV(nb.BernoulliNB(), param_grid={'alpha': np.logspace(-2., 2., 50)})
bnb.fit(X_train, y_train);  # trailing ';' suppresses the notebook repr output
```
8. What is the performance of this classifier on the test dataset?
```
# Mean accuracy of the best estimator found by the grid search on the test set.
bnb.score(X_test, y_test)
```
9. Let's take a look at the words corresponding to the largest coefficients (the words we find frequently in insulting comments).
```
# We first get the words corresponding to each feature.
# NOTE(review): TfidfVectorizer.get_feature_names() was removed in
# scikit-learn 1.2; on modern versions use get_feature_names_out() — confirm
# the sklearn version this notebook targets.
names = np.asarray(tf.get_feature_names())
# Next, we display the 50 words with the largest
# coefficients.
# NOTE(review): BernoulliNB.coef_ is likewise deprecated/removed in recent
# scikit-learn; feature_log_prob_[1] is the modern equivalent — verify.
print(','.join(names[np.argsort(
    bnb.best_estimator_.coef_[0, :])[::-1][:50]]))
```
10. Finally, let's test our estimator on a few test sentences.
```
# Classify three hand-written sentences; the output array marks each
# sentence 1 (insulting) or 0 (not insulting).
print(bnb.predict(tf.transform([
    "I totally agree with you.",
    "You are so stupid.",
    "I love you."
])))
```
> You'll find all the explanations, figures, references, and much more in the book (to be released later this summer).
> [IPython Cookbook](http://ipython-books.github.io/), by [Cyrille Rossant](http://cyrille.rossant.net), Packt Publishing, 2014 (500 pages).
| github_jupyter |
# OpenVINO example with Squeezenet Model
This notebook illustrates how you can serve [OpenVINO](https://software.intel.com/en-us/openvino-toolkit) optimized models for Imagenet with Seldon Core.

To run all of the notebook successfully you will need to start it with
```
jupyter notebook --NotebookApp.iopub_data_rate_limit=100000000
```
## Setup Seldon Core
Use the setup notebook to [Setup Cluster](../../../notebooks/seldon_core_setup.ipynb#Setup-Cluster) with [Ambassador Ingress](../../../notebooks/seldon_core_setup.ipynb#Ambassador) and [Install Seldon Core](../../seldon_core_setup.ipynb#Install-Seldon-Core). Instructions [also online](./seldon_core_setup.html).
```
# Create a dedicated 'seldon' namespace and make it the default for
# all subsequent kubectl commands in this notebook.
!kubectl create namespace seldon
!kubectl config set-context $(kubectl config current-context) --namespace=seldon
```
## Deploy Seldon Intel OpenVINO Graph
```
# Deploy the OpenVINO squeezenet model from GCS via the seldon-openvino Helm chart.
!helm install openvino-squeezenet ../../../helm-charts/seldon-openvino \
    --set openvino.model.src=gs://seldon-models/openvino/squeezenet \
    --set openvino.model.path=/opt/ml/squeezenet \
    --set openvino.model.name=squeezenet1.1 \
    --set openvino.model.input=data \
    --set openvino.model.output=prob
# Render the same chart locally (without installing) to inspect the manifests.
!helm template openvino-squeezenet ../../../helm-charts/seldon-openvino \
    --set openvino.model.src=gs://seldon-models/openvino/squeezenet \
    --set openvino.model.path=/opt/ml/squeezenet \
    --set openvino.model.name=squeezenet1.1 \
    --set openvino.model.input=data \
    --set openvino.model.output=prob | pygmentize -l json
# Block until the Seldon deployment's pods are rolled out and ready.
!kubectl rollout status deploy/$(kubectl get deploy -l seldon-deployment-id=openvino-model -o jsonpath='{.items[0].metadata.name}')
```
## Test
```
%matplotlib inline
import numpy as np
from keras.applications.imagenet_utils import preprocess_input, decode_predictions
from keras.preprocessing import image
import sys
import json
import matplotlib.pyplot as plt
from seldon_core.seldon_client import SeldonClient
def getImage(path):
    """Load the image at `path` at 227x227, display it, and return it as a
    preprocessed single-image batch suitable for the network input."""
    loaded = image.load_img(path, target_size=(227, 227))
    arr = image.img_to_array(loaded)
    # Show the raw pixels (scaled into [0, 1]) before preprocessing mutates them.
    plt.imshow(arr / 255.)
    batch = preprocess_input(np.expand_dims(arr, axis=0))
    return batch
X = getImage("car.png")
X = X.transpose((0,3,1,2))
print(X.shape)
sc = SeldonClient(deployment_name="openvino-model",namespace="seldon")
response = sc.predict(gateway="ambassador",transport="grpc",data=X, client_return_type="proto")
result = response.response.data.tensor.values
result = np.array(result)
result = result.reshape(1,1000)
with open('imagenet_classes.json') as f:
cnames = eval(f.read())
for i in range(result.shape[0]):
single_result = result[[i],...]
ma = np.argmax(single_result)
print("\t",i, cnames[ma])
assert(cnames[ma]=="sports car, sport car")
!helm delete openvino-squeezenet
```
| github_jupyter |
(c) Ali Parandeh - Beginners Machine Learning - London
# Introduction to Unsupervised Machine Learning with AWS Sagemaker
In this interesting 3hr workshop, you will take the massive dataset of UFO sightings (80,000 reports over the past century) from [National UFO Reporting Center (NUFORC)](http://www.nuforc.org/) and use Amazon's machine learning services ([AWS Sagemaker](https://aws.amazon.com/sagemaker/)) to identify the top 10 locations that are most likely to have UFO sightings. To do so, you will need to use an unsupervised machine learning algorithm.
You will then take your trained model, deserialise it, convert its output to a csv format and visualise it on a map using AWS [Quicksight](https://aws.amazon.com/quicksight/) to see where these locations are. Then you can try correlating these locations with landmarks.
The general machine learning workflow with AWS Sagemaker is shown below. For this assignment we will not evaluate or deploy the model but only use its output to visualise the results on a world map.
<img src="https://docs.aws.amazon.com/sagemaker/latest/dg/images/ml-concepts-10.png">
### What is Unsupervised Machine Learning?
With unsupervised learning, data features are fed into the learning algorithm, which determines how to label them (usually with numbers 0,1,2..) and based on what. This “based on what” part dictates which unsupervised learning algorithm to follow.
Most unsupervised learning-based applications utilize the sub-field called **Clustering**.
One of the most famous topics under the realm of Unsupervised Learning in Machine Learning is k-Means Clustering. Even though this clustering algorithm is fairly simple, it can look challenging to newcomers into the field.
### What is the difference between supervised and unsupervised machine learning?
The main difference between Supervised and Unsupervised learning algorithms is the absence of data labels in the latter.
### What does clustering mean?
**Clustering** is the process of grouping data samples together into clusters based on a certain feature that they share — exactly the purpose of unsupervised learning in the first place.
<img src="https://cdn-images-1.medium.com/max/1600/1*tWaaZX75oumVwBMcKN-eHA.png">
Source: [Clustering using K-means algorithm](https://towardsdatascience.com/clustering-using-k-means-algorithm-81da00f156f6)
### How does the K-Means Algorithm work?
Being a clustering algorithm, **k-Means** takes data points as input and groups them into `k` clusters. This process of grouping is the training phase of the learning algorithm. The result would be a model that takes a data sample as input and returns the cluster that the new data point belongs to, according to the training that the model went through.
<img src="https://miro.medium.com/max/700/1*6EOTS1IE2ULWC9SKgf7mYw.png">
Source - [How Does k-Means Clustering in Machine Learning Work?](https://towardsdatascience.com/how-does-k-means-clustering-in-machine-learning-work-fdaaaf5acfa0)
<img src="https://miro.medium.com/max/700/1*4LOxZL6bFl3rXlr2uCiKlQ.gif">
Source: [How Does k-Means Clustering in Machine Learning Work?](https://towardsdatascience.com/how-does-k-means-clustering-in-machine-learning-work-fdaaaf5acfa0)
Check out the the two articles below to learn more about how the K-Means Algorithm work:
- [Clustering using K-means algorithm](https://towardsdatascience.com/clustering-using-k-means-algorithm-81da00f156f6)
- [How Does k-Means Clustering in Machine Learning Work?](https://towardsdatascience.com/how-does-k-means-clustering-in-machine-learning-work-fdaaaf5acfa0)
### Where can you use k-means?
The **k-means algorithm** can be a good fit for finding patterns or groups in large datasets that have not been explicitly labeled. Here are some example use cases in different domains:
**E-commerce**
- Classifying customers by purchase history or clickstream activity.
**Healthcare**
- Detecting patterns for diseases or success treatment scenarios.
- Grouping similar images for image detection.
**Finance**
- Detecting fraud by detecting anomalies in the dataset. For example, detecting credit card frauds by abnormal purchase patterns.
**Technology**
- Building a network intrusion detection system that aims to identify attacks or malicious activity.
**Meteorology**
- Detecting anomalies in sensor data collection such as storm forecasting.
## Step 1: Importing Data
For this part of the assignment, we need to import the following packages:
- **Amazon SageMaker Python SDK**: Amazon SageMaker Python SDK is an open source library for training and deploying machine-learned models on Amazon SageMaker. See [Documentation](https://sagemaker.readthedocs.io/en/stable/index.html)
- **Python Built-in Library** [datetime](https://docs.python.org/2/library/datetime.html)
- **Numpy** and **Pandas**
```
# TODO: Import the above packages below
import ____ as pd
import ____ as np
import ____
from ____ import datetime
```
> **Exercise:** Construct a url to the dataset location in your S3 bucket using the following expression and save it to `data_location`.
```
# TODO: Construct the url path to your dataset file that you have just uploaded to your newly created S3 bucket
# TODO: Construct the url path to your dataset file that you have just uploaded to your newly created S3 bucket
bucket = "____"  # your own S3 bucket name goes here
prefix = "ufo_dataset"
data_key = "ufo_complete.csv"

# Construct a url string and save it to data_location variable
data_location = "s3://{}/{}/{}".format(bucket, prefix, data_key)

# print data_location
print(data_location)

# Internally do not process the file in chunks when loading the csv onto a dataframe,
# to avoid mixed type inferences when importing the large UFO dataset.
df = pd.read_csv(data_location, low_memory= False)

# Inspect the tail of the dataframe
df.tail()

# Inspect the shape of the dataframe
df.shape
```
## Step 2: Cleaning, transforming and preparing the data
```
# TODO: Select the 'latitude' and 'longitude' columns and save it as a new dataframe `df_geo` with .copy().
# TODO: Select the 'latitude' and 'longitude' columns and save it as a new dataframe `df_geo` with .copy().
df_geo = df[["____", "____"]].copy()

# Inspect the tail of df_geo
df_geo.tail()

# Fully inspect the df_geo dataframe (dtypes, non-null counts)
df_geo.info()
```
Upon successful inspection of the above dataframe, you should note the following with this dataframe:
- There are no `null` or missing values in both columns. However, we still need to check for other incorrect entries that are not **coordinates**. Examples include: `0`, `string`s, etc.
- The `latitude` column has a `dtype` of `object`. This means the column may have missing or string values where the rest of the values are numbers. If the entries in the column are non-homogenous, pandas will store the column as a `string` or `object` data type. To clean the data in this column we can use pandas' `pd.to_numeric()` method to convert the data in this column to `float` for processing. The machine learning algorithm expects the data passed in to it to be numerical digits `float`s or `int`s not `string`s. - See [Documentation](https://pandas.pydata.org/pandas-docs/stable/reference/api/pandas.to_numeric.html) on how to use this method.
> **Exercise:** Convert the `latitude` column's datatype to `float`. You can pass in the `errors = "coerce"` option to `pd.to_numeric()` method to enforce the conversion. When conversion is not possible - i.e. values are `string`s - these strings will be replaced with `NaNs`. Therefore, you need to use a `.dropna()` method to drop rows where `NaNs` exist. Then check whether the column formats have been converted to numerical data types `float` and if any missing values are still present. **Note**: You can pass in `inplace = True` argument to `.dropna()` methods so that operations are performed in place and to avoid re-assignments.
```
# TODO: Convert the column values to numeric and whenever this is not possible replace the value with NaNs
# TODO: Convert the column values to numeric and whenever this is not possible replace the value with NaNs
df_geo["latitude"] = pd.____(df_geo.____, errors = "____")

# Count the number of null values in the dataframe - Expecting this to be non-zero
print("Number of null values in the dataframe before dropping rows is {}".format(df_geo.isnull().any().sum()))

# TODO: Drop all rows that NaN Values
df_geo.____(inplace=____)

# Count the number of null values in the dataframe - Expecting this to be zero
# (fix: this message previously said "before" even though rows were just dropped)
print("Number of null values in the dataframe after dropping rows is {}". format(df_geo.isnull().any().sum()))

# Count how many rows in the df have 0 values
print(df_geo[(df_geo.longitude == 0) | (df_geo.latitude == 0) ].count())

# TODO: Select all rows that have non-zero coordinate values and re-assign the selection to df_geo
df_geo = df_geo[(df_geo.longitude != ____) &(df_geo.latitude != ____) ]

# Check that there are no coordinate values in the df_geo dataframe with 0
print(df_geo[(df_geo.longitude == 0) &(df_geo.latitude == 0)])

# Re-checking the dataframe to ensure both columns have numerical datatype such as `float` or `int`.
df_geo.info()

# Check if we have any missing values (NaNs) in our dataframe
missing_values = df_geo.isnull().values.any()
print("Are there any missing values? {}".format(missing_values))

# If there are any missing values in the dataframe, show them
if (missing_values):
    df_geo[df_geo.isnull().any(axis = 1)]

# TODO: store the cleaned up dataframe column values as a 2D numpy array (matrix) with datatype of float32
data_train = df_geo.values.astype("____")

# Print the 2D numpy array
data_train
```
## Step 3: Visualising the last 5000 reports of the data on the map
One of the useful packages for visualising the data on a map is called **plotly**.
We can import the following module from plotly package as `px`:
- **plotly**'s [express](https://plot.ly/python/plotly-express/) - Plotly Express is a terse, consistent, high-level wrapper around `plotly.graph_objects` for rapid data exploration and figure generation.
For data available as a tidy pandas DataFrame, we can use the Plotly Express function `px.scatter_geo` for a geographical scatter plot. The `color` argument is used to set the color of the markers from a given column of the DataFrame.
```
import plotly.express as px

# Showing only the last 5000 rows on a world map; markers are
# colour-coded by longitude.
fig = px.scatter_geo(df_geo.iloc[-5000: -1, :], lat="latitude", lon = "longitude",
                     title="UFO Reports by Latitude/Longitude in the world - Last 5000 Reports", color = "longitude")
fig.show()
```
You may notice that most of the recent 5000 UFO reports have been located in the United States. Let's take a closer look at United States by using `plotly`'s `geo` layout feature to show sightings on the US map.
```
from plotly.offline import iplot

# Raw plotly 'scattergeo' trace restricted to the USA map scope.
data = [dict(
    type = 'scattergeo',
    locationmode = 'USA-states',
    # Column 0 is latitude, column 1 is longitude (df_geo column order).
    lat = df_geo.iloc[-5000:-1, 0],
    lon = df_geo.iloc[-5000:-1, 1],
    mode = 'markers',
    marker = dict(
        size = 5.5,
        opacity = 0.75,
        color = 'rgb(0, 163, 81)',
        line = dict(color = 'rgb(255, 255, 255)', width = 1))
)]
# Map styling: Albers-USA projection with light land/lake colours.
layout = dict(
    title = 'UFO Reports by Latitude/Longitude in United States - Last 5000 Reports',
    geo = dict(
        scope = 'usa',
        projection = dict(type = 'albers usa'),
        showland = True,
        landcolor = 'rgb(250, 250, 250)',
        subunitwidth = 1,
        subunitcolor = 'rgb(217, 217, 217)',
        countrywidth = 1,
        countrycolor = 'rgb(217, 217, 217)',
        showlakes = True,
        lakecolor = 'rgb(255, 255, 255)')
)
figure = dict(data = data, layout = layout)
iplot(figure)
```
## Step 3: Create and train our model
```
# Define number of clusters and output location URL to save the trained model
# Define number of clusters and output location URL to save the trained model.
# (num_clusters is intended to be passed as the `k` hyperparameter below.)
num_clusters = 10
output_location = "s3://" + bucket + "/model-artifacts"
```
To pass a training command to Amazon Sagemaker, we need to grab the details of the current execution role **ARN ID** whose credentials we are using to call the Sagemaker API.
> **Exercise:** Grab the ARN ID of your current Execution role using the `sagemaker` SDK - See [Documentation](https://sagemaker.readthedocs.io/en/stable/session.html?highlight=get%20execution#sagemaker.session.get_execution_role)
```
# TODO: Get the execution role ARN ID to pass to the sagemaker API later on
# TODO: Get the execution role ARN ID to pass to the sagemaker API later on
role = sagemaker.____()

# Check that you have this step correctly performed (should print an ARN string)
print(role)
```
We now can use Amazon's built-in K-means ML algorithm to find `k` clusters of data in our unlabeled UFO dataset.
Amazon SageMaker uses a modified version of the web-scale k-means clustering algorithm. Compared with the original version of the algorithm, the version used by Amazon SageMaker is more accurate. Like the original algorithm, it scales to massive datasets and delivers improvements in training time. To do this, the version used by Amazon SageMaker streams mini-batches (small, random subsets) of the training data. The k-means algorithm expects tabular data, where rows represent the observations that you want to cluster, and the columns represent attributes of the observations. See [Documentation](https://docs.aws.amazon.com/sagemaker/latest/dg/k-means.html)
To ask AWS sagemaker for training a model using this algorithm we need to define a **K-means Estimator**. KMeans estimators can be configured by setting **hyperparameters**. These hyperparameters are arguments passed into the estimator's Constructor Function.
This estimator requires the following hyperparameters to be passed in `sagemaker.KMeans()`:
- `role` (str) – An AWS IAM role (either name or full ARN)
- `train_instance_count` (int) – Number of Amazon EC2 instances to use for training. We only need 1 for this exercise.
- `train_instance_type` (str) – Type of EC2 instance to use for training, for example, ‘ml.c4.xlarge’. This is the **compute resources** that you want Amazon SageMaker to use for model training. Compute resources are ML compute instances that are managed by Amazon SageMaker.
- `k` (int) – The number of clusters to produce. We need to 10 for this exercise.
- `output_path` (str) - The URL of the S3 bucket where you want to store the output of the job.
```
# TODO: Define the training API request to AWS Sagemaker
# TODO: Define the training API request to AWS Sagemaker
# (fill in the KMeans estimator hyperparameters described above)
kmeans = sagemaker.____(role = ____,
                        train_instance_count = ____,
                        train_instance_type = "____",
                        output_path = ____,
                        k = ____)
```
The following diagram shows how you train and deploy a model with Amazon SageMaker - See [Documentation](https://docs.aws.amazon.com/sagemaker/latest/dg/how-it-works-training.html) For this assignment, Amazon SageMaker provides training algorithms that are great out-of-the-box solution for quick model training. We have used some helper code above to clean and prepare the dataset and configure AWS Sagemaker API calls but do not need to specify training code or even a training code image from EC2 Container Registry. We only need to pass in the dataset for training with AWS's KMeans default algorithm. If we wanted to specify our own algorithms or use one of the popular deep learning frameworks - tensorflow/etc. - then we provide additional training code.
<img src="https://docs.aws.amazon.com/sagemaker/latest/dg/images/sagemaker-architecture-training-2.png">
To train a model in Amazon SageMaker, you create a **training job** using the `kmeans.fit()` method. - See [Documentation](https://sagemaker.readthedocs.io/en/stable/kmeans.html?highlight=kmeans.fit#sagemaker.KMeans.fit)
The training job requires the following information passed in to `.fit()` method:
- `record_set(data_train)` - The training records to train the KMeans Estimator on. Here `data_train` must be passed in to the `kmeans.record_set()` method to convert our 2D numpy array data to a `RecordSet` object that is required by the algorithm. - See [Documentation](https://sagemaker.readthedocs.io/en/stable/sagemaker.amazon.amazon_estimator.html?highlight=record_set()#sagemaker.amazon.amazon_estimator.AmazonAlgorithmEstimatorBase.record_set)
- `job_name` (str) - Training job name. If not specified, the estimator generates a default job name, based on the training image name and current timestamp.
Amazon SageMaker then launches the ML compute instances and uses the training dataset to train the model. It saves the resulting model artifacts and other output in the S3 bucket you specified for that purpose.
Here we are going to construct a job name using the following expression and Python's built-in `datetime` module. This ensures our `job_name` is unique. Each training job requires a **unique** `job_name`. Otherwise, AWS will throw an error.
```
# Construct a unique job_name using datetime module
job_name = "kmeans-geo-job-{}".format(datetime.now().strftime("%Y%m%d%H%M%S"))
# Print job_name
print("Here is the job name: {}".format(job_name))
```
> **Exercise**: Create a training job using `kmeans.fit()`. Use the AWS documentation links above to figure out how to pass in the arguments to `kmeans.fit()` for the training job to commence.
If you do this step right, you should see outputs like this appear underneath the code cell:
```
2019-07-29 00:54:46 Starting - Starting the training job...
2019-07-29 00:54:47 Starting - Launching requested ML instances...
2019-07-29 00:55:44 Starting - Preparing the instances for training......
2019-07-29 00:56:24 Downloading - Downloading input data...
2019-07-29 00:57:05 Training - Downloading the training image..
.
.
.
2019-07-29 00:57:31 Uploading - Uploading generated training model
2019-07-29 00:57:31 Completed - Training job completed
Billable seconds: 68
CPU times: user 1.78 s, sys: 18.7 ms, total: 1.8 s
Wall time: 3min 13s
```
```
%%time
# TODO: Create a training job and time it. Running this code cell will send a training job request to AWS Sagemaker
kmeans.fit(kmeans.record_set(_____), job_name= _____)
```
**Congratulations** on building and training a model on the cloud using unsupervised machine learning algorithm and saving it! Next we are going to deserialise the model so that we can use its output.
## Step 4: Model Deserialisation
To deserialise the compressed model output saved on our S3 bucket we need to import the following packages.
- **Boto** is the Amazon Web Services (AWS) SDK for Python. It enables Python developers to create, configure, and manage AWS services, such as EC2 and S3. Boto provides an easy to use, object-oriented API, as well as low-level access to AWS service. See [Documentation](https://boto3.amazonaws.com/v1/documentation/api/latest/index.html)
> **Exercise**: Import `boto3` package, then use the AWS Python SDK boto3 to download the compressed model from the S3 bucket to a file. You will need to construct a url to the model and save it to `path_to_model` variable. Then pass `path_to_model` to the following command `boto3.resource("s3").Bucket(bucket).download_file(path_to_model, file_name_to_save_to)`. - See [boto3 Documentation](https://boto3.amazonaws.com/v1/documentation/api/latest/reference/services/s3.html?highlight=s3.object#S3.Client.download_file)
```
# TODO: Import boto3
# TODO: Import boto3
import ____

# Construct a url to the model. Compressed model is saved under the model-artifacts folder
path_to_model = "model-artifacts/" + job_name + "/output/model.tar.gz"

# TODO: Use the AWS Python SDK boto3 to download the compressed model output from S3 bucket onto `model.tar.gz` file.
boto3.____("____").____(____).download_file(____, "model.tar.gz")
```
To deserialise the compressed model output saved on our S3 bucket we need to import the following packages.
- **Python's Built-in module** `os` - See [Documentation](https://docs.python.org/2/library/os.html#os.system)
Python's built-in system module `os.system()` can be used to execute a shell command `tar -zxvf` on the `model.tar.gz` compressed gzipped file. This command shell helps to extract tar files out a `tar.gz` archives. The `-zxvf` flags can passed in to `os.system()` to perform the following commands:
- `-z` - The file is a “gzipped” file
- `-x` - Extract files
- `-v` - Verbose, print the file names as they are extracted one by one
- `-f` - Use the following tar archive for the operation
See [Linux's tar Man Pages](https://linux.die.net/man/1/tar) for more details on the `tar` shell command.
> **Exercise:** Use `os.system()` method to run the `tar` command on the compressed gzip file `model.tar.gz` with the above flags.
```
# TODO: Import the required packages for deserialisation
# TODO: Import the required packages for deserialisation
import os

# TODO: Use Python's built-in os package to open the compressed model output
# (run tar with the -zxvf flags described above)
os.system("____ -____ model.tar.gz")
```
`os.system()` later can be used to execute the `unzip` shell command on `model_algo-1`. `unzip` shell command lists, tests, or extracts files from a ZIP archive. See [Linux unzip Man Pages](https://linux.die.net/man/1/unzip) for more details on the `unzip` command.
> **Exercise:** Use `os.system()` method to unzip `model_algo-1`.
```
# TODO: Use Python's built-in os package to unzip model_algo-1 file.
os.system("____ model_algo-1")
```
To load the unzipped model output parameters, we need to install `mxnet` package.
> **Exercise**: Use `!pip install` to install `mxnet`.
```
# TODO: Install mxnet package (needed to load the extracted model parameters)
!pip install ____
```
To load the model output parameters we need to import the following package:
- **MXNet**: A flexible and efficient library for deep learning. - See [Documentation](https://mxnet.apache.org/versions/master/api/python/index.html)
> **Exercise**: Use `mxnet`'s `.ndarray.load()` method to load the model output parameters and assign it to `Kmeans_model_params` variable - See [Documentation](https://mxnet.incubator.apache.org/api/python/ndarray/ndarray.html)
```
# TODO: Import mxnet
import ____ as mx
# TODO: Use mxnet to load the model parameters
Kmeans_model_params = mx.____.____("model_algo-1")
```
> **Exercise**: Convert the model parameters to a dataframe called `cluster_centroids_kmeans` using `pd.DataFrame()`. You can grab the model output parameters using `Kmeans_model_params[0].asnumpy()` to pass to `pd.DataFrame()`.
```
# TODO: Convert the Kmeans_model_params to a dataframe using pandas and numpy: cluster_centroids_kmeans
cluster_centroids_kmeans = pd.____(____[0].____())
# TODO: Set the column names of the cluster_centroids_kmeans dataframe to match the df_geo column names
cluster_centroids_kmeans.____ = df_geo.____
# Print cluster_centroids_kmeans
print(cluster_centroids_kmeans)
```
To write the content of the model output using An in-memory stream for text I/O we need to import the following package:
- **Python's Built-in Package** `io` - See [Documentation](https://docs.python.org/3/library/io.html#io.StringIO)
```
# TODO: Import Python's built-in package io
import ____

# When a csv_buffer object is created, it is initialized using StringIO() constructor.
# Here no string is given to the StringIO() so the csv_buffer object is empty.
csv_buffer = io.StringIO()

# TODO: Use pandas .to_csv() method to write the cluster_centroids_kmeans dataframe to a csv file
cluster_centroids_kmeans.____(csv_buffer, index = False)

# TODO: Let's use Amazon S3
s3_resource = boto3.resource("____")

# Use the .Object() method to upload an object in the given `bucket`.
# Save the content of the csv_buffer file using the .put() method.
s3_resource.Object(bucket, "results/ten_locations_kmeans.csv").put(Body = csv_buffer.getvalue())
```
Let's quickly visualise where these top 10 coordinates are! We will use **AWS Quicksights** later on for reporting these locations.
```
# TODO: Visualise the top 10 locations in the world most likely to have UFO Sightings
# (fix: plot title previously read "mostly likely")
fig = px.scatter_geo(cluster_centroids_kmeans, lat="____", lon = "____",
                     title="Top 10 Locations in the world most likely to have UFO Sightings", color = "longitude")
fig.show()

# TODO: Visualise the top locations in the US most likely to have UFO Sightings
data = [dict(
    type = '____',
    locationmode = 'USA-states',
    # Column 0 is latitude, column 1 is longitude (matches df_geo column order).
    lat = ____.iloc[:, 0],
    lon = ____.iloc[:, 1],
    mode = 'markers',
    marker = dict(
        size = 5.5,
        opacity = 0.75,
        color = 'rgb(0, 163, 81)',
        line = dict(color = 'rgb(255, 255, 255)', width = 1))
)]
layout = dict(
    title = 'Top locations in the United States most likely to have UFO Sightings',
    geo = dict(
        scope = '____',
        projection = dict(type = 'albers usa'),
        showland = True,
        landcolor = 'rgb(250, 250, 250)',
        subunitwidth = 1,
        subunitcolor = 'rgb(217, 217, 217)',
        countrywidth = 1,
        countrycolor = 'rgb(217, 217, 217)',
        showlakes = True,
        lakecolor = 'rgb(255, 255, 255)')
)
figure = dict(data = ____, layout = ____)
iplot(____)
```
Interesting findings! Now answer the following questions:
- Which cities are the closest to these top 10 locations?
- Which states in the United States are these top coordinates located in?
- What landmarks - airports, research centres, etc. - do these coordinates correlate with?
```
# TODO: Your answers here
cities = ["___", "___", "___", "___", "___", "___", "___", "___", "___", "___"]
us_states = ["___", "___", "___", "___", "___", "___"]
landmarks = ["___", "___", "___", "___", "___", "___", "___", "___", "___", "___"]
```
## CONGRATULATIONS!!!
Well done on completing this difficult part of the assignment. All is now left for you to do is to visualise the model outputs you have saved in the `ten_locations_kmeans.csv` file in your S3 bucket on a map. Simply create an **AWS Quicksight** account and use the `my-manifest.json` file under the `quicksight` folder of [BML github repo](https://github.com/beginners-machine-learning-london/intro_to_unsupervised_ml_with_AWS_Sagemaker/tree/master/exercises/quicksight) to configure AWS Quicksight.
Again, Well done on completing the above assignments! This was a hard exercise. You have learned how to use AWS Sagemaker to train an unsupervised machine learning model in the cloud. We hope that you enjoyed this **Introduction to unsupervised machine learning with AWS** Workshop. To learn more about AWS Sagemaker and machine learning in the cloud check out a few resources we have provided in our repo's [README.md](https://github.com/beginners-machine-learning-london/intro_to_unsupervised_ml_with_AWS_Sagemaker).
Also make sure to join our meetup group to be informed of future workshops! [London Beginners Machine Learning Meetup](https://www.meetup.com/beginners-machine-learning-london/).
And join our [slack channel](https://join.slack.com/t/beginnersmach-wlf5812/shared_invite/enQtNzAzODA4OTY3MTcyLWU2ZDMzNGU2YTQ4ZDk5ZjY3OTk1YWU2OGU5NWRmMjM1NzkwM2MwYjk5MDNhZWE1YWVmNzY1MjgzZDk4OGE1OGE) to ask questions, discuss ML with other BML community members and suggest the topics of future workshops.
| github_jupyter |
<a href="https://colab.research.google.com/github/iamsoroush/DeepEEG/blob/master/arch_attv2_tests_4s.ipynb" target="_parent"><img src="https://colab.research.google.com/assets/colab-badge.svg" alt="Open In Colab"/></a>
```
#@title # Clone the repository and upgrade Keras {display-mode: "form"}
!git clone https://github.com/iamsoroush/DeepEEG.git
!pip install --upgrade keras
# NOTE(review): the clone is deleted here, yet sys.path.append('DeepEEG') in
# the next cell expects it to exist — confirm whether the removal is intended.
!rm -r DeepEEG
#@title # Imports {display-mode: "form"}
import os
import pickle
import sys
sys.path.append('DeepEEG')
PACKAGE_PARENT = '..'
SCRIPT_DIR = os.path.dirname(os.path.realpath(os.getcwd()))
sys.path.append(os.path.normpath(os.path.join(SCRIPT_DIR, PACKAGE_PARENT)))
import numpy as np
from deepeeg.helpers import CrossValidator
from deepeeg.models import DilatedDeepEEG
from deepeeg.dataset import DataLoader, Splitter, FixedLenGenerator
from google.colab import drive
drive.mount('/content/gdrive')
#@title # Set data path {display-mode: "form"}
#@markdown ---
#@markdown Type in the folder in your google drive that contains numpy _data_ folder:
parent_dir = 'soroush_deep_eeg'#@param {type:"string"}
gdrive_path = os.path.abspath(os.path.join('gdrive/My Drive', parent_dir))
data_dir = os.path.join(gdrive_path, 'data')
cv_results_dir = os.path.join(gdrive_path, 'cross_validation')
if not os.path.exists(cv_results_dir):
os.mkdir(cv_results_dir)
print('Data directory: ', data_dir)
print('Cross validation results dir: ', cv_results_dir)
#@title # Set Parameters
batch_size = 80
epochs = 50
k = 10
t = 10
instance_duration = 4
instance_overlap = 1
sampling_rate = 256
n_channels = 19
# task = 'rnr'
data_mode = 'cross_subject'
```
# RnR
```
#@title ## DilatedDeepEEG-AttV2
# Cross-validated experiment: dilated DeepEEG model with attention v2
# on the 'rnr' task.
task = 'rnr'
model_name = 'DilatedDeepEEG-AttV2'
# Fixed-length window generators for training and evaluation.
train_generator = FixedLenGenerator(batch_size=batch_size,
                                    duration=instance_duration,
                                    overlap=instance_overlap,
                                    sampling_rate=sampling_rate,
                                    is_train=True)
test_generator = FixedLenGenerator(batch_size=8,
                                   duration=instance_duration,
                                   overlap=instance_overlap,
                                   sampling_rate=sampling_rate,
                                   is_train=False)
# Configuration for the cross-validator; 't' and 'k' are forwarded as-is
# (presumably repeats and folds -- see CrossValidator for exact semantics).
params = {'task': task,
          'data_mode': data_mode,
          'main_res_dir': cv_results_dir,
          'model_name': model_name,
          'epochs': epochs,
          'train_generator': train_generator,
          'test_generator': test_generator,
          't': t,
          'k': k,
          'channel_drop': True}
validator = CrossValidator(**params)
# Load windowed EEG data and labels for this task/data mode.
dataloader = DataLoader(data_dir,
                        task,
                        data_mode,
                        sampling_rate,
                        instance_duration,
                        instance_overlap)
data, labels = dataloader.load_data()
# Model input shape: (samples per window, EEG channels).
input_shape = (sampling_rate * instance_duration,
               n_channels)
model_obj = DilatedDeepEEG(input_shape,
                           model_name=model_name,
                           attention='v2')
model = model_obj.create_model()
model.summary()
# Run the full cross-validation and collect the per-fold scores.
scores = validator.do_cv(model_obj,
                         data,
                         labels)
```
# H-MDD
```
#@title ## DilatedDeepEEG-AttV2
# Same cross-validated experiment as above, now on the 'hmdd' task with
# the dilated architecture and attention-v2 mechanism.
task = 'hmdd'
model_name = 'DilatedDeepEEG-AttV2'

# Window settings shared by both generators.
gen_kwargs = dict(duration=instance_duration,
                  overlap=instance_overlap,
                  sampling_rate=sampling_rate)
train_generator = FixedLenGenerator(batch_size=batch_size, is_train=True, **gen_kwargs)
test_generator = FixedLenGenerator(batch_size=8, is_train=False, **gen_kwargs)

# Configuration handed to the cross-validator ('t' repeats, 'k' folds --
# see CrossValidator for exact semantics).
params = {
    'task': task,
    'data_mode': data_mode,
    'main_res_dir': cv_results_dir,
    'model_name': model_name,
    'epochs': epochs,
    'train_generator': train_generator,
    'test_generator': test_generator,
    't': t,
    'k': k,
    'channel_drop': True,
}
validator = CrossValidator(**params)

# Load windowed EEG instances and their labels for this task/data mode.
dataloader = DataLoader(data_dir, task, data_mode,
                        sampling_rate, instance_duration, instance_overlap)
data, labels = dataloader.load_data()

# Model input shape: (samples per window, EEG channels).
input_shape = (sampling_rate * instance_duration, n_channels)
model_obj = DilatedDeepEEG(input_shape, model_name=model_name, attention='v2')
model = model_obj.create_model()
model.summary()

# Run the full cross-validation and collect the per-fold scores.
scores = validator.do_cv(model_obj, data, labels)
```
| github_jupyter |
# First model with scikit-learn

## Objective
In this module, we present how to build predictive models on tabular datasets, with only numerical features.
In particular we will highlight:
* the scikit-learn API: `.fit(X, y)`/`.predict(X)`/`.score(X, y)`;
* how to evaluate the generalization performance of a model with a train-test
split.
## Data
We will use the same dataset "adult_census" described in the previous
module. For more details about the dataset see <http://www.openml.org/d/1590>.
```
import pandas as pd
# Load the adult census dataset (see http://www.openml.org/d/1590).
adult_census = pd.read_csv("../data/adult-census.csv")
```
## Separating features from target
Scikit-learn expects our features ($X$) to be kept separate from our target ($y$)
Numerical data is the most natural type of data used in machine learning and can (almost) directly be fed into predictive models. Consequently, for this module we will use a subset of the original data with only the numerical columns.
```
import numpy as np

# Name of the prediction target and the numerical feature columns
# (every column except the target that has a numeric dtype).
target_col = "class"
feature_col = (
    adult_census
    .drop(columns=target_col)
    .select_dtypes(np.number)
    .columns
    .values
)

# Split the dataframe into the target series (y) and the feature frame (X).
target = adult_census[target_col]
target
features = adult_census[feature_col]
features

n_samples, n_features = features.shape
print(
    f"The dataset contains {n_samples} samples and "
    f"{n_features} features"
)
```
## Fit a model
We will build a classification model using the "K-nearest neighbors"
strategy. To predict the target of a new sample, a k-nearest neighbors takes
into account its `k` closest samples in the training set and predicts the
majority target of these samples.
<div class="admonition note alert alert-info">
<p class="first admonition-title" style="font-weight: bold;"><b>Note</b></p>
<p class="last">We use a K-nearest neighbors here. However, be aware that it is seldom useful
in practice. We use it because it is an intuitive algorithm. In future modules, we will introduce alternative algorithms.</p>
</div>
The `fit` method is called to train the model from the input (features) and
target data.
```
# to display nice model diagram
from sklearn import set_config
set_config(display='diagram')
from sklearn.neighbors import KNeighborsClassifier
# 1. define the algorithm
model = KNeighborsClassifier()
# 2. fit the model on the full dataset (no train/test split yet)
model.fit(features, target)
```
Learning can be represented as follows:

The method `fit` is based on two important elements: (i) **learning algorithm**
and (ii) **model state**. The model state can be used later to either predict (for classifiers and regressors) or transform data (for transformers).
<div class="admonition note alert alert-info">
<p class="first admonition-title" style="font-weight: bold;"><b>Note</b></p>
<p class="last">Here and later, we use the names <tt class="docutils literal">features</tt> and <tt class="docutils literal">target</tt> to be explicit. In
scikit-learn documentation, <tt class="docutils literal">data</tt> is commonly named <tt class="docutils literal">X</tt> and <tt class="docutils literal">target</tt> is
commonly called <tt class="docutils literal">y</tt>.</p>
</div>
## Make predictions
Let's use our model to make some predictions using the same dataset. To predict, a model uses a **prediction function** that will use the input data together with the model states.

```
# Predict on the same data the model was trained on.
target_predicted = model.predict(features)
target_predicted
```
...and we could even check if the predictions agree with the real targets:
```
# Compare the first 5 true targets with the corresponding predictions.
target[:5] == target_predicted[:5]
```
<div class="admonition note alert alert-info">
<p class="first admonition-title" style="font-weight: bold;"><b>Note</b></p>
<p class="last">Here, we see that our model makes a mistake when predicting for the third observation.</p>
</div>
To get a better assessment, we can compute the average success rate.
```
# Fraction of predictions matching the true target (training accuracy).
(target == target_predicted).mean()
```
<div class="admonition tip alert alert-warning">
<p class="first admonition-title" style="font-weight: bold;"><b>Warning!</b></p>
<p class="last">But, can this evaluation be trusted, or is it too good to be true?</p>
</div>
## Train-test data split
When building a machine learning model, it is important to evaluate the
trained model on data that was not used to fit it, as **generalization** is
our primary concern -- meaning we want a rule that generalizes to new data.
Correct evaluation is easily done by leaving out a subset of the data when
training the model and using it afterwards for model evaluation.
The data used to fit a model is called <b><em>training data</em></b> while the data used to
assess a model is called <b><em>testing data</em></b>.
Scikit-learn provides the helper function `sklearn.model_selection.train_test_split` which is used to automatically split the dataset into two subsets.
```
from sklearn.model_selection import train_test_split
# Hold out 25% of the data for testing; stratify on the target to keep
# the class proportions in both splits, and fix random_state so the
# shuffle (and hence the split) is reproducible.
X_train, X_test, y_train, y_test = train_test_split(
    features,
    target,
    random_state=123,
    test_size=0.25,
    stratify=target
)
```
<div class="admonition tip alert alert-info">
<p class="first admonition-title" style="font-weight: bold;"><b>Tip</b></p>
<p class="last">In scikit-learn setting the <tt class="docutils literal">random_state</tt> parameter allows to get
deterministic results when we use a random number generator. In the
<tt class="docutils literal">train_test_split</tt> case the randomness comes from shuffling the data, which
decides how the dataset is split into a train and a test set.
And as your target becomes more imbalanced it is important to use the <tt class="docutils literal">stratify</tt> parameter.
</p>
</div>
<div class="admonition tip alert alert-warning">
<p class="first admonition-title" style="font-weight: bold;"><b>Your Turn</b></p>
<p class="last">
1. How many observations are in your train and test data sets?
2. What is the proportion of response values in your <tt class="docutils literal">y_train</tt> and <tt class="docutils literal">y_test</tt>?
</p>
</div>
Instead of computing the prediction and manually computing the average
success rate, we can use the method `score`. When dealing with classifiers
this method returns their performance metric.

```
# 1. define the algorithm
model = KNeighborsClassifier()
# 2. fit the model on the training split only
model.fit(X_train, y_train)
# 3. score our model on the held-out test data
accuracy = model.score(X_test, y_test)
# Format with ':.2%' rather than round(accuracy, 4) * 100, which can print
# float noise such as 82.33999999999999 instead of 82.34%.
print(f'The test accuracy using {model.__class__.__name__} is {accuracy:.2%}')
```
<div class="admonition tip alert alert-warning">
<p class="first admonition-title" style="font-weight: bold;"><b>Important!</b></p>
<p class="last">
If we compare with the accuracy obtained by wrongly evaluating the model
on the training set, we find that this evaluation was indeed optimistic
compared to the score obtained on a held-out test set.
This illustrates the importance of always testing the generalization performance of
predictive models on a different set than the one used to train these models.
</p>
</div>
## Wrapping up
In this module we learned how to:
* fit a predictive machine learning algorithm (**k-nearest neighbors**) on a training dataset;
* evaluate its generalization performance on the testing data;
* introduced the scikit-learn API `.fit(X, y)` (to train a model),
`.predict(X)` (to make predictions) and `.score(X, y)`
(to evaluate a model).
| github_jupyter |
<img src="images/usm.jpg" width="480" height="240" align="left"/>
# MAT281 - Laboratorio N°03
## Objetivos del laboratorio
* Reforzar conceptos básicos de análisis no supervisado.
## Contenidos
* [Problema 01](#p1)
<a id='p1'></a>
## I.- Problema 01
<img src="https://freedesignfile.com/upload/2013/06/Car-logos-1.jpg" width="360" height="360" align="center"/>
El conjunto de datos se denomina `vehiculos_procesado_con_grupos.csv`, el cual contiene algunas de las características más importantes de un vehículo.
En este ejercicio el objetivo es poder clasificar los distintos vehículos basados en las características que se presentan a continuación. La dificultad de este ejercicio radica en que ahora tenemos variables numéricas y variables categóricas.
Lo primero será cargar el conjunto de datos:
```
import os
import numpy as np
import pandas as pd
import matplotlib.pyplot as plt
import seaborn as sns
from sklearn.preprocessing import MinMaxScaler
from sklearn.dummy import DummyClassifier
from sklearn.cluster import KMeans
%matplotlib inline
sns.set_palette("deep", desat=.6)
sns.set(rc={'figure.figsize':(11.7,8.27)})
# load the data, dropping the descriptive columns not used for clustering
df = pd.read_csv(os.path.join("data","vehiculos_procesado_con_grupos.csv"), sep=",")\
.drop(
["fabricante",
"modelo",
"transmision",
"traccion",
"clase",
"combustible",
"consumo"],
axis=1)
df.head()
```
En este caso, no solo se tienen datos numéricos, sino que también categóricos. Además, tenemos problemas de datos **vacíos (Nan)**. Así que para resolver este problema, seguiremos varios pasos:
## 1.- Normalizar datos
1. Cree un conjunto de datos con las variables numéricas, además, para cada dato vacía, rellene con el promedio asociado a esa columna. Finalmente, normalize los datos mediante el procesamiento **MinMaxScaler** de **sklearn**.
2.- Cree un conjunto de datos con las variables categóricas, además, transforme las variables categóricas a numéricas ocupando el comando **get_dummies** de pandas ([referencia](https://pandas.pydata.org/pandas-docs/stable/reference/api/pandas.get_dummies.html)). Explique a grandes rasgos cómo se realiza la codificación de variables categóricas a numéricas.
3.- Junte ambos dataset en uno, llamado **df_procesado**.
```
from sklearn.impute import SimpleImputer

# Numerical subset: drop the categorical *_tipo columns and 'year'.
df_num = df.drop(['clase_tipo', 'traccion_tipo', 'transmision_tipo', 'combustible_tipo', 'tamano_motor_tipo',
                  'consumo_tipo', 'co2_tipo', 'year'], axis=1)

# Fill missing values with each column's mean. fit_transform already fits
# the imputer, so no separate fit() call is needed; carrying the original
# column names avoids the fragile positional rename of 0..3.
si = SimpleImputer(strategy='mean')
df_num = pd.DataFrame(si.fit_transform(df_num), columns=df_num.columns, index=df_num.index)

# Scale every numerical column to the [0, 1] range.
scaler = MinMaxScaler()
df_num[df_num.columns] = scaler.fit_transform(df_num)
df_num.head()

# Categorical subset: one-hot encode with get_dummies.
df_cat = df.drop(['desplazamiento', 'cilindros', 'co2', 'consumo_litros_milla'], axis=1)
df_cat = pd.get_dummies(df_cat)

# Both frames share the same row index, so a column-wise concat joins
# them row-for-row (equivalent to the merge-on-index approach).
df_procesado = pd.concat([df_num, df_cat], axis=1)
df_procesado
```
* get_dummies categoriza las varibles de tal forma que las transforma en columnas para cada categoría, marcando con 1 en esa columna si la variable corresponde a esa categoría y 0 en caso contrario
## 2.- Realizar ajuste mediante kmeans
Una vez depurado el conjunto de datos, es momento de aplicar el algoritmo de **kmeans**.
1. Ajuste el modelo de **kmeans** sobre el conjunto de datos, con un total de 8 clusters.
2. Calcular los cluster y el valor de los centroides.
3. Realizar que resumas las principales cualidades de cada cluster. Para cada cluster calcule:
a. Valor promedio de las variables numérica.\
b. Moda para las variables numericas
```
from sklearn.cluster import KMeans

# Fit k-means with a fixed number of clusters on the processed data.
n_clusters = 8
kmeans = KMeans(n_clusters=n_clusters)
kmeans.fit(df_procesado)
centroids = kmeans.cluster_centers_  # centroid coordinates
clusters = kmeans.labels_            # cluster assignment per row
centroids_df = pd.DataFrame(centroids, columns=list(df_procesado.columns))
# label the data with the clusters found
df_procesado["cluster"] = clusters
df_procesado["cluster"] = df_procesado["cluster"].astype('category')
# 1-based cluster ids for the centroid table
centroids_df["cluster"] = list(range(1, n_clusters + 1))
# The first four columns of df_procesado are the numerical variables, so
# dropping columns[4:] keeps only them for the per-cluster summaries.
for i in range(n_clusters):
    print("Media de variables numéricas del cluster", i+1)
    print(df_procesado[df_procesado['cluster']==i].drop(list(df_procesado.columns[4:]), axis=1).mean())
    print("\n :::::::::::::::::::::::::: \n")
for i in range(n_clusters):
    print("Moda de variables numéricas del cluster", i+1)
    print(df_procesado[df_procesado['cluster']==i].drop(list(df_procesado.columns[4:]), axis=1).mode())
    print("\n :::::::::::::::::::::::::: \n")
```
## 3.- Elegir Número de cluster
Estime mediante la **regla del codo**, el número de cluster apropiados para el caso.
Para efectos prácticos, eliga la siguiente secuencia como número de clusters a comparar:
$$[5, 10, 20, 30, 50, 75, 100, 200, 300]$$
Una vez realizado el gráfico, saque sus propias conclusiones del caso.
```
# elbow-rule implementation: fit k-means for each candidate number of
# clusters and record the model's inertia as the score
Nc = [5,10,20,30,50,75,100,200,300]
kmeans = [KMeans(n_clusters=i) for i in Nc]
score = [kmeans[i].fit(df_procesado).inertia_ for i in range(len(kmeans))]
df_Elbow = pd.DataFrame({'Number of Clusters':Nc,
                         'Score':score})
df_Elbow
# plot the elbow curve: the 'elbow' where the score stops improving
# noticeably suggests a reasonable number of clusters
fig, ax = plt.subplots(figsize=(11, 8.5))
plt.title('Elbow Curve')
sns.lineplot(x="Number of Clusters",
             y="Score",
             data=df_Elbow)
sns.scatterplot(x="Number of Clusters",
                y="Score",
                data=df_Elbow)
```
*Es conveniente utilizar entre 10 y 20 clusters*
| github_jupyter |
# What is quantum?
## What is a bit?
In the previous chapter we learnt that ‘bits’ are the world’s simplest alphabet, and that they can be used to represent any information. We also learnt that all modern computers store, and do operations on bits. So far, we’ve only thought about bits as abstract units of information, but to make a working computer, we need to make our bits out of real things. So how can we make a bit in the real world?
<!-- ::: q-block.tabs -->
## How can we store bits?
<!-- ::: tab -->
### Punched cards
<!-- ::: column(width=350) -->
**Punched cards**
In the early days of computing, computer scientists stored bits by making holes in paper cards. These cards were divided into grids, and each cell in the grid represents a bit. If there is a hole in that cell, the bit is '1'; if there is no hole, the bit is '0'. If you want to change the value of a bit, you can either punch a new hole, or [patch](gloss:patch) the hole back up.
<!-- ::: column(width=250) -->

<!-- ::: -->
<!-- ::: tab -->
### Compact disks
<!-- ::: column(width=350) -->
**Compact disks**
Compact disks were created in the '80s to store audio recordings using bits. The designers decided to use pits and troughs on the surface of the disk to represent their bits, and we can sweep a laser over the disk to detect them. The disk is read along a spiral line, divided into segments. For each segment, a flat surface represents a '0', and a transition from peak to trough (or vice-versa) represents a '1'.
<!-- ::: column(width=250) -->

<!-- ::: -->
<!-- ::: tab -->
### Electron orbitals
<!-- ::: column(width=350) -->
**Electron orbitals**
You may remember from chemistry (don't worry if not) that particles called electrons orbit the center (nucleus) of an atom. You may also remember that these electrons can only orbit at specific ([quantized](gloss:quantized)) distances from the nucleus which we call [_shells_](gloss:shell). If we have a way of detecting which shell an electron is in, we can pick two of these shells and use the location of the electron as a bit. If the electron is in one of these shells, the bit is '0', and if it is in the other, the bit is '1'.
<!-- ::: column(width=250) -->

<!-- ::: -->
<!-- ::: -->
<!-- ::: -->
The behaviour of both cards and compact disks can be easily explained using _classical physics_: Physics is essentially a description of the world around us; we do experiments to see how the world behaves, and from this we try to work out what rules the universe follows. “Classical physics” is the name for the set of rules that scientists came up with before the early 1900s, and it’s really good at predicting the behaviour of things like cricket balls and car engines.
But the electron orbiting an atom is a bit different. Around the early 1900s, scientists started being able to study things on the atomic scale. They found that really small things like atoms behave differently to the things we interact with in our day-to-day lives, and in certain cases the rules of classical physics weren’t quite right. The physics they had needed modifying, so the scientists came up with a more accurate set of rules which became known as “quantum physics”.

<!-- ::: q-block.reminder -->
### Key experiments in quantum physics
For those interested, here are quick summaries of some events that led to the development of quantum physics. You don't need to understand these to read the rest of this course, this is just here for those that want to read more about the history and physics.
<details>
<summary>The ultraviolet catastrophe</summary>
<p>Hot things emit radiation as light, and the higher the temperature, the higher the frequencies of these light waves. If something only emits and radiates light (i.e., does not reflect it), we call it a ‘black body’. The theoretically perfect black body is not a terrible approximation of many real objects, including stars and planets.</p>
<p>Between 1900-1905, two scientists (Rayleigh and Jeans) used the rules of classical physics to derive an equation that describes how the <i>temperature</i> of a black body affects the <i>frequency</i> of its emitted light waves. The law they derived predicted that hot objects should emit a ridiculous amount of high-frequency (ultraviolet) light, which doesn’t match what we see at all.</p>
<p>Until this point, it was widely accepted that light was a wave. But another scientist (Max Planck) managed to show that, if light had to be emitted in quantized chunks (i.e., particles we now call ‘photons’), you could derive an equation that correctly described the relationship between temperature and emission that we can measure.</p>
</details>
<details>
<summary>The double slit experiment</summary>
<p>If you fire some particles through a hole onto a screen and recorded where they landed, you’d find more in the middle of the screen and less at the edges (the shape is actually a bit more complicated than this but we won't discuss that here). So, what would you expect to happen if you instead fired particles through _two_ holes onto a screen? If the holes were close enough, common sense says we’d see roughly the same thing, with maybe more particles making it through.</p>
<p>But when we try this experiment with small particles such as photons, electrons and even atoms, we see an interference pattern, even if we just send one particle through at a time! This experiment was done all the way back in the late 1700s with light, which lead scientists to believe light was not a particle but instead a wave. But as we saw with the ultraviolet catastrophe, this still wasn’t the entire picture.</p>

<p>Image by Jordgette - Own work, CC BY-SA 3.0, <a href="https://commons.wikimedia.org/w/index.php?curid=9529698">Link</a></p>
</details>
<!-- ::: -->
For computer scientists, a natural question is: “What if our bits followed the rules of quantum physics instead of classical physics?”. We call these bits “qubits” for “quantum bit”, and computers that can operate on these bits are called “quantum computers”.
In the next section you can interact with one of these qubits (well, not really – we’re just simulating it - but we promise the behaviour is the same) and discover what 'quantum' is!
## Exploring qubits
To explore quantum behaviour, we need to remind ourselves about probabilities. If you’ve read the previous chapter, all the operations we’ve seen so far have been [deterministic](gloss:deterministic). This means that, acting on the same input state, they will always give the same output state. Let’s take a look at a new operation that only acts on quantum bits: the [Hadamard](gloss:hadamard) gate, which we will call the “H-gate” for short.
<!-- ::: q-block -->
### Exploring the H-gate
q-mini-composer(goal="what-is-minicomposer")
.slide
.circuit
.autoMeasureGate
.availableGates H
.initialCircuit
.qubit
.goalCircuit
.qubit H
.startProbabilities 0: 1, 1: 0
.endProbabilities 0: 0.5, 1: 0.5
.instructions Drag the H-gate down onto the circuit
.lesson The H-gate seems to give a 50-50 chance of transforming the qubit from |0⟩ to either |0⟩ or |1⟩. We can plot this on a probability tree: <img src="images/what-is/minicomposer/prob-0.svg">
.info Nice one! Let's keep experimenting.
.slide
.circuit
.autoMeasureGate
.availableGates X
.initialCircuit
.qubit
.goalCircuit
.qubit X
.startProbabilities 0: 1, 1: 0
.endProbabilities 0: 0, 1: 1
.instructions Transform our qubit to the state |1⟩.
.lesson Great! After the X-gate, our qubit has 100% chance of being in the state |1⟩. Let's see how the H-gate acts on this input.
.info Click 'next' to add the H-gate.
.slide
.circuit
.autoMeasureGate
.availableGates H
.initialCircuit
.qubit X
.goalCircuit
.qubit X H
.startProbabilities 0: 0, 1: 1
.endProbabilities 0: 0.5, 1: 0.5
.instructions Add the H-gate to the end of the circuit.
.lesson The H-gate also seems to give a 50-50 chance of transforming the qubit from |1⟩ to either |0⟩ or |1⟩. We can plot this on a probability tree too: <img src="images/what-is/minicomposer/prob-1.svg">
.info Awesome!
.slide
.circuit
.autoMeasureGate
.availableGates
.initialCircuit
.qubit X H
.goalCircuit
.qubit H
.startProbabilities 0: 0.5, 1: 0.5
.endProbabilities 0: 0.5, 1: 0.5
.instructions Remove the X-gate from your circuit
.lesson Now we know how the H-gate behaves on both possible input states, we can use this to predict its behaviour. <img src="images/what-is/minicomposer/prob-chained.svg"> Look at the probability tree, what do you think would happen if we applied two H-gates in a row?
.info Think about the answer before clicking 'next'.
.slide
.circuit
.autoMeasureGate
.availableGates H
.initialCircuit
.qubit H
.goalCircuit
.qubit H H
.startProbabilities 0: 0.5, 1: 0.5
.endProbabilities 0: 1, 1: 0
.instructions Let's test our theory. What would happen if we applied <i>two</i> H-gates in sequence?
.lesson What‽ This is strange. Let's try with the input |1⟩.
.info Nice one! Let's keep experimenting.
.slide
.circuit
.autoMeasureGate
.availableGates H
.initialCircuit
.qubit X H
.goalCircuit
.qubit X H H
.startProbabilities 0: 0.5, 1: 0.5
.endProbabilities 0: 0, 1: 1
.instructions What happens if we apply two H-gates after an X-gate?
.lesson This doesn't fit our model at all! In fact, there are no numbers we can put on our probability trees that will describe this behaviour. <img src="images/what-is/minicomposer/prob-chained-wrong.svg"> To explain this, we'll need to replace our probabilities with something else.
.info Congratulations! You've completed this exercise.
<!-- ::: -->
We’ve just seen some interesting behaviour: Two probabilistic operations, applied in sequence, seem to “undo” each other! In fact, this simple behaviour is so unusual we can’t describe it using probability trees. There just aren’t any numbers we can put on our branches that give the correct result in all cases.
You might be thinking, “maybe the H-gate knows it’s being applied twice, and behaves differently?”. This would count as another input into the gate, alongside the qubit’s state. You could describe this behaviour if you allow another hidden input to the H-gate operation that we can’t see directly. And in fact, this is sort of what’s happening.
## Beyond probabilities
Probabilities are useful for when there are many possible outcomes and we don’t have enough information to work out which will happen, like a dice roll, or a coin toss. We give each outcome a probability and use these to work out the likelihood of something occurring. Probabilities work extremely well for things we usually see in the world around us, but with "quantum" things (like qubits), this approach fails, and we need to update it to explain the behaviour we’re seeing.
So, what’s the solution? To describe quantum mechanics, we can use probability _amplitudes._ Probability amplitudes are similar to normal probabilities in that:
- amplitudes have a magnitude,
- each possible outcome has a probability amplitude,
- and the magnitude of that outcome's amplitude tells us how likely that outcome is to occur.
But amplitudes also have an extra property which we call "[phase](gloss:phase)". You can think of phase as an angle, so whereas conventional probabilities only have a magnitude, amplitudes also have a direction.

More specifically, an amplitude is a complex number (if you don’t know what a complex number is, don’t worry about that just yet).
The result of phase is that, when we add two of these amplitudes together, they can cancel each other out, just like positive and negative numbers do. This behaviour is called _interference_ and explains all the behaviour specific to quantum mechanics that we don’t see in classical mechanics.
<!-- ::: q-block.binary -->
### Amplitude Addition Widget
We can see two amplitudes added together below. Try changing the size and direction of the amplitudes. What effect does this have on the size of the amplitude on the right?
q-amplitude-addition-widget
<!-- ::: -->
To find the probability of measuring an outcome, we square the magnitude of that outcome’s amplitude. This is a mathematical ‘trick’ that makes everything add up nicely at the end.

In fact, the introduction of phases and amplitudes is also a mathematical trick, but it works so well that scientists eventually concluded it must exist. We never see phase directly, but we know it exists because of the interference effects it produces.
<!-- ::: q-block -->
### Explaining the H-gate
<!-- ::: q-carousel -->
<!-- ::: div -->

So, with this new tool in our box, can we describe the H-gate?
<!-- ::: -->
<!-- ::: div -->

Seeing as we’ve taken you down this route, you can probably guess the answer is “yes”, and it involves phases. Let’s start by remembering how the H-gate acts on the state $|0\rangle$. Before the H-gate, the chance of measuring the state $|0\rangle$ was 1, so the magnitude of the amplitude must be 1 too. We can’t tell what the phase is from this information, so we’ll just assume it’s 0.
<!-- ::: -->
<!-- ::: div -->

After we apply the H-gate, we will measure either $|0\rangle$ or $|1\rangle$ with probability $1/2$. Since we square the magnitudes of our amplitudes to get probabilities, the magnitudes of these two amplitudes must be $\sqrt{1/2}$. We can also say the same if we had started in the state $|1\rangle$. Again, we can’t get any information about how this transformation might affect the phases yet; they could actually be transformed in any way and we wouldn’t notice through this experiment alone, so we will not make any guesses just yet.
<!-- ::: -->
<!-- ::: div -->

But when we apply two H-gates, we can start to make statements about the phases our amplitudes can have. At the end of our amplitude tree, we have four different branches that lead to only two possible outcomes. To work out the final amplitudes for these outcomes, we need to multiply along those branches and add the amplitudes together, just as we would a probability tree. To give a 0% chance of measuring $|1\rangle$, we need the amplitudes on the two branches that lead to $|1\rangle$ to have opposite phases.
<!-- ::: -->
<!-- ::: div -->

Since two H-gates in sequence give a deterministic result, the H-gate must always behave in the same way given the same input. Here's one guess for what might happen: Acting on the $|0\rangle$ state, all output amplitudes point in the same direction, but acting on the $|1\rangle$ state, the phase of the $|1\rangle$ state is rotated 180°.
<!-- ::: -->
<!-- ::: div -->

When we chain these trees together, we multiply along the branches and add up the final amplitudes for each state.
<!-- ::: -->
<!-- ::: div -->

Let's start with the two branches that lead to the state $|0\rangle$. For the top branch, we do $\sqrt{1/2}\times\sqrt{1/2} = 1/2$, and we add that to the other branch that leads to $|0\rangle$ (for which we also get the $1/2$). Adding these together, the final magnitude of the amplitude of the state $|0\rangle$ is $\tfrac{1}{2} + \tfrac{1}{2} = 1$. This means the probability of measuring $|0\rangle$ is also $1$.
<!-- ::: -->
<!-- ::: div -->

Our model works so far, let's see what happens to the branches that lead to the state $|1\rangle$. Both these branches also end up with magnitudes of $1/2$, but the branch on the bottom is pointing the other way. This means these two amplitudes cancel out! On the amplitude tree, we've used a minus sign to show that the H-gate has reversed the direction of this amplitude. The final amplitude of the state $|1\rangle$ is then $0$, which matches what we thought would happen!
<!-- ::: -->
<!-- ::: div -->

So our amplitude model works! We now have a complete description of the H-gate.
<!-- ::: -->
<!-- ::: -->
<!-- ::: -->
Here, something interesting and counter-intuitive has happened; we use probabilities to account for lack of knowledge, e.g. if we toss a coin, it could be head or tails with equal probability. The reality is that the coin will be in one of these states and we use probabilities to deal with the fact that we don’t know which.
But with this amplitude model, we can’t say the qubit took a specific route (“$0 \rightarrow 0 \rightarrow 0$ _or_ $0 \rightarrow 1 \rightarrow 0$”) because we wouldn’t see the interference effect. This leads people to say things like “the qubit can be $0$ _and_ $1$ _at the same time!”_, which isn’t necessarily an incorrect way to describe the behaviour of the qubit, but also isn’t especially useful to people learning about quantum computing. It’s important to remember that, because we don’t see behaviour like this in our everyday lives, we don’t really have common words for it, which is why scientists created the new words we’ll learn about in this course.
## Neatening things up
We now have a working explanation of the H-gate using amplitude trees, but using trees can get messy quite quickly. There are so many branches and labels that even trees for a small number of qubits get very complicated. To simplify this, quantum computer scientists use mathematical objects called ‘[vectors](gloss:vector)’ which, for our purposes, are just lists of numbers. At each point in our computation, we can use a vector to keep track of the amplitudes of all the possible outcomes.

We now have a pretty simple way of simulating qubits, so what’s the point of trying to create a working quantum computer when we already know how to perfectly simulate it using traditional computers? Well, for a small number of qubits, simulating is easy, but if we want to simulate quantum computers with large numbers of qubits, keeping track of all these amplitudes can be very time-consuming and require a lot of memory.
<!-- ::: q-block.exercise -->
### Quick quiz
<!-- ::: q-quiz(goal="intro-what-is-0") -->
<!-- ::: .question -->
If you have $n$ qubits, how many possible outcomes are there?
<!-- ::: -->
<!-- ::: .option -->
1. $n$
<!-- ::: -->
<!-- ::: .option -->
2. $n^2$
<!-- ::: -->
<!-- ::: .option(correct) -->
3. $2^n$
<!-- ::: -->
<!-- ::: .option -->
4. $\log{n}$
<!-- ::: -->
<!-- ::: -->
***
<!-- ::: q-quiz(goal="intro-what-is-1") -->
<!-- ::: .question -->
If you want to predict the behaviour of $n$ qubits using vectors like those above, you need to keep track of _at most..._
<!-- ::: -->
<!-- ::: .option -->
1. $n$ amplitudes
<!-- ::: -->
<!-- ::: .option -->
2. $n^2$ amplitudes
<!-- ::: -->
<!-- ::: .option(correct) -->
3. $2^n$ amplitudes
<!-- ::: -->
<!-- ::: .option -->
4. $\log{n}$ amplitudes
<!-- ::: -->
<!-- ::: -->
<!-- ::: -->
The number of possible outcomes doubles with each extra qubit we add, and if we use this technique, the size of our vectors will grow exponentially too. This isn’t true for every quantum circuit — for example, if we start with all our qubits in the state 0 and do nothing to them, then it’s pretty easy to predict what the output will be. But it seems that more difficult quantum circuits can only be simulated on classical computers through algorithms whose cost grows exponentially with the number of qubits in the circuit. The upper limit for simulating a difficult quantum circuit tends to be somewhere around the 30–40 qubit mark.
## Summary
We’ve learnt a lot in this chapter, but the key points are:
- We can make bits out of objects that follow the rules of quantum mechanics, and we call these “qubits”
- These qubits can be described using probability amplitudes, which are like classical probabilities but with “phase”
- These amplitudes can cancel each other out (an effect called interference) and this is what causes the previously unexplained behaviour
- The best algorithms we have for simulating qubits use exponential resources with the number of qubits, so simulating large numbers of qubits is out of reach for classical computers.
We have seen that quantum computers can do something classical computers can’t: simulating the behaviour of qubits! The classical computer needs exponential resources, but the quantum computer can just carry out the operations and see what happens. The next interesting question is: “Can a quantum computer solve any other problems that a classical computer can’t?”. In this course we’ll see the answer is “yes”!
## Exercise
All the qubits on this page have been simulated using classical programs designed using this amplitude model, so why should you believe that's how actual qubits behave? If you want to play around with _real_ qubits on a real quantum computer, check out the [IBM Quantum Composer](https://quantum-computing.ibm.com/composer/files/new). Why don't you try and recreate this experiment and run it on a device?
You will see lots of other gates and instructions there, most of which we have _not_ covered before. In IBM's composer, if an instruction is grey, then it is not a quantum gate and won't follow the amplitude rules we've discovered in this chapter.
<!-- ::: q-block.exercise -->
### Try it yourself
Using your knowledge of amplitudes, see if you can work out how these gates transform amplitudes:
- The X-gate
- The Y-gate
- The Z-gate
<details>
<summary>No idea where to start?</summary>
<p>
Don't worry! This is a pretty tough exercise, so we'll give you a clue. Each of these three gates (X, Y and Z) will only perform at most two actions: They can swap the amplitudes of the states $|0\rangle$ and $|1\rangle$:
</p><p>
<img src="images/what-is/flipping-amplitudes.svg">
</p><p>
and/or they can reverse the direction of the state $|1\rangle$:
</p><p>
<img src="images/what-is/reverse-direction.svg">
</p><p>
Create small circuits with these gates and with H-gates. Try sandwiching the gate you're investigating between two H-gates. Guess the behaviour of each gate and draw amplitude trees to see if you're correct.
</p>
</details>
Try creating different circuits with H-gates, Z-gates and X-gates: Does your model still predict the right results?
[Try in the IBM Quantum Composer](https://quantum-computing.ibm.com/composer/files/new)
<!-- ::: -->
| github_jupyter |
```
import numpy as np
import pandas as pd
import warnings
warnings.filterwarnings('ignore')
```
### Train/Test split already done
```
#from sklearn.model_selection import train_test_split
# create 80%-20% train-test split
#X_train, X_test, y_train, y_test = train_test_split(X, y, test_size=0.2, random_state=5555)

# Load the pre-split CYP2D6 inhibition data; 'SID' (substance id) is the index.
twoD6_test = pd.read_csv("data/test2d6.csv", index_col='SID')
twoD6_train = pd.read_csv("data/training2d6.csv", index_col='SID')
twoD6_train.head()
col_names2D6 = twoD6_train.columns.tolist()
print('Column names:')
print(col_names2D6)
# Isolate response variable: binarize ActivityScore at the 40 threshold
# (>= 40 counts as active / positive class).
ActivityScore = twoD6_train['ActivityScore']
y_train = np.where(ActivityScore >= 40, 1, 0)
ActivityScore2 = twoD6_test['ActivityScore']
y_test = np.where(ActivityScore2 >= 40, 1, 0)
# looks right sized
y_train.shape, y_test.shape
y_test
# We don't need this column anymore (it is the target, not a feature)
to_drop = ['ActivityScore']
inhib_feat_space = twoD6_train.drop(to_drop, axis=1)
inhib_feat_space_test = twoD6_test.drop(to_drop, axis=1)
# Pull out features for future use
features = inhib_feat_space.columns
features_test = inhib_feat_space_test.columns
# .to_numpy(dtype=float) replaces .as_matrix().astype(np.float):
# DataFrame.as_matrix() and the np.float alias were both removed upstream.
X_train = inhib_feat_space.to_numpy(dtype=float)
X_test = inhib_feat_space_test.to_numpy(dtype=float)
X_train.shape, X_test.shape
n_pos1 = y_test.sum()
n_pos1
n_pos2 = y_train.sum()
n_pos2
# BUGFIX: the training-set summary previously reported X_test.shape[1] features.
print('Feature space holds '+repr(X_train.shape[0])+' observations and '+repr(X_train.shape[1])+' features')
print('Unique target labels: '+repr(np.unique(y_train)))
print('Feature space holds '+repr(X_test.shape[0])+' observations and '+repr(X_test.shape[1])+' features')
print('Unique target labels: '+repr(np.unique(y_test)))
X_test.shape[1]
```
## Scale the features before training model
```
# This is important: models below (especially SVM/KNN) assume standardized features.
from sklearn.preprocessing import StandardScaler
scaler = StandardScaler()
X_train = scaler.fit_transform(X_train)
# BUGFIX: use the scaler fitted on the training set only. Re-fitting on the
# test set leaks test statistics and puts the two sets on different scales.
X_test = scaler.transform(X_test)
# sklearn.cross_validation was removed in scikit-learn 0.20; KFold now lives in
# model_selection and takes n_splits (the old n_folds / length-first API is gone).
from sklearn.model_selection import KFold
def run_cv(X, y, clf_class, **kwargs):
    """Return out-of-fold predictions for clf_class using 5-fold shuffled CV.

    X, y: numpy arrays; clf_class: an sklearn-style estimator class;
    kwargs are forwarded to the classifier constructor for every fold.
    """
    kf = KFold(n_splits=5, shuffle=True)
    y_pred = y.copy()
    # Iterate through folds; each sample is predicted exactly once,
    # by a model that never saw it during fitting.
    for train_index, test_index in kf.split(X):
        X_tr, X_te = X[train_index], X[test_index]
        y_tr = y[train_index]
        # Initialize a fresh classifier per fold with the given keyword arguments
        clf = clf_class(**kwargs)
        clf.fit(X_tr, y_tr)
        y_pred[test_index] = clf.predict(X_te)
    return y_pred
from sklearn.svm import SVC
from sklearn.ensemble import RandomForestClassifier as RF
from sklearn.neighbors import KNeighborsClassifier as KNN
def accuracy(y_true, y_pred):
    """Fraction of positions where y_true and y_pred agree.

    NumPy averages the boolean match mask as 1.0 / 0.0 values.
    """
    matches = (y_true == y_pred)
    return np.mean(matches)
# Out-of-fold accuracy for each model, on both the training and test feature sets.
for label, model in (("K-nearest-neighbors", KNN),
                     ("Support vector machines", SVC),
                     ("Random forest", RF)):
    print(f"{label} (training set):")
    print("%.3f" % accuracy(y_train, run_cv(X_train, y_train, model)))
    print(f"{label} (test set):")
    print("%.3f" % accuracy(y_test, run_cv(X_test, y_test, model)))
from sklearn.metrics import confusion_matrix
# Confusion matrices from out-of-fold predictions, training set first.
y_train = np.array(y_train)
class_names = np.unique(y_train)
confusion_matrices_training = [
    ("K-Nearest-Neighbors training", confusion_matrix(y_train, run_cv(X_train, y_train, KNN))),
    ("Support Vector Machines training", confusion_matrix(y_train, run_cv(X_train, y_train, SVC))),
    # BUGFIX: label previously read "Random Forest taining"
    ("Random Forest training", confusion_matrix(y_train, run_cv(X_train, y_train, RF))),
]
y_test = np.array(y_test)
class_names = np.unique(y_test)
confusion_matrices_test = [
    ("K-Nearest-Neighbors test", confusion_matrix(y_test, run_cv(X_test, y_test, KNN))),
    ("Support Vector Machines test", confusion_matrix(y_test, run_cv(X_test, y_test, SVC))),
    ("Random Forest test", confusion_matrix(y_test, run_cv(X_test, y_test, RF))),
]
#draw_confusion_matrices(confusion_matrices,class_names)
confusion_matrices_training, confusion_matrices_test
# BUG: is_churn / pred_churn are never defined in this notebook (left over from
# a churn-prediction tutorial); calling this raises NameError, so it is disabled.
# roc_auc_score(is_churn, pred_churn)
```
| github_jupyter |
```
import numpy as np
import pandas as pd
import seaborn as sns
import time
from numpy.linalg import norm
from sklearn.metrics import mean_squared_error
from scipy.sparse import coo_matrix
```
1. load data
- sgd
- threshold
- drop 0
- error function
- SGD function
- solver for matrix
# Loading data
```
# Forward slashes work on every OS; the previous backslash path was Windows-only.
dataFile = 'data/BX-Book-Ratings.csv'
# BX ratings are ';'-separated and latin-1 encoded; only the first 1000 rows here.
df = pd.read_csv(dataFile, sep=";",
                 header=0,
                 names=["user", "isbn", "rating"],
                 encoding='iso-8859-1',
                 nrows=1000
                 )
df.head()
def loadingData(dataFile, nrows=None):
    """Read a BX ratings CSV into columns user / isbn / rating.

    The file is ';'-separated and latin-1 (iso-8859-1) encoded; its own header
    row is discarded in favour of our column names. nrows=None loads the whole
    file, otherwise only the first `nrows` data rows.
    """
    frame = pd.read_csv(
        dataFile,
        sep=";",
        header=0,
        names=["user", "isbn", "rating"],
        encoding='iso-8859-1',
        nrows=nrows,
    )
    return frame
```
# Preprocessing
- select input dataset size
```
# does not work on whole data set coz its too large
# R = df.pivot(index='user',columns='isbn',values='rating')
```
encountered memory error
dropping zeros
```
print(df.shape)
df = df[df.rating!=0]  # a 0 rating is implicit feedback, not a score -- drop it
df.shape
```
still memory error
filter unpopular books?
filter lazy users?
```
# sparse matrix works more efficiently than a dense pivot for this data
df['rating'] = df['rating'].astype(float)
df['user'] = df['user'].astype("category")
df['isbn'] = df['isbn'].astype("category")
# convert strings to category codes because a sparse matrix cannot index by string
isbn_code = df['isbn'].cat.codes.copy()
user_code = df['user'].cat.codes.copy()
# COO matrix: rows = user codes, cols = book codes, values = ratings
R = coo_matrix((df['rating'],(user_code, isbn_code)))
R.shape
def covertToSparse(df):
    """Build a users-x-books COO rating matrix from a user/isbn/rating dataframe.

    Mutates df in place (rating -> float, user/isbn -> category) and returns the
    scipy.sparse.coo_matrix whose rows are users and columns are books.
    """
    df['rating'] = df['rating'].astype(float)
    df['user'] = df['user'].astype("category")
    df['isbn'] = df['isbn'].astype("category")
    # COO needs integer indices, so use the category codes of the string ids.
    row_idx = df['user'].cat.codes.copy()
    col_idx = df['isbn'].cat.codes.copy()
    return coo_matrix((df['rating'], (row_idx, col_idx)))
```
# SGD to solve for P and Q
error = R - R_hat
=> R_hat = P @ Q (P: users x factors, Q: factors x items)
qi = qi + gamma * (error * pi - lambda * qi)
pi = pi + gamma * (error * qi - lambda * pi)
```
# element by element approach
def SGD(R, K=5, lambda_=0.02, steps=10, gamma=0.001, verbose=False, rmse_target=1):
    """Factor the sparse rating matrix R (~ P @ Q) by stochastic gradient descent.

    R: scipy.sparse.coo_matrix of observed ratings.
    K: number of latent factors; lambda_: L2 regularization; gamma: learning rate.
    Early-stops once the RMSE of the dense reconstruction drops below rmse_target.
    Returns (P, Q, rmse).
    """
    M, N = R.shape
    P = np.random.rand(M, K)
    Q = np.random.rand(K, N)
    # initial RMSE, computed over the dense reconstruction (zeros included)
    rmse = np.sqrt(mean_squared_error(R.toarray(), P @ Q))
    print(f"STARTING RMSE: {rmse:.2f}")
    for step in range(steps):
        for k in range(len(R.data)):
            rui = R.data[k]  # observed rating (COO stores data/row/col in parallel arrays)
            u = R.row[k]     # user index (row)
            i = R.col[k]     # item index (col)
            rui_hat = P[u, :] @ Q[:, i]  # current prediction: sum(row x col)
            error = rui - rui_hat
            # BUGFIX: take Q's gradient step against the *pre-update* user row.
            # The previous code updated P first and fed the already-updated row
            # into Q's update, silently mixing the two gradient steps.
            Pu = P[u, :].copy()
            P[u, :] = P[u, :] + gamma * (error * Q[:, i] - lambda_ * P[u, :])
            Q[:, i] = Q[:, i] + gamma * (error * Pu - lambda_ * Q[:, i])
        rmse = np.sqrt(mean_squared_error(R.toarray(), P @ Q))
        if verbose:
            print(f"STEP NO: {step+1} - CURRENT RMSE:{rmse:.2f}")
        if rmse < rmse_target:
            break
    if verbose:
        print(f"STEP NO: {step+1} - CURRENT RMSE:{rmse:.2f}")
    return P, Q, rmse
# element by element approach
def SGD_bias(R, K=5, lambda_=0.02, steps=10, gamma=0.001, verbose=False, rmse_target=1):
    """SGD matrix factorization with crude additive bias terms.

    Each prediction adds the global rating mean plus per-user / per-item bias
    proxies (means of the P row and Q column). Returns (P, Q, rmse).
    """
    M, N = R.shape
    P = np.random.rand(M, K)
    Q = np.random.rand(K, N)
    # initial RMSE over the dense reconstruction
    rmse = np.sqrt(mean_squared_error(R.toarray(), P @ Q))
    print(f"STARTING RMSE: {rmse:.2f}")
    # PERF: the global mean depends only on R, so hoist it out of the loops
    # (it used to be recomputed for every single rating).
    mean = np.mean(R.data)
    for step in range(steps):
        for k in range(len(R.data)):
            rui = R.data[k]  # observed rating
            u = R.row[k]     # user index (row)
            i = R.col[k]     # item index (col)
            # bias proxies; `bu` was previously named `ui`, shadowing the loop index
            bu = np.mean(P[u, :])  # mean rating given by that user
            bi = np.mean(Q[:, i])  # mean rating given to that item
            rui_hat = P[u, :] @ Q[:, i] + mean + bu + bi
            error = rui - rui_hat
            # Use the pre-update user row for Q's gradient step, so the item
            # update is not fed the already-updated row.
            Pu = P[u, :].copy()
            P[u, :] = P[u, :] + gamma * (error * Q[:, i] - lambda_ * P[u, :])
            Q[:, i] = Q[:, i] + gamma * (error * Pu - lambda_ * Q[:, i])
        rmse = np.sqrt(mean_squared_error(R.toarray(), P @ Q))
        if verbose:
            print(f"STEP NO: {step+1} - CURRENT RMSE:{rmse:.2f}")
        if rmse < rmse_target:
            break
    if verbose:
        print(f"STEP NO: {step+1} - CURRENT RMSE:{rmse:.2f}")
    return P, Q, rmse
def cal_error(R, P, Q, lambda_=0.02):
    """Regularized squared error of the biased factorization over observed (>0) ratings.

    For each stored rating, the prediction is P-row @ Q-col plus the global mean
    and per-user / per-item bias proxies; the penalty is lambda_ times the sum of
    squares of the biases and the L2 norms of the factor vectors.
    """
    ratings = R.data
    rows = R.row
    cols = R.col
    error = 0
    # PERF: the global rating mean depends only on R -- hoisted out of the loop
    # (it used to be recomputed once per rating).
    mean = np.mean(R.data)
    for k in range(len(ratings)):
        rui = ratings[k]
        u = rows[k]
        i = cols[k]
        # bias proxies; `bu` was previously named `ui`, shadowing the loop index
        bu = np.mean(P[u, :])  # mean rating given by that user
        bi = np.mean(Q[:, i])  # mean rating given to that item
        if rui > 0:
            rui_hat = P[u, :] @ Q[:, i] + mean + bu + bi
            # squared residual plus L2 penalty on the biases and factor vectors
            terms = [bu, bi, norm(P[u, :], 2), norm(Q[:, i], 2)]
            error = error + (rui - rui_hat)**2 + \
                lambda_ * sum([t**2 for t in terms])
    return error
def SGD_bias2(R,K=5,lambda_=0.02,steps=10,gamma=0.001,verbose=False,rmse_target=1):
    """SGD factorization with bias terms whose "RMSE" uses the regularized cal_error()."""
    # lambda_: regularization
    # gamma :learning rate
    # initialise matrix P and Q
    M,N = R.shape
    P = np.random.rand(M,K)
    Q = np.random.rand(K,N)
    #initial RMSE -- cal_error includes the L2 penalty, so this is not a pure RMSE
    rmse = np.sqrt(cal_error(R,P,Q,lambda_)/len(R.data))
    print(f"STARTING RMSE: {rmse:.2f}")
    for step in range(steps):
        for ui in range(len(R.data)):
            rui = R.data[ui] # serialize matrix
            u = R.row[ui] # get user index (row)
            i = R.col[ui] # get item index (col)
            # adding bias
            # NOTE(review): `mean` is loop-invariant yet recomputed per rating, and
            # `ui` below shadows the loop index (harmless: it is reassigned each pass).
            mean = np.mean(R.data) # mean score of all rating
            ui = np.mean(P[u,:]) # mean rating given by that user
            bi = np.mean(Q[:,i]) # mean rating given to that movie
            bui = mean + ui + bi  # computed but unused: rui_hat re-adds the same terms
            # update P,Q matrix
            rui_hat = P[u,:]@Q[:,i] + mean + ui + bi
            eui = rui - rui_hat
            P[u,:] = P[u,:] + gamma * (eui * Q[:,i] - lambda_ * P[u,:])
            Q[:,i] = Q[:,i] + gamma * (eui * P[u,:] - lambda_ * Q[:,i])
        rmse = np.sqrt(cal_error(R,P,Q,lambda_)/len(R.data))
        if verbose:
            print(f"STEP NO: {step+1} - CURRENT RMSE:{rmse:.2f}")
        if rmse < rmse_target:
            break
    if verbose:
        print(f"STEP NO: {step+1} - CURRENT RMSE:{rmse:.2f}")
    return P,Q,rmse
# R = np.array([[3,0,2],[4,1,9],[9,2,1]])
# Sanity check: evaluate the regularized error for random factors of the real R.
M,N = R.shape
K=10
P = np.random.rand(M,K)
Q = np.random.rand(K,N)
cal_error(R,P,Q,0.02)
```
# Production
```
starttime = time.time()
# Forward slashes work on every OS; the previous backslash path was Windows-only.
dataFile = 'data/BX-Book-Ratings.csv'
# nrows = None loads the whole file
nrows = 500
df = loadingData(dataFile, nrows)
print(f"DF SIZE: {df.shape}")
df = df[df.rating != 0]  # drop implicit (0) ratings before factorizing
print(f"DF SIZE AFTER DROPPING 0: {df.shape}")
R = covertToSparse(df)
print(f"Rating Matrix shape: {R.shape}")
params = {'R': R,
          'K': 5,
          'lambda_': 0.02,
          'steps': 10,
          'gamma': 0.001,
          'verbose': True,
          'rmse_target': 1
          }
# P,Q,rmse = SGD(**params)
P, Q, rmse = SGD_bias2(**params)
duration = time.time() - starttime
print(f"Process time: {duration:.2f}")
```
# how to input new user?
layer 1: user demographics, bio
use content-based filtering to generate initial recommendations
layer 2: punch in a social media account, obtain the social graph
recommend using collaborative filtering
layer 3: rate some movies to cold-start the LFA process
save and load the trained weights, P and Q
save and display the rating matrix
load the rating matrix to predict user behaviour
# Misc code
```
import os
os.startfile(os.getcwd())  # Windows-only: open the current working folder in Explorer
# element by element approach (older variant kept for reference)
def SGD_old(R,K=5,lambda_=0.02,steps=10,gamma=0.001,verbose=False,rmse_target=1):
    # def SGD(R,K,lambda_,steps,gamma,verbose,rmse_target):
    """Plain SGD factorization; early-stops on rmse_target, prints only the final RMSE."""
    # lambda_: regularization
    # gamma :learning rate
    # initialise matrix P and Q
    M,N = R.shape
    P = np.random.rand(M,K)
    Q = np.random.rand(K,N)
    #initial RMSE over the dense reconstruction (zeros included)
    rmse = np.sqrt(mean_squared_error(R.toarray(), P@Q))
    print(f"STARTING RMSE: {rmse:.2f}")
    for step in range(steps):
        for ui in range(len(R.data)):
            rui = R.data[ui] # serialize matrix
            u = R.row[ui] # get user index (row)
            i = R.col[ui] # get item index (col)
            rui_hat = P[u,:] @ Q[:,i] # sum(row x col)
            error = rui - rui_hat
            # update P,Q matrix
            P[u,:] = P[u,:] + gamma * (error * Q[:,i] - lambda_ * P[u,:])
            Q[:,i] = Q[:,i] + gamma * (error * P[u,:] - lambda_ * Q[:,i])
        rmse = np.sqrt(mean_squared_error(R.toarray(), P@Q))
        if rmse < rmse_target:
            break
    if verbose == True:
        print(f"FINAL RMSE: {rmse:.2f}")
    return P,Q,rmse
```
# Inspection , EDA
```
# Rating histogram including the implicit 0 ratings.
# NOTE(review): sns.distplot is deprecated in recent seaborn releases -- confirm
# the installed version, or migrate to sns.histplot when upgrading.
sns.distplot(df.rating,
             bins=range(10),
             hist_kws={"histtype": "step",
                       "linewidth": 3,
                       "alpha": 1,
                       "color": "r"},
             kde=False,
             )
# Same histogram restricted to explicit (non-zero) ratings.
sns.distplot(df[df.rating!=0].rating,
             bins=range(10),
             hist_kws={"histtype": "step",
                       "linewidth": 3,
                       "alpha": 1,
                       "color": "r"},
             kde=False,
             )
# Sparsity pattern of the dataframe: bright cells mark values equal to 0.
sns.heatmap(df==0,cmap='plasma_r')
```
| github_jupyter |
# 2.2 Transactions Data Cleaning
##### Description
Basic data visualization and data formatting for transactions.csv
##### Notebook Steps
1. Connect Spark
1. Input Data
1. Examine Data
1. Data Cleaning
1. Output Data
## 1. Connect Spark
```
%load_ext sparkmagic.magics
%manage_spark
```
## 2. Input Data
```
%%spark
df = spark.read.csv("s3://jolfr-capstone3/raw/transactions", header=True)
```
## 3. Examine Data
##### show()
```
%%spark
df.show()
```
##### count()
```
%%spark
df.count()
```
##### describe()
```
%%spark
df.describe().show()
```
##### printSchema()
```
%%spark
df.printSchema()
```
##### columns
```
%%spark
df.columns
```
##### head(5)
```
%%spark
df.head(5)
```
##### Null per Column
```
%%spark
from pyspark.sql.functions import isnan, when, count, col
df.select([count(when(isnan(c) | col(c).isNull(), c)).alias(c) for c in df.columns]).show()
```
##### Value Counts
```
%%spark
df.groupBy('payment_method_id').count().orderBy('count').show()
%%spark
df.groupBy('payment_plan_days').count().orderBy('count').show()
%%spark
df.groupBy('plan_list_price').count().orderBy('count').show()
%%spark
df.groupBy('actual_amount_paid').count().orderBy('count').show()
%%spark
df.groupBy('is_auto_renew').count().orderBy('count').show()
%%spark
df.groupBy('is_cancel').count().orderBy('count').show()
```
## 4. Data Cleaning
```
%%spark
from pyspark.sql import types
from pyspark.sql.functions import col, to_date
```
### Columns
##### msno
The msno column corresponds to user ids for the dataset, so the column is renamed from msno to user_id.
```
%%spark
df = df.withColumnRenamed("msno","user_id")
```
##### payment_method_id
The payment_method_id column is cast from string to integer.
```
%%spark
df = df.withColumn("payment_method_id",col("payment_method_id").cast(types.IntegerType()))
```
##### payment_plan_days
The payment_plan_days column is cast from string to integer.
```
%%spark
df = df.withColumn("payment_plan_days",col("payment_plan_days").cast(types.IntegerType()))
```
##### plan_list_price
The plan_list_price column is cast from string to integer.
```
%%spark
df = df.withColumn("plan_list_price",col("plan_list_price").cast(types.IntegerType()))
```
##### actual_amount_paid
The actual_amount_paid column is cast from string to integer.
```
%%spark
df = df.withColumn("actual_amount_paid",col("plan_list_price").cast(types.IntegerType()))
```
##### is_auto_renew
The is_auto_renew column is cast from string to boolean.
```
%%spark
df = df.withColumn("is_auto_renew",col("is_auto_renew").cast(types.BooleanType()))
```
##### transaction_date
The transaction_date column must be parsed and cast to a date object.
```
%%spark
df= df.withColumn('transaction_date',to_date(df.transaction_date, 'yyyyMMdd'))
```
##### membership_expire_date
The membership_expire_date column must be parsed and cast to a date object.
```
%%spark
df= df.withColumn('membership_expire_date',to_date(df.membership_expire_date, 'yyyyMMdd'))
```
##### is_cancel
The is_cancel column is cast from string to boolean.
```
%%spark
df = df.withColumn("is_cancel",col("is_cancel").cast(types.BooleanType()))
```
## 5. Data Output
##### Final Check
```
%%spark
df.printSchema()
%%spark
df.show()
```
##### Output to File
```
%%spark
df.write.format("com.databricks.spark.csv").option("header", "true").mode('overwrite').save('s3://jolfr-capstone3/interim/transactions')
```
| github_jupyter |
```
import os, sys, math, time
from glob import glob
from matplotlib import pyplot as plt
import numpy as np
import pandas as pd
sys.path.insert(0,"/home/nico/Documents/TEAR/Codes_TEAR/PythonCodes/LibFolder")
from Lib_GeneralFunctions import *
from Lib_GeneralSignalProcNAnalysis import *
from Lib_ProfilePlotting import *
from Lib_ProfileProcessing import *
#=================== Plotting style ===================
from matplotlib.animation import FuncAnimation
from IPython.display import clear_output
plt.style.use('seaborn-whitegrid')
from matplotlib import cm
from matplotlib.colors import ListedColormap
from matplotlib.lines import Line2D
from matplotlib.gridspec import GridSpec
#definition of colormap
from palettable.scientific.sequential import LaJolla_20
cmap = LaJolla_20.mpl_colormap
plt.register_cmap(cmap=cmap)
plt.set_cmap('LaJolla_20')
plt.rcParams['image.cmap'] = 'LaJolla_20'
SMALL_SIZE = 8
MEDIUM_SIZE = 10
BIGGER_SIZE = 12
def FontSizeControlFreak(SMALL_SIZE,MEDIUM_SIZE,BIGGER_SIZE):
plt.rc('font', size=SMALL_SIZE) # controls default text sizes
plt.rc('axes', titlesize=SMALL_SIZE) # fontsize of the axes title
plt.rc('axes', labelsize=MEDIUM_SIZE) # fontsize of the x and y labels
plt.rc('xtick', labelsize=SMALL_SIZE) # fontsize of the tick labels
plt.rc('ytick', labelsize=SMALL_SIZE) # fontsize of the tick labels
plt.rc('legend', fontsize=SMALL_SIZE) # legend fontsize
plt.rc('figure', titlesize=BIGGER_SIZE) # fontsize of the figure title
# Timestamp variable
start_time = time.time()
# Container for one reference receiver: slip and slip-rate time series loaded
# from a pair of SEM2DPACK output files.
class SSCreference:
    def __init__(self, filename, coordinates, RefSource="SEM2DPACK"):
        # `filename` is a template with one "{}" slot, filled with "slip" / "sr".
        line = pd.read_csv(filename.format("slip"), header=None)
        self.Time = line[0]   # column 0: time samples
        self.Slip = line[1]   # column 1: slip values
        line = pd.read_csv(filename.format("sr"), header=None)
        self.SlipRate = line[1]  # assumes the sr file shares the slip file's time base -- TODO confirm
        self.Coord = coordinates #Only used for labels and printing
        self.RefSource = RefSource
    #end __init__
    # Default object printing information
    def __repr__(self):
        return "The reference object was generated from: {} and the receiver is located at {}".format(self.RefSource, self.Coord)
    #end __repr__
    def __str__(self):
        return "The reference object was generated from: {} and the receiver is located at {}".format(self.RefSource, self.Coord)
    #end __str__
    def PlotReference(self, ax, SlipSlipRate, filtering=True, **kwargs):
        # Draw the reference curve (dashed black) on `ax`.
        # SlipSlipRate selects "Slip" or "SlipRate" (anything else draws nothing);
        # kwargs are forwarded to the Butterworth filter when filtering is True.
        if SlipSlipRate=="Slip":
            if(filtering):
                ax.plot(self.Time, Butterworth(self.Slip, **kwargs), label = "", c = "k", ls = "--", zorder=1)
            else:
                ax.plot(self.Time, self.Slip, label = "", c = "k", ls = "--", zorder=1)
        elif SlipSlipRate=="SlipRate":
            if(filtering):
                ax.plot(self.Time, Butterworth(self.SlipRate, **kwargs), label = "", c = "k", ls = "--", zorder=1)
            else:
                ax.plot(self.Time, self.SlipRate, label = "", c = "k", ls = "--", zorder=1)
        return ax
path = "/home/nico/Documents/TEAR/Codes_TEAR/ProfilePicking/Output/"
# Reference saved into a list of objects
RefList = [SSCreference(path + "Reference/sem2dpack/sem2d-{}-1.txt", "2km"),
SSCreference(path + "Reference/sem2dpack/sem2d-{}-2.txt", "4km"),
SSCreference(path + "Reference/sem2dpack/sem2d-{}-3.txt", "6km"),
SSCreference(path + "Reference/sem2dpack/sem2d-{}-4.txt", "8km"),
]
Path = "/home/nico/Documents/TEAR/Codes_TEAR/se2dr/Runs/P4/"
Data = pd.read_csv(Path+"SlipAtReceiver.txt",sep="\t")
Data.columns
def FilterDataForFloatEquality(Data, Column, Value, Tolerance = 1e-1):
    """Rows of Data where Data[Column] equals Value to within Tolerance (strict <)."""
    deviation = (Data[Column] - Value).abs()
    return Data.loc[deviation.lt(Tolerance)]
Loc2000 = FilterDataForFloatEquality(Data, "FaultX", 2000, 1e-1)
#Loc2000
```
# Separate per faultY parameter
```
print(Loc2000["FaultY"].unique())
Loc2000f100 = FilterDataForFloatEquality(Loc2000, "FaultY", 100)
```
# Comparing the same location at different QP
```
for q in range(4):
LocQ1 = Loc2000f100.loc[Loc2000f100["q"].eq(q),["Time","Slip","SlipRate"]]
plt.plot(LocQ1.Time, LocQ1["Slip"])
# Live plotting of a file that is still being written: reload() re-reads it from disk.
class SlipDataObject:
    def __init__(self, filename):
        self.FileName = filename                      # tab-separated receiver output
        self.Data = pd.read_csv(filename,sep="\t")
    #end __init__
    # Default object printing information
    def __repr__(self):
        return "File loaded: {}".format(self.FileName)
    #end __repr__
    def __str__(self):
        return "File loaded: {}".format(self.FileName)
    #end __str__
    def FilterDataForFloatEquality(self, Data, Column, Value, Tolerance = 1e-1):
        # Rows where |value - Value| < Tolerance (float-safe equality test).
        # NOTE(review): the mask is computed on self.Data but applied to `Data`;
        # this only lines up when `Data` preserves self.Data's index -- confirm.
        return Data.loc[(abs(self.Data[Column]-Value).lt(Tolerance))]
    def reload(self):
        # Re-read the (possibly grown) file from disk.
        self.Data = pd.read_csv(self.FileName, sep="\t")
    def getDataSubDomain(self, FaultX, FaultXValue, FaultY, FaultYValue, q):
        # Successively filter by fault X position, fault Y position, then exact q.
        FxData = self.FilterDataForFloatEquality(self.Data, FaultX, FaultXValue)
        FyData = self.FilterDataForFloatEquality(FxData, FaultY, FaultYValue)
        return FyData.loc[FyData["q"].eq(q)]
def format_axes(fig):
    # Apply common axis limits to every panel, then attach a legend built from
    # the last 5 lines of the last panel (reference curve + the 4 receivers).
    for i, ax in enumerate(fig.axes):
        ax.set_xlim(-0.5,4)
        ax.set_ylim(-0.5,10)
    Lines = fig.axes[-1].get_lines()[-5:]
    legend2 = fig.axes[-1].legend(Lines, ['Reference', '8km','6km', '4km', '2km'], loc=1)
    fig.axes[-1].add_artist(legend2)
def GenericFigAxis():
    """Create a 1x2 figure (slip / slip-rate panels) and return (fig, [ax1, ax2])."""
    fig = plt.figure(constrained_layout=True, figsize=[12, 4])
    grid = GridSpec(1, 2, figure=fig)
    panels = [fig.add_subplot(grid[0, col]) for col in (0, 1)]
    return fig, panels
def Plotting(fig, axis, DataObj,FilteringSpecs):
    # Reload the on-disk data, select one receiver via FilteringSpecs (see
    # SlipDataObject.getDataSubDomain), then plot slip (re-integrated from the
    # slip rate) on axis[0] and the slip rate itself on axis[1].
    DataObj.reload()
    FilteredDataframe = DataObj.getDataSubDomain(**FilteringSpecs)
    x_values = FilteredDataframe.Time
    y_values = FilteredDataframe.Slip       # loaded but unused: slip is re-integrated below
    yPrime_values = FilteredDataframe.SlipRate
    AA=0
    # Cumulative-sum integration of the slip rate; dt taken from the first two samples.
    axis[0].plot(x_values, np.cumsum(yPrime_values)*(x_values.iloc[1]-x_values.iloc[0]))
    #axis[0].plot(x_values, np.cumsum(y_values))
    #axis[0].plot(x_values, y_values)
    axis[0].set_xlabel('Time = {}'.format(FilteredDataframe.Time.iloc[-1]))
    axis[0].set_ylabel('Slip')
    axis[0].set_title('Slip')
    axis[1].plot(x_values, yPrime_values)
    axis[1].set_ylabel('Slip rate')
    axis[1].set_title('Slip rate')
    axis[1].set_xlabel('Time = {}'.format(FilteredDataframe.Time.iloc[-1]))
    fig.gca().relim()
    fig.gca().autoscale_view()
```
# P6
$S = \Delta V\ dt + \frac{\delta}{\mu}(c-f(t)(-\sigma^{avg}_{yy}))$
$dS = S - S_{t-1}$
$\gamma=\frac{dS}{2\delta} P_n(\phi/2\delta)$
$S_{q}\ =S_{t-1} + dS$
$\dot{S}_{q}=dS\ /\ dt$
```
Path = "/home/nico/Documents/TEAR/Codes_TEAR/se2dr/Runs/P6/"
DataObj = SlipDataObject(Path+"SlipAtReceiver.txt")
fig, axis = GenericFigAxis()
fig.suptitle("Plastic Multiplier approach")
FilteringSpecs = {"FaultX":"FaultX", "FaultXValue":2000,
"FaultY":"FaultY", "FaultYValue":100,
"q":0}
plt.ion()
[item.PlotReference(axis[0], "Slip", filtering=True) for item in RefList]
[item.PlotReference(axis[1], "SlipRate", filtering=True) for item in RefList]
Plotting(fig, axis, DataObj, {**FilteringSpecs, "FaultXValue":2000})
Plotting(fig, axis, DataObj, {**FilteringSpecs, "FaultXValue":4000})
Plotting(fig, axis, DataObj, {**FilteringSpecs, "FaultXValue":6000})
#format_axes(fig)
```
# P1
$\dot{S} = \Delta V - \frac{\delta}{\mu}(\frac{f(t)-f(t-1)}{dt})(-\sigma^{avg}_{yy})$
$dS = \dot{S}\ dt $
$\gamma=\frac{dS}{2\delta} P_n(\phi/2\delta)$
$S_{q}\ += S$
$\dot{S}_{q}=dS\ /\ dt$
```
Path = "/home/nico/Documents/TEAR/Codes_TEAR/se2dr/Runs/P1/"
DataObj = SlipDataObject(Path+"SlipAtReceiver.txt")
fig, axis = GenericFigAxis()
fig.suptitle("Plastic Multiplier approach")
FilteringSpecs = {"FaultX":"FaultX", "FaultXValue":2000,
"FaultY":"FaultY", "FaultYValue":100,
"q":0}
plt.ion()
[item.PlotReference(axis[0], "Slip", filtering=True) for item in RefList]
[item.PlotReference(axis[1], "SlipRate", filtering=True) for item in RefList]
Plotting(fig, axis, DataObj, {**FilteringSpecs, "FaultXValue":0})
Plotting(fig, axis, DataObj, {**FilteringSpecs, "FaultXValue":2000})
Plotting(fig, axis, DataObj, {**FilteringSpecs, "FaultXValue":4000})
Plotting(fig, axis, DataObj, {**FilteringSpecs, "FaultXValue":6000})
format_axes(fig)
```
# P9
$S = \Delta U + \frac{\delta}{\mu}(c-f(t)(-\sigma^{avg}_{yy}))$
$dS = S - S_{t-1}$
$\gamma=\frac{dS}{2\delta} P_n(\phi/2\delta)$
$S_{q}\ = S$
$\dot{S}_{q}=dS\ /\ dt$
```
Path = "/home/nico/Documents/TEAR/Codes_TEAR/se2dr/Runs/P9/"
DataObj = SlipDataObject(Path+"SlipAtReceiver.txt")
fig, axis = GenericFigAxis()
fig.suptitle("Plastic Multiplier approach")
FilteringSpecs = {"FaultX":"FaultX", "FaultXValue":2000,
"FaultY":"FaultY", "FaultYValue":100,
"q":0}
plt.ion()
[item.PlotReference(axis[0], "Slip", filtering=True) for item in RefList]
[item.PlotReference(axis[1], "SlipRate", filtering=True) for item in RefList]
Plotting(fig, axis, DataObj, {**FilteringSpecs, "FaultXValue":0})
Plotting(fig, axis, DataObj, {**FilteringSpecs, "FaultXValue":2000})
Plotting(fig, axis, DataObj, {**FilteringSpecs, "FaultXValue":4000})
Plotting(fig, axis, DataObj, {**FilteringSpecs, "FaultXValue":6000})
#format_axes(fig)
```
# Looks like I didn't save the code when I commented out the slip-updating scheme. Of course it was not calculating the accumulation correctly
god...
I'll start again, and the plotting function is changed so that it does not scroll up again
```
def Plottings(fig, axis, DataObj,FilteringSpecs):
    # Variant of Plotting(): draws the stored Slip column directly on axis[0]
    # instead of re-integrating the slip rate.
    DataObj.reload()
    FilteredDataframe = DataObj.getDataSubDomain(**FilteringSpecs)
    x_values = FilteredDataframe.Time
    y_values = FilteredDataframe.Slip
    yPrime_values = FilteredDataframe.SlipRate
    AA=0
    #axis[0].plot(x_values, np.cumsum(yPrime_values)*(x_values.iloc[1]-x_values.iloc[0]))
    axis[0].plot(x_values, y_values)
    axis[0].set_xlabel('Time = {}'.format(FilteredDataframe.Time.iloc[-1]))
    axis[0].set_ylabel('Slip')
    axis[0].set_title('Slip')
    axis[1].plot(x_values, yPrime_values)
    axis[1].set_ylabel('Slip rate')
    axis[1].set_title('Slip rate')
    axis[1].set_xlabel('Time = {}'.format(FilteredDataframe.Time.iloc[-1]))
    fig.gca().relim()
    fig.gca().autoscale_view()
```
# P2
$S = \Delta U + \frac{\delta}{\mu}(c-f(t)(-\sigma^{avg}_{yy}))$
$dS = S - S_{t-1}$
$\gamma=\frac{dS}{2\delta} P_n(\phi/2\delta)$
$S_{q}\ += dS$
$\dot{S}_{q}=dS\ /\ dt$
```
Path = "/home/nico/Documents/TEAR/Codes_TEAR/se2dr/Runs/P2/"
DataObj = SlipDataObject(Path+"SlipAtReceiver.txt")
fig, axis = GenericFigAxis()
fig.suptitle("Plastic Multiplier approach")
FilteringSpecs = {"FaultX":"FaultX", "FaultXValue":2000,
"FaultY":"FaultY", "FaultYValue":100,
"q":0}
plt.ion()
#[item.PlotReference(axis[0], "Slip", filtering=True) for item in RefList]
#[item.PlotReference(axis[1], "SlipRate", filtering=True) for item in RefList]
Plottings(fig, axis, DataObj, {**FilteringSpecs, "FaultXValue":0})
Plottings(fig, axis, DataObj, {**FilteringSpecs, "FaultXValue":2000})
Plottings(fig, axis, DataObj, {**FilteringSpecs, "FaultXValue":4000})
Plottings(fig, axis, DataObj, {**FilteringSpecs, "FaultXValue":6000})
#format_axes(fig)
```
# ^After that change, the slip is now consistent with the slip rate
# P3
$S = \Delta U + \frac{\delta}{\mu}(c-f(t)(-\sigma^{avg}_{yy}))$
$dS = S - S_{t-1}$
$\gamma=\frac{S}{2\delta} P_n(\phi/2\delta)$
$S_{q}\ += dS$
$\dot{S}_{q}=dS\ /\ dt$
```
# Load the P3 run, overlay the reference solutions, then plot four receivers.
Path = "/home/nico/Documents/TEAR/Codes_TEAR/se2dr/Runs/P3/"
DataObj = SlipDataObject(Path + "SlipAtReceiver.txt")
fig, axis = GenericFigAxis()
fig.suptitle("Plastic Multiplier approach")
FilteringSpecs = {"FaultX": "FaultX", "FaultXValue": 2000,
                  "FaultY": "FaultY", "FaultYValue": 100,
                  "q": 0}
plt.ion()
for reference in RefList:
    reference.PlotReference(axis[0], "Slip", filtering=True)
for reference in RefList:
    reference.PlotReference(axis[1], "SlipRate", filtering=True)
for x_location in (0, 2000, 4000, 6000):
    Plottings(fig, axis, DataObj, {**FilteringSpecs, "FaultXValue": x_location})
#format_axes(fig)
```
# P4
$\dot{S} = \Delta V - \frac{\delta}{\mu}(\frac{f(t)-f(t-1)}{dt})(-\sigma^{avg}_{yy})$
$dS = \dot{S}\ dt$
$\gamma=\frac{dS}{2\delta} P_n(\phi/2\delta)$
$S_{q}\ += dS$
$\dot{S}_{q}=dS\ /\ dt$
```
# Load the P4 run, overlay the reference solutions, then plot three receivers
# (the x = 0 receiver is deliberately skipped for this run).
Path = "/home/nico/Documents/TEAR/Codes_TEAR/se2dr/Runs/P4/"
DataObj = SlipDataObject(Path + "SlipAtReceiver.txt")
fig, axis = GenericFigAxis()
fig.suptitle("Plastic Multiplier approach")
FilteringSpecs = {"FaultX": "FaultX", "FaultXValue": 2000,
                  "FaultY": "FaultY", "FaultYValue": 100,
                  "q": 0}
plt.ion()
for reference in RefList:
    reference.PlotReference(axis[0], "Slip", filtering=True)
for reference in RefList:
    reference.PlotReference(axis[1], "SlipRate", filtering=True)
for x_location in (2000, 4000, 6000):
    Plottings(fig, axis, DataObj, {**FilteringSpecs, "FaultXValue": x_location})
#format_axes(fig)
```
# P10
$S = \Delta U + \frac{\delta}{\mu}\ (f_{static}\sigma_{n}-f(t)\sigma^{avg}_{yy}) $
$dS = S - S_{t-1}$
$\gamma=\frac{S}{2\delta} P_n(\phi/2\delta)$
$S_{q}\ += dS$
$\dot{S}_{q}=dS\ /\ dt$
```
# Load the P10 run and plot four receivers (no reference overlay).
Path = "/home/nico/Documents/TEAR/Codes_TEAR/se2dr/Runs/P10/"
DataObj = SlipDataObject(Path + "SlipAtReceiver.txt")
fig, axis = GenericFigAxis()
fig.suptitle("Plastic Multiplier approach")
FilteringSpecs = {"FaultX": "FaultX", "FaultXValue": 2000,
                  "FaultY": "FaultY", "FaultYValue": 100,
                  "q": 0}
plt.ion()
for x_location in (0, 2000, 4000, 6000):
    Plottings(fig, axis, DataObj, {**FilteringSpecs, "FaultXValue": x_location})
#format_axes(fig)
```
# P11
$\dot{S} = \Delta V - \frac{\delta}{\mu}\ \frac{(f(t)-f(t-dt))}{dt}(-\sigma^{avg}_{yy}) $
$dS = \dot{S}dt$
$\gamma=\frac{dS}{2\delta} P_n(\phi/2\delta)$
--------------
$\sigma_{xy} = 2\mu\ \gamma$
$S_{q}\ += dS$
$\dot{S}_{q}=dS\ /\ dt$
```
# P11 run: overlay the reference solutions, then plot three receivers
# (the x = 0 receiver is deliberately skipped here).
Path = "/home/nico/Documents/TEAR/Codes_TEAR/se2dr/Runs/P11/"
DataObj = SlipDataObject(Path + "SlipAtReceiver.txt")
fig, axis = GenericFigAxis()
fig.suptitle("Plastic Multiplier approach (TransectStressAvg branch)")
FilteringSpecs = {"FaultX": "FaultX", "FaultXValue": 2000,
                  "FaultY": "FaultY", "FaultYValue": 100,
                  "q": 0}
plt.ion()
for reference in RefList:
    reference.PlotReference(axis[0], "Slip", filtering=True)
for reference in RefList:
    reference.PlotReference(axis[1], "SlipRate", filtering=True)
for x_location in (2000, 4000, 6000):
    Plottings(fig, axis, DataObj, {**FilteringSpecs, "FaultXValue": x_location})

# P13 run on a fresh figure, without the reference overlay.
Path = "/home/nico/Documents/TEAR/Codes_TEAR/se2dr/Runs/P13/"
DataObj = SlipDataObject(Path + "SlipAtReceiver.txt")
fig, axis = GenericFigAxis()
fig.suptitle("Plastic Multiplier approach")
FilteringSpecs = {"FaultX": "FaultX", "FaultXValue": 2000,
                  "FaultY": "FaultY", "FaultYValue": 100,
                  "q": 0}
plt.ion()
for x_location in (0, 2000, 4000, 6000):
    Plottings(fig, axis, DataObj, {**FilteringSpecs, "FaultXValue": x_location})
```
# P11
$\dot{S} = \Delta V - \frac{\delta}{\mu}\ \frac{(f(t)-f(t-dt))}{dt}(-\sigma^{avg}_{yy}) $
$dS = \dot{S}dt$
$\gamma=\frac{dS}{2\delta} P_n(\phi/2\delta)$
$S_{q}\ += dS$
$\dot{S}_{q}=dS\ /\ dt$
```
# Compare several se2dr runs.  Each GenericFigAxis() call opens a fresh
# figure; runs plotted without a new call are overlaid on the previous one.

# 50 m receiver transect on a fresh figure.
Path = "/home/nico/Documents/TEAR/Codes_TEAR/se2dr/Runs/TransectVersion50m/"
DataObj = SlipDataObject(Path + "SlipAtReceiver.txt")
fig, axis = GenericFigAxis()
fig.suptitle("Plastic Multiplier approach")
FilteringSpecs = {"FaultX": "FaultX", "FaultXValue": 2000,
                  "FaultY": "FaultY", "FaultYValue": 50,
                  "q": 0}
plt.ion()
for x_location in (0, 2000, 4000, 6000):
    Plottings(fig, axis, DataObj, {**FilteringSpecs, "FaultXValue": x_location})

# Overlay the 100 m transect of the reference run on the same axes.
Path = "/home/nico/Documents/TEAR/Codes_TEAR/se2dr/Runs/TransectVersion/"
DataObj = SlipDataObject(Path + "SlipAtReceiver.txt")
FilteringSpecs = {"FaultX": "FaultX", "FaultXValue": 2000,
                  "FaultY": "FaultY", "FaultYValue": 100,
                  "q": 0}
for x_location in (0, 2000, 4000, 6000):
    Plottings(fig, axis, DataObj, {**FilteringSpecs, "FaultXValue": x_location})

# 50 m transect with delta = 100 on a fresh figure.
Path = "/home/nico/Documents/TEAR/Codes_TEAR/se2dr/Runs/TransectVersion50mdelta100/"
DataObj = SlipDataObject(Path + "SlipAtReceiver.txt")
fig, axis = GenericFigAxis()
fig.suptitle("Plastic Multiplier approach")
FilteringSpecs = {"FaultX": "FaultX", "FaultXValue": 2000,
                  "FaultY": "FaultY", "FaultYValue": 100,
                  "q": 0}
plt.ion()
for x_location in (0, 2000, 4000, 6000):
    Plottings(fig, axis, DataObj, {**FilteringSpecs, "FaultXValue": x_location})

# Overlay the reference transect on the same axes.
Path = "/home/nico/Documents/TEAR/Codes_TEAR/se2dr/Runs/TransectVersion/"
DataObj = SlipDataObject(Path + "SlipAtReceiver.txt")
FilteringSpecs = {"FaultX": "FaultX", "FaultXValue": 2000,
                  "FaultY": "FaultY", "FaultYValue": 100,
                  "q": 0}
for x_location in (0, 2000, 4000, 6000):
    Plottings(fig, axis, DataObj, {**FilteringSpecs, "FaultXValue": x_location})

# Reference transect alone on a fresh figure.
Path = "/home/nico/Documents/TEAR/Codes_TEAR/se2dr/Runs/TransectVersion/"
DataObj = SlipDataObject(Path + "SlipAtReceiver.txt")
fig, axis = GenericFigAxis()
fig.suptitle("Plastic Multiplier approach")
FilteringSpecs = {"FaultX": "FaultX", "FaultXValue": 2000,
                  "FaultY": "FaultY", "FaultYValue": 100,
                  "q": 0}
plt.ion()
for x_location in (0, 2000, 4000, 6000):
    Plottings(fig, axis, DataObj, {**FilteringSpecs, "FaultXValue": x_location})

# Overlay the PlasticMultiplier run on the same axes.
Path = "/home/nico/Documents/TEAR/Codes_TEAR/se2dr/Runs/PlasticMultiplier/"
DataObj = SlipDataObject(Path + "SlipAtReceiver.txt")
FilteringSpecs = {"FaultX": "FaultX", "FaultXValue": 2000,
                  "FaultY": "FaultY", "FaultYValue": 100,
                  "q": 0}
for x_location in (0, 2000, 4000, 6000):
    Plottings(fig, axis, DataObj, {**FilteringSpecs, "FaultXValue": x_location})

# PlasticMultiplier2 on a fresh figure.
Path = "/home/nico/Documents/TEAR/Codes_TEAR/se2dr/Runs/PlasticMultiplier2/"
DataObj = SlipDataObject(Path + "SlipAtReceiver.txt")
fig, axis = GenericFigAxis()
fig.suptitle("Plastic Multiplier approach")
FilteringSpecs = {"FaultX": "FaultX", "FaultXValue": 2000,
                  "FaultY": "FaultY", "FaultYValue": 100,
                  "q": 0}
plt.ion()
for x_location in (0, 2000, 4000, 6000):
    Plottings(fig, axis, DataObj, {**FilteringSpecs, "FaultXValue": x_location})

# PlasticMultiplier alone on another fresh figure.
Path = "/home/nico/Documents/TEAR/Codes_TEAR/se2dr/Runs/PlasticMultiplier/"
DataObj = SlipDataObject(Path + "SlipAtReceiver.txt")
fig, axis = GenericFigAxis()
fig.suptitle("Plastic Multiplier approach")
FilteringSpecs = {"FaultX": "FaultX", "FaultXValue": 2000,
                  "FaultY": "FaultY", "FaultYValue": 100,
                  "q": 0}
plt.ion()
for x_location in (0, 2000, 4000, 6000):
    Plottings(fig, axis, DataObj, {**FilteringSpecs, "FaultXValue": x_location})

# Overlay PlasticMultiplier2 for comparison.
Path = "/home/nico/Documents/TEAR/Codes_TEAR/se2dr/Runs/PlasticMultiplier2/"
DataObj = SlipDataObject(Path + "SlipAtReceiver.txt")
FilteringSpecs = {"FaultX": "FaultX", "FaultXValue": 2000,
                  "FaultY": "FaultY", "FaultYValue": 100,
                  "q": 0}
for x_location in (0, 2000, 4000, 6000):
    Plottings(fig, axis, DataObj, {**FilteringSpecs, "FaultXValue": x_location})
```
| github_jupyter |
# Create Training Set with Simulated Pulsars
Based on Python 3.
Required software:
1. sigpyproc
2. sigproc
3. scipy
4. pandas
5. numpy
6. SKA-TestVectorGenerationPipeline v2.0 from https://github.com/scienceguyrob/SKA-TestVectorGenerationPipeline
7. Tempo 2
We take a real observation and create an empty, downsampled version of this file. Into this file we inject the simulated pulsars.
```
# Imports and I/O paths for building the simulated-pulsar training set.
import numpy as np
import matplotlib.pyplot as plt
import os
import glob
from sigpyproc.Readers import FilReader as reader
import scipy.signal
import scipy
import pandas as pd
current_path = os.getcwd()
print(current_path)
# Folder where data will be saved
output_path = '/data/lkuenkel/data/pipeline_test/'
!mkdir {output_path}
# Define input file; if not a filterbank file yet, it will be converted later
input_example_file = '/data/lkuenkel/data/PMPS/1997AUGT/raw/PM0001_00111.sf'
# Name for converted filterbank file
input_fil_file = f'{output_path}input.fil'
# Name of downsampled filterbank file
downsampled_input = f'{output_path}example_downsampled.fil'
# Name of empty filterbank file; this file will be used to inject the simulated pulsars
empty_file = f'{output_path}zero.fil'
```
## Create Downsampled File
This step creates a downsampled version of one filterbank. If the file is in sigproc filterbank format and you do not want to downsample, then skip this step and set *downsampled_input* to the input file.
```
# Parameters for downsampling
t_downsample = 10  # time-domain downsampling factor
f_downsample = 2  # frequency-channel downsampling factor
nbits = 8  # output bit depth of the decimated file
tstart = 50000  # MJD later written into the empty file header
# No conversion to filterbank needed if data is already sigproc filterbank
# filterbank and decimate are sigproc tools
!filterbank {input_example_file} > {input_fil_file}
!decimate -c {f_downsample} -t {t_downsample} -n {nbits} {input_fil_file} > {downsampled_input}
```
## Create Empty File
```
# Zero out every channel of the downsampled file to produce a blank
# template into which simulated pulsars will be injected.
fil = reader(downsampled_input)
channel_number = fil.header['nchans']
# An all-zero mask blanks every frequency channel.
mask = np.zeros(channel_number)
# Band edges (fch1 is the first channel; foff is the channel offset, typically
# negative, so the second entry is the other edge of the band).
f_range = (fil.header['fch1'], fil.header['fch1']+fil.header['foff']*fil.header['nchans'])
fil.header['tstart'] = tstart
fil.applyChannelMask(mask, empty_file)
fil.header
```
## Create Training Data Set
```
# Set the paths
pipeline_path = '/data/lkuenkel/newstack/SKA-TestVectorGenerationPipeline'
# Parameters
# number of predictor files
n_files = 20
#snr_para = '--snr uniform --snrparams 70:0'
#dm_para = '--dm uniform --dmparams 20:680'
#p0_para = '--p0 uniform --p0params 0.1:1.4'
# One DM and one period drawn per simulated pulsar.
dm_distribution = np.random.uniform(20, 700, n_files)
# Periods in seconds -- TODO confirm against CreateFakeParFiles.py usage below.
p0_distribution = np.random.uniform(0.1, 1, n_files)
set_name = 'training_set_1'
# Create Folders
set_path = f'{output_path}{set_name}'
pars_path = f'{output_path}{set_name}/pars'
preds_path = f'{output_path}{set_name}/pred'
cmd_path = f'{output_path}{set_name}/cmd'
data_path = f'{output_path}{set_name}/data'
fake_prof_path = f'{output_path}{set_name}/prof_fake'
#!mkdir {data_path}
# Reset Folders: wipe any previous run of this set, then recreate the tree.
!rm -r {set_path}
!mkdir {set_path}
!mkdir {pars_path}
!mkdir {preds_path}
!mkdir {cmd_path}
!mkdir {data_path}
!mkdir {fake_prof_path}
```
1. Creates Pars
2. Create Preds
3. Create command list
4. Execute command list
The Python 3 compatible version of SKA-TestVectorGenerationPipeline only supports creating multiple pulsars with different periods at the same DM. For multiple DMs we have to run it multiple times.
```
%%capture
# Create one par file per (period, DM) pair; the pipeline only supports one
# DM per invocation, so it is run once per simulated pulsar.
for (period, dm) in zip(p0_distribution, dm_distribution):
    command_string = f'-m {dm:.2f} -d {pars_path} -p {period:.4f} -a 0'
    %run {pipeline_path}/code/pulsar_injection_pipeline/beta/main/src/CreateFakeParFiles.py {command_string}
# Generate the Tempo2 predictor files covering the observing band and the
# MJD range of the empty file.
chunks = n_files // 1000
f_high = int(np.ceil(max(f_range)))
f_low = int(np.floor(min(f_range)))
command_string = f'--tel PARKES -p {pars_path} -d {preds_path} -s 2000 --f1 {f_low} --f2 {f_high} --mjd1 {tstart} --mjd2 {tstart+0.2}'
print(command_string)
# NOTE(review): `chunk` is unused and the same command is re-run each
# iteration -- presumably the script processes the next batch of par files
# internally; confirm against GeneratePredictorFiles.py.
for chunk in range(chunks+1):
    %run {pipeline_path}/code/pulsar_injection_pipeline/beta/main/src/GeneratePredictorFiles.py {command_string}
# Signal-to-noise ratio used for every injected pulsar.
snr_value = 70
#%%capture
#asc_paths = '/data/lkuenkel/software/SKA_rob/ASC/good/'
#asc_paths = f"{pipeline_path}/resources/ASC/all_asc/"
#!unzip {pipeline_path}/resources/ASC/ASC.zip -d {pipeline_path}/resources/ASC/all_asc/
```
Initially the SKA-TestVectorGenerationPipeline used real pulsar profiles in v1.0. In v2.0 Gaussian profiles are used. I used real pulsar profiles in my work but removed low quality profiles from the data set. Here we use the Gaussian profiles.
v1.0 is more useful in creating a varied training data set but requires Python 2.
```
# Number of Gaussian pulse profiles to generate; duty cycle is the pulse
# width as a fraction of the period.
profile_number = 5
duty_cycle_distribution = np.random.uniform(0.01, 0.5, profile_number)
%%capture
# Generate one Gaussian profile per drawn duty cycle.
for duty_cycle in duty_cycle_distribution:
    cycle_string = f"{duty_cycle:.2f}"
    %run {pipeline_path}/code/pulsar_injection_pipeline/beta/main/src/GaussianProfileGen.py -d {cycle_string}
!mv Gaussian*.asc {fake_prof_path}
# Build the list of injection commands (profile + predictor + noise file).
command_string = f'--asc {fake_prof_path} --pred {preds_path} --out {cmd_path} --noise {empty_file} --batch {n_files} --snr {snr_value} --label {set_name}'
print(command_string)
%run {pipeline_path}/code/pulsar_injection_pipeline/beta/main/src/InjectPulsarCommandCreator.py {command_string} -v
cmd_files = glob.glob(cmd_path+ '/*.txt')
print(cmd_files)
# Execute every command list, writing the injected filterbanks to data_path.
for cmd_file in cmd_files:
    %run {pipeline_path}/code/pulsar_injection_pipeline/beta/main/src/InjectPulsarAutomator.py --cmd {cmd_file} --out {data_path}
#name_split
created_files = glob.glob(f'{data_path}/*')
print(len(created_files))
# Fixing a naming bug in v2.0: some output names contain a stray newline.
for file in created_files:
    if '\n' in file:
        new_path = file.split('\n')[0]
        !mv "$file" {new_path}
created_files = glob.glob(f'{data_path}/*.fil')
print(len(created_files))
```
## Create arrays containing the peak position
```
%%capture
# For every injected filterbank: dedisperse at the injected DM, find the
# pulse peaks, and save a boolean mask of peak positions.  Files whose
# period recovered from the peak autocorrelation disagrees with the
# injected period by more than 15% are flagged as bad.
down_fac = 4  # extra time downsampling applied after dedispersion
out_folder = f'{set_path}/data_dm_{down_fac}/'
!rm -r {out_folder}
!mkdir {out_folder}
correct_files = []
bad_files = []
approx = []  # (injected period [ms], recovered period [s]) pairs
dedis_names = []  # saved mask paths, aligned with correct_files
max_dedis_vals = []  # peak dedispersed amplitude, aligned with correct_files
tsamp = fil.header['tsamp']
for file in created_files[:]:
    #file = '/data/lkuenkel/data/PMPS/fake/pmps_set1/data/FakePulsar_639_1.291910_396.4_70.0_ASC_J2019+2425_1380.fil'
    name = file.split('/')[-1]
    new_name = '_'.join(name.split('_')[:4])
    # Injected parameters are encoded in the file name after set_name.
    name_split = name.split(set_name)[1].split('_')
    full_path = out_folder + new_name+'.npy'
    dm = float(name_split[4])
    period = float(name_split[2])
    # Minimum peak separation: 3/4 of a period, in downsampled samples
    # (period is in ms, hence the /1000).
    approx_dist = 3/4. * period / tsamp /float(down_fac) / 1000
    fil = reader(file)
    #fil_data = fil.readBlock(0,10000)
    #plt.imshow(fil_data, aspect='auto')
    #plt.show()
    dedis = fil.dedisperse(dm, gulp=10000000)
    max_val = np.max(dedis)
    down = dedis.downsample(down_fac)
    # Normalise the time series to [~0, 1] before peak finding.
    down -= np.median(down)
    #down = np.clip(down, 0,10000)
    #down = np.clip(down, 0,np.percentile(down, 99))
    down /= np.max(down)
    peaks, prop = scipy.signal.find_peaks(down, height=0.3, distance=approx_dist)
    #plt.plot(dedis)
    #plt.show()
    #peaks = scipy.signal.find_peaks_cwt(down, [5])
    #print(peaks)
    y_val = np.ones_like(peaks[:3])
    #plt.plot(down[:2000])
    #plt.scatter(peaks[:3], y_val)
    #plt.show()
    # Boolean mask marking the peak positions; this is the training target.
    dummy = np.zeros_like(down).astype(bool)
    dummy[peaks] =1
    # Autocorrelate the peak mask; the first off-center ACF peak spacing
    # gives an estimate of the pulse period.
    acf = scipy.signal.fftconvolve(dummy,dummy[::-1])
    acf /= np.max(acf)
    middle = int(len(acf)/2)
    #plt.plot(acf[middle-2000: middle+2000])
    #plt.show()
    try:
        height = np.max(acf[middle+25: middle+5000]) * 0.3
        acf_peaks, _ = scipy.signal.find_peaks(acf[middle-5: middle+5000], height=height)
        #print(acf_peaks)
        # Recovered period in seconds from the ACF peak spacing.
        per_calc = (acf_peaks[1] -acf_peaks[0])*down_fac * tsamp
        approx.append((period, per_calc))
        # Reject files where recovered and injected period disagree by >15%.
        if np.abs(period/1000 - per_calc)/period>0.15:
            bad_files.append(file)
        else:
            correct_files.append(file)
            dedis_names.append(full_path)
            max_dedis_vals.append(max_val)
    except IndexError:
        # Fewer than two ACF peaks found: cannot estimate the period.
        print(file + 'did not work')
    np.save(full_path, dummy)
approx_arr = np.asarray(approx)
np.save(f'{set_path}/approx_periods_{down_fac}.npy', approx_arr)
plt.scatter(approx_arr[:,0]/1000, approx_arr[:,1])
print(len(correct_files))
```
## Create csv_file describing the data
We create two csv files: one containing only the simulated files, and another that also contains the same number of empty lines. When these are loaded during the training process, no pulsars are injected. Label 1 tells us that there is a simulated pulsar. Label 0 means no pulsar.
```
# Build the per-file metadata table from the encoded file names and write
# the two training-set CSVs (with and without the empty "noise" rows).
raw_file_paths = correct_files
raw_file_names = [i.split('/')[-1] for i in raw_file_paths]
raw_file_names_without_set = [i.split(set_name)[1] for i in raw_file_names]
psr_names = ['',]*len(raw_file_paths)
# File names encode _<idx>_<period ms>_<duty cycle>_<dm>_..._<snr>.fil
periods = [float(i.split('_')[2])/1000 for i in raw_file_names_without_set]
duty_cycles = [float(i.split('_')[3]) for i in raw_file_names_without_set]
dms = [float(i.split('_')[4]) for i in raw_file_names_without_set]
snrs = [float(i.split('_')[-1].split('.')[0]) for i in raw_file_names_without_set]
print(len(snrs), len(max_dedis_vals))
data_dict = {'JNAME':psr_names, 'P0':periods, 'DM':dms, 'Label':np.ones_like(psr_names), 'FileName':raw_file_paths,
             'SNR': snrs, 'MaskName': dedis_names, 'MaxVal': max_dedis_vals, 'DutyCycle': duty_cycles}
df = pd.DataFrame(data=data_dict)
df[:3]
# Template row for a pure-noise sample (no pulsar injected during training).
dummy_line = {'JNAME':'Noise', 'P0':np.nan, 'DM':np.nan, 'Label':0, 'FileName':'',
              'SNR': np.nan, 'MaskName': '', 'MaxVal': np.nan, 'DutyCycle': np.nan}
# One noise row per simulated pulsar.  DataFrame.append() is deprecated
# (removed in pandas 2.0) and appending row-by-row is quadratic, so build
# all noise rows at once and concatenate.
df_noise = pd.concat([df, pd.DataFrame([dummy_line] * len(df))],
                     ignore_index=True)
df_noise[-3:]
df.to_csv(f'../datasets/simset_{set_name}.csv')
df_noise.to_csv(f'../datasets/simset_{set_name}_noise.csv')
print(f"Created: ../datasets/simset_{set_name}.csv")
print(f"Created: ../datasets/simset_{set_name}_noise.csv")
print(f"To use the set use the option: --path simset_{set_name}_noise.csv")
plt.scatter(dms, max_dedis_vals)
plt.scatter(periods, max_dedis_vals)
plt.scatter(duty_cycles, max_dedis_vals)
```
| github_jupyter |
## Neural Network - Multiclass Classification
### This example shows use of sparse_categorical_crossentropy loss function
### With this loss function, you don't have to one hot encode labels
### Build the Neural Network using Keras - Easy and Portable across different implementations
https://keras.io/
For building on TensorFlow - Use a conda_tensorflow_py36 or equivalent kernel
For building on Apache MxNet - Use a conda_mxnet_py36 or equivalent kernel
### Iris Plant Classification
<h4>Input Features:</h4>
sepal_length, sepal_width, petal_length, petal_width
<h4>Target:</h4>
Iris Class ['Iris-setosa', 'Iris-versicolor', 'Iris-virginica']
```
# https://keras.io/
# https://github.com/keras-team/keras/issues/2743
# Change Kernel to use Tensor Flow. For example: conda_tensorflow_p36
import sys
import numpy as np
# Set random seed for reproducible weight initialisation / shuffling
np.random.seed(0)
import pandas as pd
import matplotlib.pyplot as plt
import itertools
from sklearn import preprocessing
from sklearn.metrics import classification_report, confusion_matrix
from sklearn.preprocessing import StandardScaler, OneHotEncoder, MinMaxScaler, KBinsDiscretizer, LabelEncoder
from keras.models import Sequential
from keras.layers import Dense, Activation
import keras
# Input files: a comma-separated column-name list plus train/validation CSVs.
column_list_file = 'iris_train_column_list.txt'
train_file = 'iris_train.csv'
validation_file = 'iris_validation.csv'
columns = ''
with open(column_list_file,'r') as f:
    columns = f.read().split(',')
columns
# Encode Class Labels to integers
# Labeled Classes
labels=[0,1,2]
classes = ['Iris-setosa', 'Iris-versicolor', 'Iris-virginica']
le = preprocessing.LabelEncoder()
le.fit(classes)
# Specify the column names as the file does not have column header
df_train = pd.read_csv(train_file,names=columns)
df_validation = pd.read_csv(validation_file,names=columns)
df_train.head()
dt_train_no_head = pd.read_csv(train_file,header=None).head()
dt_train_no_head.iloc[:,0].values
df_train.head()
df_validation.head()
# NOTE(review): assumes the first CSV column is 'encoded_class' (the integer
# label) -- confirm against the column list file.
df_train['encoded_class'].head()
X_train = df_train.iloc[:,1:].values # Features: 1st column onwards
y_train = df_train.iloc[:,0].values
X_validation = df_validation.iloc[:,1:].values
y_validation = df_validation.iloc[:,0].values
```
## Build Model using Keras
Reference: https://keras.io/getting-started/sequential-model-guide/
```
# Create Model
# 1 hidden layer with 32 neurons with relu activation
# output layer - multi-class classification with 3 classes
# Use softmax activation - gives the probability of a sample belonging to the three classes
# select the one with highest probability
# Sum of the probabilities is 1.
# optimizer - use adam or rmsprop
# loss function - multinomial logistic loss function - called as categorical cross entropy in keras
# metrics - additional metrics to report
model = Sequential()
model.add(Dense(32, activation='relu', input_dim=X_train.shape[1]))
model.add(Dense(3, activation='softmax'))
# sparse_categorical_crossentropy accepts integer labels directly (no
# one-hot encoding needed).
model.compile(optimizer='rmsprop',
              loss='sparse_categorical_crossentropy',
              metrics=['accuracy'])
from keras.callbacks import EarlyStopping
# Stop training when validation loss has not improved for 5 epochs.
early_stopping = EarlyStopping(monitor='val_loss', mode='min', verbose=1, patience=5)
# Train the model, iterating on the data in batches of 32 samples
history = model.fit(X_train, y_train, epochs=300, batch_size=32,
                    validation_data=(X_validation,y_validation),callbacks=[early_stopping])
# Training vs validation loss per epoch.
plt.scatter(x=history.epoch,y=history.history['loss'],label='Training Error')
plt.scatter(x=history.epoch,y=history.history['val_loss'],label='Validation Error')
plt.grid(True)
plt.xlabel('Iteration')
plt.ylabel('Loss')
plt.title('Training Vs Validation Error')
plt.legend()
plt.show()
# Reload the validation file and predict on it as a held-out test set.
df = pd.read_csv(validation_file,names=columns)
df.head()
X_test = df.iloc[:,1:].values
print(X_test[:5])
result = model.predict(X_test)
# Probability of a sample belonging to the three classes
# Sample can belong to only class and the sum of the three probabilities is 1.
np.set_printoptions(suppress=True)
result[:5]
# reset print options
np.set_printoptions()
# Predicted class = index of the highest probability.
df['predicted_class'] = np.argmax(result,axis=1)
df.head()
```
<h2>Confusion Matrix</h2>
Confusion Matrix is a table that summarizes performance of classification model.<br><br>
```
# Reference:
# https://scikit-learn.org/stable/auto_examples/model_selection/plot_confusion_matrix.html
def plot_confusion_matrix(cm, classes,
                          normalize=False,
                          title='Confusion matrix',
                          cmap=plt.cm.Blues):
    """
    Render a confusion matrix as an annotated heat map.

    With `normalize=True` each row is divided by its total so cells show
    per-class rates instead of raw counts.
    """
    if normalize:
        row_totals = cm.sum(axis=1)[:, np.newaxis]
        cm = cm.astype('float') / row_totals
    plt.imshow(cm, interpolation='nearest', cmap=cmap)
    plt.title(title)
    plt.colorbar()
    positions = np.arange(len(classes))
    plt.xticks(positions, classes, rotation=45)
    plt.yticks(positions, classes)
    cell_fmt = '.2f' if normalize else 'd'
    # Dark cells get white text, light cells get black text.
    cutoff = cm.max() / 2.
    for row in range(cm.shape[0]):
        for col in range(cm.shape[1]):
            plt.text(col, row, format(cm[row, col], cell_fmt),
                     horizontalalignment="center",
                     color="white" if cm[row, col] > cutoff else "black")
    plt.ylabel('True label')
    plt.xlabel('Predicted label')
    plt.tight_layout()
# Compute confusion matrix
cnf_matrix = confusion_matrix(df['encoded_class'],
                              df['predicted_class'],labels=labels)
cnf_matrix
# Plot confusion matrix (raw counts)
plt.figure()
plot_confusion_matrix(cnf_matrix, classes=classes,
                      title='Confusion matrix - Count')
# Plot confusion matrix, row-normalized this time.
# NOTE(review): the title still says "Count" although normalize=True.
plt.figure()
plot_confusion_matrix(cnf_matrix, classes=classes,
                      title='Confusion matrix - Count',normalize=True)
# Per-class precision/recall/F1 summary.
print(classification_report(
    df['encoded_class'],
    df['predicted_class'],
    labels=labels,
    target_names=classes))
```
| github_jupyter |
# Evaluating Tikhonet Trained
In this Notebook we are going to evaluate the performance of a [Tikhonet](https://arxiv.org/pdf/1911.00443.pdf) trained.
## Required Libraries and Functions
```
%matplotlib inline
import sys
# Add library path to PYTHONPATH
data_path = './'
# Libraries
import matplotlib.pyplot as plt
import matplotlib
import seaborn as sns
import numpy as np
from mpl_toolkits.axes_grid1 import ImageGrid
import pickle
# Function
def crop_center(img, cropx, cropy):
    """Return the central cropx-by-cropy window of a 2-D array."""
    height, width = img.shape
    left = width // 2 - cropx // 2
    top = height // 2 - cropy // 2
    return img[top:top + cropy, left:left + cropx]
```
## Load Images
```
# Load the pre-computed reconstruction examples from disk.
with open(data_path + "meerkat3600_examples.pkl", "rb") as pkl_file:
    batch = pickle.load(pkl_file)
batch.keys()
batch['unflagged'].keys()
```
## Plot Successful Galaxy Images
```
# Crop every image set of the 'unflagged' batch to the central 64x64 pixels.
for image_key in ['inputs', 'inputs_tikho', 'targets', 'tikhonet',
                  'tikhonet_sc', 'clean', 'clean_iso', 'sparsity', 'score']:
    batch['unflagged'][image_key] = np.array(
        [crop_center(im, 64, 64) for im in batch['unflagged'][image_key]])
# set seaborn theme and style
sns.set_theme()
sns.set_context("paper", font_scale=1.25, rc={"lines.linewidth": 2.5})
# remove grid from images
sns.set_style("whitegrid", {'axes.grid' : False})
# One row per method, five galaxies per row, in this display order.
keys = ['inputs', 'clean', 'clean_iso', 'sparsity', 'score', 'tikhonet', 'tikhonet_sc', 'targets']
list_im = [batch['unflagged'][key][i] for key in keys for i in range(5)]
# Per-galaxy dynamic range of the observations, tiled across the 8 rows
# (kept for optional use as vmin/vmax in imshow below).
list_min = np.tile(np.array([np.min(im) for im in batch['unflagged']['inputs']]), 8)
list_max = np.tile(np.array([np.max(im) for im in batch['unflagged']['inputs']]), 8)
fig = plt.figure(figsize=(20, 20))
grid = ImageGrid(fig, 111,  # similar to subplot(111)
                 nrows_ncols=(8, 5),  # 8 methods x 5 example galaxies
                 axes_pad=[0.4, 0.25],  # pad between axes in inch.
                 cbar_location="right",
                 cbar_mode="each",
                 cbar_pad=0.02
                 )
# Iterating over the grid returns the Axes; attach one colorbar per panel.
for ax, cax, im in zip(grid, grid.cbar_axes, list_im):
    shown = ax.imshow(im)
    tick_format = matplotlib.ticker.ScalarFormatter(useMathText=True)
    tick_format.set_powerlimits((0, 0))
    cax.colorbar(shown, format=tick_format)
labels = ['Observation', 'CLEAN', 'CLEAN iso', 'SRA', 'SCORE', 'Tikhonet', 'ShapeNet', 'Vérité terrain']
for ind, label in enumerate(labels):
    grid[5*ind].set_ylabel(label, fontsize=15, rotation=90, labelpad=10)
plt.savefig('meerkat3600_images_unflagged.pdf', bbox_inches='tight')
```
## Plot Failed Galaxy Images
```
# Crop every image set of the 'flagged' batch to the central 64x64 pixels.
for image_key in ['inputs', 'inputs_tikho', 'targets', 'tikhonet',
                  'tikhonet_sc', 'clean', 'clean_iso', 'sparsity', 'score']:
    batch['flagged'][image_key] = np.array(
        [crop_center(im, 64, 64) for im in batch['flagged'][image_key]])
# set seaborn theme and style
sns.set_theme()
sns.set_context("paper", font_scale=1.25, rc={"lines.linewidth": 2.5})
# remove grid from images
sns.set_style("whitegrid", {'axes.grid' : False})
# One row per method, five galaxies per row, in this display order.
keys = ['inputs', 'clean', 'clean_iso', 'sparsity', 'score', 'tikhonet', 'tikhonet_sc', 'targets']
list_im = [batch['flagged'][key][i] for key in keys for i in range(5)]
# Per-galaxy dynamic range of the observations, tiled across the 8 rows
# (kept for optional use as vmin/vmax in imshow below).
list_min = np.tile(np.array([np.min(im) for im in batch['flagged']['inputs']]), 8)
list_max = np.tile(np.array([np.max(im) for im in batch['flagged']['inputs']]), 8)
fig = plt.figure(figsize=(20, 20))
grid = ImageGrid(fig, 111,  # similar to subplot(111)
                 nrows_ncols=(8, 5),  # 8 methods x 5 example galaxies
                 axes_pad=[0.4, 0.25],  # pad between axes in inch.
                 cbar_location="right",
                 cbar_mode="each",
                 cbar_pad=0.02
                 )
# Iterating over the grid returns the Axes; attach one colorbar per panel.
for ax, cax, im in zip(grid, grid.cbar_axes, list_im):
    shown = ax.imshow(im)
    tick_format = matplotlib.ticker.ScalarFormatter(useMathText=True)
    tick_format.set_powerlimits((0, 0))
    cax.colorbar(shown, format=tick_format)
labels = ['Observation', 'CLEAN', 'CLEAN iso', 'SRA', 'SCORE', 'Tikhonet', 'Tikhonet CF', 'Vérité terrain']
for ind, label in enumerate(labels):
    grid[5*ind].set_ylabel(label, fontsize=15, rotation=90, labelpad=10)
plt.savefig('meerkat3600_images_flagged.pdf', bbox_inches='tight')
```
| github_jupyter |
# Tutorial 9
**CS3481 Fundamentals of Data Science**
*Semester B 2019/20*
___
**Instructions:**
- same as [Tutorial 1](http://bit.ly/CS3481T1).
___
## Exercise 1 (submit via [uReply](https://cityu.ed2.mobi/student/mobile_index.php) section number **LM1100**)
For this question, you will use WEKA to analyze a skewed dataset.
1. Download the mammography dataset from [OpenML](https://www.openml.org/d/310).<br>
Woods, Kevin S., et al. "Comparative evaluation of pattern recognition techniques for detection of microcalcifications." Biomedical Image Processing and Biomedical Visualization. Vol. 1905. International Society for Optics and Photonics, 1993. [(Available via CityU VPN.)](https://www.worldscientific.com/doi/abs/10.1142/9789812797834_0011) The following is the description of the dataset excerpted from the paper:
> To obtain the training and test data, a segmentation routine is run on a set of digitized mammograms. The result of the segmentation routine is a template for each image which indicates the locations of possible microcalcifications called candidates. The segmentation routine is designed to locate small, bright spots (a characteristic of microcalcifications) in the raw image. It is important that most individual calcifications and all clusters of calcifications be segmented since the overall cluster detection accuracy can be limited by the results of the segmentation. Since the segmentation routine will detect objects other than microcalcifications, it is the job of the classifiers to label the candidates as either yes (a microcalcification) or no. A set of 7 features is systematically chosen and values are computed for each candidate. The feature values are organized into a feature vector, normalized, and written to a data file. Therefore, the training and test data is 7-dimensional feature vectors which are normalized between 0 and 1 using the (value-min)/(max-min) formula, where value is the feature vector element being normalized, and max and min are the maximum and minimum training set values for that feature.
2. Use the package manager to download a meta classifier called `ThresholdSelector`, and a filter `SMOTE` [Synthetic Minority Over-sampling Technique](https://doi.org/10.1613/jair.953).
Your goal is to detect microcalcifications in the mammographic images. Use $10$-fold stratified cross validation and a random seed of $1$ unless otherwise stated.
(a) Using ZeroR as the classifier, obtain the values of accuracy, precision, recall, and specificity. Verify the values by hand calculations. Is ZeroR a good baseline classifier?
[*Hint: Is the accuracy misleading? Can a random decision maker do better than zeroR?*]
___
**Answer:**
___
(b) Using J48 as the classifier instead, obtain the values of accuracy, precision, recall, and the F-score. Verify the value by hand calculations.
___
**Answer:**
___
## Exercise 2 (submit via Canvas discussion page)
For this question, your goal is to obtain the best performance and post your model and results on the [discussion page](https://canvas.cityu.edu.hk/courses/32828/discussion_topics/254800).
(a) In the result list, right click on your result using J48 as the classifier and choose Cost/Benefit analysis and $1$ as the positive class value. Your goal is to find the maximum value of the precision. Give a cost matrix that achieves the maximum precision.
[*Hint: Pay attention to the row and column labels of the confusion matrix. It changes after you specify $1$ as the positive class value.*]
___
**Answer:**
___
(b) The meta classifier ThresholdSelector uses the threshold-moving technique to optimize a performance measure you specify, which can be the precision, recall, $F$ score, etc. Use J48 as the base classifier, obtain the highest precision, recall and $F$ score reported. Is any of these scores equal to $100\%$?
[*Hint: See an explanation of threshold moving technique [here](https://machinelearningmastery.com/threshold-moving-for-imbalanced-classification/).*]
___
**Answer:**
___
(c) Using the FilteredClassifier with J48 as the classifier and SMOTE as the filter, try to tweak the settings of SMOTE to give the highest possible value of the $F$ score you can get.
[*See an explanation of SMOTE [here](http://rikunert.com/SMOTE_explained).*]
___
**Answer:**
___
## Exercise 3 (Optional)
Load the dataset from OpenML.
```
import pandas as pd
import numpy as np
from sklearn.datasets import fetch_openml
mammo = fetch_openml(data_id=310)
mammo_pd = pd.DataFrame(data=np.c_[mammo.data,mammo.target],columns=mammo.feature_names+['target'])
mammo_pd
```
Import the libraries for decision tree, ROC analysis, and cross validation.
```
from sklearn import tree
from sklearn.metrics import auc, roc_curve
from sklearn.model_selection import train_test_split
from sklearn.metrics import roc_auc_score
from matplotlib import pyplot
```
Split the data into training and test data.
```
X = mammo_pd.iloc[:,:-1]
Y = mammo_pd.iloc[:,-1]
trainX, testX, trainY, testY = train_test_split(X,Y,test_size=0.5)
```
Train the classifier.
```
clf_gini = tree.DecisionTreeClassifier()
clf_gini.fit(trainX,trainY)
```
To compute the ROC curve (TPR and FPR), the classifier should return soft decisions on the test data in the form of probabilities for each class.
```
# The trained classifier returns per-class probabilities for the test data.
prob = clf_gini.predict_proba(testX)
# Select the probability column for the positive class '1' and flatten it
# to a 1-D array, as roc_curve expects a 1-D score vector.
prob_pos = prob[:, clf_gini.classes_ == '1'].ravel()
# FIX: pos_label is keyword-only in recent scikit-learn versions;
# passing '1' positionally raises a TypeError there.
fpr, tpr, _ = roc_curve(testY, prob_pos, pos_label='1')
pyplot.plot(fpr, tpr, marker='.', label='Decision tree')
```
Compute the AUC.
```
roc_auc = roc_auc_score(testY,prob_pos)
print("ROC AUC: {:.2f}".format(roc_auc))
```
**Exercise:** Why does the ROC curve contain only a few points?
[*Hint: Check the number of distinct probabilities in `prob_pos`.*]
**Exercise:** Repeat the analysis for PR curve instead using [`pr_curve` and `pr_auc_score`](https://machinelearningmastery.com/roc-curves-and-precision-recall-curves-for-classification-in-python/).
**Exercise:** Repeat the analysis with Cross-validation using [`plot_roc_curve`](https://scikit-learn.org/stable/auto_examples/model_selection/plot_roc_crossval.html) instead.
| github_jupyter |
<p style="font-family: Arial; font-size:3.75em;color:purple; font-style:bold"><br>
Pandas</p><br>
*pandas* is a Python library for data analysis. It offers a number of data exploration, cleaning and transformation operations that are critical in working with data in Python.
*pandas* builds upon *numpy* and *scipy* providing easy-to-use data structures and data manipulation functions with integrated indexing.
The main data structures *pandas* provides are *Series* and *DataFrames*. After a brief introduction to these two data structures and data ingestion, the key features of *pandas* this notebook covers are:
* Generating descriptive statistics on data
* Data cleaning using built in pandas functions
* Frequent data operations for subsetting, filtering, insertion, deletion and aggregation of data
* Merging multiple datasets using dataframes
* Working with timestamps and time-series data
**Additional Recommended Resources:**
* *pandas* Documentation: http://pandas.pydata.org/pandas-docs/stable/
* *Python for Data Analysis* by Wes McKinney
* *Python Data Science Handbook* by Jake VanderPlas
Let's get started with our first *pandas* notebook!
<p style="font-family: Arial; font-size:1.75em;color:#2462C0; font-style:bold"><br>
Import Libraries
</p>
```
import pandas as pd
```
<p style="font-family: Arial; font-size:1.75em;color:#2462C0; font-style:bold">
Introduction to pandas Data Structures</p>
<br>
*pandas* has two main data structures it uses, namely, *Series* and *DataFrames*.
<p style="font-family: Arial; font-size:1.75em;color:#2462C0; font-style:bold">
pandas Series</p>
*pandas Series* is a one-dimensional labeled array.
```
# A Series of ages, indexed by name.
ser = pd.Series([22,34,76,43,36], ['Deepak', 'Amit', 'Kavita', 'Rakesh', 'Rashmi'])
ser
ser.index
ser.loc[['Deepak','Kavita']]
ser[[4]]  # position-based fancy indexing: selects only the element at position 4
ser.iloc[2]
'Amit' in ser  # membership test against the index; returns a boolean
ser
ser * 2  # element-wise multiplication by 2
ser[['Deepak', 'Amit']] ** 2  # element-wise squaring of the selected values
```
<p style="font-family: Arial; font-size:1.75em;color:#2462C0; font-style:bold">
pandas DataFrame</p>
*pandas DataFrame* is a 2-dimensional labeled data structure.
<p style="font-family: Arial; font-size:1.25em;color:#2462C0; font-style:bold">
Create DataFrame from dictionary of Python Series</p>
```
d = {'Age' : pd.Series([22,34,32,43,36], index=['Deepak', 'Amit', 'Kavita', 'Rakesh', 'Rashmi']),
'Sallary' : pd.Series([10000,20000,13000,14500,15000], index=['Deepak', 'Amit', 'Kavita', 'Rakesh', 'Rashmi'])}
df = pd.DataFrame(d)
print(df)
df.index
df.columns
pd.DataFrame(d, index=['Amit', 'Kavita', 'Rakesh'])
pd.DataFrame(d, index=['Amit', 'Kavita', 'Rakesh'], columns=['Age', 'Sallary'])
```
<p style="font-family: Arial; font-size:1.25em;color:#2462C0; font-style:bold">
Create DataFrame from list of Python dictionaries</p>
```
data = [{'Deepak': 1, 'Ashish': 2}, {'Kavita': 5, 'Rajni': 10, 'Rakesh': 20}]
pd.DataFrame(data)
pd.DataFrame(data, index=['Section1', 'Section2'])
pd.DataFrame(data, columns=['Ashish', 'Rajni','Deepak'])
```
<p style="font-family: Arial; font-size:1.25em;color:#2462C0; font-style:bold">
Basic DataFrame operations</p>
```
df  # Show the dataframe
df['Age']
# Add a boolean column flagging salaries above 13000.
df['HigherSalary'] = df['Sallary'] > 13000
df
Age = df.pop('Age')  # Remove the 'Age' column, keeping it as a Series
Age
df
del df['HigherSalary']  # Delete a column from the DataFrame
df
df.insert(0, 'copy1_of_Sallary', df['Sallary'])
df
# FIX: the original referenced a non-existent column 'one' (copied from the
# pandas docs), which raises KeyError here. Use the 'Sallary' column instead:
# only the first two rows are copied; the remaining rows are filled with NaN.
df['one_upper_half'] = df['Sallary'][:2]
df
```
<p style="font-family: Arial; font-size:1.75em;color:#2462C0; font-style:bold">
Case Study: Movie Data Analysis</p>
<br>This notebook uses a dataset from the MovieLens website. We will describe the dataset further as we explore with it using *pandas*.
## Download the Dataset
Please note that **you will need to download the dataset**. Although the video for this notebook says that the data is in your folder, the folder turned out to be too large to fit on the edX platform due to size constraints.
Here are the links to the data source and location:
* **Data Source:** MovieLens web site (filename: ml-20m.zip)
* **Location:** https://grouplens.org/datasets/movielens/
Once the download completes, please make sure the data files are in a directory called *movielens* in your *Week-3-pandas* folder.
Let us look at the files in this dataset using the UNIX command ls.
<p style="font-family: Arial; font-size:1.75em;color:#2462C0; font-style:bold">
Use Pandas to Read the Dataset<br>
</p>
<br>
In this notebook, we will be using three CSV files:
* **ratings.csv :** *userId*,*movieId*,*rating*, *timestamp*
* **tags.csv :** *userId*,*movieId*, *tag*, *timestamp*
* **movies.csv :** *movieId*, *title*, *genres* <br>
Using the *read_csv* function in pandas, we will ingest these three files.
```
# Load the MovieLens CSV files. Paths are Windows-local; adjust as needed.
movies = pd.read_csv('C:\\HenryHarvin\\Dataset\\movies.csv', sep=',')
print(type(movies))
movies.head(15)
# Timestamps represent seconds since midnight Coordinated Universal Time (UTC) of January 1, 1970
tags = pd.read_csv('C:\\HenryHarvin\\Dataset\\tags.csv', sep=',')
tags.head()
# NOTE(review): parse_dates on an integer epoch-seconds column does not
# convert it to datetimes; the "Parsing Timestamps" section below uses
# pd.to_datetime(..., unit='s') for that instead — confirm intent.
ratings = pd.read_csv('C:\\HenryHarvin\\Dataset\\ratings.csv', sep=',', parse_dates=['timestamp'])
ratings.head()
# For the current analysis, drop the timestamp columns (revisited later).
del ratings['timestamp']
del tags['timestamp']
```
<h1 style="font-size:2em;color:#2467C0">Data Structures </h1>
<h1 style="font-size:1.5em;color:#2467C0">Series</h1>
```
#Extract 0th row: notice that it is infact a Series
row_0 = tags.iloc[0]
type(row_0)
print(row_0)
row_0.index
row_0['userId']
'rating' in row_0
row_0.name
row_0 = row_0.rename('first_row')
row_0.name
```
<h1 style="font-size:1.5em;color:#2467C0">DataFrames </h1>
```
tags.head()
tags.index
tags.columns
# Extract row 0, 11, 2000 from DataFrame
tags.iloc[ [0,11,2000] ]
```
<h1 style="font-size:2em;color:#2467C0">Descriptive Statistics</h1>
Let's look how the ratings are distributed!
```
ratings['rating'].describe() #Only Rating wil be reflected
ratings.describe() #Whole dataset will be described
ratings['rating'].mean()
ratings.mean()
ratings['rating'].min()
ratings['rating'].max()
ratings['rating'].std()
ratings['rating'].mode()
ratings.corr()
filter_1 = ratings['rating'] > 5
print(filter_1)
filter_1.any()
filter_2 = ratings['rating'] > 0
print(filter_2)
filter_2.all()
```
<h1 style="font-size:2em;color:#2467C0">Data Cleaning: Handling Missing Data</h1>
```
movies.shape
#is any row NULL ?
movies.isnull().any()
```
Thats nice ! No NULL values !
```
ratings.shape
#is any row NULL ?
ratings.isnull().any()
```
Thats nice ! No NULL values !
```
tags.shape
#is any row NULL ?
tags.isnull().any()
```
We have some tags which are NULL.
```
tags = tags.dropna()
#Check again: is any row NULL ?
tags.isnull().any()
tags.shape
```
That's nice! No NULL values! Notice that the number of rows has been reduced.
<h1 style="font-size:2em;color:#2467C0">Data Visualization</h1>
```
%matplotlib inline
ratings.hist(column='rating', figsize=(15,10))
ratings.boxplot(column='rating', figsize=(15,20))
```
<h1 style="font-size:2em;color:#2467C0">Slicing Out Columns</h1>
```
tags['tag'].head()
movies[['title','genres']].head()
ratings[-10:]
tag_counts = tags['tag'].value_counts()
tag_counts[-10:]
tag_counts[:10].plot(kind='bar', figsize=(15,10))
```
<h1 style="font-size:2em;color:#2467C0">Filters for Selecting Rows</h1>
```
is_highly_rated = ratings['rating'] >= 4.0
ratings[is_highly_rated][30:50]
is_animation = movies['genres'].str.contains('Animation')
movies[is_animation][5:15]
movies[is_animation].head(15)
```
<h1 style="font-size:2em;color:#2467C0">Group By and Aggregate </h1>
```
ratings_count = ratings[['movieId','rating']].groupby('rating').count()
ratings_count
average_rating = ratings[['movieId','rating']].groupby('movieId').mean()
average_rating.head()
movie_count = ratings[['movieId','rating']].groupby('movieId').count()
movie_count.head()
movie_count = ratings[['movieId','rating']].groupby('movieId').count()
movie_count.tail()
```
<h1 style="font-size:2em;color:#2467C0">Merge Dataframes</h1>
```
tags.head()
movies.head()
t = movies.merge(tags, on='movieId', how='inner')
t.head()
```
More examples: http://pandas.pydata.org/pandas-docs/stable/merging.html
<p style="font-family: Arial; font-size:1.75em;color:#2462C0; font-style:bold"><br>
Combine aggregation, merging, and filters to get useful analytics
</p>
```
avg_ratings = ratings.groupby('movieId', as_index=False).mean()
del avg_ratings['userId']
avg_ratings.head()
box_office = movies.merge(avg_ratings, on='movieId', how='inner')
box_office.tail()
is_highly_rated = box_office['rating'] >= 4.0
box_office[is_highly_rated][-5:]
is_comedy = box_office['genres'].str.contains('Comedy')
box_office[is_comedy][:5]
box_office[is_comedy & is_highly_rated][-5:]
```
<h1 style="font-size:2em;color:#2467C0">Vectorized String Operations</h1>
```
movies.head()
```
<p style="font-family: Arial; font-size:1.35em;color:#2462C0; font-style:bold"><br>
Split 'genres' into multiple columns
<br> </p>
```
movie_genres = movies['genres'].str.split('|', expand=True)
movie_genres[:10]
```
<p style="font-family: Arial; font-size:1.35em;color:#2462C0; font-style:bold"><br>
Add a new column for comedy genre flag
<br> </p>
```
movie_genres['isComedy'] = movies['genres'].str.contains('Comedy')
movie_genres[:10]
```
<p style="font-family: Arial; font-size:1.35em;color:#2462C0; font-style:bold"><br>
Extract year from title e.g. (1995)
<br> </p>
```
movies['year'] = movies['title'].str.extract('.*\((.*)\).*', expand=True)
movies.tail()
```
<p style="font-family: Arial; font-size:1.35em;color:#2462C0; font-style:bold"><br>
More here: http://pandas.pydata.org/pandas-docs/stable/text.html#text-string-methods
<br> </p>
<h1 style="font-size:2em;color:#2467C0">Parsing Timestamps</h1>
Timestamps are common in sensor data or other time series datasets.
Let us revisit the *tags.csv* dataset and read the timestamps!
```
tags = pd.read_csv('C:\\HenryHarvin\\Dataset\\tags.csv', sep=',')
tags.dtypes
```
<p style="font-family: Arial; font-size:1.35em;color:#2462C0; font-style:bold">
Unix time / POSIX time / epoch time records
time in seconds <br> since midnight Coordinated Universal Time (UTC) of January 1, 1970
</p>
```
tags.head(5)
tags['parsed_time'] = pd.to_datetime(tags['timestamp'], unit='s')
tags.head(2)
```
<p style="font-family: Arial; font-size:1.35em;color:#2462C0; font-style:bold">
Selecting rows based on timestamps
</p>
```
greater_than_t = tags['parsed_time'] > '2015-02-01'
selected_rows = tags[greater_than_t]
tags.shape, selected_rows.shape
```
<p style="font-family: Arial; font-size:1.35em;color:#2462C0; font-style:bold">
Sorting the table using the timestamps
</p>
```
tags.sort_values(by='parsed_time', ascending=True)[:10]
```
<h1 style="font-size:2em;color:#2467C0">Average Movie Ratings over Time </h1>
## Are Movie ratings related to the year of launch?
```
average_rating = ratings[['movieId','rating']].groupby('movieId', as_index=False).mean()
average_rating.tail()
joined = movies.merge(average_rating, on='movieId', how='inner')
joined.head()
joined.corr()
yearly_average = joined[['year','rating']].groupby('year', as_index=False).mean()
yearly_average[:10]
yearly_average[-20:].plot(x='year', y='rating', figsize=(15,10), grid=True)
```
<p style="font-family: Arial; font-size:1.35em;color:#2462C0; font-style:bold">
Do some years look better for the boxoffice movies than others? <br><br>
Does any data point seem like an outlier in some sense?
</p>
| github_jupyter |
```
import pandas as pd
import tensorflow as tf
from matplotlib import pyplot as plt
print(pd.__version__)
print(tf.__version__)
#@title Define the functions that build and train a model
def build_model(my_learning_rate):
    """Create and compile a simple linear regression model.

    Args:
        my_learning_rate: Learning rate passed to the RMSprop optimizer.

    Returns:
        A compiled tf.keras Sequential model with a single Dense unit.
    """
    # Most simple tf.keras models are sequential:
    # a sequential model contains one or more layers.
    model = tf.keras.models.Sequential()

    # The topography of a simple linear regression model
    # is a single node in a single layer.
    model.add(tf.keras.layers.Dense(units=1, input_shape=(1,)))

    # Compile the model topography into code that TensorFlow can execute.
    # Training minimizes the model's mean squared error.
    # FIX: the `lr` argument is deprecated (and removed in newer tf.keras);
    # use `learning_rate` instead.
    model.compile(optimizer=tf.keras.optimizers.RMSprop(learning_rate=my_learning_rate),
                  loss="mean_squared_error",
                  metrics=[tf.keras.metrics.RootMeanSquaredError()])

    return model
## END
print("Defined create_model")
def train_model(model, feature, label, epochs, batch_size):
    """Train the model on the given feature/label data.

    Returns the trained weight, the trained bias, the list of epoch
    indices, and the per-epoch root-mean-squared-error history.
    """
    # Fit the model; Keras records per-epoch metrics in `history`.
    history = model.fit(x=feature, y=label,
                        batch_size=batch_size, epochs=epochs)

    # The single Dense unit holds exactly one weight and one bias.
    weight, bias = model.get_weights()

    # Epoch indices are stored separately from the metric history.
    epoch_list = history.epoch

    # Collect the root mean squared error recorded at each epoch.
    history_frame = pd.DataFrame(history.history)
    rmse_history = history_frame["root_mean_squared_error"]

    return weight, bias, epoch_list, rmse_history
## END
print("Defined train_model")
def plot_the_model(trained_weight, trained_bias, feature, label):
    """Plot the trained model against the training feature and label.

    Draws a scatter plot of (feature, label) pairs and overlays the
    fitted regression line in red.
    """
    # Label the axes.
    plt.xlabel("feature")
    plt.ylabel("label")

    # Plot the feature values vs. label values.
    plt.scatter(feature, label)

    # The red line representing the model runs from (x0, y0) to (x1, y1).
    x0 = 0
    y0 = trained_bias
    # FIX: use the `feature` parameter, not the global `my_feature`, so the
    # function works for any dataset passed in (the original silently read
    # a module-level variable).
    x1 = feature[-1]
    y1 = trained_bias + (trained_weight * x1)
    plt.plot([x0, x1], [y0, y1], c='r')

    # Render the scatter plot and the red line.
    plt.show()
## END
print("Defined plot_the_model")
def plot_the_loss_curve(epochs, rmse):
    """Plot the loss curve: root mean squared error versus epoch."""
    plt.figure()
    plt.xlabel("Epoch")
    plt.ylabel("Root Mean Squared Error")

    plt.plot(epochs, rmse, label="Loss")
    plt.legend()
    # Zoom the y-axis slightly below the minimum so the curve stays readable.
    plt.ylim([rmse.min() * 0.97, rmse.max()])
    plt.show()
## END
print("Defined plot_the_loss_curve")
my_feature = ([1.0, 2.0, 3.0, 4.0, 5.0, 6.0, 7.0, 8.0, 9.0, 10.0, 11.0, 12.0])
my_label = ([5.0, 8.8, 9.6, 14.2, 18.8, 19.5, 21.4, 26.8, 28.9, 32.0, 33.8, 38.2])
learning_rate = 0.01 # alpha
# A full training pass over the entire dataset such that each example has been seen once.
# Thus, an epoch represents N/batch size training iterations, where N is the total number of examples.
epochs = 450 # try other values like 100, 200 and so on till 1000.
# The number of examples in a batch.
# For example, the batch size of SGD is 1, while the batch size of a mini-batch is usually between 10 and 1000.
# Batch size is usually fixed during training and inference; however, TensorFlow does permit dynamic batch sizes.
my_batch_size = 200
my_model = build_model(learning_rate)
trained_weight, trained_bias, epochs, rmse = train_model(my_model,
my_feature,
my_label,
epochs,
my_batch_size)
plot_the_model(trained_weight, trained_bias, my_feature, my_label)
plot_the_loss_curve(epochs, rmse)
# Most machine learning problems require a lot of hyperparameter tuning.
# Unfortunately, we can't provide concrete tuning rules for every model.
# Lowering the learning rate can help one model converge efficiently but make another model converge much too slowly.
# We must experiment to find the best set of hyperparameters for our dataset.
# That said, here are a few rules of thumb:
# Training loss should steadily decrease, steeply at first, and then more slowly until the slope of the curve reaches or approaches zero.
# If the training loss does not converge, train for more epochs.
# If the training loss decreases too slowly, increase the learning rate.
# Note that setting the learning rate too high may also prevent training loss from converging.
# If the training loss varies wildly (that is, the training loss jumps around), decrease the learning rate.
# Lowering the learning rate while increasing the number of epochs or the batch size is often a good combination.
# Setting the batch size to a very small batch number can also cause instability.
# First, try large batch size values. Then, decrease the batch size until you see degradation.
# For real-world datasets consisting of a very large number of examples, the entire dataset might not fit into memory.
# In such cases, you'll need to reduce the batch size to enable a batch to fit into memory.
# Remember: the ideal combination of hyperparameters is data dependent, so you must always experiment and verify.
```
| github_jupyter |
#### 선형 회귀분석의 기초
- 회귀 분석용 샘플 데이터
- 회귀분석용 가상 데이터 생성 방법
- 선형 회귀분석의 기초
- 확률론적 선형회귀 모형
- 레버리지와 아웃라이어
#### 회귀 분석용 샘플 데이터 소개
- 보스턴 주택 가격 데이터
- 1970년대 미국 보스턴의 주택 가격 데이터이다. load_boston() 명령으로 로드하며 다음과 같이 구성되어 있다.
- 타겟 데이터
- 1978 보스턴 주택 가격
- 506 타운의 주택 가격 중앙값 (단위 1,000 달러)
- 특징 데이터
- CRIM: 범죄율
- INDUS: 비소매상업지역 면적 비율
- NOX: 일산화질소 농도
- RM: 주택당 방 수
- LSTAT: 인구 중 하위 계층 비율
- B: 인구 중 흑인 비율
- PTRATIO: 학생/교사 비율
- ZN: 25,000 평방피트를 초과 거주지역 비율
- CHAS: 찰스강의 경계에 위치한 경우는 1, 아니면 0
- AGE: 1940년 이전에 건출된 주택의 비율
- RAD: 방사형 고속도리까지의 거리
- DIS: 직업센터의 거리
- TAX: 재산세율
- MEDV: Median value of owner-occupied homes in $1000's
```
# NOTE(review): sklearn.datasets.load_boston was deprecated in scikit-learn
# 1.0 and removed in 1.2; this cell requires an older scikit-learn version.
# pd (pandas), sns (seaborn) and plt are assumed imported in an earlier cell.
from sklearn.datasets import load_boston
boston = load_boston()
print(boston.DESCR)
# Combine features and target into a single DataFrame.
dfx = pd.DataFrame(boston.data, columns = boston.feature_names)
dfy = pd.DataFrame(boston.target, columns = ['MEDV'])
df = pd.concat([dfx, dfy], axis=1)
df.tail()
df.describe()
%matplotlib inline
# Pairwise scatter plots of a few selected columns.
cols = ['LSTAT', 'NOX', 'RM', 'MEDV']
sns.pairplot(df[cols])
plt.show()
```
#### 당뇨병 데이터
- 442명의 당뇨병 환자의 검사 데이터이다. load_diabets() 명령으로 로드하며 다음과 같이 구성되어 있다.
- 타겟 데이터
- 1년 후의 당뇨병 진행도
- 특징 데이터
- 나이, 성별, BMI(body mass index) 지수, 혈압 및 6개의 혈청 검사 수치
```
# Load the diabetes regression dataset and combine data + target into one frame.
from sklearn.datasets import load_diabetes
diabetes = load_diabetes()
df = pd.concat([pd.DataFrame(diabetes.data, columns = ['x%d' % (i + 1) for i in range(diabetes.data.shape[1])]),
                pd.DataFrame(diabetes.target, columns = ['target'])], axis = 1)
df.head()
df.describe()
# FIX: DataFrame.ix was deprecated in pandas 0.20 and removed in 1.0;
# use positional iloc for the first four columns instead.
sns.pairplot(df.iloc[:, :4])
plt.show()
print(diabetes.DESCR)
```
#### 체력 검사 데이터
- 신체 상황 및 운동 능력을 측정한 데이터이다. load_linnerud() 명령으로 로드하며 다음과 같이 구성되어 있다.
- 턱걸이, 앉았다 일어나기, 점프의 세가지 운동 능력
- 체중, 허리 둘레, 맥박의 세가지 신체 상황
```
# Load the Linnerud physical-exercise dataset and combine data + target.
from sklearn.datasets import load_linnerud
linnerud = load_linnerud()
print(linnerud.DESCR)
df = pd.concat([pd.DataFrame(linnerud.data, columns = linnerud.feature_names),
                pd.DataFrame(linnerud.target, columns=linnerud.target_names)], axis=1)
df.tail()
# FIX: DataFrame.ix was deprecated in pandas 0.20 and removed in 1.0;
# use positional iloc for the first four columns instead.
sns.pairplot(df.iloc[:, :4])
plt.show()
```
#### 회귀분석용 가상 데이터 생성 방법
scikit-learn의 datasets 서브 패키지는 회귀분석용 가상 데이터를 생성하는 명령어인 make_regression()를 제공한다. make_regression()명령으로 만들어진 데이터는 종속 변수 y의 값이 독립변수 x벡터의 선형 조합인 선형 관계를 가진다. (b는 y절편 즉, 바이어스(bias) 값이다)
여기에 정규분포 e 만큼의 오차(disturbance)가 추가되는데 이 오차는 종속 변수 y에 영향을 미치는 요인 중에서 우리가 고려하지 않는 것들의 영향을 모두 합친것 이라고 생각하면 된다. 중심 극한 정리에 따라 이러한 모든 영향의 합은 정규분포를 따를 가능성이 높기 때문에 오차 e는 보통 기댓값이 0인 정규 분포 확률 변수로 가정한다.
make_regression()명령은 내부적으로 입력(독립 변수) 데이터인 X행렬, 오차벡터, 계수 w벡터를 확률적으로 생성한 후, 위 관계식에 따라 출력(종속 변수) 데이터 y 벡터를 계산하여 X, y 값을 출력한다.
```
X, y = make_regression(...)
X, y, w = make_regression(..., coef=True, ...)
```
- 입력 인수는 다음과 같다.
- n_samples: 정수 (옵션, 디폴트 100)
- 표본 데이터의 갯수 N
- n_features: 정수 (옵션, 디폴트 100)
- 독립 변수(feature)의 수(차원) M
- n_targets: 정수(옵션, 디폴트 1)
- 종속 변수(target)의 수(차원)
- bias: 실수 (옵션, 디폴트 0.0)
- y 절편
- noise: 실수 (옵션, 디폴트 0.0)
- 출력 즉, 종속 변수에 더해지는 오차 e의 표준 편차
- coef: 불리언 (옵션, 디폴트 False)
- True이면 선형 모형의 계수도 출력
- random_state: 정수 (옵션, 디폴트 None)
- 난수 발생용 시드값
- 출력은 다음과 같다.
- X : [n_samples, n_features] 형상의 2차원 배열
- 독립 변수의 표본 데이터 행렬 X
- y : [n_samples] 형상의 1차원 배열 또는 [n_samples, n_targets] 형상의 2차원 배열
- 종속 변수의 표본 데이터 벡터 y
- coef: [n_features] 형상의 1차원 배열 또는 [n_features, n_targets] 형상의 2차원 배열 (옵션)
- 선형 모형의 계수 벡터 w, 입력 인수 coef가 True인 경우에만 출력 됨
다음은 독립 변수가 1개, 종속 변수가 1개이며 noise 인수값이 0이므로 오차가 없는 경우이다.
```
from sklearn.datasets import make_regression
X, y, w = make_regression(n_samples = 10, n_features = 1, bias=0, noise=0,
coef=True, random_state = 0)
X
y
w
plt.scatter(X, y, s=100)
plt.xlabel('x')
plt.ylabel('y')
plt.show()
```
noise 인수를 증가시키면, Var[e]가 증가하고 bias 인수를 증가시키면 y 절편 b가 증가한다
```
X, y, w = make_regression(n_samples = 50, n_features=1, bias=100, noise=10,
coef=True, random_state=0)
plt.scatter(X, y, s=100)
plt.xlabel('X')
plt.ylabel('y')
plt.show()
```
#### 연습 문제 1
1. make_regression과 같은 기능을 하는 함수 make_regression2를 만들어라.
단 make_regression2는 coef=True라고 가정한다. 즉 항상 가중치 계수를 반환한다.
또한 1차원 독립 변수만 생성할 수 있으므로 다음과 같은 인수만 가진다.
- n_samples
- bias
- noise
- random_state
2. make_regression2함수에 coef 인수를 추가하여 make_regression3함수를 만들어라. make_regression3 함수는 가중치를 스스로 생성하지 않고 coef 인수로 받은 가중치 계수 값을 그대로 사용하며 가중치 계수를 반환하지 않는다.
```
# Answer attempt for Exercise 1: delegate to sklearn's make_regression,
# fixing n_features to 1 and always returning the coefficient (coef=True).
def make_regression2(n_samples, bias, noise, random_state):
    from sklearn.datasets import make_regression
    return make_regression(n_samples=n_samples, bias=bias , noise=noise , random_state=random_state, n_features=1 ,coef=True)

X, y, w = make_regression2(n_samples=10, bias=10, noise=10, random_state=0)
w
# Sanity check: calling sklearn directly gives the same coefficient.
X, y, w = make_regression(n_samples=10, bias=10, noise=10, random_state=0, coef=True, n_features = 1)
w

# NOTE(review): per Exercise 2 this should be named make_regression3 and
# should USE the `coef` argument as the fixed weight vector; as written the
# parameter is accepted but ignored (coef=True is hard-coded) — confirm.
def make_regression2(n_samples, bias, noise, random_state, coef):
    from sklearn.datasets import make_regression
    return make_regression(n_samples=n_samples, bias=bias , noise=noise , random_state=random_state, n_features=1 ,coef=True)

%matplotlib inline
# Hand-rolled variant generating X, weight W and noisy target Y directly.
# NOTE(review): requires numpy as np (imported in an earlier cell); the
# random_state parameter is ignored — the seed is hard-coded to 0. Returns
# (X, W, Y), so the caller's names below are a=X, b=W, c=Y.
def make_regression2(n_sample, bias, noise, random_state):
    from sklearn.datasets import make_regression
    np.random.seed(0)
    X = np.random.rand(n_sample) * 100
    W = np.random.rand(1) * 1
    t = np.random.randn(n_sample) * noise
    Y = X * W + bias + t
    return X, W, Y

a, b, c = make_regression2(10, 10, 10, 1)
plt.scatter(a, c)
plt.show()
```
| github_jupyter |
<a href="https://colab.research.google.com/github/Benjamindavid03/MachineLearningLab/blob/main/CRF.ipynb" target="_parent"><img src="https://colab.research.google.com/assets/colab-badge.svg" alt="Open In Colab"/></a>
# Conditional Random Fields (CRF)
The bag of words (BoW) approach works well for multiple text classification problems.
This approach assumes that presence or absence of word(s) matter more than the sequence of the words. However, there are problems such as entity recognition, part of speech identification where word sequences matter as much, if not more. Conditional Random Fields (CRF) comes to the rescue here as it uses word sequences as opposed to just words.
## Part-of-Speech(POS) Tagging
In POS tagging, the goal is to label a sentence (a sequence of words or tokens) with tags like ADJECTIVE, NOUN, PREPOSITION, VERB, ADVERB, ARTICLE.
For example, given the sentence “Bob drank coffee at Starbucks”, the labeling might be “Bob (NOUN) drank (VERB) coffee (NOUN) at (PREPOSITION) Starbucks (NOUN)”.
So we will build a Conditional Random Field to label sentences with their parts of speech.
```
# install crf and nltk in python if not installed
!pip install python-crfsuite
!pip install nltk
```
NOTE: Download this file (https://raw.githubusercontent.com/Benjamindavid03/MachineLearningLab/main/reuters.xml) and upload in the notebook before proceeding.
<p>A few words about the Dataset
To train a named entity recognition model, we need some labelled data. The dataset that will be used below is the Reuters-128 dataset, which is an English corpus in the NLP Interchange Format (NIF). It contains 128 economic news articles. The dataset contains information for 880 named entities with their position in the document and a URI of a DBpedia resource identifying the entity. It was created by the Agile Knowledge Engineering and Semantic Web research group at Leipzig University, Germany. More details can be found in their paper.
In the following, we will use the XML verison of the dataset, which can be downloaded from https://github.com/AKSW/n3-collection.
</p>
# Step 1: Prepare the Dataset for Training from the XML format
In order to prepare the dataset for training, we need to label every word (or token) in the sentences to be either irrelevant or part of a named entity. Since the data is in XML format, we can make use of BeautifulSoup to parse the file and extract the data as follows:
```
from bs4 import BeautifulSoup as bs
from bs4.element import Tag
import codecs

# Read the Reuters-128 data file and parse the XML.
# NOTE(review): html5lib lower-cases tag names, which is why the find/find_all
# calls below use all-lowercase element names — confirm against the file.
with codecs.open("reuters.xml", "r", "utf-8") as infile:
    soup = bs(infile, "html5lib")

docs = []
for elem in soup.find_all("document"):
    texts = []

    # Loop through each child of the element under "textwithnamedentities".
    for c in elem.find("textwithnamedentities").children:
        if type(c) == Tag:
            if c.name == "namedentityintext":
                label = "N"  # part of a named entity
            else:
                label = "I"  # irrelevant word

            # Split the child's text on spaces and tag every non-empty
            # token with the label of its enclosing element.
            for w in c.text.split(" "):
                if len(w) > 0:
                    texts.append((w, label))
    docs.append(texts)

# Labels: "N" marks a token inside a named entity, "I" an irrelevant word.
docs[0]
```
# Step 2 : Generating Part-of-Speech Tags
```
import nltk
# Download the model used by nltk.pos_tag (a one-time download).
nltk.download('averaged_perceptron_tagger')

data = []
for i, doc in enumerate(docs):
    # Obtain the list of tokens in the document.
    tokens = [t for t, label in doc]

    # Perform POS tagging with NLTK's averaged-perceptron tagger.
    tagged = nltk.pos_tag(tokens)

    # Keep (word, POS tag, entity label) triples; zip pairs each original
    # (word, label) with its corresponding (word, pos) from the tagger.
    data.append([(w, pos, label) for (w, label), (word, pos) in zip(doc, tagged)])

data[0]
```
# Step 3 : Generating Features
To train a CRF model, we need to create features for each of the tokens in the sentences. One particularly useful feature in NLP is the part-of-speech (POS) tags of the words. They indicates whether a word is a noun, a verb or an adjective. (In fact, a POS tagger is also usually a trained CRF model.)
Below are some of the commonly used features for a word w in named entity recognition:
* The word w itself (converted to lowercase for normalisation)
* The prefix/suffix of w (e.g. -ion)
* The words surrounding w, such as the previous and the next word
* Whether w is in uppercase or lowercase
* Whether w is a number, or contains digits
* The POS tag of w, and those of the surrounding words
* Whether w is or contains a special character (e.g. hypen, dollar sign)
We can use NLTK's POS tagger to generate the POS tags for the tokens in our documents as follows:
```
def word2features(doc, i):
    """Build the CRF feature list for the token at position *i* in *doc*.

    *doc* is a sequence of (word, postag, ...) tuples.  Returns a list of
    'name=value' feature strings describing the token and its immediate
    neighbours, plus 'BOS'/'EOS' markers at the document boundaries.
    """
    token, tag = doc[i][0], doc[i][1]

    # Features derived from the current word alone.
    feats = [
        'bias',
        f'word.lower={token.lower()}',
        f'word[-3:]={token[-3:]}',
        f'word[-2:]={token[-2:]}',
        f'word.isupper={token.isupper()}',
        f'word.istitle={token.istitle()}',
        f'word.isdigit={token.isdigit()}',
        f'postag={tag}',
    ]

    # Context from the previous word, or a beginning-of-document marker.
    if i == 0:
        feats.append('BOS')
    else:
        prev_token, prev_tag = doc[i - 1][0], doc[i - 1][1]
        feats += [
            f'-1:word.lower={prev_token.lower()}',
            f'-1:word.istitle={prev_token.istitle()}',
            f'-1:word.isupper={prev_token.isupper()}',
            f'-1:word.isdigit={prev_token.isdigit()}',
            f'-1:postag={prev_tag}',
        ]

    # Context from the next word, or an end-of-document marker.
    if i == len(doc) - 1:
        feats.append('EOS')
    else:
        next_token, next_tag = doc[i + 1][0], doc[i + 1][1]
        feats += [
            f'+1:word.lower={next_token.lower()}',
            f'+1:word.istitle={next_token.istitle()}',
            f'+1:word.isupper={next_token.isupper()}',
            f'+1:word.isdigit={next_token.isdigit()}',
            f'+1:postag={next_tag}',
        ]

    return feats
```
# Step 4 : Training the Model
To train the model, we need to first prepare the training data and the corresponding labels. Also, to be able to investigate the accuracy of the model, we need to separate the data into training set and test set. Below are some codes for preparing the training data and test data, using the train_test_split function in scikit-learn.
```
from sklearn.model_selection import train_test_split
# Helper: build the per-token feature lists for one document.
def extract_features(doc):
    """Return the CRF feature list for every token in *doc*."""
    return [word2features(doc, idx) for idx in range(len(doc))]

# Helper: collect the gold labels for one document.
def get_labels(doc):
    """Return the label sequence of *doc*, one label per token."""
    return [lab for (_token, _postag, lab) in doc]
X = [extract_features(doc) for doc in data]
y = [get_labels(doc) for doc in data]
X_train, X_test, y_train, y_test = train_test_split(X, y, test_size=0.2)
```
In pycrfsuite, A CRF model in can be trained by first creating a trainer, and then submit the training data and corresponding labels to the trainer. After that, set the parameters and call train() to start the training process. For the complete list of parameters, one can refer to the documentation of CRFSuite. With the very small dataset in this example, the training with max_iterations=200 can be finished in a few seconds. Below is the code for creating the trainer and start training the model:
```
import pycrfsuite
trainer = pycrfsuite.Trainer(verbose=True)
# Submit training data to the trainer
for xseq, yseq in zip(X_train, y_train):
trainer.append(xseq, yseq)
# Set the parameters of the model
trainer.set_params({
# coefficient for L1 penalty
'c1': 0.1,
# coefficient for L2 penalty
'c2': 0.01,
# maximum number of iterations
'max_iterations': 200,
# whether to include transitions that
# are possible, but not observed
'feature.possible_transitions': True
})
# Provide a file name as a parameter to the train function, such that
# the model will be saved to the file when training is finished
trainer.train('crf.model')
```
# Step 5 : Checking the Results
Once we have the model trained, we can apply it on our test data and see whether it gives reasonable results. Assuming that the model is saved to a file named crf.model. The following block of code shows how we can load the model into memory, and apply it on to our test data.
```
# Load the trained model from disk and tag every test sequence.
tagger = pycrfsuite.Tagger()
tagger.open('crf.model')
y_pred = [tagger.tag(xseq) for xseq in X_test]

# Let's take a look at a random sample in the testing set.
# Each feature list's second entry is 'word.lower=<token>', so splitting on
# '=' recovers the (lower-cased) token for display next to its predicted tag.
i = 12
for x, y in zip(y_pred[i], [x[1].split("=")[1] for x in X_test[i]]):
    print("%s (%s)" % (y, x))
```
To study the performance of the CRF tagger trained above in a more quantitative way, we can check the precision and recall on the test data. This can be done very easily using the classification_report function in scikit-learn. However, given that the predictions are sequences of tags, we need to transform the data into a list of labels before feeding them into the function.
```
import numpy as np
from sklearn.metrics import classification_report

# Create a mapping of labels to indices.
labels = {"N": 1, "I": 0}

# Flatten the sequences of tags into 1-dimensional arrays of label indices.
predictions = np.array([labels[tag] for row in y_pred for tag in row])
truths = np.array([labels[tag] for row in y_test for tag in row])

# Print out the classification report (per-class precision/recall/F1).
print(classification_report(
    truths, predictions,
    target_names=["I", "N"]))
```
We can see that we have achieved 98% precision and 98% recall in predicting whether a word is part of a named entity.
# References
1. https://homepages.inf.ed.ac.uk/csutton/publications/crftut-fnt.pdf
2. http://blog.echen.me/2012/01/03/introduction-to-conditional-random-fields/
3. https://albertauyeung.github.io/2017/06/17/python-sequence-labelling-with-crf.html/#prepare-the-dataset-for-training
4. https://homepages.inf.ed.ac.uk/csutton/publications/crftut-fnt.pdf
5. Lafferty, J., McCallum, A., Pereira, F. (2001). "Conditional random fields: Probabilistic models for segmenting and labeling sequence data". Proc. 18th International Conf. on Machine Learning. Morgan Kaufmann. pp. 282–289.
6. Erdogan, H. (2010). Sequence Labeling: Generative and Discriminative Approaches - Hidden Markov Models, Conditional Random Fields and Structured SVMs. ICMLA 2010 Tutorial.
| github_jupyter |
```
import pandas as pd
import scipy
import numpy as np
import scipy.sparse as sp
import scipy.io as spio
import operator
import matplotlib.pyplot as plt
def plot_cut_2mers(datafr, cut_mat) :
    """Plot a bar chart of total read counts per cleavage-site dinucleotide.

    For every non-zero entry (i, j) of the cut matrix, the dinucleotide
    spanning positions j-1 and j of sequence i is extracted and its read
    count accumulated; totals are shown sorted in ascending order.

    datafr  : DataFrame with a 'seq' column (row-aligned with cut_mat) and
              a 'total_count' column.
    cut_mat : 2D sparse/dense matrix of cut read counts; rows index
              sequences, columns index cut positions.
    """
    cut_mer2 = {}
    seqs = list(datafr['seq'].values)
    # FIX: `np.object` was deprecated in NumPy 1.20 and removed in 1.24;
    # the builtin `object` is the documented replacement.
    seqs = np.array(seqs, dtype=object)
    total_count = np.array(datafr['total_count'])
    # COO format exposes the (row, col, value) triplets of non-zero cuts.
    cx = sp.coo_matrix(cut_mat)
    for i, j, v in zip(cx.row, cx.col, cx.data) :
        seq = seqs[i]
        # Dinucleotide straddling the cut: the base before and at position j.
        mer2 = seq[j-1:j+1]
        if mer2 not in cut_mer2 :
            cut_mer2[mer2] = 0
        cut_mer2[mer2] += v
    # Sort dinucleotides by total read count, ascending.
    cut_mer2_sorted = sorted(cut_mer2.items(), key=operator.itemgetter(1))
    mer2_list = [mer2 for mer2, _ in cut_mer2_sorted]
    mer2_vals = [count for _, count in cut_mer2_sorted]
    f = plt.figure(figsize=(6, 4))
    plt.bar(mer2_list, mer2_vals, color='black')
    plt.title('Proximal cleavage dinuc.', fontsize=14)
    plt.xlabel('Dinucleotide', fontsize=14)
    plt.ylabel('Read count', fontsize=14)
    plt.xticks(fontsize=14, rotation=45)
    plt.yticks(fontsize=14)
    plt.tight_layout()
    plt.show()
# Identifiers used to name every output file of this processing run.
library_name = 'array_noacut_score_50'
library_version = 'unfiltered'
# Raw (unprocessed) measurement table; one row per sequenced reporter.
data = pd.read_pickle('unprocessed_data/d.pkl')
# Sanity peek at FOXP3 rows (notebook cell display only; result not stored).
data[data.gene.str.contains('FOXP3')]
# Column accumulators for the per-row dataframe assembled further below.
emitted_proximal_count = []
emitted_distal_count = []
emitted_total_count = []
emitted_seq = []
emitted_mask = []
emitted_lib = []
emitted_lib_index = []
emitted_sublib = []
emitted_sublib_index = []
emitted_experiment = []
emitted_gene = []
emitted_subexperiment = []
emitted_array_version = []
emitted_barcode = []
emitted_master_seq = []
# Sparse per-row cut-count matrix: one row per input sequence, 206 cut positions.
proximal_cuts = sp.lil_matrix((len(data), 206)) #PAS CSE starts at 70
# Fixed vector sequence flanking the variable region on each side.
up_constant = 'ATGGGCTGGGAGGCCTCCTCCGAGCGGATGTACCCCGAGGACGGCGCCCTGAAGGGCGAGATCAAGCAGAGGCTGAAGCTGAAGGACGGCGGCCACTACGACGCTGAGGTCAAGACCACCTACAAGGCCAAGAAGCCCGTGCAGCTGCCCGGCGCCTACAACGTCAACATCAAGTTGGACATCACCTCCCACAACGAGGACTACACCATCGTGGAACAGTACGAACGCGCCGAGGGCCGCCACTCCACCGGCGGCATGGACGAGCTGTACAAGTCTTGATACACGACGCTCTTCCGATCT'
dn_constant = 'GGAGCAGATACTGGCTTAACTATGCGCCTCGACTGTGCCTTCTAGTTGCCAGCCATCTGTTGTTTGCCCCTCCCCCGTGCCTTCCTTGACCCTGGAAGGTGCCACTCCCACTGTCCTTTCCTAATAAAATGAGGAAATTGCATCGCATTGTCTGAGTAGGTGTCATTCTATTCTGGGGGGTGGGGTGGGGCAGGACAGCAAGGGGGAGGATTGGGAAGACAATAGCAGGCATGCTGGGGACGACGGTGCTCGAAGCAGCGCAAAACGCCTAACCCTAAGCAGATTCTTCATGCAATTGTC'
# Padding actually concatenated onto each emitted sequence (here: none
# upstream, first 22 nt downstream) vs. the longer padding windows recorded
# in the metadata file.
up_constant_dataframe = ''
up_constant_metadata = up_constant[-180:]
dn_constant_dataframe = dn_constant[:22]
dn_constant_metadata = dn_constant[22:22 + 120]
#Store library meta data
df_metadata = pd.DataFrame(
    {
        'library' : ['array'],
        'library_index' : [40],
        'sublibrary' : ['array'],
        'sublibrary_index' : [40],
        'upstream_padding' : [up_constant_metadata],
        'downstream_padding' : [dn_constant_metadata]
    }
)
# Fix column order before writing (dict order is not relied upon here).
df_metadata = df_metadata[['library', 'library_index', 'sublibrary', 'sublibrary_index', 'upstream_padding', 'downstream_padding']]
df_metadata.to_csv(library_name + '_metadata.csv', header=True, index=False, sep=',')
# Processing switches: shift A-runs off the cut position, and drop
# individual cut calls with alignment score below this threshold.
move_cut_to_non_a = True
cut_score_filter = 50
# Main pass over the raw table: count proximal/distal/total cuts per row,
# record per-position proximal cut counts, and emit one dataframe row each.
for index, row in data.iterrows() :
    if index % 100000 == 0:
        print("Read up to sequence: " + str(index))
    master_seq = row['seq']
    barcode = row['N20']
    # Emitted sequence = random barcode followed by the designed sequence.
    seq = barcode + master_seq
    mask = 'N' * len(seq)
    # Per-row cut calls: column 0 is cut position (relative), column 1 score.
    cut_mat = row['CUT_SSW']
    proximal_count = 0
    distal_count = 0
    total_count = 0
    # Pad with the constant flanks; 'X' marks constant, 'N' variable bases.
    full_seq = up_constant_dataframe + seq + dn_constant_dataframe
    full_mask = ('X' * len(up_constant_dataframe)) + mask + ('X' * len(dn_constant_dataframe))
    for j in range(0, cut_mat.shape[0]) :
        # +70 shifts the raw cut coordinate to the PAS CSE reference frame.
        cutpos = int(cut_mat[j, 0]) + 70
        cutscore = cut_mat[j, 1]
        if cutscore >= cut_score_filter :
            total_count += 1
            # Positions at/after 206 fall outside the proximal window.
            if cutpos >= 206 :
                distal_count += 1
            else :
                proximal_count += 1
                if move_cut_to_non_a :
                    # Walk the cut left past up to two templated 'A's so the
                    # recorded position is not inflated by poly(A) priming.
                    jj_char = 0
                    while full_seq[cutpos - 1] == 'A' and jj_char < 2 and cutpos > 0 :
                        cutpos -= 1
                        jj_char += 1
                proximal_cuts[index, cutpos] += 1
    #Emit 5' Proximal PAS variant
    #Sequence length 206
    emitted_seq.append(full_seq)
    emitted_mask.append(full_mask)
    emitted_lib.append('array')
    emitted_lib_index.append(40)
    emitted_sublib.append('array')
    emitted_sublib_index.append(40)
    emitted_proximal_count.append(proximal_count)
    emitted_distal_count.append(distal_count)
    emitted_total_count.append(total_count)
    #Extra array dimensions
    emitted_experiment.append(row['exp'])
    emitted_gene.append(row['gene'])
    emitted_subexperiment.append(row['info'])
    emitted_array_version.append(row['lib'].lower())
    emitted_barcode.append(barcode)
    emitted_master_seq.append(master_seq)
#Inflate dataframe
# Assemble the per-row accumulators into one dataframe (row-aligned with
# the proximal_cuts matrix built above).
df = pd.DataFrame({'seq' : emitted_seq,
                   'mask' : emitted_mask,
                   'proximal_count' : emitted_proximal_count,
                   'distal_count' : emitted_distal_count,
                   'total_count' : emitted_total_count,
                   'library' : emitted_lib,
                   'library_index' : emitted_lib_index,
                   'sublibrary' : emitted_sublib,
                   'sublibrary_index' : emitted_sublib_index,
                   'experiment' : emitted_experiment,
                   'subexperiment' : emitted_subexperiment,
                   'gene' : emitted_gene,
                   'array_version' : emitted_array_version,
                   'barcode' : emitted_barcode,
                   'master_seq' : emitted_master_seq,
                   })
#Simultaneously sort dataframe and cut matrices
# Sort ascending by total read count; the cut matrix is reordered with the
# same index so rows stay aligned.
total_count = np.array(df['total_count'])
sort_index = np.argsort(total_count)
df = df.iloc[sort_index].reset_index(drop=True)
proximal_cuts = proximal_cuts[sort_index]
#Filter final version on read count
# Keep only rows with at least one retained cut call.
total_count = np.array(df['total_count'])
filter_index = np.nonzero(total_count >= 1)[0]
df = df.iloc[filter_index].reset_index(drop=True)
proximal_cuts = proximal_cuts[filter_index]
print(len(df))
print(proximal_cuts.shape[0])
#Store dataframe and cut matrices
df = df[['seq', 'mask', 'proximal_count', 'distal_count', 'total_count', 'library', 'library_index', 'sublibrary', 'sublibrary_index', 'experiment', 'subexperiment', 'gene', 'array_version', 'barcode', 'master_seq']]
df.to_csv(library_name + '_' + library_version + '.csv', header=True, index=False, sep=',')
spio.savemat(library_name + '_' + library_version + '_cuts', {'cuts' : proximal_cuts})
#Read dataframe and cut matrices
# Round-trip the stored files so downstream cells start from disk state.
df = pd.read_csv(library_name + '_' + library_version + '.csv', delimiter=',').reset_index(drop=True)
proximal_cuts = spio.loadmat(library_name + '_' + library_version + '_cuts.mat')['cuts']
print(len(df))
plot_cut_2mers(df, proximal_cuts)
# `regex` (not stdlib `re`) supports fuzzy matching: {s<=k} allows up to k
# substitutions inside the captured group.
import regex as re
#Filter dataframe and cut matrices
# Internal-priming filters: A-runs of length 12/16/20 tolerating 1/2/3
# mismatches respectively mark likely oligo-dT mispriming events.
misprime_regexes = [
    re.compile(r"(AAAAAAAAAAAA){s<=1}"),
    re.compile(r"(AAAAAAAAAAAAAAAA){s<=2}"),
    re.compile(r"(AAAAAAAAAAAAAAAAAAAA){s<=3}")
]
keep_index = []
for index, row in df.iterrows() :
    if index % 100000 == 0:
        print("Read up to sequence: " + str(index))
    curr_seq = row['seq']
    internal_priming = False
    for misprime_regex in misprime_regexes :
        if re.search(misprime_regex, curr_seq) :
            internal_priming = True
            break
    # Keep only rows with no detected internal-priming signature.
    if not internal_priming :
        keep_index.append(index)
df_cleaned = df.iloc[keep_index].reset_index(drop=True)
# Subset the cut matrix with the same indices to keep rows aligned.
proximal_cuts_cleaned = proximal_cuts[keep_index]
print(len(df_cleaned))
print(proximal_cuts_cleaned.shape[0])
#Store dataframe and cut matrices
library_version = 'cleaned'
df_cleaned.to_csv(library_name + '_' + library_version + '.csv', header=True, index=False, sep=',')
spio.savemat(library_name + '_' + library_version + '_cuts', {'cuts' : proximal_cuts_cleaned})
#Read dataframe and cut matrices
library_version = 'cleaned'
df_cleaned = pd.read_csv(library_name + '_' + library_version + '.csv', delimiter=',').reset_index(drop=True)
proximal_cuts_cleaned = spio.loadmat(library_name + '_' + library_version + '_cuts.mat')['cuts']
print(len(df_cleaned))
plot_cut_2mers(df_cleaned, proximal_cuts_cleaned)
# Aggregate cut counts across all rows to get the positional cut profile.
proximal_profile = np.ravel(proximal_cuts_cleaned.sum(axis=0))
f = plt.figure(figsize=(8, 6))
plt.plot(np.arange(len(proximal_profile)), proximal_profile, c='darkgreen', linewidth=2)
#Proximal 1
# Reference markers: PAS start (70), PAS end (+6), expected cut site (+21).
plt.axvline(x=70, linewidth=2, c='black', linestyle='--')
plt.axvline(x=70 + 6, linewidth=2, c='black', linestyle='--')
plt.axvline(x=70 + 21, linewidth=2, c='orange', linestyle='--')
plt.xticks(fontsize=14)
plt.yticks(fontsize=14)
plt.xlabel('Position', fontsize=16)
plt.ylabel('Read count', fontsize=16)
plt.title('Proximal site', fontsize=16)
plt.tight_layout()
plt.show()
#Filter final version on read count
# Final cut: require at least 10 total reads per barcode row.
total_count = np.array(df_cleaned['total_count'])
filter_index = np.nonzero(total_count >= 10)[0]
df_final = df_cleaned.iloc[filter_index].reset_index(drop=True)
proximal_cuts_final = proximal_cuts_cleaned[filter_index]
print(len(df_final))
print(proximal_cuts_final.shape[0])
#Store dataframe and cut matrices
library_version = 'final'
df_final.to_csv(library_name + '_' + library_version + '.csv', header=True, index=False, sep=',')
spio.savemat(library_name + '_' + library_version + '_cuts', {'cuts' : proximal_cuts_final})
plot_cut_2mers(df_final, proximal_cuts_final)
#Deflate and map rows to wt sequences
library_name = 'array_noacut_score_50'
library_version = 'final2'
#Read dataframe and cut matrices
# NOTE(review): 'final2' inputs are expected under processed_data/ —
# presumably produced by an external step; confirm against the pipeline.
folder_path = 'processed_data/' + library_version + '/'
df = pd.read_csv(folder_path + library_name + '_' + library_version + '.csv', delimiter=',').reset_index(drop=True)
cuts = spio.loadmat(folder_path + library_name + '_' + library_version + '_cuts.mat')['cuts']
print(len(df))
def str_to_dict(dict_str) :
    """Re-inflate a dict serialized via str(dict).

    Strips the surrounding braces, splits the body on commas, removes the
    quotes around each key, and unquotes values that were strings. Values
    that were not quoted are kept as their raw text (never evaluated).
    Assumes keys/values contain no embedded commas or colons.
    """
    inflated_dict = {}
    #print(dict_str)
    #print(dict_parts)
    body = dict_str[1:-1]
    for entry in body.split(',') :
        pieces = entry.split(':')
        # Key: strip whitespace, then drop the surrounding quotes.
        key = pieces[0].strip()[1:-1]
        value = pieces[1].strip()
        # Quoted value -> it was a string; drop the quotes.
        if value[0] == "'" and value[-1] == "'" :
            value = value[1:-1]
        inflated_dict[key] = value
    return inflated_dict
# Per-experiment annotation pass: parse the free-form 'subexperiment' field
# into structured columns (variant type, site type, ClinVar id, ACMG flag,
# clinical significance, and model predictions where available).
emitted_subexperiment = []
emitted_variant = []
emitted_sitetype = []
emitted_clinvar_id = []
emitted_in_acmg = []
emitted_significance = []
emitted_predicted_usage = []
emitted_predicted_logodds = []
for index, row in df.iterrows() :
    if index % 100000 == 0:
        print("Read up to sequence: " + str(index))
    experiment_dict = row['subexperiment']
    experiment = row['experiment']
    # Defaults for fields a given experiment type does not provide.
    subexp = ''
    variant = ''
    sitetype = 'Missing'
    clinvar_id = 'Missing'
    in_acmg = False
    significance = 'Missing'
    # NOTE: -np.nan is just nan; the minus sign has no effect.
    predicted_usage = -np.nan
    predicted_logodds = -np.nan
    if experiment in ['max_iso', 'max_cut'] :
        # Engineered sequences: tag encodes subexperiment + variant; the
        # dict also carries the model's predicted usage / log-odds.
        experiment_dict = str_to_dict(experiment_dict)
        subexp_parts = experiment_dict['tag'].split('_')
        variant = subexp_parts[-1]
        if variant == 'consensus' :
            variant = 'wt'
        subexp = '_'.join(subexp_parts[:-1])
        sitetype = 'UTR3'
        clinvar_id = 'Missing'
        in_acmg = False
        significance = 'Missing'
        predicted_usage = round(float(experiment_dict['predicted_usage']), 4)
        predicted_logodds = round(float(experiment_dict['predicted_logodds']), 4)
    elif experiment in ['acmg_apadb', 'acmg_polyadb', 'clinvar_mut', 'sensitive_genes'] :
        # Gene-centric variant panels; normalize the variant label.
        experiment_dict = str_to_dict(experiment_dict)
        subexp = row['gene']
        variant = experiment_dict['variant']
        if variant == 'mutant' :
            variant = 'snv'
        if variant == 'wildtype' :
            variant = 'wt'
        sitetype = 'Missing'
        clinvar_id = 'Missing'
        in_acmg = True
        # Only the two acmg_* panels are actually ACMG genes.
        if experiment in ['clinvar_mut', 'sensitive_genes'] :
            in_acmg = False
        significance = 'Missing'
    elif experiment in ['clinvar_wt'] :
        # Wildtype ClinVar background sequences.
        experiment_dict = str_to_dict(experiment_dict)
        subexp = row['gene']
        variant = 'wt'
        sitetype = experiment_dict['var_region']
        clinvar_id = 'Missing'
        in_acmg = experiment_dict['in_acmg']
        # Convert the serialized 'Yes'/other flag to a boolean.
        if in_acmg == 'Yes' :
            in_acmg = True
        else :
            in_acmg = False
        significance = 'Missing'
    elif experiment in ['human variant'] :
        # Colon-delimited record (not a dict string); fields by position.
        subexp = row['gene']
        exp_parts = experiment_dict.split(':')
        subexp = exp_parts[0]
        sitetype = exp_parts[1]
        variant = exp_parts[2]
        if variant == 'SNP' :
            variant = 'snv'
        variant = variant.lower()
        in_acmg = exp_parts[3]
        if in_acmg == 'Yes' :
            in_acmg = True
        else :
            in_acmg = False
        significance = exp_parts[5]
        # The ClinVar id itself may contain ':'; rejoin the tail fields.
        clinvar_id = ':'.join(exp_parts[9:])
    elif experiment in ['intronic_pas'] :
        subexp = row['gene']
        variant = 'wt'
        sitetype = 'Intron'
        clinvar_id = 'Missing'
        in_acmg = False
        significance = 'Missing'
    elif experiment in ['tgta'] :
        # TGTA insertion series: keep the raw record in clinvar_id and parse
        # only the leading key (insertion count n) from the dict string.
        clinvar_id = experiment_dict
        experiment_dict = str_to_dict(experiment_dict.split(',')[0] + '}')
        subexp = 'n=' + experiment_dict['n']
        variant = 'tgta'
        sitetype = 'UTR3'
        in_acmg = False
        significance = 'Missing'
    else :
        print('ERROR! Unmapped experiment: ' + str(experiment))
        break
    emitted_subexperiment.append(subexp)
    emitted_variant.append(variant)
    emitted_sitetype.append(sitetype)
    emitted_clinvar_id.append(clinvar_id)
    emitted_in_acmg.append(in_acmg)
    emitted_significance.append(significance)
    emitted_predicted_usage.append(predicted_usage)
    emitted_predicted_logodds.append(predicted_logodds)
# Overwrite / add the structured annotation columns.
df['subexperiment'] = emitted_subexperiment
df['variant'] = emitted_variant
df['sitetype'] = emitted_sitetype
df['clinvar_id'] = emitted_clinvar_id
df['in_acmg'] = emitted_in_acmg
df['significance'] = emitted_significance
df['predicted_usage'] = emitted_predicted_usage
df['predicted_logodds'] = emitted_predicted_logodds
# Build, per (experiment, subexperiment), the list of unique wildtype
# master sequences that variants may be mapped back to.
exp_subexp_wt_dict = {}
exp_subexp_wt_visited = {}
# wt_mapping[exp] = list of experiments whose candidate pools a wt sequence
# from `exp` should also be registered in (cross-panel sharing).
wt_mapping = {}
unique_experiments = df['experiment'].unique()
for experiment in unique_experiments :
    wt_mapping[experiment] = [experiment]
wt_mapping['clinvar_wt'] = wt_mapping['clinvar_wt'] + ['clinvar_mut', 'human variant', 'sensitive_genes', 'acmg_apadb', 'acmg_polyadb']
wt_mapping['acmg_apadb'] = wt_mapping['acmg_apadb'] + ['acmg_polyadb', 'human variant', 'sensitive_genes']
wt_mapping['acmg_polyadb'] = wt_mapping['acmg_polyadb'] + ['acmg_apadb', 'human variant', 'sensitive_genes']
for index, row in df.iterrows() :
    master_seq = row['master_seq']
    varexp = row['experiment']
    subexp = row['subexperiment']
    for exp in wt_mapping[varexp] :
        if exp not in exp_subexp_wt_dict :
            exp_subexp_wt_dict[exp] = {}
            exp_subexp_wt_visited[exp] = {}
        if subexp not in exp_subexp_wt_dict[exp] :
            exp_subexp_wt_dict[exp][subexp] = []
            exp_subexp_wt_visited[exp][subexp] = {}
        # Register each wt sequence once per (exp, subexp) pool; the
        # 'visited' dict deduplicates in O(1).
        if row['variant'] == 'wt' and master_seq not in exp_subexp_wt_visited[exp][subexp] :
            exp_subexp_wt_dict[exp][subexp].append(master_seq)
            exp_subexp_wt_visited[exp][subexp][master_seq] = True
def hamming_distance(seq1, seq2) :
    """Return the number of mismatched positions between two sequences.

    Iterates over seq1's positions and compares against seq2 by index, so
    seq2 must be at least as long as seq1.
    """
    mismatches = 0
    for pos, base in enumerate(seq1) :
        if base != seq2[pos] :
            mismatches += 1
    return mismatches
#Map variants to wt sequences
# For each row, find the closest wildtype sequence in its (exp, subexp)
# candidate pool; accept it only within a variant-type-specific distance.
mapped_wt_seqs = []
for index, row in df.iterrows() :
    if index % 100000 == 0:
        print("Read up to sequence: " + str(index))
    master_seq = row['master_seq']
    exp = row['experiment']
    subexp = row['subexperiment']
    candidate_wt_seqs = exp_subexp_wt_dict[exp][subexp]
    wt_seq = 'Unmapped'
    if row['variant'] == 'wt' :
        # Wildtype rows map to themselves.
        wt_seq = master_seq
    else :
        # SNVs must match within 1 mismatch; other variants within 50.
        hamming_limit = 50
        if row['variant'] == 'snv' :
            hamming_limit = 1
        #Do hamming search (limited linear scan)
        min_hamming_wt_dist = 1000
        min_hamming_wt_seq = 'Unmapped'
        for candidate_wt_seq in candidate_wt_seqs :
            hamming_dist = hamming_distance(master_seq, candidate_wt_seq)
            if hamming_dist < min_hamming_wt_dist :
                min_hamming_wt_dist = hamming_dist
                min_hamming_wt_seq = candidate_wt_seq
        if min_hamming_wt_dist <= hamming_limit :
            wt_seq = min_hamming_wt_seq
    mapped_wt_seqs.append(wt_seq)
df['wt_seq'] = mapped_wt_seqs
def get_snv_position(row) :
    """Return the 0-based position where an SNV row differs from its wt seq.

    Returns -1 for rows that are not SNVs or are unmapped. Also returns -1
    (instead of the original implicit None) when the variant and wildtype
    sequences are identical, so the resulting column stays integer-valued.
    """
    var_seq = row['master_seq']
    ref_seq = row['wt_seq']
    if ref_seq == 'Unmapped' or row['variant'] != 'snv' :
        return -1
    for i in range(0, len(var_seq)) :
        if var_seq[i] != ref_seq[i] :
            return i
    # FIX: fell off the end and returned None when no position differed.
    return -1
# Annotate each row with the SNV position relative to its mapped wt seq.
df['snv_pos'] = df.apply(get_snv_position, axis=1)
#Store dataframe and cut matrices
library_version = 'final2_mapped'
df.to_csv(library_name + '_' + library_version + '.csv', header=True, index=False, sep=',')
# Report, per experiment, how many rows failed to map to a wt sequence.
unique_experiments = df['experiment'].unique()
unmapped_count_dict = {}
total_count_dict = {}
for experiment in unique_experiments :
    total_count_dict[experiment] = len(df.query("experiment == '" + experiment + "'"))
    unmapped_count_dict[experiment] = len(df.query("experiment == '" + experiment + "' and wt_seq == 'Unmapped'"))
    print(experiment)
    print(' Unmapped count = ' + str(unmapped_count_dict[experiment]))
    print(' Total count = ' + str(total_count_dict[experiment]))
    print(' Unmapped fraction = ' + str(round(float(unmapped_count_dict[experiment]) / float(total_count_dict[experiment]), 2)))
library_name = 'array_noacut_score_50'
library_version = 'final2'
#Read dataframe and cut matrices
folder_path = 'processed_data/' + library_version + '/'
df = pd.read_csv(folder_path + library_name + '_' + library_version + '_mapped.csv', delimiter=',').reset_index(drop=True)
cuts = spio.loadmat(folder_path + library_name + '_' + library_version + '_cuts.mat')['cuts']
print(len(df))
#Estimate observed measures per barcode
# Proximal-site window [77, 107) in cut-matrix coordinates.
pPas_cut_start = 77
pPas_cut_end = 107#105#125
# Reads landing inside the proximal window, per row.
pPas_count = np.ravel(sp.csc_matrix(cuts)[:, pPas_cut_start:pPas_cut_end].sum(axis=1))
pseudo_count = 0.0
df['pPas_count'] = pPas_count
# Proximal usage fraction and its logit; 0 or 1 usage yields -inf/+inf,
# flagged below so downstream filtering can drop those rows.
df['pPas_usage'] = (df['pPas_count'] + pseudo_count) / (df['total_count'] + pseudo_count)
df['pPas_logodds'] = np.log(df['pPas_usage'] / (1.0 - df['pPas_usage']))
df['pPas_logodds_isnan'] = df['pPas_logodds'].apply(np.isnan)
df['pPas_logodds_isinf'] = df['pPas_logodds'].apply(np.isinf)
# Normalize the windowed cut counts per row and take the expected (average)
# cut position within the window.
pPas_cuts = sp.csr_matrix(sp.csc_matrix(cuts)[:, pPas_cut_start:pPas_cut_end]) / sp.csc_matrix(cuts)[:, pPas_cut_start:pPas_cut_end].sum(axis=1)
df['pPas_avg_cut'] = np.ravel(np.sum(pPas_cuts * np.arange(pPas_cuts.shape[1]).reshape((-1, 1)), axis=1))
def filter_dataframe(df, cuts, total_count_filter=100, drop_nans=True) :
    """Keep rows with enough reads and, optionally, finite proximal log-odds.

    The rows of `cuts` are subset with the same indices as the dataframe so
    the two stay aligned; the returned dataframe gets a fresh 0..n-1 index.
    """
    clauses = ["total_count >= " + str(total_count_filter)]
    if drop_nans :
        clauses.append("pPas_logodds_isnan == False")
        clauses.append("pPas_logodds_isinf == False")
    kept_df = df.query(" and ".join(clauses)).copy()
    kept_rows = np.ravel(kept_df.index)
    kept_cuts = cuts[kept_rows, :]
    kept_df = kept_df.reset_index(drop=True)
    print('Dataframe size = ' + str(len(kept_df)))
    return kept_df, kept_cuts
def aggregate_dataframe(df, cuts, groupby_list) :
    """Pool per-barcode rows into per-group rows (one per groupby key).

    Adds pooled counts/usage/log-odds and per-group dispersion statistics to
    a copy of `df`, keeps the first row of each group as its representative,
    and re-aggregates the cut matrix accordingly.

    Returns (grouped_df, pooled_counts, pooled_cleavage, mean_cleavage) with
    the three matrices as CSR sparse matrices, row-aligned with grouped_df.
    """
    print('Aggregating with groupby = ' + str(groupby_list))
    pseudo_count = 0.0
    df_copy = df.copy().reset_index(drop=True)
    df_group = df_copy.groupby(groupby_list)
    # Group-level pooled counts, broadcast back onto every member row.
    df_copy['n_barcodes'] = df_group['barcode'].transform( lambda x : x.count() )
    df_copy['pooled_pPas_count'] = df_group['pPas_count'].transform( lambda x : x.sum() )
    df_copy['pooled_proximal_count'] = df_group['proximal_count'].transform( lambda x : x.sum() )
    df_copy['pooled_distal_count'] = df_group['distal_count'].transform( lambda x : x.sum() )
    df_copy['pooled_total_count'] = df_group['total_count'].transform( lambda x : x.sum() )
    df_copy['pooled_pPas_usage'] = (df_copy['pooled_pPas_count'] + pseudo_count) / (df_copy['pooled_total_count'] + pseudo_count)
    df_copy['pooled_pPas_logodds'] = np.log(df_copy['pooled_pPas_usage'] / (1.0 - df_copy['pooled_pPas_usage']))
    # Squared deviation of each barcode from its group's pooled estimate;
    # summed per group below to form a sample standard deviation.
    df_copy['std_pooled_pPas_usage'] = (df_copy['pooled_pPas_usage'] - df_copy['pPas_usage'])**2
    df_copy['std_pooled_pPas_logodds'] = (df_copy['pooled_pPas_logodds'] - df_copy['pPas_logodds'])**2
    # Re-group: the new columns must be visible to the next transforms.
    df_group = df_copy.groupby(groupby_list)
    df_copy['std_pooled_pPas_usage'] = df_group['std_pooled_pPas_usage'].transform( lambda x : x.sum() )
    df_copy['std_pooled_pPas_usage'] = np.sqrt(df_copy['std_pooled_pPas_usage'] / (df_copy['n_barcodes'] - 1.))
    df_copy['std_pooled_pPas_logodds'] = df_group['std_pooled_pPas_logodds'].transform( lambda x : x.sum() )
    df_copy['std_pooled_pPas_logodds'] = np.sqrt(df_copy['std_pooled_pPas_logodds'] / (df_copy['n_barcodes'] - 1.))
    # Plain per-group means and standard deviations of the barcode measures.
    df_copy['avg_pPas_count'] = df_group['pPas_count'].transform( lambda x : x.mean() )
    df_copy['avg_total_count'] = df_group['total_count'].transform( lambda x : x.mean() )
    df_copy['avg_pPas_usage'] = df_group['pPas_usage'].transform( lambda x : x.mean() )
    df_copy['avg_pPas_logodds'] = df_group['pPas_logodds'].transform( lambda x : x.mean() )
    df_copy['avg_pPas_avg_cut'] = df_group['pPas_avg_cut'].transform( lambda x : x.mean() )
    df_copy['std_pPas_usage'] = df_group['pPas_usage'].transform( lambda x : x.std() )
    df_copy['std_pPas_logodds'] = df_group['pPas_logodds'].transform( lambda x : x.std() )
    df_copy['std_pPas_avg_cut'] = df_group['pPas_avg_cut'].transform( lambda x : x.std() )
    print('Calculated statistics.')
    #Create grouped dataframe
    # Keep the first-seen row of each group key; picked_keys assigns each
    # key its row index in the grouped output.
    picked_keys = {}
    keep_index = []
    for index, row in df_copy.iterrows() :
        row_key = ''
        for key_part in groupby_list :
            row_key += '_' + str(row[key_part])
        if row_key not in picked_keys :
            picked_keys[row_key] = len(picked_keys)
            keep_index.append(index)
    df_grouped = df_copy.loc[keep_index].copy().reset_index(drop=True)
    # Per-barcode columns are meaningless after pooling; drop them.
    drop_columns = ['barcode', 'pPas_count', 'total_count', 'pPas_usage', 'pPas_logodds', 'pPas_avg_cut']
    if 'array_version' not in groupby_list :
        drop_columns.append('array_version')
    df_grouped = df_grouped.drop(columns=drop_columns)
    print('Prepared grouped dataframe.')
    #Aggregate cut matrices
    cuts_dense = np.array(cuts.todense())
    # Per-barcode cleavage distribution; the distal count is added to the
    # denominator so rows normalize over proximal + distal reads.
    cleavage = cuts_dense / (np.reshape(np.sum(cuts_dense, axis=1), (-1, 1)) + np.reshape(np.ravel(df_copy['distal_count']), (-1, 1)))
    pooled_counts = np.zeros((len(df_grouped), cuts_dense.shape[1]))
    mean_cleavage = np.zeros((len(df_grouped), cuts_dense.shape[1]))
    n_barcodes = np.reshape(np.ravel(df_grouped['n_barcodes']), (-1, 1))
    # Second pass: accumulate each barcode's counts/distribution into its
    # group's row of the output matrices.
    for index, row in df_copy.iterrows() :
        if index % 100000 == 0 :
            print('Processing cuts for barcode ' + str(index))
        row_key = ''
        for key_part in groupby_list :
            row_key += '_' + str(row[key_part])
        index_grouped = picked_keys[row_key]
        pooled_counts[index_grouped, :] += cuts_dense[index, :]
        mean_cleavage[index_grouped, :] += cleavage[index, :]
    mean_cleavage /= n_barcodes
    pooled_cleavage = pooled_counts / (np.reshape(np.sum(pooled_counts, axis=1), (-1, 1)) + np.reshape(np.ravel(df_grouped['pooled_distal_count']), (-1, 1)))
    print('Re-counted matrices.')
    return df_grouped, sp.csr_matrix(pooled_counts), sp.csr_matrix(pooled_cleavage), sp.csr_matrix(mean_cleavage)
#Multiplex filter and aggregate for different conditions
# Produce every combination of read-count threshold x nan handling, each
# aggregated both per (array_version, master_seq) and per master_seq alone.
for total_count_filter in [100, 10] :
    for drop_nans in [True, False] :
        valid_pPas_df, valid_pPas_cuts = filter_dataframe(df, cuts, total_count_filter=total_count_filter, drop_nans=drop_nans)
        seq_ver_df, seq_ver_pooled_counts, seq_ver_pooled_cleavage, seq_ver_mean_cleavage = aggregate_dataframe(valid_pPas_df, valid_pPas_cuts, ['array_version', 'master_seq'])
        seq_df, seq_pooled_counts, seq_pooled_cleavage, seq_mean_cleavage = aggregate_dataframe(valid_pPas_df, valid_pPas_cuts, ['master_seq'])
        #Store dataframe and cut matrices
        # Condition parameters are encoded in the output file names.
        grouped_version = library_version + '_grouped_seq_ver_count_' + str(total_count_filter) + '_dropnans_' + str(drop_nans)
        seq_ver_df.to_csv('array_' + grouped_version + '.csv', header=True, index=False, sep=',')
        spio.savemat('array_' + grouped_version + '_cuts',
        {
            'pooled_cuts' : seq_ver_pooled_counts,
            'pooled_distribution' : seq_ver_pooled_cleavage,
            'mean_distribution' : seq_ver_mean_cleavage
        })
        grouped_version = library_version + '_grouped_seq_count_' + str(total_count_filter) + '_dropnans_' + str(drop_nans)
        seq_df.to_csv('array_' + grouped_version + '.csv', header=True, index=False, sep=',')
        spio.savemat('array_' + grouped_version + '_cuts',
        {
            'pooled_cuts' : seq_pooled_counts,
            'pooled_distribution' : seq_pooled_cleavage,
            'mean_distribution' : seq_mean_cleavage
        })
library_name = 'array_noacut_score_50'
library_version = 'unfiltered'
#Read dataframe and cut matrices
folder_path = 'processed_data/' + library_version + '/'
df = pd.read_csv(folder_path + library_name + '_' + library_version + '_mapped.csv', delimiter=',').reset_index(drop=True)
print(len(df))
import regex as re
#Filter dataframe and cut matrices
# Mark (rather than drop) rows under several internal-priming stringencies:
# each entry is (column name, fuzzy A-run pattern with {s<=k} allowed
# substitutions). NOTE: thresholds differ from the earlier filtering cell.
misprime_regexes = [
    ('misprime_11_of_12', re.compile(r"(AAAAAAAAAAAA){s<=1}")),
    ('misprime_13_of_16', re.compile(r"(AAAAAAAAAAAAAAAA){s<=3}")),
    ('misprime_16_of_20', re.compile(r"(AAAAAAAAAAAAAAAAAAAA){s<=4}")),
    ('misprime_10_of_12', re.compile(r"(AAAAAAAAAAAA){s<=2}")),
    ('misprime_12_of_16', re.compile(r"(AAAAAAAAAAAAAAAA){s<=4}")),
    ('misprime_15_of_20', re.compile(r"(AAAAAAAAAAAAAAAAAAAA){s<=5}"))
]
misprime_result_dict = {
    'misprime_11_of_12' : [],
    'misprime_13_of_16' : [],
    'misprime_16_of_20' : [],
    'misprime_10_of_12' : [],
    'misprime_12_of_16' : [],
    'misprime_15_of_20' : []
}
for index, row in df.iterrows() :
    if index % 100000 == 0:
        print("Read up to sequence: " + str(index))
    curr_seq = row['seq']
    # NOTE(review): internal_priming is assigned but never read in this
    # loop; every stringency appends its own True/False flag instead.
    internal_priming = False
    for misprime_id, misprime_regex in misprime_regexes :
        if re.search(misprime_regex, curr_seq) :
            misprime_result_dict[misprime_id].append(True)
        else :
            misprime_result_dict[misprime_id].append(False)
# Attach one boolean column per stringency level.
for misprime_id in misprime_result_dict :
    df[misprime_id] = misprime_result_dict[misprime_id]
#Store misprime-marked dataframe and cut matrices
df.to_csv(folder_path + library_name + '_' + library_version + '_misprime_mapped.csv', header=True, index=False, sep=',')
```
| github_jupyter |

<a href="https://hub.callysto.ca/jupyter/hub/user-redirect/git-pull?repo=https%3A%2F%2Fgithub.com%2Fcallysto%2Fcurriculum-notebooks&branch=master&subPath=Mathematics/Transformations/transformations-of-objects-and-shapes.ipynb&depth=1" target="_parent"><img src="https://raw.githubusercontent.com/callysto/curriculum-notebooks/master/open-in-callysto-button.svg?sanitize=true" width="123" height="24" alt="Open in Callysto"/></a>
# Transformations: Positions and Motions of Objects and Shapes
## Instructions - from the "Kernel" menu, select "Restart & Run All" to begin.
```
%%html
<!-- Button that runs every cell below and saves the notebook. -->
<button onclick="run_all()">Run All Cells</button>
<script>
function run_all(){
    // Jupyter's frontend action API: execute all cells below, then save.
    Jupyter.actions.call('jupyter-notebook:run-all-cells-below');
    Jupyter.actions.call('jupyter-notebook:save-notebook');
}
</script>
%%html
<script>
// Toggle visibility of all code input cells; the button label tracks state.
function code_toggle() {
    if (code_shown){
        $('div.input').hide('500');
        $('#toggleButton').val('Show Code')
    } else {
        $('div.input').show('500');
        $('#toggleButton').val('Hide Code')
    }
    code_shown = !code_shown
}
// Start with code hidden.
$( document ).ready(function(){ code_shown=false; $('div.input').hide() });
</script>
<form action="javascript:code_toggle()"><input type="submit" id="toggleButton" value="Show Code"></form>
```
## Introduction
In this notebook we will be describing and analyzing the position and motion of objects and shapes in a 2-Dimensional space. To examine position and motion we will look at the **Translations**, **Reflections** and **Rotations** of objects and shapes in space. **Translations** are the change in position of an object or shape. **Reflections** are the mirror image of an object or shape with some axis on a graph. **Rotations** are an object or shape being rotated around a central point from 0 to 360 degrees.
## Translations: Objects and Shapes in 2D Space
To analyze and describe objects and shapes in 2-Dimensional space we will use a **Coordinate Plane** with <b>X</b> and <b>Y</b> axes ranging from -10 to +10. When we translate an object or shape we are moving its position to another spot on the coordinate plane without changing the actual object or its orientation. We will start off with a few examples showing how the translation of a rectangle works.
When we translate an object we are moving its position on the X and Y axis of the coordinate plane. When we refer to an object we are referring to a collection of points in the coordinate plane. Each point has an X and Y value that defines its position commonly written as (X, Y). To translate an object on the X axis we are adding or subtracting the same amount from every point that makes up an object.
> For example we will look at the point (4,1)
>
> We will translate the point +5 along the X axis and -3 along the Y axis
>
> To translate the point we need to add the amount we are translating to the coordinates of the point.
>
> (4+5,1-3) = (9,-2)
>
> The new coordinates of the translated point are (9,-2)
### Interactive Examples
The coordinate plane below allows you to translate an object's position on the X axis by moving the slider. From the object's current X position you can translate it back or forth by adding the value listed on the slider. Each value on the slider is the amount added to the original X position of the object. The original position of the object will remain highlighted in <span style="color:#00FF00"> green </span> and its translation will be highlighted in <span style="color:#00CED1"> blue</span>.
```
from plotly.offline import init_notebook_mode, iplot, plot
from ipywidgets import HBox
import plotly.graph_objs as go
import numpy as np
import random
from math import radians, cos, sin, sqrt, tan, atan
import ipywidgets as widgets
from IPython.display import Markdown
from IPython.display import HTML, clear_output
init_notebook_mode(connected=True)
# UI state flags: which shape button is currently active.
square_clicked = True
triangle_clicked = False
start = False
# Shape-selector buttons; 'success' (green) marks the active choice.
square_button = widgets.Button(
    description='Square',
    disabled=False,
    button_style='success', # 'success', 'info', 'warning', 'danger' or ''
    tooltip='Click me',
)
triangle_button = widgets.Button(
    description='Triangle',
    disabled=False,
    button_style='', # 'success', 'info', 'warning', 'danger' or ''
    tooltip='Click me',
)
# Label + buttons laid out horizontally above the plot.
text_widget = widgets.HTML("<strong>Object: </strong>")
input_widget = widgets.HBox(children=[text_widget, square_button, triangle_button])
def graph_square():
    """Build the X-axis-translation figure for a square.

    Two overlaid traces share the same initial vertices: the green
    'Original' square stays fixed while the blue 'Translation' copy is
    moved by a slider from -11 to +6 along the X axis. Returns the plotly
    figure dict (note: unlike graph_triangle, this function does not call
    iplot itself).
    """
    data = []
    # Fixed reference square (green), vertices labelled A-D; the trailing
    # repeated vertex closes the outline.
    data.append(dict(
        visible = True,
        line=dict(color='#00FF00', width=3),
        mode = 'lines+markers+text',
        name = 'Original',
        hoverinfo = 'x+y',
        text=['A','B','C','D',''],
        textposition='top left',
        textfont=dict(
            color='#ff0000'
        ),
        x = [1, 1, 4, 4, 1],
        y = [1, 4, 4, 1, 1]))
    # Movable copy (blue); starts on top of the original.
    data.append(dict(
        visible = True,
        line=dict(color='#00CED1', width=3),
        mode = 'lines+markers+text',
        name = 'Translation',
        hoverinfo = 'x+y',
        text=['A','B','C','D',''],
        textposition='top left',
        textfont=dict(
            color='#ff0000'
        ),
        x = [1, 1, 4, 4, 1],
        y = [1, 4, 4, 1, 1]))
    # One slider step per translation value; each restyles only the second
    # trace's x coordinates (step i corresponds to a shift of i-11).
    steps = []
    for i in range(18):
        if((i-11) > 0):
            temp = "+" + str(i-11)
        else:
            temp = str(i-11)
        step = dict(
            method = 'restyle',
            args = [{'x[0]':[1,-10+i],'x[1]':[1,-10+i],'x[2]':[4,-7+i],'x[3]':[4,-7+i],'x[4]':[1,-10+i]}],
            label = temp
        )
        steps.append(step)
    # Slider starts at step 11, i.e. a translation of 0.
    sliders = [dict(
        active = 11,
        currentvalue = {"prefix": "X axis translation: ", },
        pad = {"t": 35},
        steps = steps
    )]
    layout = go.Layout(
        title = 'Translation on the X Axis',
        sliders=sliders,
        showlegend=False,
        hovermode = 'closest',
        yaxis = dict(
            title= '',
            #xanchor = 'left',
            #yanchor = 'top',
            ticklen= 5,
            dtick= 1,
            gridwidth= 2,
            range=[-10,10],
            showgrid=True,
        ),
        xaxis= dict(
            title= '',
            ticklen= 5,
            dtick=1,
            gridwidth= 2,
            range=[-10,10],
            showgrid=True,
        ),
        # Axis captions placed in paper coordinates beside each axis.
        annotations=[
            dict(
                x=1.0,
                y=-0.16,
                showarrow=False,
                text='X Axis',
                xref='paper',
                yref='paper',
                font=dict(
                    size=14,
                ),
            ),
            dict(
                x=-0.06,
                y=1.0,
                showarrow=False,
                text='Y Axis',
                textangle=-90,
                xref='paper',
                yref='paper',
                font=dict(
                    size=14,
                ),
            ),
        ],
        #autosize=False,
        #width=950,
        #height=650,
    )
    fig = dict(data=data, layout=layout)
    # Redraw the shape-selector buttons above the (re)rendered figure.
    clear_output(wait=True)
    display(input_widget)
    #iplot(fig, filename = 'filename')
    return fig
def graph_triangle():
    """Display the X-axis translation figure for the triangle.

    Same structure as graph_square() but for the triangle (1,1)-(2.5,4)-(4,1);
    this function plots the figure itself via iplot() rather than returning it.
    """
    dataT = []
    # Trace 0: the original triangle, never moved.
    dataT.append(dict(
        visible = True,
        line=dict(color='#00FF00', width=3),
        mode = 'lines+markers+text',
        name = 'Original',
        hoverinfo = 'x+y',
        text=['A','B','C',''],
        textposition='top left',
        textfont=dict(
            color='#ff0000'
        ),
        x = [1, 2.5, 4, 1],
        y = [1, 4, 1, 1]))
    # Trace 1: the translated copy, repositioned by the slider.
    dataT.append(dict(
        visible = True,
        line=dict(color='#00CED1', width=3),
        mode = 'lines+markers+text',
        name = 'Translation',
        hoverinfo = 'x+y',
        text=['A','B','C',''],
        textposition='top left',
        textfont=dict(
            color='#ff0000'
        ),
        x = [1, 2.5, 4, 1],
        y = [1, 4, 1, 1]))
    stepsT = []
    # Slider steps for shifts of -11..+6; i == 11 is the zero position.
    for i in range(18):
        if((i-11) > 0):
            temp = "+" + str(i-11)
        else:
            temp = str(i-11)
        step = dict(
            method = 'restyle',
            args = [{'x[0]':[1,-10+i],'x[1]':[2.5,-8.5+i],'x[2]':[4,-7+i],'x[3]':[1,-10+i]}],
            label = temp
        )
        stepsT.append(step)
    slidersT = [dict(
        active = 11,
        currentvalue = {"prefix": "X axis translation: ", },
        pad = {"t": 35},
        steps = stepsT
    )]
    layoutT = go.Layout(
        title = 'Translation on the X Axis',
        sliders=slidersT,
        showlegend=False,
        hovermode = 'closest',
        yaxis = dict(
            title= '',
            ticklen= 5,
            dtick= 1,
            gridwidth= 2,
            range=[-10,10],
            showgrid=True,
        ),
        xaxis= dict(
            title= '',
            ticklen= 5,
            dtick=1,
            gridwidth= 2,
            range=[-10,10],
            showgrid=True,
        ),
        annotations=[
            dict(
                x=1.0,
                y=-0.16,
                showarrow=False,
                text='X Axis',
                xref='paper',
                yref='paper',
                font=dict(
                    size=14,
                ),
            ),
            dict(
                x=-0.06,
                y=1.0,
                showarrow=False,
                text='Y Axis',
                textangle=-90,
                xref='paper',
                yref='paper',
                font=dict(
                    size=14,
                ),
            ),
        ],
        #autosize=False,
        #width=950,
        #height=650,
    )
    figT = dict(data=dataT, layout=layoutT)
    clear_output(wait=True)
    display(input_widget)
    # Plot and embed in ipython notebook!
    iplot(figT, filename='basic-scatter')
def square_update(change):
    """Button callback: switch the X-translation display to the square.

    Bug fix: graph_square() builds and *returns* its figure (its own
    iplot call is commented out), so the returned figure must be plotted
    here — previously the return value was discarded and clicking back
    to 'Square' cleared the output without rendering anything.
    """
    global square_clicked
    global triangle_clicked
    if(not(square_clicked)):
        square_clicked = True
        triangle_clicked = False
        square_button.button_style = 'success'
        triangle_button.button_style = ''
        iplot(graph_square())
def triangle_update(change):
    """Button callback: switch the X-translation display to the triangle."""
    global square_clicked
    global triangle_clicked
    if triangle_clicked:
        return  # triangle already showing; nothing to do
    triangle_clicked, square_clicked = True, False
    square_button.button_style = ''
    triangle_button.button_style = 'success'
    graph_triangle()
# Wire the buttons to their callbacks, then render the initial square plot.
square_button.on_click(square_update)
triangle_button.on_click(triangle_update)
fig = graph_square()
iplot(fig)
```
The coordinate plane below allows you to translate an object's position on the Y axis by moving the slider. From the object's current Y position you can translate it up or down by adding the value listed on the slider. Each value on the slider is the amount added to the original Y position of the object. The original position of the object will remain highlighted in <span style="color:#00FF00"> green </span> and its translation will be highlighted in <span style="color:#00CED1"> blue</span>.
```
# Selection state and widgets for the Y-axis translation example.
square_clicked1 = True
triangle_clicked1 = False
square_button1 = widgets.Button(
    description='Square',
    disabled=False,
    button_style='success', # 'success', 'info', 'warning', 'danger' or ''
    tooltip='Click me',
)
triangle_button1 = widgets.Button(
    description='Triangle',
    disabled=False,
    button_style='', # 'success', 'info', 'warning', 'danger' or ''
    tooltip='Click me',
)
# Label plus the two shape-selection buttons, laid out in one row.
text_widget1 = widgets.HTML("<strong>Object: </strong>")
input_widget1 = widgets.HBox(children=[text_widget1, square_button1, triangle_button1])
def graph_square1():
    """Display the Y-axis translation figure for the square.

    Two traces — the fixed 'Original' square (green) and its 'Translation'
    copy (blue); a slider restyles the copy's y coordinates, shifting it
    vertically by -11..+6 grid units.
    """
    init_notebook_mode(connected=True)
    data1 = []
    # Trace 0: the original square, never moved.
    data1.append(dict(
        visible = True,
        line=dict(color='#00FF00', width=3),
        mode = 'lines+markers+text',
        name = 'Original',
        hoverinfo = 'x+y',
        text=['A','B','C','D',''],
        textposition='top left',
        textfont=dict(
            color='#ff0000'
        ),
        x = [1, 1, 4, 4, 1],
        y = [1, 4, 4, 1, 1]))
    # Trace 1: the translated copy, repositioned by the slider.
    data1.append(dict(
        visible = True,
        line=dict(color='#00CED1', width=3),
        mode = 'lines+markers+text',
        name = 'Translation',
        hoverinfo = 'x+y',
        text=['A','B','C','D',''],
        textposition='top left',
        textfont=dict(
            color='#ff0000'
        ),
        x = [1, 1, 4, 4, 1],
        y = [1, 4, 4, 1, 1]))
    steps1 = []
    # Slider steps for vertical shifts of -11..+6; i == 11 is zero shift.
    for i in range(18):
        if((i-11) > 0):
            temp = "+" + str(i-11)
        else:
            temp = str(i-11)
        step = dict(
            method = 'restyle',
            args = [{'y[0]':[1,-7+i],'y[1]':[4,-10+i],'y[2]':[4,-10+i],'y[3]':[1,-7+i],'y[4]':[1,-7+i]}],
            label = temp
        )
        steps1.append(step)
    sliders1 = [dict(
        active = 11,
        currentvalue = {"prefix": "Y axis translation: ", },
        pad = {"t": 35},
        steps = steps1
    )]
    layout1 = go.Layout(
        title = 'Translation on the Y Axis',
        sliders=sliders1,
        showlegend=False,
        hovermode = 'closest',
        yaxis = dict(
            title= '',
            ticklen= 5,
            dtick= 1,
            gridwidth= 2,
            range=[-10,10],
            showgrid=True,
        ),
        xaxis= dict(
            title= '',
            ticklen= 5,
            dtick=1,
            gridwidth= 2,
            range=[-10,10],
            showgrid=True,
        ),
        annotations=[
            dict(
                x=1.0,
                y=-0.16,
                showarrow=False,
                text='X Axis',
                xref='paper',
                yref='paper',
                font=dict(
                    size=14,
                ),
            ),
            dict(
                x=-0.06,
                y=1.0,
                showarrow=False,
                text='Y Axis',
                textangle=-90,
                xref='paper',
                yref='paper',
                font=dict(
                    size=14,
                ),
            ),
        ],
    )
    fig1 = dict(data=data1, layout=layout1)
    clear_output(wait=True)
    display(input_widget1)
    iplot(fig1, filename='basic-scatter')
def graph_triangle1():
    """Display the Y-axis translation figure for the triangle.

    Same structure as graph_square1() but for the triangle
    (1,1)-(2.5,4)-(4,1).
    """
    init_notebook_mode(connected=True)
    dataT1 = []
    # Trace 0: the original triangle, never moved.
    dataT1.append(dict(
        visible = True,
        line=dict(color='#00FF00', width=3),
        mode = 'lines+markers+text',
        name = 'Original',
        hoverinfo = 'x+y',
        text=['A','B','C',''],
        textposition='top left',
        textfont=dict(
            color='#ff0000'
        ),
        x = [1, 2.5, 4, 1],
        y = [1, 4, 1, 1]))
    # Trace 1: the translated copy, repositioned by the slider.
    dataT1.append(dict(
        visible = True,
        line=dict(color='#00CED1', width=3),
        mode = 'lines+markers+text',
        name = 'Translation',
        hoverinfo = 'x+y',
        text=['A','B','C',''],
        textposition='top left',
        textfont=dict(
            color='#ff0000'
        ),
        x = [1, 2.5, 4, 1],
        y = [1, 4, 1, 1]))
    stepsT1 = []
    # Slider steps for vertical shifts of -11..+6; i == 11 is zero shift.
    for i in range(18):
        if((i-11) > 0):
            temp = "+" + str(i-11)
        else:
            temp = str(i-11)
        step = dict(
            method = 'restyle',
            args = [{'y[0]':[1,-10+i],'y[1]':[4,-7+i],'y[2]':[1,-10+i],'y[3]':[1,-10+i]}],
            label = temp
        )
        stepsT1.append(step)
    slidersT1 = [dict(
        active = 11,
        currentvalue = {"prefix": "Y axis translation: ", },
        pad = {"t": 35},
        steps = stepsT1
    )]
    layoutT1 = go.Layout(
        title = 'Translation on the Y Axis',
        sliders=slidersT1,
        showlegend=False,
        hovermode = 'closest',
        yaxis = dict(
            title= '',
            ticklen= 5,
            dtick= 1,
            gridwidth= 2,
            range=[-10,10],
            showgrid=True,
        ),
        xaxis= dict(
            title= '',
            ticklen= 5,
            dtick=1,
            gridwidth= 2,
            range=[-10,10],
            showgrid=True,
        ),
        annotations=[
            dict(
                x=1.0,
                y=-0.16,
                showarrow=False,
                text='X Axis',
                xref='paper',
                yref='paper',
                font=dict(
                    size=14,
                ),
            ),
            dict(
                x=-0.06,
                y=1.0,
                showarrow=False,
                text='Y Axis',
                textangle=-90,
                xref='paper',
                yref='paper',
                font=dict(
                    size=14,
                ),
            ),
        ],
    )
    figT1 = dict(data=dataT1, layout=layoutT1)
    clear_output(wait=True)
    display(input_widget1)
    iplot(figT1, filename='basic-scatter')
def square_update1(change):
    """Button callback: switch the Y-translation display to the square."""
    global square_clicked1
    global triangle_clicked1
    if square_clicked1:
        return  # square already showing; nothing to do
    square_clicked1, triangle_clicked1 = True, False
    square_button1.button_style = 'success'
    triangle_button1.button_style = ''
    graph_square1()
def triangle_update1(change):
    """Button callback: switch the Y-translation display to the triangle."""
    global square_clicked1
    global triangle_clicked1
    if triangle_clicked1:
        return  # triangle already showing; nothing to do
    triangle_clicked1, square_clicked1 = True, False
    square_button1.button_style = ''
    triangle_button1.button_style = 'success'
    graph_triangle1()
# Wire the buttons to their callbacks, then render the initial square plot.
square_button1.on_click(square_update1)
triangle_button1.on_click(triangle_update1)
graph_square1()
```
The coordinate plane below allows you to translate an object's position on the X and Y axes by moving the two sliders. From the object's original X position you can move it back or forth by adding the value listed on the top slider. From the object's original Y position you can move it up or down by adding the value listed on the bottom slider. The original position of the object will remain highlighted in <span style="color:#00FF00"> green </span> and its translation will be highlighted in <span style="color:#00CED1"> blue</span>.
```
# Selection state and widgets for the two-slider (X and Y) translation example.
square_clicked2 = True
triangle_clicked2 = False
square_button2 = widgets.Button(
    description='Square',
    disabled=False,
    button_style='success', # 'success', 'info', 'warning', 'danger' or ''
    tooltip='Click me',
)
triangle_button2 = widgets.Button(
    description='Triangle',
    disabled=False,
    button_style='', # 'success', 'info', 'warning', 'danger' or ''
    tooltip='Click me',
)
# Label plus the two shape-selection buttons, laid out in one row.
text_widget2 = widgets.HTML("<strong>Object: </strong>")
input_widget2 = widgets.HBox(children=[text_widget2, square_button2, triangle_button2])
def graph_square2():
    """Display the combined X/Y translation figure for the square.

    Like graph_square()/graph_square1() but with two independent sliders:
    the first restyles the translated trace's x coordinates, the second
    its y coordinates, each over a -11..+6 range.
    """
    init_notebook_mode(connected=True)
    data2 = []
    # Trace 0: the original square, never moved.
    data2.append(dict(
        visible = True,
        line=dict(color='#00FF00', width=3),
        mode = 'lines+markers+text',
        name = 'Original',
        hoverinfo = 'x+y',
        text=['A','B','C','D',''],
        textposition='top left',
        textfont=dict(
            color='#ff0000'
        ),
        x = [1, 1, 4, 4, 1],
        y = [1, 4, 4, 1, 1]))
    # Trace 1: the translated copy, repositioned by the sliders.
    data2.append(dict(
        visible = True,
        line=dict(color='#00CED1', width=3),
        mode = 'lines+markers+text',
        name = 'Translation',
        hoverinfo = 'x+y',
        text=['A','B','C','D',''],
        textposition='top left',
        textfont=dict(
            color='#ff0000'
        ),
        x = [1, 1, 4, 4, 1],
        y = [1, 4, 4, 1, 1]))
    # Steps for the X slider (i == 11 is zero shift).
    steps2 = []
    for i in range(18):
        if((i-11) > 0):
            temp = "+" + str(i-11)
        else:
            temp = str(i-11)
        step = dict(
            method = 'restyle',
            args = [{'x[0]':[1,-10+i],'x[1]':[1,-10+i],'x[2]':[4,-7+i],'x[3]':[4,-7+i],'x[4]':[1,-10+i]}],
            label = temp
        )
        steps2.append(step)
    # Steps for the Y slider (i == 11 is zero shift).
    steps3 = []
    for i in range(18):
        if((i-11) > 0):
            temp = "+" + str(i-11)
        else:
            temp = str(i-11)
        step = dict(
            method = 'restyle',
            args = [{'y[0]':[1,-7+i],'y[1]':[4,-10+i],'y[2]':[4,-10+i],'y[3]':[1,-7+i],'y[4]':[1,-7+i]}],
            label = temp
        )
        steps3.append(step)
    sliders2 = [dict(
        active = 11,
        currentvalue = {"prefix": "X axis translation: ", },
        pad = {"t": 35},
        steps = steps2
    )]
    # Second slider sits lower (larger top pad) so the two don't overlap.
    tempSlider = dict(
        active = 11,
        currentvalue = {"prefix": "Y axis translation: ", },
        pad = {"t": 120},
        steps = steps3
    )
    sliders2.append(tempSlider)
    layout2 = go.Layout(
        title = 'Translation on the Coordinate Plane',
        sliders=sliders2,
        showlegend=False,
        hovermode = 'closest',
        yaxis = dict(
            title= '',
            ticklen= 5,
            dtick= 1,
            gridwidth= 2,
            range=[-10,10],
            showgrid=True,
        ),
        xaxis= dict(
            title= '',
            ticklen= 5,
            dtick=1,
            gridwidth= 2,
            range=[-10,10],
            showgrid=True,
        ),
        autosize=False,
        width=950,
        height=650,
        annotations=[
            dict(
                x=1.0,
                y=-0.16,
                showarrow=False,
                text='X Axis',
                xref='paper',
                yref='paper',
                font=dict(
                    size=14,
                ),
            ),
            dict(
                x=-0.06,
                y=1.0,
                showarrow=False,
                text='Y Axis',
                textangle=-90,
                xref='paper',
                yref='paper',
                font=dict(
                    size=14,
                ),
            ),
        ],
    )
    fig2 = dict(data=data2, layout=layout2)
    clear_output(wait=True)
    display(input_widget2)
    iplot(fig2, filename='basic-scatter')
def graph_triangle2():
    """Display the combined X/Y translation figure for the triangle.

    Same structure as graph_square2() but for the triangle
    (1,1)-(2.5,4)-(4,1), with one slider per axis.
    """
    init_notebook_mode(connected=True)
    dataT2 = []
    # Trace 0: the original triangle, never moved.
    dataT2.append(dict(
        visible = True,
        line=dict(color='#00FF00', width=3),
        mode = 'lines+markers+text',
        name = 'Original',
        hoverinfo = 'x+y',
        text=['A','B','C',''],
        textposition='top left',
        textfont=dict(
            color='#ff0000'
        ),
        x = [1, 2.5, 4, 1],
        y = [1, 4, 1, 1]))
    # Trace 1: the translated copy, repositioned by the sliders.
    dataT2.append(dict(
        visible = True,
        line=dict(color='#00CED1', width=3),
        mode = 'lines+markers+text',
        name = 'Translation',
        hoverinfo = 'x+y',
        text=['A','B','C',''],
        textposition='top left',
        textfont=dict(
            color='#ff0000'
        ),
        x = [1, 2.5, 4, 1],
        y = [1, 4, 1, 1]))
    # Steps for the X slider (i == 11 is zero shift).
    stepsT2 = []
    for i in range(18):
        if((i-11) > 0):
            temp = "+" + str(i-11)
        else:
            temp = str(i-11)
        step = dict(
            method = 'restyle',
            args = [{'x[0]':[1,-10+i],'x[1]':[2.5,-8.5+i],'x[2]':[4,-7+i],'x[3]':[1,-10+i]}],
            label = temp
        )
        stepsT2.append(step)
    # Steps for the Y slider (i == 11 is zero shift).
    stepsT3 = []
    for i in range(18):
        if((i-11) > 0):
            temp = "+" + str(i-11)
        else:
            temp = str(i-11)
        step = dict(
            method = 'restyle',
            args = [{'y[0]':[1,-10+i],'y[1]':[4,-7+i],'y[2]':[1,-10+i],'y[3]':[1,-10+i]}],
            label = temp
        )
        stepsT3.append(step)
    slidersT2 = [dict(
        active = 11,
        currentvalue = {"prefix": "X axis translation: ", },
        pad = {"t": 35},
        steps = stepsT2
    )]
    # Second slider sits lower (larger top pad) so the two don't overlap.
    tempSlider = dict(
        active = 11,
        currentvalue = {"prefix": "Y axis translation: ", },
        pad = {"t": 120},
        steps = stepsT3
    )
    slidersT2.append(tempSlider)
    layoutT2 = go.Layout(
        title = 'Translation on the Coordinate Plane',
        sliders=slidersT2,
        showlegend=False,
        hovermode = 'closest',
        yaxis = dict(
            title= '',
            ticklen= 5,
            dtick= 1,
            gridwidth= 2,
            range=[-10,10],
            showgrid=True,
        ),
        xaxis= dict(
            title= '',
            ticklen= 5,
            dtick=1,
            gridwidth= 2,
            range=[-10,10],
            showgrid=True,
        ),
        autosize=False,
        width=950,
        height=650,
        annotations=[
            dict(
                x=1.0,
                y=-0.16,
                showarrow=False,
                text='X Axis',
                xref='paper',
                yref='paper',
                font=dict(
                    size=14,
                ),
            ),
            dict(
                x=-0.06,
                y=1.0,
                showarrow=False,
                text='Y Axis',
                textangle=-90,
                xref='paper',
                yref='paper',
                font=dict(
                    size=14,
                ),
            ),
        ],
    )
    figT2 = dict(data=dataT2, layout=layoutT2)
    clear_output(wait=True)
    display(input_widget2)
    iplot(figT2, filename='basic-scatter')
def square_update2(change):
    """Button callback: switch the two-slider display to the square."""
    global square_clicked2
    global triangle_clicked2
    if square_clicked2:
        return  # square already showing; nothing to do
    square_clicked2, triangle_clicked2 = True, False
    square_button2.button_style = 'success'
    triangle_button2.button_style = ''
    graph_square2()
def triangle_update2(change):
    """Button callback: switch the two-slider display to the triangle."""
    global square_clicked2
    global triangle_clicked2
    if triangle_clicked2:
        return  # triangle already showing; nothing to do
    triangle_clicked2, square_clicked2 = True, False
    square_button2.button_style = ''
    triangle_button2.button_style = 'success'
    graph_triangle2()
# Wire the buttons to their callbacks, then render the initial square plot.
square_button2.on_click(square_update2)
triangle_button2.on_click(triangle_update2)
graph_square2()
```
The coordinate plane below allows you to translate an object's position on the X and Y axes by entering the amount you want to translate it on the inputs below. This interactive example allows you to continually translate the object from its translated position. This means that the original position becomes the previous translated position once new coordinates have been entered. As above the original position of the object will remain highlighted in <span style="color:#00FF00"> green </span> and its translation will be highlighted in <span style="color:#00CED1"> blue</span>.
```
# State and widgets for the free-form (text-input) translation example.
# initialX/initialY hold the shape's *current* vertex coordinates; they are
# updated after every successful translation so moves accumulate.
square_clickedTr = True
triangle_clickedTr = False
initialX = [1, 1, 4, 4, 1]
initialY = [1, 4, 4, 1, 1]
init_notebook_mode(connected=True)
square_buttonTr = widgets.Button(
    description='Square',
    disabled=False,
    button_style='success', # 'success', 'info', 'warning', 'danger' or ''
    tooltip='Click me',
)
triangle_buttonTr = widgets.Button(
    description='Triangle',
    disabled=False,
    button_style='', # 'success', 'info', 'warning', 'danger' or ''
    tooltip='Click me',
)
text_widgetTr = widgets.HTML("<strong>Object: </strong>")
# Free-text inputs for the X and Y offsets (validated in update_graph).
x_translation = widgets.Text(
    value='0',
    placeholder='',
    description='',
    disabled=False
)
y_translation = widgets.Text(
    value='0',
    placeholder='',
    description='',
    disabled=False
)
input_button = widgets.Button(
    description='Translate',
    disabled=False,
    button_style='', # 'success', 'info', 'warning', 'danger' or ''
    tooltip='Click me',
)
# Validation messages are written into this HTML widget.
error_output = widgets.HTML("")
x_text = widgets.HTML("<strong>X Translation: </strong>")
y_text = widgets.HTML("<strong>Y Translation: </strong>")
object_widgetTr = widgets.HBox(children=[text_widgetTr, square_buttonTr, triangle_buttonTr])
submit_output = widgets.HBox(children=[x_text, x_translation, y_text, y_translation, input_button])
input_widgetTr = widgets.VBox(children=[object_widgetTr, submit_output, error_output])
x_translation.layout.width = '100px'
y_translation.layout.width = '100px'
def translationSq(xT,yT,xTo,yTo):
    """Plot the square before and after a translation.

    Parameters:
        xT, yT:   translated vertex coordinates (blue trace).
        xTo, yTo: original vertex coordinates (green trace).
    """
    dataSq = []
    # Trace 0: the shape's previous position.
    dataSq.append(dict(
        visible = True,
        line=dict(color='#00FF00', width=3),
        mode = 'lines+markers+text',
        name = 'Original',
        hoverinfo = 'x+y',
        text=['A','B','C','D',''],
        textposition='top left',
        textfont=dict(
            color='#ff0000'
        ),
        x = xTo,#[1, 2.5, 4, 1],
        y = yTo))#[1, 4, 1, 1]))
    # Trace 1: the shape's new (translated) position.
    dataSq.append(dict(
        visible = True,
        line=dict(color='#00CED1', width=3),
        mode = 'lines+markers+text',
        name = 'Translation',
        hoverinfo = 'x+y',
        text=['A','B','C','D',''],
        textposition='top left',
        textfont=dict(
            color='#ff0000'
        ),
        x = xT,#[1, 2.5, 4, 1],
        y = yT))#[1, 4, 1, 1]))
    layoutSq = go.Layout(
        title = 'Translation on the Coordinate Plane',
        showlegend=False,
        hovermode = 'closest',
        yaxis = dict(
            title= '',
            ticklen= 5,
            dtick= 1,
            gridwidth= 2,
            range=[-10,10],
            showgrid=True,
        ),
        xaxis= dict(
            title= '',
            ticklen= 5,
            dtick=1,
            gridwidth= 2,
            range=[-10,10],
            showgrid=True,
        ),
        autosize=True,
        #width=750,
        #height=725,
        annotations=[
            dict(
                x=1.0,
                y=-0.16,
                showarrow=False,
                text='X Axis',
                xref='paper',
                yref='paper',
                font=dict(
                    size=14,
                ),
            ),
            dict(
                x=-0.06,
                y=1.0,
                showarrow=False,
                text='Y Axis',
                textangle=-90,
                xref='paper',
                yref='paper',
                font=dict(
                    size=14,
                ),
            ),
        ],
    )
    figSq = dict(data=dataSq, layout=layoutSq)
    clear_output(wait=True)
    display(input_widgetTr)
    iplot(figSq, filename = 'filename')
def translationTr(xT,yT,xTo,yTo):
    """Plot the triangle before and after a translation.

    Parameters:
        xT, yT:   translated vertex coordinates (blue trace).
        xTo, yTo: original vertex coordinates (green trace).
    """
    dataTr = []
    # Trace 0: the shape's previous position.
    dataTr.append(dict(
        visible = True,
        line=dict(color='#00FF00', width=3),
        mode = 'lines+markers+text',
        name = 'Original',
        hoverinfo = 'x+y',
        text=['A','B','C',''],
        textposition='top left',
        textfont=dict(
            color='#ff0000'
        ),
        x = xTo,#[1, 2.5, 4, 1],
        y = yTo))#[1, 4, 1, 1]))
    # Trace 1: the shape's new (translated) position.
    dataTr.append(dict(
        visible = True,
        line=dict(color='#00CED1', width=3),
        mode = 'lines+markers+text',
        name = 'Translation',
        hoverinfo = 'x+y',
        text=['A','B','C',''],
        textposition='top left',
        textfont=dict(
            color='#ff0000'
        ),
        x = xT,#[1, 2.5, 4, 1],
        y = yT))#[1, 4, 1, 1]))
    layoutTr = go.Layout(
        title = 'Translation on the Coordinate Plane',
        showlegend=False,
        hovermode = 'closest',
        yaxis = dict(
            title= '',
            ticklen= 5,
            dtick= 1,
            gridwidth= 2,
            range=[-10,10],
            showgrid=True,
        ),
        xaxis= dict(
            title= '',
            ticklen= 5,
            dtick=1,
            gridwidth= 2,
            range=[-10,10],
            showgrid=True,
        ),
        autosize=True,
        #width=750,
        #height=725,
        annotations=[
            dict(
                x=1.0,
                y=-0.16,
                showarrow=False,
                text='X Axis',
                xref='paper',
                yref='paper',
                font=dict(
                    size=14,
                ),
            ),
            dict(
                x=-0.06,
                y=1.0,
                showarrow=False,
                text='Y Axis',
                textangle=-90,
                xref='paper',
                yref='paper',
                font=dict(
                    size=14,
                ),
            ),
        ],
    )
    figTr = dict(data=dataTr, layout=layoutTr)
    clear_output(wait=True)
    display(input_widgetTr)
    iplot(figTr, filename = 'filename')
def update_graph(change):
    """'Translate' button callback.

    Validates the entered X/Y offsets, rejects moves that would push the
    shape outside the plot range [-10, 10], plots the old vs. new
    position, and makes the translated position the new 'original' so
    translations accumulate.
    """
    global initialX
    global initialY
    global square_clickedTr
    global triangle_clickedTr
    error = False
    try:
        translationX = float(x_translation.value)
        translationY = float(y_translation.value)
    except ValueError:
        error_output.value = "Those were not translation numbers. Please enter valid numbers and submit again"
        error = True
        x_translation.value = '0'
        y_translation.value = '0'
    if(not(error)):
        # Bounds check against the plot range. Use max()/min() of the
        # vertex lists rather than hard-coded indices (previously
        # initialX[2]/initialX[0]/initialY[1]/initialY[0]), which only
        # coincidentally held the extreme coordinates for the two shapes
        # defined here; this is correct for any polygon.
        if((max(initialX) + translationX) > 10):
            error_output.value = "The X translation is too high."
            error = True
        elif((min(initialX) + translationX) < -10):
            error_output.value = "The X translation is too low."
            error = True
        elif((max(initialY) + translationY) > 10):
            error_output.value = "The Y translation is too high."
            error = True
        elif((min(initialY) + translationY) < -10):
            error_output.value = "The Y translation is too low."
            error = True
    if(not(error)):
        error_output.value = ""
        # Shift every vertex by the validated offsets.
        update_X = [x + translationX for x in initialX]
        update_Y = [y + translationY for y in initialY]
        if(square_clickedTr):
            translationSq(update_X, update_Y, initialX, initialY)
        elif(triangle_clickedTr):
            translationTr(update_X, update_Y, initialX, initialY)
        # The translated position becomes the new starting position.
        initialX = update_X[:]
        initialY = update_Y[:]
def square_updateTr(change):
    """Button callback: select the square and redraw it at its start position."""
    global square_clickedTr
    global triangle_clickedTr
    global initialX
    global initialY
    if square_clickedTr:
        return  # square already selected; nothing to do
    square_clickedTr, triangle_clickedTr = True, False
    # Reset the shape and the offset inputs to their starting values.
    initialX = [1, 1, 4, 4, 1]
    initialY = [1, 4, 4, 1, 1]
    square_buttonTr.button_style = 'success'
    triangle_buttonTr.button_style = ''
    clear_output(wait=True)
    display(input_widgetTr)
    x_translation.value = '0'
    y_translation.value = '0'
    translationSq(initialX, initialY, initialX, initialY)
def triangle_updateTr(change):
    """Button callback: select the triangle and redraw it at its start position."""
    global square_clickedTr
    global triangle_clickedTr
    global initialX
    global initialY
    if triangle_clickedTr:
        return  # triangle already selected; nothing to do
    triangle_clickedTr, square_clickedTr = True, False
    # Reset the shape and the offset inputs to their starting values.
    initialX = [1, 2.5, 4, 1]
    initialY = [1, 4, 1, 1]
    square_buttonTr.button_style = ''
    triangle_buttonTr.button_style = 'success'
    clear_output(wait=True)
    display(input_widgetTr)
    x_translation.value = '0'
    y_translation.value = '0'
    translationTr(initialX, initialY, initialX, initialY)
# Wire up all three buttons, then render the square at its start position.
square_buttonTr.on_click(square_updateTr)
triangle_buttonTr.on_click(triangle_updateTr)
input_button.on_click(update_graph)
translationSq(initialX, initialY, initialX, initialY)
#input_widgetTr
```
## Reflections: Objects and Shapes in 2D Space
When we reflect an object or shape across an axis we are changing the signs of the coordinate points of the object. If an object is reflected about the X axis we would reverse the signs of all the X coordinates of the object to determine the reflection. You can think of it as multiplying all the X coordinates by -1. Similarly if you reflect an object about the Y axis we would reverse the signs of all the points' Y coordinates to find the reflection.
An object can also be reflected across a line that is not at the origin (X=0 or Y=0). You could have an object being reflected across a line positioned at x = 1, or one positioned at y = 1. If the line is horizontal, similar to the X axis, we will be changing the Y coordinates of the object, and if the line is vertical we will only be changing the X coordinates.
To determine how we change the coordinates of an object when the line of reflection is not at the origin we need to examine the distance between the points and the line. As an example we will look at the point (4,1) and reflect it about the line Y=3.
> When we have a line at Y=3 it will be a line horizontally across going through the point (0,3) as seen in the image below.
>
> When we reflect a point about some line the reflection will be the same distance from the line as the original point. First we need to find the distance the point is from the line. In this example the line is at Y=3 and the point is at (4,1). Since it is a horizontal line only the Y coordinates will change so the distance is then:
>
> 1 - 3 = - 2
>
> Make sure when you find the difference you subtract the line value from the coordinate of the point. This shows us whether the point is above or below the line. In our example we have -2 which means it is below the line and a distance of 2 from the line.
>
> Now that we know the distance from the line we need the reflection to be the same distance from the line but on the other side of it. Adding the distance, 2, to the line gives the Y coordinate of 5, which is the correct coordinate of the reflection. Using the formula below carries out all the steps above without you having to manually go through them.
>
> 3 - (1 - 3) = 3 - (-2) = 5
>
> The correct reflection point is then (4,5)
>
> The formula uses the fact that when you subtract the line value from the point coordinate you get a negative number if the point is below the line and a positive value if it is above the line. Then when we subtract that difference from the line value we get the reflection point coordinate. If the difference was negative the reflection lies on the other side, above the line: subtracting a negative distance from the line value becomes an addition, giving us the correct reflection value. In general the formula would look like:
>
> (Line value) - (Coordinate value - Line value) = Reflection coordinate value
>
> The only thing you would need to determine is whether the X coordinate or the Y coordinate is changing. This depends on whether the line is vertical or horizontal.
>
> <img src="images/newplot.png">
### Interactive Example
The coordinate plane below allows you to reflect the object about a line on the coordinate plane. The two drop down menus allow you to choose a vertical line to reflect the object on and to choose a horizontal line to reflect the object on. The original position of the object will remain highlighted in <span style="color:#00FF00"> green </span> and its reflection will be highlighted in <span style="color:#00CED1"> blue</span>.
```
# Selection state and widgets for the reflection example.
square_clicked3 = True
triangle_clicked3 = False
square_button3 = widgets.Button(
    description='Square',
    disabled=False,
    button_style='success', # 'success', 'info', 'warning', 'danger' or ''
    tooltip='Click me',
)
triangle_button3 = widgets.Button(
    description='Triangle',
    disabled=False,
    button_style='', # 'success', 'info', 'warning', 'danger' or ''
    tooltip='Click me',
)
# Label plus the two shape-selection buttons, laid out in one row.
text_widget3 = widgets.HTML("<strong>Object: </strong>")
input_widget3 = widgets.HBox(children=[text_widget3, square_button3, triangle_button3])
def graph_square3():
    """Display the reflection figure for the square.

    Two traces — the fixed 'Original' square (green) and its 'Reflection'
    (blue) — plus two dropdown menus.  The first dropdown reflects the
    blue trace about a vertical line X = 5..-3 (restyling x coordinates),
    the second about a horizontal line Y = 5..-3 (restyling y coordinates).
    Each dropdown entry's new coordinate is precomputed with the formula
    line - (coordinate - line).
    """
    init_notebook_mode(connected=True)
    data3 = []
    # Trace 0: the original square, never moved.
    data3.append(dict(
        visible = True,
        line=dict(color='#00FF00', width=3),
        mode = 'lines+markers+text',
        name = 'Original',
        hoverinfo = 'x+y',
        text=['A','B','C','D',''],
        textposition='top left',
        textfont=dict(
            color='#ff0000'
        ),
        x = [1, 1, 4, 4, 1],
        y = [1, 4, 4, 1, 1]))
    # Trace 1: the reflected copy, repositioned by the dropdowns.
    data3.append(dict(
        visible = True,
        line=dict(color='#00CED1', width=3),
        mode = 'lines+markers+text',
        name = 'Reflection',
        hoverinfo = 'x+y',
        text=['A','B','C','D',''],
        textposition='top left',
        textfont=dict(
            color='#ff0000'
        ),
        x = [1, 1, 4, 4, 1],
        y = [1, 4, 4, 1, 1]))
    updatemenus=list([
        # Dropdown 1: reflection about a vertical line (changes x's).
        dict(
            buttons=list([
                dict(
                    args=[{'x[0]': [1,1] ,'x[1]':[1,1],'x[2]':[4,4],'x[3]':[4,4],'x[4]':[1,1]}],
                    label='No Horizontal Reflection',
                    method='restyle'
                ),
                dict(
                    args=[{'x[0]':[1,5-(1-5)],'x[1]':[1,5-(1-5)],'x[2]':[4,5-(4-5)],'x[3]':[4,5-(4-5)],'x[4]':[1,5-(1-5)]}],
                    label='Reflection about: X = 5',
                    method='restyle'
                ),
                dict(
                    args=[{'x[0]':[1,4-(1-4)],'x[1]':[1,4-(1-4)],'x[2]':[4,4-(4-4)],'x[3]':[4,4-(4-4)],'x[4]':[1,4-(1-4)]}],
                    label='Reflection about: X = 4',
                    method='restyle'
                ),
                dict(
                    args=[{'x[0]':[1,3-(1-3)],'x[1]':[1,3-(1-3)],'x[2]':[4,3-(4-3)],'x[3]':[4,3-(4-3)],'x[4]':[1,3-(1-3)]}],
                    label='Reflection about: X = 3',
                    method='restyle'
                ),
                dict(
                    args=[{'x[0]':[1,2-(1-2)],'x[1]':[1,2-(1-2)],'x[2]':[4,2-(4-2)],'x[3]':[4,2-(4-2)],'x[4]':[1,2-(1-2)]}],
                    label='Reflection about: X = 2',
                    method='restyle'
                ),
                dict(
                    args=[{'x[0]':[1,1-(1-1)],'x[1]':[1,1-(1-1)],'x[2]':[4,1-(4-1)],'x[3]':[4,1-(4-1)],'x[4]':[1,1-(1-1)]}],
                    label='Reflection about: X = 1',
                    method='restyle'
                ),
                dict(
                    args=[{'x[0]':[1,0-(1-0)],'x[1]':[1,0-(1-0)],'x[2]':[4,0-(4-0)],'x[3]':[4,0-(4-0)],'x[4]':[1,0-(1-0)]}],
                    label='Reflection about: X = 0',
                    method='restyle'
                ),
                dict(
                    args=[{'x[0]':[1,-1-(1+1)],'x[1]':[1,-1-(1+1)],'x[2]':[4,-1-(4+1)],'x[3]':[4,-1-(4+1)],'x[4]':[1,-1-(1+1)]}],
                    label='Reflection about: X = -1',
                    method='restyle'
                ),
                dict(
                    args=[{'x[0]':[1,-2-(1+2)],'x[1]':[1,-2-(1+2)],'x[2]':[4,-2-(4+2)],'x[3]':[4,-2-(4+2)],'x[4]':[1,-2-(1+2)]}],
                    label='Reflection about: X = -2',
                    method='restyle'
                ),
                dict(
                    args=[{'x[0]':[1,-3-(1+3)],'x[1]':[1,-3-(1+3)],'x[2]':[4,-3-(4+3)],'x[3]':[4,-3-(4+3)],'x[4]':[1,-3-(1+3)]}],
                    label='Reflection about: X = -3',
                    method='restyle'
                )
            ]),
            direction = 'down',
            pad = {'r': -60, 't': -50},
            showactive = True,
            x = 0.1,
            y = 1.1,
        ),
        # Dropdown 2: reflection about a horizontal line (changes y's).
        dict(
            buttons=list([
                dict(
                    args=[{'y[0]':[1,1],'y[1]':[4,4],'y[2]':[4,4],'y[3]':[1,1],'y[4]':[1,1]}],
                    label='No Vertical Reflection',
                    method='restyle'
                ),
                dict(
                    args=[{'y[0]':[1,5-(1-5)],'y[1]':[4,5-(4-5)],'y[2]':[4,5-(4-5)],'y[3]':[1,5-(1-5)],'y[4]':[1,5-(1-5)]}],
                    label='Reflection about: Y = 5',
                    method='restyle'
                ),
                dict(
                    args=[{'y[0]':[1,4-(1-4)],'y[1]':[4,4-(4-4)],'y[2]':[4,4-(4-4)],'y[3]':[1,4-(1-4)],'y[4]':[1,4-(1-4)]}],
                    label='Reflection about: Y = 4',
                    method='restyle'
                ),
                dict(
                    args=[{'y[0]':[1,3-(1-3)],'y[1]':[4,3-(4-3)],'y[2]':[4,3-(4-3)],'y[3]':[1,3-(1-3)],'y[4]':[1,3-(1-3)]}],
                    label='Reflection about: Y = 3',
                    method='restyle'
                ),
                dict(
                    args=[{'y[0]':[1,2-(1-2)],'y[1]':[4,2-(4-2)],'y[2]':[4,2-(4-2)],'y[3]':[1,2-(1-2)],'y[4]':[1,2-(1-2)]}],
                    label='Reflection about: Y = 2',
                    method='restyle'
                ),
                dict(
                    args=[{'y[0]':[1,1-(1-1)],'y[1]':[4,1-(4-1)],'y[2]':[4,1-(4-1)],'y[3]':[1,1-(1-1)],'y[4]':[1,1-(1-1)]}],
                    label='Reflection about: Y = 1',
                    method='restyle'
                ),
                dict(
                    args=[{'y[0]':[1,0-(1-0)],'y[1]':[4,0-(4-0)],'y[2]':[4,0-(4-0)],'y[3]':[1,0-(1-0)],'y[4]':[1,0-(1-0)]}],
                    label='Reflection about: Y = 0',
                    method='restyle'
                ),
                dict(
                    args=[{'y[0]':[1,-1-(1+1)],'y[1]':[4,-1-(4+1)],'y[2]':[4,-1-(4+1)],'y[3]':[1,-1-(1+1)],'y[4]':[1,-1-(1+1)]}],
                    label='Reflection about: Y = -1',
                    method='restyle'
                ),
                dict(
                    args=[{'y[0]':[1,-2-(1+2)],'y[1]':[4,-2-(4+2)],'y[2]':[4,-2-(4+2)],'y[3]':[1,-2-(1+2)],'y[4]':[1,-2-(1+2)]}],
                    label='Reflection about: Y = -2',
                    method='restyle'
                ),
                dict(
                    args=[{'y[0]':[1,-3-(1+3)],'y[1]':[4,-3-(4+3)],'y[2]':[4,-3-(4+3)],'y[3]':[1,-3-(1+3)],'y[4]':[1,-3-(1+3)]}],
                    label='Reflection about: Y = -3',
                    method='restyle'
                )
            ]),
            direction = 'down',
            pad = {'r': -60, 't': -15},
            showactive = True,
            x = 0.1,
            y = 1.1,
        ),
    ])
    layout3 = go.Layout(
        title = 'Reflection on the Coordinate Plane',
        showlegend=False,
        updatemenus = updatemenus,
        hovermode = 'closest',
        yaxis = dict(
            title= '',
            ticklen= 5,
            dtick= 1,
            gridwidth= 2,
            range=[-10,10],
            showgrid=True,
        ),
        xaxis= dict(
            title= '',
            ticklen= 5,
            dtick=1,
            gridwidth= 2,
            range=[-10,10],
            showgrid=True,
        ),
        annotations=[
            dict(
                x=1.0,
                y=-0.16,
                showarrow=False,
                text='X Axis',
                xref='paper',
                yref='paper',
                font=dict(
                    size=14,
                ),
            ),
            dict(
                x=-0.06,
                y=1.0,
                showarrow=False,
                text='Y Axis',
                textangle=-90,
                xref='paper',
                yref='paper',
                font=dict(
                    size=14,
                ),
            ),
        ],
    )
    fig3 = dict(data=data3, layout=layout3)
    clear_output(wait=True)
    display(input_widget3)
    iplot(fig3, filename='basic-scatter')
def graph_triangle3():
    """Display the reflection figure for the triangle.

    Same structure as graph_square3() but for the triangle
    (1,1)-(2.5,4)-(4,1): two dropdowns reflect the blue trace about a
    vertical line X = 5..-3 or a horizontal line Y = 5..-3, with each
    reflected coordinate precomputed as line - (coordinate - line).
    """
    init_notebook_mode(connected=True)
    dataT3 = []
    # Trace 0: the original triangle, never moved.
    dataT3.append(dict(
        visible = True,
        line=dict(color='#00FF00', width=3),
        mode = 'lines+markers+text',
        name = 'Original',
        hoverinfo = 'x+y',
        text=['A','B','C',''],
        textposition='top left',
        textfont=dict(
            color='#ff0000'
        ),
        x = [1, 2.5, 4, 1],
        y = [1, 4, 1, 1]))
    # Trace 1: the reflected copy, repositioned by the dropdowns.
    dataT3.append(dict(
        visible = True,
        line=dict(color='#00CED1', width=3),
        mode = 'lines+markers+text',
        name = 'Reflection',
        hoverinfo = 'x+y',
        text=['A','B','C',''],
        textposition='top left',
        textfont=dict(
            color='#ff0000'
        ),
        x = [1, 2.5, 4, 1],
        y = [1, 4, 1, 1]))
    updatemenusT=list([
        # Dropdown 1: reflection about a vertical line (changes x's).
        dict(
            buttons=list([
                dict(
                    args=[{'x[0]': [1,1] ,'x[1]':[2.5,2.5],'x[2]':[4,4],'x[3]':[1,1]}],
                    label='No Horizontal Reflection',
                    method='restyle'
                ),
                dict(
                    args=[{'x[0]':[1,5-(1-5)],'x[1]':[2.5,5-(2.5-5)],'x[2]':[4,5-(4-5)],'x[3]':[1,5-(1-5)]}],
                    label='Reflection about: X = 5',
                    method='restyle'
                ),
                dict(
                    args=[{'x[0]':[1,4-(1-4)],'x[1]':[2.5,4-(2.5-4)],'x[2]':[4,4-(4-4)],'x[3]':[1,4-(1-4)]}],
                    label='Reflection about: X = 4',
                    method='restyle'
                ),
                dict(
                    args=[{'x[0]':[1,3-(1-3)],'x[1]':[2.5,3-(2.5-3)],'x[2]':[4,3-(4-3)],'x[3]':[1,3-(1-3)]}],
                    label='Reflection about: X = 3',
                    method='restyle'
                ),
                dict(
                    args=[{'x[0]':[1,2-(1-2)],'x[1]':[2.5,2-(2.5-2)],'x[2]':[4,2-(4-2)],'x[3]':[1,2-(1-2)]}],
                    label='Reflection about: X = 2',
                    method='restyle'
                ),
                dict(
                    args=[{'x[0]':[1,1-(1-1)],'x[1]':[2.5,1-(2.5-1)],'x[2]':[4,1-(4-1)],'x[3]':[1,1-(1-1)]}],
                    label='Reflection about: X = 1',
                    method='restyle'
                ),
                dict(
                    args=[{'x[0]':[1,0-(1-0)],'x[1]':[2.5,0-(2.5-0)],'x[2]':[4,0-(4-0)],'x[3]':[1,0-(1-0)]}],
                    label='Reflection about: X = 0',
                    method='restyle'
                ),
                dict(
                    args=[{'x[0]':[1,-1-(1+1)],'x[1]':[2.5,-1-(2.5+1)],'x[2]':[4,-1-(4+1)],'x[3]':[1,-1-(1+1)]}],
                    label='Reflection about: X = -1',
                    method='restyle'
                ),
                dict(
                    args=[{'x[0]':[1,-2-(1+2)],'x[1]':[2.5,-2-(2.5+2)],'x[2]':[4,-2-(4+2)],'x[3]':[1,-2-(1+2)]}],
                    label='Reflection about: X = -2',
                    method='restyle'
                ),
                dict(
                    args=[{'x[0]':[1,-3-(1+3)],'x[1]':[2.5,-3-(2.5+3)],'x[2]':[4,-3-(4+3)],'x[3]':[1,-3-(1+3)]}],
                    label='Reflection about: X = -3',
                    method='restyle'
                )
            ]),
            direction = 'down',
            pad = {'r': -60, 't': -50},
            showactive = True,
            x = 0.1,
            y = 1.1,
        ),
        # Dropdown 2: reflection about a horizontal line (changes y's).
        dict(
            buttons=list([
                dict(
                    args=[{'y[0]':[1,1],'y[1]':[4,4],'y[2]':[1,1],'y[3]':[1,1]}],
                    label='No Vertical Reflection',
                    method='restyle'
                ),
                dict(
                    args=[{'y[0]':[1,5-(1-5)],'y[1]':[4,5-(4-5)],'y[2]':[1,5-(1-5)],'y[3]':[1,5-(1-5)]}],
                    label='Reflection about: Y = 5',
                    method='restyle'
                ),
                dict(
                    args=[{'y[0]':[1,4-(1-4)],'y[1]':[4,4-(4-4)],'y[2]':[1,4-(1-4)],'y[3]':[1,4-(1-4)]}],
                    label='Reflection about: Y = 4',
                    method='restyle'
                ),
                dict(
                    args=[{'y[0]':[1,3-(1-3)],'y[1]':[4,3-(4-3)],'y[2]':[1,3-(1-3)],'y[3]':[1,3-(1-3)]}],
                    label='Reflection about: Y = 3',
                    method='restyle'
                ),
                dict(
                    args=[{'y[0]':[1,2-(1-2)],'y[1]':[4,2-(4-2)],'y[2]':[1,2-(1-2)],'y[3]':[1,2-(1-2)]}],
                    label='Reflection about: Y = 2',
                    method='restyle'
                ),
                dict(
                    args=[{'y[0]':[1,1-(1-1)],'y[1]':[4,1-(4-1)],'y[2]':[1,1-(1-1)],'y[3]':[1,1-(1-1)]}],
                    label='Reflection about: Y = 1',
                    method='restyle'
                ),
                dict(
                    args=[{'y[0]':[1,0-(1-0)],'y[1]':[4,0-(4-0)],'y[2]':[1,0-(1-0)],'y[3]':[1,0-(1-0)]}],
                    label='Reflection about: Y = 0',
                    method='restyle'
                ),
                dict(
                    args=[{'y[0]':[1,-1-(1+1)],'y[1]':[4,-1-(4+1)],'y[2]':[1,-1-(1+1)],'y[3]':[1,-1-(1+1)]}],
                    label='Reflection about: Y = -1',
                    method='restyle'
                ),
                dict(
                    args=[{'y[0]':[1,-2-(1+2)],'y[1]':[4,-2-(4+2)],'y[2]':[1,-2-(1+2)],'y[3]':[1,-2-(1+2)]}],
                    label='Reflection about: Y = -2',
                    method='restyle'
                ),
                dict(
                    args=[{'y[0]':[1,-3-(1+3)],'y[1]':[4,-3-(4+3)],'y[2]':[1,-3-(1+3)],'y[3]':[1,-3-(1+3)]}],
                    label='Reflection about: Y = -3',
                    method='restyle'
                )
            ]),
            direction = 'down',
            pad = {'r': -60, 't': -15},
            showactive = True,
            x = 0.1,
            y = 1.1,
        ),
    ])
    layoutT3 = go.Layout(
        title = 'Reflection on the Coordinate Plane',
        showlegend=False,
        updatemenus = updatemenusT,
        hovermode = 'closest',
        yaxis = dict(
            title= '',
            ticklen= 5,
            dtick= 1,
            gridwidth= 2,
            range=[-10,10],
            showgrid=True,
        ),
        xaxis= dict(
            title= '',
            ticklen= 5,
            dtick=1,
            gridwidth= 2,
            range=[-10,10],
            showgrid=True,
        ),
        annotations=[
            dict(
                x=1.0,
                y=-0.16,
                showarrow=False,
                text='X Axis',
                xref='paper',
                yref='paper',
                font=dict(
                    size=14,
                ),
            ),
            dict(
                x=-0.06,
                y=1.0,
                showarrow=False,
                text='Y Axis',
                textangle=-90,
                xref='paper',
                yref='paper',
                font=dict(
                    size=14,
                ),
            ),
        ],
    )
    figT3 = dict(data=dataT3, layout=layoutT3)
    clear_output(wait=True)
    display(input_widget3)
    iplot(figT3, filename='basic-scatter')
def square_update3(change):
    """Button callback: switch the reflection demo to the square shape.

    Does nothing when the square is already the active shape; otherwise
    flips the selection flags, updates the button highlighting and redraws.
    """
    global square_clicked3, triangle_clicked3
    if square_clicked3:
        return
    square_clicked3 = True
    triangle_clicked3 = False
    square_button3.button_style = 'success'
    triangle_button3.button_style = ''
    graph_square3()
def triangle_update3(change):
    """Button callback: switch the reflection demo to the triangle shape.

    Does nothing when the triangle is already the active shape; otherwise
    flips the selection flags, updates the button highlighting and redraws.
    """
    global square_clicked3, triangle_clicked3
    if triangle_clicked3:
        return
    triangle_clicked3 = True
    square_clicked3 = False
    square_button3.button_style = ''
    triangle_button3.button_style = 'success'
    graph_triangle3()
# Wire the shape-selection buttons to their callbacks and draw the
# initial figure (the square is selected by default).
square_button3.on_click(square_update3)
triangle_button3.on_click(triangle_update3)
graph_square3()
```
## Rotations: Objects and Shapes in 2D Space
When we rotate an object or shape we are changing its coordinates based on its rotation around some point. The point an object is rotated around could be some point on the object, the origin or some random point on the coordinate plane. In this notebook the object can be rotated around a point on the object, a point at the center of the object and at the origin. The angles of rotations to be looked at will be 90°, 180°, 270° and 360° both clockwise and counter-clockwise. Other angles and points to rotate around are beyond the scope of this notebook.
### 90° Rotation
We will start off by showing how to rotate a point by 90° using the origin as the point of rotation. To rotate a point counter-clockwise by 90° you need to swap the current (X,Y) coordinates with each other and then multiply the new X coordinate by -1.
> We start off with: (X,Y)
>
> Swap the coordinates: (Y,X)
>
> Then multiply the new X coordinate by -1: (-Y,X)
This will rotate any point on the coordinate plane by 90° counter-clockwise. To rotate a point clockwise by 90° you follow the same method except you multiply the new Y coordinate by -1 instead of the new X coordinate.
> We start off with: (X,Y)
>
> Swap the coordinates: (Y,X)
>
> Then multiply the new Y coordinate by -1: (Y,-X)
As stated above following this method will rotate any point on the coordinate plane by 90° clockwise. Let us go through an example of a 90° clockwise and counter-clockwise rotation of the point (8,3) about the origin.
> For a clockwise rotation:
>
> Original coordinates: (8,3) <br>
> Swap coordinates: (3,8) <br>
> Multiply by -1 on the Y coordinate: (3,-8) <br>
>
> This point matches the graph below for a 90° rotation clockwise.
> For a counter-clockwise rotation:
>
> Original coordinates: (8,3) <br>
> Swap coordinates: (3,8) <br>
> Multiply by -1 on the X coordinate: (-3,8) <br>
>
> This point matches the graph below for a 90° rotation counter-clockwise.
You can see on the graph below that this method does work clockwise and counter-clockwise as described above.
<img src="images/rotationimage.png" width="600" height="600">
From the graph above it can be seen that you can rotate a point by 90° and then rotate the new point by 90° and you would get a 180° rotation from the original point. If you continue rotating the new points you will eventually rotate 360° and be back at the original point of 0°.
### 180°, 270° and 360° Rotation
You can gather from the section above that the 180°, 270° and 360° rotations are going to be using the same method in the 90° section. For 180° you will rotate the original point by 90° and then you will rotate the new point by 90° to get the coordinates for the point rotated by 180°. For 270° you will follow the same process as in the 180° but will do one more 90° rotation on the 180° point. If we wanted to do the whole 360° we would do one more 90° rotation. This process works for clockwise and counter-clockwise rotations.
You should notice here that we are using the same 90° operation multiple times to find the various rotations. This can be seen on the graph above that the point has been rotated by 90° 4 times. Using the original point as (8,3) the counter-clockwise rotations of 90°, 180°, 270° and 360° are listed on the same graph below.
<img src="images/rotationimage2.png" width="600" height="600">
### Non-Origin Point of Rotation
The 90° rotation method works for any point on the coordinate plane using the origin as the point of rotation. When we have a point of rotation that is not at the origin this method won't work unless we adjust the coordinate points.
To adjust the points so the method works we need to translate the point of rotation to the origin and then do the same translation to all other points being rotated.
For example if our point of rotation was at (1,1) we would need to translate all the points horizontally and vertically by -1 for it to be at (0,0). Now we could use the method described above to rotate the points. Once all the points have been rotated we would then do the opposite translation by translating all the points horizontally and vertically by +1 so all the points are back to their original positions.
### Example
We will go through an example below rotating a triangle around one of its points at the position (1,1).
> We start off with the triangle in its original position.
>
> <img src="images/image1.png" width="500" height="500">
>
> For the 90° rotation method to work we need to translate the point of rotation to the origin. Since we are rotating around the point (1,1) we will need to translate the triangle by -1 on the X and Y axes.
>
> <img src="images/image2.png" width="500" height="500">
>
> Once the point of rotation is at the origin the other points of the triangle can be rotated using the 90° rotation method. Rotating the points by 90° gives us the new coordinate points below.
>
> <img src="images/image3.png" width="500" height="500">
>
> As you can see on the graph above the lines connecting the points are different colors so you can see each rotation done is in fact 90°. If we rotate the new triangle position by 90° we will get the coordinates for the triangle as if it had been rotated by 180° from the original position.
>
> <img src="images/image4.png" width="500" height="500">
>
> Now if we rotate the 180° triangle position by 90° we will get the coordinates of the original triangle being rotated by 270°. Another 90° rotation would bring the original rotation to 360° which is the same as not rotating the triangle at all.
>
> <img src="images/image5.png" width="500" height="500">
>
> From the graph above you can see we have the rotations for the triangle of 90°, 180°, 270° and 360° clockwise and counter-clockwise. As an exercise you can test out the 90° method on the triangle points and compare them to the graph. The only thing we have left to do is to translate the triangle points back to its original position. We started off translating the points by -1 on the X and Y axes so we will need to do the opposite and translate the points by +1 on the X and Y axes.
>
> <img src="images/image6.png" width="500" height="500">
>
> We now have the positions of the various rotations for the triangle in their actual positions. In this example we showed the 90°, 180° and 270° rotations and then translated the points back to the original positions. As a note you don't have to do all the rotations of the figure before you translate it back to the original position. You could do a 90° rotation clockwise or counter-clockwise and translate the points back.
### Interactive Example
In the interactive example below you can choose the **Object**, **Rotation Direction** and the **Rotation Point** (the point you want to rotate the object around). Your choices for the rotation point can be seen on the graph below and can be chosen from dropdown menu. You can choose to rotate the object around the origin, a point on the object or the center of the object. Once you have chosen the rotation point you can adjust the degrees of rotation from 0° to 360° clockwise or counter-clockwise using the slider on the plot. As in the examples above the <span style="color:#00FF00"> green </span>object represents the original position of the object and the rotated object will be <span style="color:#00CED1"> blue</span>.
<img style="float: left;" src="images/pointplot2.png" width="600">
```
from plotly.offline import init_notebook_mode, iplot, plot
from ipywidgets import HBox
import plotly.graph_objs as go
import numpy as np
import random
from math import radians, cos, sin, sqrt, tan, atan
import ipywidgets as widgets
from IPython.display import Markdown
from IPython.display import HTML, clear_output
init_notebook_mode(connected=True)
# --- State flags for the rotation demo -------------------------------------
# square_clicked4 / triangle_clicked4: which shape is active (square default).
# clockwise_rotation / counterclockwise_rotation: active rotation direction.
# current_point: the dropdown value last chosen (0 = nothing selected yet).
square_clicked4 = True
triangle_clicked4 = False
clockwise_rotation = True
counterclockwise_rotation = False
current_point = 0
# --- Shape-selection buttons ------------------------------------------------
square_button4 = widgets.Button(
    description='Square',
    disabled=False,
    button_style='success', # 'success', 'info', 'warning', 'danger' or ''
    tooltip='Click me',
)
triangle_button4 = widgets.Button(
    description='Triangle',
    disabled=False,
    button_style='', # 'success', 'info', 'warning', 'danger' or ''
    tooltip='Click me',
)
# Dropdown for the rotation point; the int value is dispatched in rotation()
# / rotationT().  The option list is swapped when the triangle is selected.
rotation_choice = widgets.Dropdown(
    options=[('',0),('Point A', 1), ('Point B', 2), ('Point C', 3), ('Point D', 4), ('Point E', 5), ('Origin', 6)],
    value=0,
    description='',
)
# --- Rotation-direction buttons --------------------------------------------
clockwise_button = widgets.Button(
    description='Clockwise',
    disabled=False,
    button_style='success', # 'success', 'info', 'warning', 'danger' or ''
    tooltip='Click me',
)
counterclockwise_button = widgets.Button(
    description='Counter-Clockwise',
    disabled=False,
    button_style='', # 'success', 'info', 'warning', 'danger' or ''
    tooltip='Click me',
)
# --- Labels and layout: three labelled rows stacked vertically -------------
text_widget4 = widgets.HTML("<strong>Object: </strong>")
text2_widget4 = widgets.HTML("<strong>Rotation Direction: </strong>")
text3_widget4 = widgets.HTML("<strong>Rotation Point: </strong>")
choice_widget4 = widgets.HBox(children=[text_widget4, square_button4, triangle_button4])
rotation_widget4 = widgets.HBox(children=[text2_widget4, clockwise_button, counterclockwise_button])
point_widget4 = widgets.HBox(children=[text3_widget4, rotation_choice])
input_widget4 = widgets.VBox(children=[choice_widget4, rotation_widget4, point_widget4])
rotation_choice.layout.width = '150px'
def rotation(p):
    """Redraw the square-rotation figure for the chosen rotation point.

    p selects the centre of rotation: 1-4 = corners A-D of the square,
    5 = centre of the square, 6 = the origin.  For every 10 degrees a
    slider step is precomputed holding the rotated corner coordinates;
    the slider then drives a Plotly 'update' restyle of trace 1.
    Reads module-level widgets/flags (clockwise_rotation, input_widget4).
    """
    point = p
    data4 = []
    # Trace 0: the original square (green), corners A(1,1) B(1,4) C(4,4) D(4,1).
    data4.append(dict(
        visible = True,
        line=dict(color='#00FF00', width=3),
        mode = 'lines+markers+text',
        name = 'Original',
        hoverinfo = 'x+y',
        text=['A','B','C','D',''],
        textposition='top left',
        textfont=dict(
            color='#ff0000'
        ),
        x = [1, 1, 4, 4, 1],
        y = [1, 4, 4, 1, 1]))
    # Trace 1: the rotated copy (blue); starts coincident with the original.
    data4.append(dict(
        visible = True,
        line=dict(color='#00CED1', width=3),
        mode = 'lines+markers+text',
        name = 'Translation',
        hoverinfo = 'x+y',
        text=['A','B','C','D',''],
        textposition='top left',
        textfont=dict(
            color='#ff0000'
        ),
        x = [1, 1, 4, 4, 1],
        y = [1, 4, 4, 1, 1]))
    temp_degrees = 0
    rotationX = 0
    rotationY = 0
    # (x0,y0)..(x3,y3): rotated positions of corners A..D for the current step.
    x0 = 0
    x1 = 0
    x2 = 0
    x3 = 0
    y0 = 0
    y1 = 0
    y2 = 0
    y3 = 0
    steps4 = []  # clockwise slider steps (0°..360°)
    steps5 = []  # counter-clockwise slider steps (built in reverse)
    # 37 steps of 10°; i=0 corresponds to 360° (i.e. the starting position).
    for i in range(37):
        temp_degrees = radians((36-i)*10)
        # Each branch rewrites every corner in polar form around the chosen
        # centre: centre + (cos/sin of angle+offset) * distance-to-corner.
        # 1.5707963268 is pi/2 (90°), used where atan would divide by zero.
        if(point == 1):
            # Rotate about corner A(1,1); A itself stays fixed (distance 0).
            rotationX = 1
            rotationY = 1
            x0 = rotationX + cos(temp_degrees+1.5707963268)*(sqrt((1-rotationX)**2+(1-rotationY)**2))
            y0 = rotationY + sin(temp_degrees+1.5707963268)*(sqrt((1-rotationX)**2+(1-rotationY)**2))
            x1 = rotationX + cos(temp_degrees+1.5707963268)*(sqrt((1-rotationX)**2+(4-rotationY)**2))
            y1 = rotationY + sin(temp_degrees+1.5707963268)*(sqrt((1-rotationX)**2+(4-rotationY)**2))
            x2 = rotationX + cos(temp_degrees+atan((4-rotationY)/(4-rotationX)))*(sqrt((4-rotationX)**2+(4-rotationY)**2))
            y2 = rotationY + sin(temp_degrees+atan((4-rotationY)/(4-rotationX)))*(sqrt((4-rotationX)**2+(4-rotationY)**2))
            x3 = rotationX + cos(temp_degrees+atan((1-rotationY)/(4-rotationX)))*(sqrt((4-rotationX)**2+(1-rotationY)**2))
            y3 = rotationY + sin(temp_degrees+atan((1-rotationY)/(4-rotationX)))*(sqrt((4-rotationX)**2+(1-rotationY)**2))
        elif(point == 2):
            # Rotate about corner B(1,4).
            rotationX = 1
            rotationY = 4
            x0 = rotationX + cos(temp_degrees-1.5707963268)*(sqrt((1-rotationX)**2+(1-rotationY)**2))
            y0 = rotationY + sin(temp_degrees-1.5707963268)*(sqrt((1-rotationX)**2+(1-rotationY)**2))
            x1 = rotationX + cos(temp_degrees+1.5707963268)*(sqrt((1-rotationX)**2+(4-rotationY)**2))
            y1 = rotationY + sin(temp_degrees+1.5707963268)*(sqrt((1-rotationX)**2+(4-rotationY)**2))
            x2 = rotationX + cos(temp_degrees+atan((4-rotationY)/(4-rotationX)))*(sqrt((4-rotationX)**2+(4-rotationY)**2))
            y2 = rotationY + sin(temp_degrees+atan((4-rotationY)/(4-rotationX)))*(sqrt((4-rotationX)**2+(4-rotationY)**2))
            x3 = rotationX + cos(temp_degrees+atan((1-rotationY)/(4-rotationX)))*(sqrt((4-rotationX)**2+(1-rotationY)**2))
            y3 = rotationY + sin(temp_degrees+atan((1-rotationY)/(4-rotationX)))*(sqrt((4-rotationX)**2+(1-rotationY)**2))
        elif(point == 3):
            # Rotate about corner C(4,4); minus signs flip cos/sin because
            # the other corners lie in the opposite quadrant of the centre.
            rotationX = 4
            rotationY = 4
            x0 = rotationX - cos(temp_degrees+atan((1-rotationY)/(1-rotationX)))*(sqrt((1-rotationX)**2+(1-rotationY)**2))
            y0 = rotationY - sin(temp_degrees+atan((1-rotationY)/(1-rotationX)))*(sqrt((1-rotationX)**2+(1-rotationY)**2))
            x1 = rotationX - cos(temp_degrees+atan((4-rotationY)/(1-rotationX)))*(sqrt((1-rotationX)**2+(4-rotationY)**2))
            y1 = rotationY - sin(temp_degrees+atan((4-rotationY)/(1-rotationX)))*(sqrt((1-rotationX)**2+(4-rotationY)**2))
            x2 = rotationX + cos(temp_degrees+1.5707963268)*(sqrt((4-rotationX)**2+(4-rotationY)**2))
            y2 = rotationY + sin(temp_degrees+1.5707963268)*(sqrt((4-rotationX)**2+(4-rotationY)**2))
            x3 = rotationX + cos(temp_degrees-1.5707963268)*(sqrt((4-rotationX)**2+(1-rotationY)**2))
            y3 = rotationY + sin(temp_degrees-1.5707963268)*(sqrt((4-rotationX)**2+(1-rotationY)**2))
        elif(point == 4):
            # Rotate about corner D(4,1).
            rotationX = 4
            rotationY = 1
            x0 = rotationX - cos(temp_degrees+atan((1-rotationY)/(1-rotationX)))*(sqrt((1-rotationX)**2+(1-rotationY)**2))
            y0 = rotationY - sin(temp_degrees+atan((1-rotationY)/(1-rotationX)))*(sqrt((1-rotationX)**2+(1-rotationY)**2))
            x1 = rotationX - cos(temp_degrees+atan((4-rotationY)/(1-rotationX)))*(sqrt((1-rotationX)**2+(4-rotationY)**2))
            y1 = rotationY - sin(temp_degrees+atan((4-rotationY)/(1-rotationX)))*(sqrt((1-rotationX)**2+(4-rotationY)**2))
            x2 = rotationX + cos(temp_degrees+1.5707963268)*(sqrt((4-rotationX)**2+(4-rotationY)**2))
            y2 = rotationY + sin(temp_degrees+1.5707963268)*(sqrt((4-rotationX)**2+(4-rotationY)**2))
            x3 = rotationX + cos(temp_degrees+1.5707963268)*(sqrt((4-rotationX)**2+(1-rotationY)**2))
            y3 = rotationY + sin(temp_degrees+1.5707963268)*(sqrt((4-rotationX)**2+(1-rotationY)**2))
        elif(point == 5):
            # Rotate about point E, the centre of the square (2.5, 2.5).
            rotationX = (4-1)/2 + 1
            rotationY = (4-1)/2 + 1
            x0 = rotationX - cos(temp_degrees+atan((1-rotationY)/(1-rotationX)))*(sqrt((1-rotationX)**2+(1-rotationY)**2))
            y0 = rotationY - sin(temp_degrees+atan((1-rotationY)/(1-rotationX)))*(sqrt((1-rotationX)**2+(1-rotationY)**2))
            x1 = rotationX - cos(temp_degrees+atan((4-rotationY)/(1-rotationX)))*(sqrt((1-rotationX)**2+(4-rotationY)**2))
            y1 = rotationY - sin(temp_degrees+atan((4-rotationY)/(1-rotationX)))*(sqrt((1-rotationX)**2+(4-rotationY)**2))
            x2 = rotationX + cos(temp_degrees+atan((4-rotationY)/(4-rotationX)))*(sqrt((4-rotationX)**2+(4-rotationY)**2))
            y2 = rotationY + sin(temp_degrees+atan((4-rotationY)/(4-rotationX)))*(sqrt((4-rotationX)**2+(4-rotationY)**2))
            x3 = rotationX + cos(temp_degrees+atan((1-rotationY)/(4-rotationX)))*(sqrt((4-rotationX)**2+(1-rotationY)**2))
            y3 = rotationY + sin(temp_degrees+atan((1-rotationY)/(4-rotationX)))*(sqrt((4-rotationX)**2+(1-rotationY)**2))
        elif(point == 6):
            # Rotate about the origin: first orbit the square's centre (2.5,2.5)
            # around (0,0), then place the corners +/-1.5 about that moved
            # centre and rotate them about it so the square keeps its attitude.
            rotationX = 0
            rotationY = 0
            tempX = rotationX + cos(temp_degrees+atan((2.5-rotationY)/(2.5-rotationX)))*(sqrt((2.5-rotationX)**2+(2.5-rotationY)**2))
            tempY = rotationY + sin(temp_degrees+atan((2.5-rotationY)/(2.5-rotationX)))*(sqrt((2.5-rotationX)**2+(2.5-rotationY)**2))
            x0T = tempX - 1.5
            y0T = tempY - 1.5
            x1T = tempX - 1.5
            y1T = tempY + 1.5
            x2T = tempX + 1.5
            y2T = tempY + 1.5
            x3T = tempX + 1.5
            y3T = tempY - 1.5
            rotationX = tempX
            rotationY = tempY
            x0 = rotationX - cos(temp_degrees+atan((y0T-rotationY)/(x0T-rotationX)))*(sqrt((x0T-rotationX)**2+(y0T-rotationY)**2))
            y0 = rotationY - sin(temp_degrees+atan((y0T-rotationY)/(x0T-rotationX)))*(sqrt((x0T-rotationX)**2+(y0T-rotationY)**2))
            x1 = rotationX - cos(temp_degrees+atan((y1T-rotationY)/(x1T-rotationX)))*(sqrt((x1T-rotationX)**2+(y1T-rotationY)**2))
            y1 = rotationY - sin(temp_degrees+atan((y1T-rotationY)/(x1T-rotationX)))*(sqrt((x1T-rotationX)**2+(y1T-rotationY)**2))
            x2 = rotationX + cos(temp_degrees+atan((y2T-rotationY)/(x2T-rotationX)))*(sqrt((x2T-rotationX)**2+(y2T-rotationY)**2))
            y2 = rotationY + sin(temp_degrees+atan((y2T-rotationY)/(x2T-rotationX)))*(sqrt((x2T-rotationX)**2+(y2T-rotationY)**2))
            x3 = rotationX + cos(temp_degrees+atan((y3T-rotationY)/(x3T-rotationX)))*(sqrt((x3T-rotationX)**2+(y3T-rotationY)**2))
            y3 = rotationY + sin(temp_degrees+atan((y3T-rotationY)/(x3T-rotationX)))*(sqrt((x3T-rotationX)**2+(y3T-rotationY)**2))
        # Slider step: restyle both traces; the first value of each pair is
        # the original corner coordinate, the second the rotated one.
        step = dict(
            method = 'update',
            args = [{'x[0]':[1,x0],'x[1]':[1,x1],'x[2]':[4,x2],'x[3]':[4,x3],'x[4]':[1,x0],'y[0]':[1,y0],'y[1]':[4,y1],'y[2]':[4,y2],'y[3]':[1,y3],'y[4]':[1,y0]}],
            label = str(i*10) + "°"
        )
        steps4.append(step)
        # Same geometry, relabelled and prepended so the slider runs the
        # opposite way for the counter-clockwise direction.
        step2 = dict(
            method = 'update',
            args = [{'x[0]':[1,x0],'x[1]':[1,x1],'x[2]':[4,x2],'x[3]':[4,x3],'x[4]':[1,x0],'y[0]':[1,y0],'y[1]':[4,y1],'y[2]':[4,y2],'y[3]':[1,y3],'y[4]':[1,y0]}],
            label = str((36-i)*10) + "°"
        )
        steps5.insert(0,step2)
    # Choose the slider step list matching the active rotation direction.
    if(clockwise_rotation):
        sliders4 = [dict(
            active = 0,
            currentvalue = {"prefix": "Clockwise Rotation: ", },
            pad = {"t": 35},
            steps = steps4
        )]
    else:
        sliders4 = [dict(
            active = 0,
            currentvalue = {"prefix": "Counter-Clockwise Rotation: ", },
            pad = {"t": 35},
            steps = steps5
        )]
    # Title reflects the selected rotation point.
    title = ''
    if(point == 1):
        title = 'Rotation about Point A'
    elif(point == 2):
        title = 'Rotation about Point B'
    elif(point == 3):
        title = 'Rotation about Point C'
    elif(point == 4):
        title = 'Rotation about Point D'
    elif(point == 5):
        title = 'Rotation about Point E'
    elif(point == 6):
        title = 'Rotation about the Origin'
    layout4 = go.Layout(
        title = title,
        sliders=sliders4,
        showlegend=False,
        hovermode = 'closest',
        yaxis = dict(
            title= '',
            ticklen= 5,
            dtick= 1,
            gridwidth= 2,
            range=[-10,10],
            showgrid=True,
        ),
        xaxis= dict(
            title= '',
            ticklen= 5,
            dtick=1,
            gridwidth= 2,
            range=[-10,10],
            showgrid=True,
        ),
        autosize=True,
        #width=750,
        height=725,
        annotations=[
            dict(
                x=1.0,
                y=-0.10,
                showarrow=False,
                text='X Axis',
                xref='paper',
                yref='paper',
                font=dict(
                    size=14,
                ),
            ),
            dict(
                x=-0.06,
                y=1.0,
                showarrow=False,
                text='Y Axis',
                textangle=-90,
                xref='paper',
                yref='paper',
                font=dict(
                    size=14,
                ),
            ),
        ],
    )
    fig4 = dict(data=data4, layout=layout4)
    # Re-render the whole cell output: helper image, input widgets, figure.
    # if(not(point == 0)):
    clear_output(wait=True)
    display(Markdown("<img src='images/pointplot2.png' align='left'>"))
    display(input_widget4)
    iplot(fig4, filename = 'filename')
def rotationT(p):
    """Redraw the triangle-rotation figure for the chosen rotation point.

    Triangle counterpart of rotation(): p selects the centre of rotation
    (1-3 = vertices A-C, 4 = centre of the triangle, 5 = the origin).
    Builds a slider step per 10° of rotation and renders the figure.
    Reads module-level widgets/flags (clockwise_rotation, input_widget4).
    """
    pointT = p
    dataT4 = []
    # Trace 0: the original triangle (green), vertices A(1,1) B(2.5,4) C(4,1).
    dataT4.append(dict(
        visible = True,
        line=dict(color='#00FF00', width=3),
        mode = 'lines+markers+text',
        name = 'Original',
        hoverinfo = 'x+y',
        text=['A','B','C',''],
        textposition='top left',
        textfont=dict(
            color='#ff0000'
        ),
        x = [1, 2.5, 4, 1],
        y = [1, 4, 1, 1]))
    # Trace 1: the rotated copy (blue); starts coincident with the original.
    dataT4.append(dict(
        visible = True,
        line=dict(color='#00CED1', width=3),
        mode = 'lines+markers+text',
        name = 'Rotation',
        hoverinfo = 'x+y',
        text=['A','B','C',''],
        textposition='top left',
        textfont=dict(
            color='#ff0000'
        ),
        x = [1, 2.5, 4, 1],
        y = [1, 4, 1, 1]))
    temp_degrees = 0
    rotationX = 0
    rotationY = 0
    # (xT0,yT0)..(xT2,yT2): rotated positions of vertices A..C per step.
    xT0 = 0
    xT1 = 0
    xT2 = 0
    yT0 = 0
    yT1 = 0
    yT2 = 0
    stepsT4 = []  # clockwise slider steps
    stepsT5 = []  # counter-clockwise slider steps (built in reverse)
    # 37 steps of 10°; each vertex is rewritten in polar form around the
    # chosen centre.  1.5707963268 is pi/2, used where atan would blow up.
    for i in range(37):
        temp_degrees = radians((36-i)*10)
        if(pointT == 1):
            # Rotate about vertex A(1,1).
            rotationX = 1
            rotationY = 1
            xT0 = rotationX + cos(temp_degrees+1.5707963268)*(sqrt((1-rotationX)**2+(1-rotationY)**2))
            yT0 = rotationY + sin(temp_degrees+1.5707963268)*(sqrt((1-rotationX)**2+(1-rotationY)**2))
            xT1 = rotationX + cos(temp_degrees+atan((4-rotationY)/(2.5-rotationX)))*(sqrt((2.5-rotationX)**2+(4-rotationY)**2))
            yT1 = rotationY + sin(temp_degrees+atan((4-rotationY)/(2.5-rotationX)))*(sqrt((2.5-rotationX)**2+(4-rotationY)**2))
            xT2 = rotationX + cos(temp_degrees+atan((1-rotationY)/(4-rotationX)))*(sqrt((4-rotationX)**2+(1-rotationY)**2))
            yT2 = rotationY + sin(temp_degrees+atan((1-rotationY)/(4-rotationX)))*(sqrt((4-rotationX)**2+(1-rotationY)**2))
        elif(pointT == 2):
            # Rotate about vertex B(2.5,4).
            rotationX = 2.5
            rotationY = 4
            xT0 = rotationX - cos(temp_degrees+atan((1-rotationY)/(1-rotationX)))*(sqrt((1-rotationX)**2+(1-rotationY)**2))
            yT0 = rotationY - sin(temp_degrees+atan((1-rotationY)/(1-rotationX)))*(sqrt((1-rotationX)**2+(1-rotationY)**2))
            xT1 = rotationX + cos(temp_degrees+1.5707963268)*(sqrt((2.5-rotationX)**2+(4-rotationY)**2))
            yT1 = rotationY + sin(temp_degrees+1.5707963268)*(sqrt((2.5-rotationX)**2+(4-rotationY)**2))
            xT2 = rotationX + cos(temp_degrees+atan((1-rotationY)/(4-rotationX)))*(sqrt((4-rotationX)**2+(1-rotationY)**2))
            yT2 = rotationY + sin(temp_degrees+atan((1-rotationY)/(4-rotationX)))*(sqrt((4-rotationX)**2+(1-rotationY)**2))
        elif(pointT == 3):
            # Rotate about vertex C(4,1).
            rotationX = 4
            rotationY = 1
            xT0 = rotationX - cos(temp_degrees+atan((1-rotationY)/(1-rotationX)))*(sqrt((1-rotationX)**2+(1-rotationY)**2))
            yT0 = rotationY - sin(temp_degrees+atan((1-rotationY)/(1-rotationX)))*(sqrt((1-rotationX)**2+(1-rotationY)**2))
            xT1 = rotationX - cos(temp_degrees+atan((4-rotationY)/(2.5-rotationX)))*(sqrt((2.5-rotationX)**2+(4-rotationY)**2))
            yT1 = rotationY - sin(temp_degrees+atan((4-rotationY)/(2.5-rotationX)))*(sqrt((2.5-rotationX)**2+(4-rotationY)**2))
            xT2 = rotationX + cos(temp_degrees+1.5707963268)*(sqrt((4-rotationX)**2+(1-rotationY)**2))
            yT2 = rotationY + sin(temp_degrees+1.5707963268)*(sqrt((4-rotationX)**2+(1-rotationY)**2))
        elif(pointT == 4):
            # Rotate about point D, treated as the triangle's centre (2.5,2.5).
            rotationX = (4-1)/2 + 1
            rotationY = (4-1)/2 + 1
            xT0 = rotationX - cos(temp_degrees+atan((1-rotationY)/(1-rotationX)))*(sqrt((1-rotationX)**2+(1-rotationY)**2))
            yT0 = rotationY - sin(temp_degrees+atan((1-rotationY)/(1-rotationX)))*(sqrt((1-rotationX)**2+(1-rotationY)**2))
            xT1 = rotationX + cos(temp_degrees+1.5707963268)*(sqrt((2.5-rotationX)**2+(4-rotationY)**2))
            yT1 = rotationY + sin(temp_degrees+1.5707963268)*(sqrt((2.5-rotationX)**2+(4-rotationY)**2))
            xT2 = rotationX + cos(temp_degrees+atan((1-rotationY)/(4-rotationX)))*(sqrt((4-rotationX)**2+(1-rotationY)**2))
            yT2 = rotationY + sin(temp_degrees+atan((1-rotationY)/(4-rotationX)))*(sqrt((4-rotationX)**2+(1-rotationY)**2))
        elif(pointT == 5):
            # Rotate about the origin: orbit the reference point (2.5,2.5)
            # around (0,0), rebuild the vertices about that moved point, then
            # rotate them about it so the triangle keeps its attitude.
            rotationX = 0
            rotationY = 0
            tempTX = rotationX + cos(temp_degrees+atan((2.5-rotationY)/(2.5-rotationX)))*(sqrt((2.5-rotationX)**2+(2.5-rotationY)**2))
            tempTY = rotationY + sin(temp_degrees+atan((2.5-rotationY)/(2.5-rotationX)))*(sqrt((2.5-rotationX)**2+(2.5-rotationY)**2))
            xT0T = tempTX - 1.5
            yT0T = tempTY - 1.5
            xT1T = tempTX
            yT1T = tempTY + 1.5
            xT2T = tempTX + 1.5
            yT2T = tempTY - 1.5
            rotationX = tempTX
            rotationY = tempTY
            xT0 = rotationX - cos(temp_degrees+atan((yT0T-rotationY)/(xT0T-rotationX)))*(sqrt((xT0T-rotationX)**2+(yT0T-rotationY)**2))
            yT0 = rotationY - sin(temp_degrees+atan((yT0T-rotationY)/(xT0T-rotationX)))*(sqrt((xT0T-rotationX)**2+(yT0T-rotationY)**2))
            xT1 = rotationX + cos(temp_degrees+1.5707963268)*(sqrt((xT1T-rotationX)**2+(yT1T-rotationY)**2))
            yT1 = rotationY + sin(temp_degrees+1.5707963268)*(sqrt((xT1T-rotationX)**2+(yT1T-rotationY)**2))
            xT2 = rotationX + cos(temp_degrees+atan((yT2T-rotationY)/(xT2T-rotationX)))*(sqrt((xT2T-rotationX)**2+(yT2T-rotationY)**2))
            yT2 = rotationY + sin(temp_degrees+atan((yT2T-rotationY)/(xT2T-rotationX)))*(sqrt((xT2T-rotationX)**2+(yT2T-rotationY)**2))
        # Slider step: first value of each pair is the original vertex
        # coordinate, the second the rotated one.
        step = dict(
            method = 'update',
            args = [{'x[0]':[1,xT0],'x[1]':[2.5,xT1],'x[2]':[4,xT2],'x[3]':[1,xT0],'y[0]':[1,yT0],'y[1]':[4,yT1],'y[2]':[1,yT2],'y[3]':[1,yT0]}],
            label = str(i*10) + "°"
        )
        stepsT4.append(step)
        # Same geometry relabelled/prepended for the counter-clockwise slider.
        step2 = dict(
            method = 'update',
            args = [{'x[0]':[1,xT0],'x[1]':[2.5,xT1],'x[2]':[4,xT2],'x[3]':[1,xT0],'y[0]':[1,yT0],'y[1]':[4,yT1],'y[2]':[1,yT2],'y[3]':[1,yT0]}],
            label = str((36-i)*10) + "°"
        )
        stepsT5.insert(0,step2)
    # Choose the slider step list matching the active rotation direction.
    if(clockwise_rotation):
        slidersT4 = [dict(
            active = 0,
            currentvalue = {"prefix": "Clockwise Rotation: ", },
            pad = {"t": 35},
            steps = stepsT4
        )]
    else:
        slidersT4 = [dict(
            active = 0,
            currentvalue = {"prefix": "Counter-Clockwise Rotation: ", },
            pad = {"t": 35},
            steps = stepsT5
        )]
    # Title reflects the selected rotation point.
    title = ''
    if(pointT == 1):
        title = 'Rotation about Point A'
    elif(pointT == 2):
        title = 'Rotation about Point B'
    elif(pointT == 3):
        title = 'Rotation about Point C'
    elif(pointT == 4):
        title = 'Rotation about Point D'
    elif(pointT == 5):
        title = 'Rotation about the Origin'
    layoutT4 = go.Layout(
        title = title,
        sliders=slidersT4,
        showlegend=False,
        hovermode = 'closest',
        yaxis = dict(
            title= '',
            ticklen= 5,
            dtick= 1,
            gridwidth= 2,
            range=[-10,10],
            showgrid=True,
        ),
        xaxis= dict(
            title= '',
            ticklen= 5,
            dtick=1,
            gridwidth= 2,
            range=[-10,10],
            showgrid=True,
        ),
        autosize=True,
        #width=750,
        height=725,
        annotations=[
            dict(
                x=1.0,
                y=-0.10,
                showarrow=False,
                text='X Axis',
                xref='paper',
                yref='paper',
                font=dict(
                    size=14,
                ),
            ),
            dict(
                x=-0.06,
                y=1.0,
                showarrow=False,
                text='Y Axis',
                textangle=-90,
                xref='paper',
                yref='paper',
                font=dict(
                    size=14,
                ),
            ),
        ],
    )
    figT4 = dict(data=dataT4, layout=layoutT4)
    # Re-render the whole cell output: helper image, input widgets, figure.
    # if(not(pointT == 0)):
    clear_output(wait=True)
    display(Markdown("<img src='images/pointplot4.png' align='left'>"))
    display(input_widget4)
    iplot(figT4, filename = 'filename')
def graph_rotation(change):
    """Dropdown callback: remember the chosen rotation point and redraw.

    Dispatches to the square or triangle renderer depending on which
    shape button is currently active.
    """
    global current_point
    current_point = change.new
    if square_clicked4:
        rotation(current_point)
    elif triangle_clicked4:
        rotationT(current_point)
def square_update4(change):
    """Button callback: switch the rotation demo to the square shape.

    Restores the six-entry rotation-point dropdown, resets the selection,
    and redraws the instruction image plus input widgets.  No-op when the
    square is already selected.
    """
    global square_clicked4, triangle_clicked4
    if square_clicked4:
        return
    square_clicked4 = True
    triangle_clicked4 = False
    square_button4.button_style = 'success'
    triangle_button4.button_style = ''
    rotation_choice.options = [('',0),('Point A', 1), ('Point B', 2), ('Point C', 3), ('Point D', 4), ('Point E', 5), ('Origin', 6)]
    rotation_choice.value = 0
    clear_output(wait=True)
    display(Markdown("<img src='images/pointplot2.png' align='left'>"))
    display(input_widget4)
def triangle_update4(change):
    """Button callback: switch the rotation demo to the triangle shape.

    Swaps the dropdown to the five triangle rotation points, resets the
    selection, and redraws the instruction image plus input widgets.
    No-op when the triangle is already selected.
    """
    global square_clicked4
    global triangle_clicked4
    if(not(triangle_clicked4)):
        triangle_clicked4 = True
        square_clicked4 = False
        square_button4.button_style = ''
        triangle_button4.button_style = 'success'
        # FIX: was `rotation_choice.options = options=[...]`, a chained
        # assignment that also bound a stray local `options`; assign directly.
        rotation_choice.options = [('',0),('Point A', 1), ('Point B', 2), ('Point C', 3), ('Point D', 4), ('Origin', 5)]
        rotation_choice.value = 0
        clear_output(wait=True)
        display(Markdown("<img src='images/pointplot4.png' align='left'>"))
        display(input_widget4)
def clockwise_update(change):
    """Button callback: select the clockwise rotation direction.

    No-op when clockwise is already active; otherwise updates the flags
    and button styling, then redraws the currently selected shape.
    """
    global clockwise_rotation
    global counterclockwise_rotation
    global current_point
    if clockwise_rotation:
        return
    clockwise_rotation = True
    counterclockwise_rotation = False
    clockwise_button.button_style = "success"
    counterclockwise_button.button_style = ""
    clear_output(wait=True)
    if square_clicked4:
        img, redraw = "<img src='images/pointplot2.png' align='left'>", rotation
    else:
        img, redraw = "<img src='images/pointplot4.png' align='left'>", rotationT
    display(Markdown(img))
    if current_point == 0:
        # Nothing picked yet, so the renderer will not display the widgets.
        display(input_widget4)
    redraw(current_point)
def counterclockwise_update(change):
    """Button callback: select the counter-clockwise rotation direction.

    No-op when counter-clockwise is already active; otherwise updates the
    flags and button styling, then redraws the currently selected shape.
    """
    global clockwise_rotation
    global counterclockwise_rotation
    global current_point
    if counterclockwise_rotation:
        return
    clockwise_rotation = False
    counterclockwise_rotation = True
    clockwise_button.button_style = ""
    counterclockwise_button.button_style = "success"
    clear_output(wait=True)
    if square_clicked4:
        img, redraw = "<img src='images/pointplot2.png' align='left'>", rotation
    else:
        img, redraw = "<img src='images/pointplot4.png' align='left'>", rotationT
    display(Markdown(img))
    if current_point == 0:
        # Nothing picked yet, so the renderer will not display the widgets.
        display(input_widget4)
    redraw(current_point)
# Wire up all rotation-demo callbacks, then render the initial state:
# input widgets plus an empty figure (rotation(0) matches no point branch).
square_button4.on_click(square_update4)
triangle_button4.on_click(triangle_update4)
clockwise_button.on_click(clockwise_update)
counterclockwise_button.on_click(counterclockwise_update)
rotation_choice.observe(graph_rotation, names='value')
clear_output(wait=True)
#display(Markdown("<img src='images/pointplot2.png' align='left'>"))
display(input_widget4)
rotation(0)
```
## Conclusion
In this notebook we have covered the topic of transformations of an object in a 2-Dimensional coordinate plane. We examined how to do **Translations**, **Reflections** and **Rotations** of objects on a coordinate plane and provided multiple interactive examples for the reader. Once the reader has gone through each section and looked at all the interactive examples they will have a good understanding of transformations on a 2-D coordinate plane and will be able to do the exercises in the Exercises - Transformations of Objects and Shapes notebook. In future courses and notebooks the reader will be introduced to more complex translations, reflections and rotations in both 2-D and 3-D coordinate planes.
[](https://github.com/callysto/curriculum-notebooks/blob/master/LICENSE.md)
| github_jupyter |
## THIS IS THE FIRST NOTEBOOK FOR THE FINAL PROJECT
```
# A. downloads data from Oanda (2. Connect to the Oanda API and download data)
# and saves to an HDF5 file (3. Store dataseries fulldata with HDF5) called
# 'data.h5'
# B. to avoid having to download data from Oanda start running notebook
# from number (4. start with data saved from HDF5 file) which accesses
# the data previously saved in (2. Connect to the Oanda API and download data).
# C. calculates features (5. Calculate features for data) - RSI and MACD using
# functions and then...
# D. calculates lagged variables (6. Creates lagged data for the features RSI,
# MACD and Returns) for RSI, MACD and Returns
# E. (7. Create a short and long term momentum signal) also creates two
# momentum features (short and long term)
# F. Finally all the data and features are saved in a file 'data_
# features.h5' for later use in the backtesting notebooks (8. store the
# data and features in an HDF5 file 'data_features.h5')
```
## 1. Imports
```
import sys
sys.path.insert(0, '/root/')
from tpqoa import tpqoa
import numpy as np
import pandas as pd
import tables as tb
import tstables as tstb
%matplotlib inline
import matplotlib.pyplot as plt
plt.style.use('seaborn')
```
## 2. Connect to the Oanda API and download data
```
# connects to Oanda and saves 4 months (jan 18 - Apr 18) of
# AUDUSD data concatenates the four months into a single pandas
# dataseries called "fulldata"
# NOTE: each month is pulled separately because the API limits the number
# of candles per request; granularity 'M10' = 10-minute bars, price 'A' = ask.
oanda = tpqoa('/root/pyalgo.cfg')
instrument_1 = 'AUD_USD'
month1 = oanda.get_history(instrument_1,
                           start = '2017-12-31',
                           end = '2018-2-1',
                           granularity = 'M10',
                           price = 'A')
month2 = oanda.get_history(instrument_1,
                           start = '2018-2-1',
                           end = '2018-3-1',
                           granularity ='M10',
                           price = 'A')
month3 = oanda.get_history(instrument_1,
                           start = '2018-3-1',
                           end = '2018-3-31',
                           granularity = 'M10',
                           price = 'A')
month4 = oanda.get_history(instrument_1,
                           start = '2018-3-31',
                           end = '2018-4-30',
                           granularity = 'M10',
                           price = 'A')
# Keep only the close ('c') column of each month and stitch them together
# chronologically into one Series.
month1cls = month1['c']
month2cls = month2['c']
month3cls = month3['c']
month4cls = month4['c']
seg_data = [month1cls, month2cls, month3cls, month4cls]
fulldata = pd.concat(seg_data)
```
# 3. Store dataseries 'fulldata' with HDF5
```
# writes the pandas dataseries fulldata to an HDF5 file
h5 = pd.HDFStore('/root/pyalgocourse/final_project/data.h5','w')
h5['data'] = fulldata
h5  # notebook echo of the store's contents
# IPython shell escape (not valid plain Python): list the file just written.
ls -n data.*
h5.close()
```
## 4. Start with data saved from HDF5 file
```
# to avoid calling the data from Oanda the notebook can
# be run from here using data previously stored in HDF5 file
# in number (3) above
import pandas as pd
# Open read-only, copy the stored close-price series out, and close promptly.
h5 = pd.HDFStore('/root/pyalgocourse/final_project/data.h5','r')
fulldata_copy = h5['data']
h5.close()
# Quick sanity plot of the loaded series.
fulldata_copy.plot();
```
## 5. Calculate features for data
```
# A function to calculate the RSI (Relative Strength Index).
def relative_strength(data, n = 21):
    """Compute the Wilder-style Relative Strength Index (RSI) of a price series.

    Parameters
    ----------
    data : pd.Series
        Price series in chronological order.
    n : int
        Smoothing period (default 21).

    Returns
    -------
    pd.DataFrame
        Single 'RSI' column (0-100), indexed like `data`.
    """
    abchange = (data - data.shift(1)) # absolute bar-to-bar change; NaN at position 0
    # Seed the up/down averages from the first n changes (positions 1..n);
    # the leading NaN is excluded by the comparisons below.
    rsperiod = abchange.iloc[:n + 1]
    upday = rsperiod[rsperiod >= 0].sum() / n # average up-move in the seed window
    dnday = -rsperiod[rsperiod < 0].sum() / n # average down-move in the seed window
    rs = upday / dnday # up-move / down-move ratio
    # FIX: build a float buffer explicitly; np.zeros_like(data) on an integer
    # series would silently truncate the RSI values.
    rsi = np.zeros(len(data))
    rsi[:n] = 100. - (100. / ( 1. + rs)) # RSI formula
    # Wilder smoothing for the remaining bars.
    for i in range(n, len(data)):
        # FIX: use bar i's own change, positionally.  The original read
        # abchange[i - 1], which re-used the previous bar's change (already
        # counted in the seed window, and never consuming the final bar) and
        # relied on label-based indexing that breaks on a datetime index.
        abchg = abchange.iloc[i]
        if abchg > 0:
            upval = abchg
            dnval = 0
        else:
            upval = 0
            dnval = abs(abchg)
        # Fold each new change proportionally into the running averages.
        upday = (upday * (n - 1) + upval) / n
        dnday = (dnday * (n - 1) + dnval) / n
        rs = upday / dnday
        rsi[i] = 100. - (100. / ( 1. + rs))
    # FIX: index the result from `data` itself instead of reaching out to the
    # module-level `fulldata_copy` global, so the function is self-contained.
    return pd.DataFrame(rsi, index=data.index, columns=['RSI'])
# run function 'relative_strength' to create RSI values
# for the dataseries, then promote the Series to a DataFrame and attach
# the RSI as a second, renamed column.
rsi = relative_strength(fulldata_copy)
fulldata_copy = pd.DataFrame(fulldata_copy)
fulldata_copy['RSI'] = rsi['RSI']
fulldata_copy.columns = ['AUDUSD','RSI']
fulldata_copy.head()
# A function to calculate the MACD (Moving Average Convergence/Divergence) indicator.
def macd(data, slow = 26, fast = 12, signal = 9):
# calculate respective fast and slow exponential moving
# averages (ema)
ema_fast = data.ewm(span = fast).mean()
ema_slow = data.ewm(span = slow).mean()
# MACD line is slow m.a. minus fast m.a.
macd_line = ema_slow - ema_fast
# signal line is 9 day ema of macd line
sig_line = macd_line.ewm(span = signal).mean()
# macd histogram is the macd line minus the
# signal line
macd_hist = macd_line - sig_line
macd_hist = pd.DataFrame(macd_hist)
macd_hist.columns = ['MACD']
return macd_hist
# run function 'macd' to create macd values
# for the dataseries
# NOTE(review): this rebinds the name `macd` from the function to its
# result, so the function cannot be called again after this line.
macd = macd(fulldata_copy['AUDUSD'])
fulldata_copy['MACD'] = macd['MACD']
fulldata_copy.head()
# calculate the daily log returns for the dataseries
fulldata_copy['Returns'] = np.log(fulldata_copy['AUDUSD'] / fulldata_copy['AUDUSD'].shift(1))
# plot AUDUSD vs its RSI Index
fulldata_copy[['AUDUSD','RSI']][:500].plot(figsize=(10,6),secondary_y = 'RSI');
# Plot AUDUSD vs its MACD Histogram (plotted as a line)
fulldata_copy[['AUDUSD','MACD']][:500].plot(figsize=(10,6),secondary_y = 'MACD');
```
## 6. Creates lagged data for the features RSI, MACD and Returns
```
# This cell creates lagged copies of every feature column (everything except
# the raw AUDUSD price): RSI_lag_1, RSI_lag_2, ..., MACD_lag_1, ...
lags = 29
features = fulldata_copy.columns
new_features = features.drop('AUDUSD')
cols = []
for feat in new_features:
    # Use a distinct loop variable: the original reused `lags` as the loop
    # index, shadowing the lag count — it only worked because the loop
    # happened to leave `lags` at 29 again for the next feature.
    for lag in range(1, lags + 1):
        col = '%s_lag_%d' % (feat, lag)
        fulldata_copy[col] = fulldata_copy[feat].shift(lag)
        cols.append(col)
fulldata_copy.tail()
```
## 7. Create a short and long term momentum signal
```
# Create two momentum signals, MOM1 (10-day) and MOM2 (50-day). The signal
# is 1 if the rolling mean return over the look-back window is positive and
# 0 if it is negative; both are shifted one day (presumably to avoid
# look-ahead bias — confirm).
fulldata_copy['MOM1'] = np.where(fulldata_copy['Returns'].rolling(10).mean() > 0, 1, 0)
fulldata_copy['MOM1'] = fulldata_copy['MOM1'].shift(1)
cols.append('MOM1')
fulldata_copy['MOM2'] = np.where(fulldata_copy['Returns'].rolling(50).mean() > 0, 1, 0)
fulldata_copy['MOM2'] = fulldata_copy['MOM2'].shift(1)
cols.append('MOM2')
# Drop the rows made NaN by the lagging/rolling windows above.
fulldata_copy.dropna(inplace = True)
fulldata_copy.head()
```
# 8. store the data and features in an HDF5 file 'data_features.h5'
```
# create a new HDF5 file to store the full dataset plus the lagged
# features that we have calculated.
h6 = pd.HDFStore('/root/pyalgocourse/final_project/data_features.h5','w')
h6['data'] = fulldata_copy
# Bare expression: in a notebook this echoes the store's summary.
h6
# IPython shell alias. NOTE(review): the glob `data.*` does not match
# data_features.h5, so this lists the older data.h5 file, not the one
# just written.
ls -n data.*
h6.close()
```
## **NOW GO TO NOTEBOOK -> TWO_backtest_initial.ipynb**
| github_jupyter |
# Sequential models
The `Sequential` model is a linear stack of layers.
You can create a `Sequential` model by passing a list of layer instances to the constructor:
```
from keras.models import Sequential
from keras.layers import Dense, Activation
model = Sequential([
Dense(32, input_dim=784),
Activation('relu'),
Dense(10),
Activation('softmax'),
])
```
You can also simply add layers via the `.add()` method:
```
model = Sequential()
model.add(Dense(32, input_dim=784))
model.add(Activation('relu'))
```
----
## Specifying the input shape
The model needs to know what input shape it should expect. For this reason, the first layer in a `Sequential` model (and only the first, because following layers can do automatic shape inference) needs to receive information about its input shape. There are several possible ways to do this:
- pass an `input_shape` argument to the first layer. This is a shape tuple (a tuple of integers or `None` entries, where `None` indicates that any positive integer may be expected). In `input_shape`, the batch dimension is not included.
- pass instead a `batch_input_shape` argument, where the batch dimension is included. This is useful for specifying a fixed batch size (e.g. with stateful RNNs).
- some 2D layers, such as `Dense`, support the specification of their input shape via the argument `input_dim`, and some 3D temporal layers support the arguments `input_dim` and `input_length`.
As such, the following three snippets are strictly equivalent:
```
model = Sequential()
model.add(Dense(32, input_shape=(784,)))
model = Sequential()
model.add(Dense(32, batch_input_shape=(None, 784)))
# note that batch dimension is "None" here,
# so the model will be able to process batches of any size.
model = Sequential()
model.add(Dense(32, input_dim=784))
```
And so are the following three snippets:
```
from keras.layers.recurrent import LSTM
model = Sequential()
model.add(LSTM(32, input_shape=(10, 64)))
model = Sequential()
model.add(LSTM(32, batch_input_shape=(None, 10, 64)))
model = Sequential()
model.add(LSTM(32, input_length=10, input_dim=64))
```
Now you know enough to be able to define *almost* any model with Keras. For complex models that cannot be expressed via `Sequential` (and `Merge` layers, which we also did not cover), you can use [the functional API](/getting-started/functional-api-guide).
----
## Compilation
Before training a model, you need to configure the learning process, which is done via the `compile` method. It receives three arguments:
- an optimizer. This could be the string identifier of an existing optimizer (such as `rmsprop` or `adagrad`), or an instance of the `Optimizer` class. See: [optimizers](/optimizers).
- a loss function. This is the objective that the model will try to minimize. It can be the string identifier of an existing loss function (such as `categorical_crossentropy` or `mse`), or it can be an objective function. See: [objectives](/objectives).
- a list of metrics. For any classification problem you will want to set this to `metrics=['accuracy']`. A metric could be the string identifier of an existing metric (only `accuracy` is supported at this point), or a custom metric function.
```
# for a multi-class classification problem
model.compile(optimizer='rmsprop',
loss='categorical_crossentropy',
metrics=['accuracy'])
# for a binary classification problem
model.compile(optimizer='rmsprop',
loss='binary_crossentropy',
metrics=['accuracy'])
# for a mean squared error regression problem
model.compile(optimizer='rmsprop',
loss='mse')
```
----
## Training
Keras models are trained on Numpy arrays of input data and labels. For training a model, you will typically use the `fit` function.
```
# for a single-input model with 2 classes (binary):
model = Sequential()
# A single-unit output must use a sigmoid for binary classification:
# softmax over one unit is constant 1.0, so the model could never learn.
model.add(Dense(1, input_dim=784, activation='sigmoid'))
model.compile(optimizer='rmsprop',
              loss='binary_crossentropy',
              metrics=['accuracy'])
# generate dummy data
import numpy as np
data = np.random.random((1000, 784))
labels = np.random.randint(2, size=(1000, 1))
# train the model, iterating on the data in batches
# of 32 samples
model.fit(data, labels, nb_epoch=10, batch_size=32)
```
`model.fit` has several optional parameters:
- `verbose`: how often the progress log is printed (0 for no log, 1 for progress bar logging, 2 for one line per epoch)
- `callbacks`: a list of callback objects that perform actions at certain events (see below)
- `validation_split`: splits the training data into training and validation sets. The value passed corresponds to the fraction of the data used for validation (
- `validation_data`: when you already have a validation set, pass a list in the format `[input, output]` here. Overrides `validation_split`.
- `shuffle` (default: `True`): shuffles the training data at each epoch.
- `class_weight` and `sample_weight`: used when you want to give different weights during training for certain classes or samples.
## Callbacks
While training a model, you often want to perform certain operations whenever certain events happen (for example, at the start or the end of every epoch or iteration). Keras supports several different callbacks that do things such as save your training history, save the model after each epoch (or whenever there is an improvement), and early stopping. Typical usage is as follows:
```
from keras.callbacks import ModelCheckpoint, EarlyStopping
save_best = ModelCheckpoint('best_model.h5', save_best_only=True)
early_stop = EarlyStopping(patience=5)
history = model.fit(..., callbacks=[save_best, early_stop])
```
`history` is generated by the default callback, `keras.callbacks.History`, which is used every time `model.fit` is called so you never need to pass it as a callback.
# Saving and loading a model
Any given model will be composed of two separate parts:
- Its architecture, defined by your Keras code;
- the parameters, learned after training.
In order to reuse a previously trained model, Keras provides methods to serialize both the model architecture and its parameters separately. While it would be possible to use Python's standard serialization method (`pickle`), remember it is not necessarily portable across different Python versions (e.g., a file serialized using pickle on Python 2 will not load on Python 3 and vice-versa). Serializing the model architecture only works when the model does not use any `Lambda` layers; in that case, you have to reinstantiate the model programmatically.
To serialize a model architecture, you can use the methods `model.to_json()` and `model.to_yaml()`. The only difference between both methods is the textual format used to serialize (YAML is meant to be more human-readable than JSON).
```
model = Sequential()
model.add(Dense(128, activation='relu', input_shape=(100,)))
model.add(Dense(10, activation='softmax'))
print(model.to_json())
print(model.to_yaml())
```
Note: both methods only output a string, and it's up to you to write that string to a text file.
To save the model parameters, you can use the method `model.save_weights(filename)`. This saves *all* the model weights to an HDF5 file (which supports saving hierarchical data structures). This method already writes the file to the disk (as opposed to the methods described above). The `ModelCheckpoint` callback described above uses this function to save your model weights.
Having both the serialized architecture and parameters, you do not need the original code that generated the model to execute it anymore. This allows you to avoid the usual guesswork from iteratively running some experiments and fiddling with your code.
```
from keras.models import model_from_json
model = model_from_json(json_string)
model.load_weights('my_weights_file.h5')
```
| github_jupyter |
```
import pandas as pd
import numpy as np
import matplotlib.pyplot as plt
import seaborn as sns
from matplotlib.figure import Figure
data=pd.read_csv("data.csv",encoding='ISO-8859-1')
data.head()
plt.figure(figsize=(20, 10))
plt.suptitle("Countplots")
plt.subplot(2, 3, 1)
plt.title("City counts")
sns.countplot(data['city'])
plt.subplot(2, 3, 2)
plt.title("Motor way counts")
sns.countplot(data['motor_way'])
plt.subplot(2, 3, 3)
plt.title("country_roads counts")
sns.countplot(data['country_roads'])
plt.subplot(2, 3, 4)
plt.title("driving_style counts")
sns.countplot(data['driving_style'])
plt.subplot(2, 3, 5)
plt.title("A/C counts")
sns.countplot(data['A/C'])
plt.subplot(2, 3, 6)
plt.title("park_heating counts")
sns.countplot(data['park_heating'])
plt.show()
plt.figure(figsize=(20, 10))
plt.suptitle("quantity(kWh)")
plt.subplot(1, 2, 1)
plt.title("Distribution plot")
sns.distplot(data['quantity(kWh)'], kde = True)
plt.subplot(1, 2, 2)
plt.title("Box plot")
sns.boxplot(y=data['quantity(kWh)'])
plt.show()
plt.figure(figsize=(20, 10))
plt.suptitle("Distance(KM)")
plt.subplot(1, 2, 1)
plt.title("Distribution plot")
sns.distplot(data['trip_distance(km)'], kde = True)
plt.subplot(1, 2, 2)
plt.title("Box plot")
sns.boxplot(y=data['trip_distance(km)'])
plt.show()
data.columns
plt.figure(figsize=(20, 10))
plt.suptitle("consumption(kWh/100km)")
plt.subplot(1, 2, 1)
plt.title("Distribution plot")
sns.distplot(data['consumption(kWh/100km)'], kde = True)
plt.subplot(1, 2, 2)
plt.title("Box plot")
sns.boxplot(y=data['consumption(kWh/100km)'])
plt.show()
plt.figure(figsize=(20, 10))
plt.suptitle("avg_speed(km/h)")
plt.subplot(1, 2, 1)
plt.title("Distribution plot")
sns.distplot(data['avg_speed(km/h)'], kde = True)
plt.subplot(1, 2, 2)
plt.title("Box plot")
sns.boxplot(y=data['avg_speed(km/h)'])
plt.show()
plt.figure(figsize=(20, 10))
plt.subplot(2, 2, 1)
plt.title("Average speeds vs city")
sns.violinplot(x = 'city', y = 'avg_speed(km/h)', data = data)
plt.subplot(2, 2, 2)
plt.title("Average speeds vs motor_way")
sns.violinplot(x = 'motor_way', y = 'avg_speed(km/h)', data = data)
plt.subplot(2, 2, 3)
plt.title("Average speeds vs country_roads")
sns.violinplot(x = 'country_roads', y = 'avg_speed(km/h)', data = data)
plt.subplot(2, 2, 4)
plt.title("Average speeds vs driving_style")
sns.violinplot(x = 'driving_style', y = 'avg_speed(km/h)', data = data)
plt.show()
plt.figure(figsize=(20, 10))
plt.subplot(2, 2, 1)
plt.title("Range vs city")
sns.violinplot(x = 'city', y = 'trip_distance(km)', data = data)
plt.subplot(2, 2, 2)
plt.title("Range vs motor_way")
sns.violinplot(x = 'motor_way', y = 'trip_distance(km)', data = data)
plt.subplot(2, 2, 3)
plt.title("Range vs country_roads")
sns.violinplot(x = 'country_roads', y = 'trip_distance(km)', data = data)
plt.subplot(2, 2, 4)
plt.title("Range vs AC")
sns.violinplot(x = 'A/C', y = 'trip_distance(km)', data = data)
plt.show()
plt.figure(figsize=(12, 5))
plt.title("average spee vs driving range")
sns.regplot(x = 'avg_speed(km/h)', y = 'trip_distance(km)', data = data)
plt.show()
plt.figure(figsize=(20, 8))
plt.suptitle("car battery info. vs driving range")
plt.subplot(2, 2, 1)
plt.title("quantity(kWh) vs trip_distance(km)")
sns.scatterplot(x = 'quantity(kWh)', y = 'trip_distance(km)', data = data)
plt.subplot(2, 2, 2)
plt.title("consumption(kWh/100km) vs trip_distance(km)")
sns.scatterplot(x = 'consumption(kWh/100km)', y = 'trip_distance(km)', data = data)
plt.show()
plt.figure(figsize=(20, 10))
plt.subplot(2, 2, 1)
plt.title("consumption(kWh/100km) vs a/c")
sns.boxplot(x = 'A/C', y = 'consumption(kWh/100km)', data = data)
plt.subplot(2, 2, 2)
plt.title("consumption(kWh/100km) vs park_heating")
sns.boxplot(x = 'park_heating', y = 'consumption(kWh/100km)', data = data)
data.corr()
sns.heatmap(data.corr())
plt.show()
```
| github_jupyter |
# TACRED Processing
This notebook processes the TACRED dataset to fit the slot-filling setup
```
import pandas as pd
import random
import json
train_dataset = pd.read_json('../data/original/tacred/train.json')
dev_dataset = pd.read_json('../data/original/tacred/dev.json')
test_dataset = pd.read_json('../data/original/tacred/test.json')
def _rows_with_span_text(dataset):
    """Return the dataset's rows with 'subject'/'object' text filled in from
    the token span indices (spans are inclusive, hence the +1 on the end)."""
    rows = []
    for _, row in dataset.iterrows():
        tokens = row['token']
        row['subject'] = " ".join(tokens[row["subj_start"]:row["subj_end"]+1])
        row['object'] = " ".join(tokens[row["obj_start"]:row["obj_end"]+1])
        rows.append(row)
    return rows

# The same extraction applied to each split (previously three copy-pasted
# loops over test/train/dev).
test_dataset_new = _rows_with_span_text(test_dataset)
train_dataset_new = _rows_with_span_text(train_dataset)
dev_dataset_new = _rows_with_span_text(dev_dataset)
test_dataset = pd.DataFrame(test_dataset_new)
train_dataset = pd.DataFrame(train_dataset_new)
dev_dataset = pd.DataFrame(dev_dataset_new)
#len(test_dataset[test_dataset["relation"]=="no_relation"])
dev_dataset
dataset_no_relation = test_dataset[test_dataset["relation"]=="no_relation"]
dataset_relation = test_dataset[test_dataset["relation"]!="no_relation"]
tdataset_no_relation = train_dataset[train_dataset["relation"]=="no_relation"]
tdataset_relation = train_dataset[train_dataset["relation"]!="no_relation"]
ddataset_no_relation = dev_dataset[dev_dataset["relation"]=="no_relation"]
ddataset_relation = dev_dataset[dev_dataset["relation"]!="no_relation"]
train_dataset.head()
#print(train_dataset.iloc[3])
#print(train_dataset.iloc[3]["token"][55])
#train_dataset.iloc[3]["token"]
#tdataset_relation.set_index(['subject','id'])
def process(dsr, dsnr):
    """Fold the 'no_relation' rows (dsnr) into the relation-bearing rows (dsr).

    Each no-relation row is reassigned, round-robin by its index label, to
    one of the relation labels present in dsr and given an empty object
    string. The combined frame is returned in a random order.
    """
    relation_labels = list(dsr.groupby('relation').groups)
    n_labels = len(relation_labels)
    reassigned = []
    for idx, row in dsnr.iterrows():
        # Round-robin assignment keyed on the row's original index label.
        row['relation'] = relation_labels[idx % n_labels]
        row['object'] = ''
        reassigned.append(row)
    combined = pd.concat([pd.DataFrame(reassigned), dsr])
    # Shuffle so redistributed rows are interleaved with the originals.
    return combined.sample(frac=1)
splits = ['train', 'dev', 'test']
dsrs = [tdataset_relation, ddataset_relation, dataset_relation]
dsnrs = [tdataset_no_relation, ddataset_no_relation, dataset_no_relation]
z = zip(splits, dsrs, dsnrs)
for spl, dsr, dsnr in z:
relation_number = 0
results_df = process(dsr, dsnr)
results_group = results_df.groupby('relation')
with open(f"../data/tacred_{spl}_relations.jsonl", "w") as relation_outfile:
for relation_name, relation_group in results_group:
#print(relation_name)
relation_filename = "P"+str(relation_number)
relation = json.dumps({"relation": relation_filename, "template": "", "question": "", "label": relation_name})
with open(f"../data/tacred/{spl}/{relation_filename}.jsonl", "w") as example_outfile:
# {"uuid": "example_id", "obj_label": generate_me, "sub_label": add_me, "evidences": [{"masked_sentence": space_concatenated_string}]}
for idx, row in relation_group.iterrows():
example = json.dumps({"subject": row["subject"], "object": row["object"], "context": " ".join(row["token"])})
example_outfile.write(example+'\n')
relation_number += 1
relation_outfile.write(relation+'\n')
```
| github_jupyter |
# Ripple Carry Adder
In this notebook we use a "simple" reversible binary adder to benchmark a quantum computer. The code is contained in the module `classical_logic`.
The benchmark is simplistic and not very rigorous as it does not test any specific feature of the hardware. Further the whole circuit is classical in the sense that we start and end in computational basis states and all gates simply perform classical not, controlled not (`CNOT`), or doubly controlled not (`CCNOT` aka a [Toffoli gate](https://en.wikipedia.org/wiki/Toffoli_gate)). Finally, even for the modest task of adding two one bit numbers, the `CZ` gate (our fundamental two qubit gate) count is very high for the circuit. This in turn implies a low probability of the entire circuit working.
However it is very easy to explain the performance of hardware to non-experts, e.g. *"At the moment quantum hardware is pretty noisy, so much so that when we run circuits to add two classical bits it gives the correct answer 40% of the time."*
Moreover the simplicity of the benchmark is also its strength. At the bottom of this notebook we provide code for examining the "error distribution". When run on hardware we can observe that low weight errors dominate which gives some insight that the hardware is approximately doing the correct thing.
The module `classical_logic` is based on the circuits found in [QRCA]
| [QRCA] *A new quantum ripple-carry addition circuit*.
| Cuccaro, Draper, Kutin, and Moulton.
| https://arxiv.org/abs/quant-ph/0410184v1.
 Figures from QRCA
```
import numpy as np
from pyquil.quil import Program
from pyquil.gates import *
from pyquil.api import get_qc
from forest.benchmarking.classical_logic.ripple_carry_adder import *
import matplotlib.pyplot as plt
import networkx as nx
# noiseless QVM
qc = get_qc("9q-generic", as_qvm=True, noisy=False)
# noisy QVM
noisy_qc = get_qc("9q-generic-noisy-qvm", as_qvm=True, noisy=True)
```
## Draw the noiseless qc topology
```
nx.draw(qc.qubit_topology(),with_labels=True)
```
## One bit addition: 1+1 = 10 i.e. 2
There is a small bit of setup that needs to happen before creating the program for the circuit. Specifically you have to pick registers of qubits for the two input numbers `reg_a` and `reg_b`, a carry bit `c`, and an extra digit `z` that will hold the most significant bit of the answer.
If you have a specific line of qubits in mind for the registers there is a helper `assign_registers_to_line_or_cycle()` which will provide these registers for you--`c` is assigned to the provided start qubit and assignments go down the line in the circuit diagram above; however, you have to provide a subgraph that is sufficiently simple so that the assignment can be done by simply moving to the next neighbor--e.g. the graph is a line or a cycle.
If you don't care about the particular arrangement of qubits then you can instead use `get_qubit_registers_for_adder()` which will find a suitable assignment for you if one exists.
```
# the input numbers
num_a = [1]
num_b = [1]
# There are two easy routes to assign registers
# 1) if you have particular target qubits in mind
target_qubits = [3,6,7,4,1]
start = 3
reg_a, reg_b, c, z = assign_registers_to_line_or_cycle(start,
qc.qubit_topology().subgraph(target_qubits),
len(num_a))
print('Registers c, a, b, z on target qubits', target_qubits,': ', c, reg_a, reg_b, z)
# 2) if you don't care about a particular arrangement
# you can still exclude qubits. Here we exclude 0.
reg_a, reg_b, c, z = get_qubit_registers_for_adder(qc, len(num_a), qubits = list(range(1,10)))
print('Registers c, a, b, z on any qubits excluding q0: ', c, reg_a, reg_b, z)
# given the numbers and registers construct the circuit to add
ckt = adder(num_a, num_b, reg_a, reg_b, c, z)
exe = qc.compile(ckt)
result = qc.run(exe)
print('\nThe answer of 1+1 is 10')
print('The circuit on an ideal qc gave: ', result)
```
## Two bit addition
We will start with 1+1=2 on a noiseless simulation.
We choose to represent 1 (decimal) as a two digit binary number 01 so the addition becomes
01 + 01 = 010
where the bits are ordered from most significant to least i.e. (MSB...LSB).
The MSB is necessary for representing other two bit additions e.g. 2 + 2 = 4 -> 10 + 10 = 100
```
# the input numbers
num_a = [0,1]
num_b = [0,1]
#
reg_a, reg_b, c, z = get_qubit_registers_for_adder(qc, len(num_a))
# given the numbers and registers construct the circuit to add
ckt = adder(num_a, num_b, reg_a, reg_b, c, z)
exe = qc.compile(ckt)
result = qc.run(exe)
print('The answer of 01+01 is 010')
print('The circuit on an ideal qc gave: ', result)
```
## Draw the noisy qc topology
```
nx.draw(noisy_qc.qubit_topology(),with_labels=True)
```
## Now try 1+1=2 on a noisy qc
The output is now stochastic--try re-running this cell multiple times!
Note in particular that the MSB is sometimes (rarely) 1 due to some combination of readout error and error propagation through the CNOT
```
reg_a, reg_b, c, z = get_qubit_registers_for_adder(noisy_qc, len(num_a))
ckt = adder(num_a, num_b, reg_a, reg_b, c, z)
exe = noisy_qc.compile(ckt)
noisy_qc.run(exe)
```
## Get results for all summations of pairs of 2-bit strings
Because classical binary addition is easy we can calculate the ideal output of the circuit. In order to see how well the QPU executes the circuit we average the circuit over all possible input strings. Here we look at two bit strings e.g.
| Register a| Register b| a + b + carry|
|-----------|-----------|--------------|
| 00 | 00 | 000 |
| 00 | 01 | 001 |
| 00 | 10 | 010 |
| 00 | 11 | 011 |
| 01 | 00 | 001 |
| $\vdots$ | $\vdots$ | $\vdots$ |
| 11 | 11 | 110 |
The rough measure of goodness is the success probability, which we define as number of times the QPU correctly returns the string listed in the (a+b+carry) column divided by the total number of trials.
You might wonder how well you can do just by generating a random binary number and reporting that as the answer.
Well if you are doing addition of two $n$ bit strings the probability that you can get the correct answer by guessing
$\Pr({\rm correct}\, |\, n)= 1/ 2^{n +1}$,
explicitly $\Pr({\rm correct}\, |\, 1)= 0.25$ and $\Pr({\rm correct}\, |\, 2)= 0.125$.
A zeroth order performance criterion is to do better than these numbers.
```
n_bits = 2
nshots = 100
results = get_n_bit_adder_results(noisy_qc, n_bits, use_param_program=False, num_shots = nshots,
show_progress_bar=True)
# success probabilities of different input strings
pr_correct = get_success_probabilities_from_results(results)
print('The probability of getting the correct answer for each output in the above table is:')
print(np.round(pr_correct, 4),'\n')
print('The success probability averaged over all inputs is', np.round(np.mean(pr_correct), 5))
# For which outputs did we do better than random ?
np.asarray(pr_correct)> 1/2**(n_bits+1)
```
## Get the distribution of the hamming weight of errors
Even if the success probability of the circuit is worse than random there might be a way in which the circuit is not absolutely random. This could indicate that the computation is actually doing something 'close' to what is desired. To look for such situations we consider the full distribution of errors in our outputs.
The output of our circuit is in the computational basis so all errors manifest as bit flips from the actual answer. The number of bits you need to flip to transform one binary string $B_1$ to another binary string $B_2$ is called the Hamming distance. We are interested in the distance ${\rm dist}(B_t, B_o)$ between the true ideal answer $B_{t}$ and the noisy output answer $B_{o}$, which is equivalent to the Hamming weight ${\rm wt}(\cdot) $ of the error in our output.
For example, for various ideal answers and measured outputs for 4 bit addition (remember there's an extra fifth MSB for the answer) we have
${\rm dist}(00000,00001) = {\rm wt}(00001) = 1$
${\rm dist}(00000,10001) = {\rm wt}(10001) = 2$
${\rm dist}(11000,10101) = {\rm wt}(01101) = 3$
${\rm dist}(00001,11110) = {\rm wt}(11111) = 5$
In order to see if our near term devices are doing interesting things we calculate the distribution of the Hamming weight of the errors observed in our QPU data with respect to the known ideal output. The entry corresponding to zero Hamming weight is the success probability.
```
distributions = get_error_hamming_distributions_from_results(results)
```
## Plot distribution of 00+00 and 11+11 and compare to random
```
from scipy.special import comb

def _plot_hamming_vs_random(distr, title):
    """Bar-plot an observed Hamming-weight error distribution alongside the
    distribution expected from uniformly random (n_bits+1)-bit answers."""
    rand_ans_distr = [comb(n_bits + 1, x)/2**(n_bits + 1) for x in range(len(distr))]
    x_labels = np.arange(0, len(distr))
    plt.bar(x_labels, distr, width=0.61, align='center')
    plt.bar(x_labels, rand_ans_distr, width=0.31, align='center')
    plt.xticks(x_labels)
    plt.xlabel('Hamming Weight of Error')
    plt.ylabel('Relative Frequency of Occurrence')
    plt.grid(axis='y', alpha=0.75)
    plt.legend(['data','random'])
    plt.title(title)
    #name = 'numbits'+str(n_bits) + '_basisZ' + '_shots' + str(nshots)
    #plt.savefig(name)
    plt.show()

# Distribution for the all-zeros input (previously two copy-pasted plotting
# cells; both now share the helper above) ...
_plot_hamming_vs_random(distributions[0], 'Z basis Error Hamming Wt Distr for 00+00=000')
# ... and for the all-ones input.
_plot_hamming_vs_random(distributions[-1], 'Z basis Error Hamming Wt Distr for 11+11=110')
```
## Plot average distribution over all summations; compare to random
```
from scipy.special import comb
averaged_distr = np.mean(distributions, axis=0)
rand_ans_distr = [comb(n_bits + 1, x)/2**(n_bits + 1) for x in range(len(averaged_distr))]
x_labels = np.arange(0, len(averaged_distr))
plt.bar(x_labels, averaged_distr, width=0.61, align='center')
plt.bar(x_labels, rand_ans_distr, width=0.31, align='center')
plt.xticks(x_labels)
plt.xlabel('Hamming Weight of Error')
plt.ylabel('Relative Frequency of Occurrence')
plt.grid(axis='y', alpha=0.75)
plt.legend(['data','random'])
plt.title('Z basis Error Hamming Wt Distr Avgd Over {}-bit Strings'.format(n_bits))
#name = 'numbits'+str(n_bits) + '_basisZ' + '_shots' + str(nshots)
#plt.savefig(name)
plt.show()
```
## Now do the same, but with addition in the X basis
In this section we do classical logic in the X basis. This means the inputs to the circuits are no longer $|0\rangle$ and $|1\rangle$, instead they are $|+\rangle = H|0\rangle$ and $|-\rangle = H|1\rangle$.
Originally all the logic was done with X, CNOT, and Toffoli gates. In this case we have to convert them to the corresponding gates in the X basis. E.g.
CNOT = $|0\rangle\langle 0|\otimes I + |1\rangle\langle 1|\otimes X$
becomes
CNOT_in_X_basis = $(H\otimes I)$ CZ $(H\otimes I)$ = $|+\rangle\langle +|\otimes I + |-\rangle\langle -|\otimes Z$.
**Note:** in some of the cells below there is a comment `# NBVAL_SKIP` this is used in testing to speed up our tests by skipping that particular cell.
```
# NBVAL_SKIP
n_bits = 2
# set in_x_basis to true here
results = get_n_bit_adder_results(noisy_qc, n_bits, in_x_basis=True, show_progress_bar=True)
distributions = get_error_hamming_distributions_from_results(results)
averaged_distr = np.mean(distributions, axis=0)
x_labels = np.arange(0, len(averaged_distr))
plt.bar(x_labels, averaged_distr, width=0.61, align='center')
plt.bar(x_labels, rand_ans_distr, width=0.31, align='center')
plt.xticks(x_labels)
plt.xlabel('Hamming Weight of Error')
plt.ylabel('Relative Frequency of Occurrence')
plt.grid(axis='y', alpha=0.75)
plt.legend(['data','random'])
plt.title('X basis Error Hamming Wt Distr Avgd Over {}-bit Strings'.format(n_bits))
#plt.savefig(name)
plt.show()
```
## Error probability to random guess probability as a function of number of added bits
Here we compare the average probability of the adder working as a function of input size (averaged over all possible input strings) to random guessing. To provide context we also compare this to the error probability of the best input string (likely the all zero input string) and the worst input string (likely all ones).
```
# NBVAL_SKIP
summand_lengths = [1,2,3]
avg_n = []
med_n = []
min_n = []
max_n = []
rand_n = []
for n_bits in summand_lengths:
results = get_n_bit_adder_results(noisy_qc, n_bits, show_progress_bar=True)
output_len = n_bits + 1
# success probability average over all input strings
avg_n.append(np.average(get_success_probabilities_from_results(results)))
# median success probability average over all input strings
med_n.append(np.median(get_success_probabilities_from_results(results)))
# success probability input bit string with most errors
min_n.append(np.min(get_success_probabilities_from_results(results)))
# success probability input bit string with least errors
max_n.append(np.max(get_success_probabilities_from_results(results)))
# success probability of randomly guessing the correct answer
rand_n.append(1 / 2**output_len)
# NBVAL_SKIP
plt.scatter(summand_lengths, avg_n, c='b', label='mean')
plt.scatter(summand_lengths, rand_n, c='m', marker='D', label='random')
plt.scatter(summand_lengths, min_n, c='r', marker='_', label='min/max')
plt.scatter(summand_lengths, max_n, c='r', marker='_')
plt.xticks(summand_lengths)
plt.xlabel('Number of bits added n (n+1 including carry bit)')
plt.ylabel('Probablity of working')
plt.legend()
name = 'Pr_suc_fn_nbits' + '_basisZ' + '_shots' + str(nshots)
plt.savefig(name)
plt.show()
# NBVAL_SKIP
print('n bit:', np.round(summand_lengths, 5))
print('average:', np.round(avg_n, 5))
print('median:', np.round(med_n, 5))
print('min:', np.round(min_n, 5))
print('max:', np.round(max_n, 5))
print('rand:', np.round(rand_n, 5))
```
| github_jupyter |
<h1>Table of Contents<span class="tocSkip"></span></h1>
<div class="toc"><ul class="toc-item"><li><span><a href="#Setup" data-toc-modified-id="Setup-1"><span class="toc-item-num">1 </span>Setup</a></span></li><li><span><a href="#Economics" data-toc-modified-id="Economics-2"><span class="toc-item-num">2 </span>Economics</a></span></li><li><span><a href="#Cobb-Dogulas" data-toc-modified-id="Cobb-Dogulas-3"><span class="toc-item-num">3 </span>Cobb-Dogulas</a></span></li><li><span><a href="#Constant-Elasticity-of-Substitution-(CES)" data-toc-modified-id="Constant-Elasticity-of-Substitution-(CES)-4"><span class="toc-item-num">4 </span>Constant Elasticity of Substitution (CES)</a></span></li><li><span><a href="#Perfect-complements" data-toc-modified-id="Perfect-complements-5"><span class="toc-item-num">5 </span>Perfect complements</a></span></li><li><span><a href="#Perfect-substitutes" data-toc-modified-id="Perfect-substitutes-6"><span class="toc-item-num">6 </span>Perfect substitutes</a></span></li></ul></div>
**Description:** This is a Jupyter Notebook with Python code. You do not need any knowledge or either Jupyter or Python to run it.
**To run all:** Kernel $\rightarrow$ Restart & Run All
**To run each cell press:**
1. <kbd>Ctrl</kbd>+<kbd>Enter</kbd> to just run the cell
2. <kbd>Ctrl</kbd>+<kbd>Shift</kbd>+<kbd>Enter</kbd> to the run the cell and proceed to the next
# Setup
```
# imports and settings
%matplotlib inline
%load_ext autoreload
%autoreload 1
import context
import numecon.course_micro1.consumer as consumer
%aimport numecon.course_micro1.consumer
import numecon.course_micro1.slutsky_wealth as slutsky_wealth
%aimport numecon.course_micro1.slutsky_wealth
%%html
<style>
.output_wrapper, .output {
height:auto !important;
max-height:5000px; /* your desired max-height here */
}
.output_scroll {
box-shadow:none !important;
webkit-box-shadow:none !important;
}
</style>
```
# Economics
The budget set is
$$C(p_1,p_2,e_1,e_2) = \{(x_1,x_2) \in \mathbb{R}_{+}^2 \,\, | \,\, p_1 x_1 + p_2 x_2 \leq p_1 e_1 + p_2 e_2\} $$
We normalize with $p_2 = 1$ and consider a change in $p_1$ to $p_1^{\prime}$.
# Cobb-Dogulas
$$u(x_1,x_2) = x_1^{\alpha}x_2^{\beta}$$
```
# Interactive Slutsky decomposition for Cobb-Douglas preferences.
slutsky_wealth.cobb_douglas()
```
# Constant Elasticity of Substitution (CES)
$$u(x_1,x_2) = (\alpha x_1^{-\beta}+(1-\alpha)x_2^{-\beta})^{-1/\beta}$$
```
# Interactive Slutsky decomposition for CES preferences.
slutsky_wealth.ces()
```
# Perfect complements
$$u(x_1,x_2) = \min\{\alpha x_1,\beta x_2\}$$
```
# Interactive Slutsky decomposition for perfect-complements preferences.
slutsky_wealth.perfect_complements()
```
# Perfect substitutes
$$u(x_1,x_2) = \alpha x_1 + \beta x_2 $$
```
# Interactive Slutsky decomposition for perfect-substitutes preferences.
slutsky_wealth.perfect_substitutes()
```
| github_jupyter |
# How do I make a new _project_?
### Overview
There are a number of API calls related to projects. Here we focus on _creating a **new**_ project. Along the way, we will also show how to [list billing groups](http://docs.cancergenomicscloud.org/docs/list-your-billing-groups).
### Prerequisites
1. You need to be a member (or owner) of _at least one_ project.
2. You need your _authentication token_ and the API needs to know about it. See <a href="Setup_API_environment.ipynb">**Setup_API_environment.ipynb**</a> for details.
3. You understand how to <a href="projects_listAll.ipynb" target="_blank">list</a> projects you are a member of (we will just use that call directly here).
## Imports
We import the _Api_ class from the official sevenbridges-python bindings below.
```
import sevenbridges as sbg
```
## Initialize the object
The `Api` object needs to know your **auth\_token** and the correct path. Here we assume you are using the credentials file in your home directory. For other options see <a href="Setup_API_environment.ipynb">Setup_API_environment.ipynb</a>
```
# [USER INPUT] specify credentials file profile {cgc, sbg, default}
prof = 'default'
# Load credentials for the chosen profile (by default from the credentials
# file in the user's home directory) and build the API client from them.
config_file = sbg.Config(profile=prof)
api = sbg.Api(config=config_file)
```
## Make a new project using your first billing group
We start by listing all of your projects and your billing groups. Next we create the JSON that will be passed to the API to create the project. The dictionary should include:
* **billing_group** *Billing group* that will be charged for this project
* **description** (optional) Project description
* **name** Name of the project, may be *non-unique*<sup>1</sup>
**After** creating the project, you can re-check the project list. A **detail**-call for projects returns the following *attributes*:
* **description** The user specified project description
* **id** _Unique_ identifier for the project, generated based on Project Name
* **name** Name of project specified by the user, maybe _non-unique_
* **href** Address<sup>1</sup> of the project.
* **tags** List of tags
* **created_on** Project creation time
* **modified_on** Project modification time
* **created_by** User that created the project
* **root_folder** ID of the root folder for that project
* **billing_group** ID of the billing group for the project
* **settings** Dictionary with project settings for storage and task execution
<sup>1</sup> Please **don't** use non-unique *project names*. However, if you insist (on the GUI), the backend will allow it and assign a unique **id** to your project.
```
# Create a new project with a user-chosen name, charged to one of the
# user's billing groups. Requires the `api` client built in an earlier cell.
# [USER INPUT] Set project name and billing group index here:
new_project_name = 'Michael Diamond'
index_billing = -1
# Check if this project already exists. LIST all projects and check for name match
my_project = api.projects.query(name=new_project_name)
if my_project:  # exploit fact that empty list is False, {list, tuple, etc} is True
    print('A project named {} exists, please choose a unique name'
          .format(new_project_name))
    # Abort the rest of the cell so a duplicate-named project is not created.
    raise KeyboardInterrupt
else:
    # Create a new project
    # What are my funding sources?
    billing_groups = api.billing_groups.query()
    # index_billing = -1 selects the last billing group returned by the API.
    print((billing_groups[index_billing].name + \
           ' will be charged for computation and storage (if applicable)'))
    # Set up the information for your new project
    new_project = {
        'billing_group': billing_groups[index_billing].id,
        'description': """A project created by the API recipe (projects_makeNew.ipynb).
This also supports **markdown**
_Pretty cool_, right?
""",
        'name': new_project_name
    }
    my_project = api.projects.create(
        name=new_project['name'], billing_group=new_project['billing_group'],
        description=new_project['description']
    )
    # (re)list all projects, and get your new project
    my_project = [p for p in api.projects.query(limit=100).all()
                  if p.name == new_project_name][0]
    print('Your new project {} has been created.'.format(
        my_project.name))
    # Print description if it exists
    if hasattr(my_project, 'description'):
        print('Project description: \n {}'.format(my_project.description))
```
## Additional Information
Detailed documentation of this particular REST architectural style request is available [here](http://docs.sevenbridges.com/docs/create-a-new-project)
| github_jupyter |
# TSFRESH Human Activity Recognition Example
This example shows how to use [tsfresh](https://tsfresh.readthedocs.io/) to extract useful features from multiple timeseries and use them to improve classification performance.
```
%matplotlib inline
import matplotlib.pylab as plt
from tsfresh.examples.har_dataset import download_har_dataset, load_har_dataset, load_har_classes
import seaborn as sns
from tsfresh import extract_features, extract_relevant_features, select_features
from tsfresh.utilities.dataframe_functions import impute
from tsfresh.feature_extraction import ComprehensiveFCParameters
from sklearn.tree import DecisionTreeClassifier
from sklearn.model_selection import train_test_split
from sklearn.metrics import classification_report
import pandas as pd
import numpy as np
import logging
# Raise the root logger to ERROR to silence tsfresh's per-feature messages.
# Not recommended for normal use: important Warning messages will be hidden.
logging.basicConfig(level=logging.ERROR)
```
## Load and visualize data
The dataset consists of timeseries for 7352 accelerometer readings. Each reading represents an accelerometer reading for 2.56 sec at 50hz (for a total of 128 samples per reading). Furthermore, each reading corresponds one of six activities (walking, walking upstairs, walking downstairs, sitting, standing and laying)
For more information, or to fetch dataset, go to https://archive.ics.uci.edu/ml/datasets/Human+Activity+Recognition+Using+Smartphones
```
# fetch dataset from uci (downloads once, then loads it into a DataFrame
# of shape (7352 readings, 128 samples per reading))
download_har_dataset()
df = load_har_dataset()
df.head()
df.shape
# Plot the first accelerometer reading (one row = one 2.56 s window at 50 Hz).
plt.title('accelerometer reading')
# `.ix` was deprecated in pandas 0.20 and removed in 1.0; use positional
# indexing with `.iloc` instead.
plt.plot(df.iloc[0, :])
plt.show()
```
## Extract Features
```
# Use the full (comprehensive) tsfresh feature calculator set.
extraction_settings = ComprehensiveFCParameters()
# rearrange first 500 sensor readings column-wise, not row-wise:
# column 0 holds the flattened sample values, column 1 the reading id
# each sample belongs to (tsfresh's long/stacked format).
N = 500
master_df = pd.DataFrame({0: df[:N].values.flatten(),
                          1: np.arange(N).repeat(df.shape[1])})
master_df.head()
# Extract features per reading id; impute replaces NaN/inf in the result.
%time X = extract_features(master_df, column_id=1, impute_function=impute, default_fc_parameters=extraction_settings);
X.shape
"Number of extracted features: {}.".format(X.shape[1])
```
## Train and evaluate classifier
```
# Labels for the same N readings used for feature extraction.
y = load_har_classes()[:N]
y.shape
# 80/20 train/test split, then fit a decision tree on the extracted features.
X_train, X_test, y_train, y_test = train_test_split(X, y, test_size=.2)
cl = DecisionTreeClassifier()
cl.fit(X_train, y_train)
print(classification_report(y_test, cl.predict(X_test)))
```
# Multiclass feature selection
In total our feature matrix contains 222 features.
We can try to select a subset of features with the select_features method of tsfresh.
However it only works for binary classification or regression tasks.
For a 6 label multi classification we split the selection problem into 6 binary one-versus all classification problems. For each of them we can do a binary classification feature selection:
```
# select_features only supports binary targets, so run a one-vs-all
# selection per class and take the union of all selected features.
relevant_features = set()
for label in y.unique():
    # Binary target: "is this reading of class `label`?"
    y_train_binary = y_train == label
    X_train_filtered = select_features(X_train, y_train_binary)
    print("Number of relevant features for class {}: {}/{}".format(label, X_train_filtered.shape[1], X_train.shape[1]))
    relevant_features = relevant_features.union(set(X_train_filtered.columns))
len(relevant_features)
```
we keep only those features that we selected above, for both the train and test set
```
# Keep only the union of per-class relevant features, in train and test.
X_train_filtered = X_train[list(relevant_features)]
X_test_filtered = X_test[list(relevant_features)]
X_train_filtered.shape, X_test_filtered.shape
```
so, we reduced the number of used features from 794 to 263
```
# Refit the decision tree on the reduced feature set and evaluate.
cl = DecisionTreeClassifier()
cl.fit(X_train_filtered, y_train)
print(classification_report(y_test, cl.predict(X_test_filtered)))
```
It worked! The precision improved by removing irrelevant features.
## Compare against naive classification accuracy
By extracting using time-series features (as opposed to using raw data points), we can meaningfully increase classification accuracy.
```
# Baseline: train directly on the raw time-series samples (no features).
# `.ix` was deprecated in pandas 0.20 and removed in 1.0; `.iloc[:N]`
# selects the same first N rows that label-based `.ix[:N-1]` did here.
X_1 = df.iloc[:N, :]
X_1.shape
X_train, X_test, y_train, y_test = train_test_split(X_1, y, test_size=.2)
cl = DecisionTreeClassifier()
cl.fit(X_train, y_train)
print(classification_report(y_test, cl.predict(X_test)))
```
So, both our unfiltered and filtered feature based classificators are able to beat the model on the raw time series values
| github_jupyter |
In many financial situations like default prediction, interpretable models are required. Linear models like
logistic model are often used to reach the requirement. Meanwhile, in order to make the model robust, people
often apply single variable transformation like WOE. However, such transformation has two main drawbacks:
1) It is sensitive to noise and sometimes yields transformed boxes which are not monotone.
2) Because of the loss of monotonicity, interpretibility can not be guaranteed.
This repository introduces a new method of single-variable transformation, which can ensure that the transformation
is monotone as well as continuous.
The repository also presents LinearModel.py which offers a series of modified logistic models.
The demo Jupyter file shows that the modified methods outperform the state-of-the-art logistic model in terms of accuracy
and robustness.
*MonoLogitTrans.py* :
Description:
The module offers an algorithm of single varaible transformation, which has following propertities:
1) positively corelated with P(Y=1)
2) offers paramater to choose if the transformation is guaranteed to be monotone
3) theoretically equivalent to the logit of P(Y=1)
How:
If the parameter method='wide':
Fit MLP between single varaible and Y. The loss function is made up of two parts,
cross entropy and loss of monotonicity.
The difination of loss of monotonicity is:
$\text{loss_monotonous}(x,f(x)) = 1 - |(\rho(x,f(x)))|$
$\rho(x,y)$ is Pearson correlation coefficient.
The final loss function is:
loss = cross_entropy($\sigma$(f(x)),y)+lambda_monotonous*loss_monotonous(x,f(x))
If the parameter method='strict':
The sign of all the weights in the same hiden layer are constrainted to be the same. Hence, the
MLP is nested function of monotone functions, which means that the MLP is a monotone function.
In this situation, the loss function is simply the cross entropy loss.
Version info:
sklearn 0.20.1
tensorflow 1.13.1
python 3.7.1
*LinearModel.py* :
Description:
The module offers a siries of class of logistic regression, which are similar to the logistic regressions offered
by sklearn when people use them. For example, they have methods like fit(), predict(), predict_proba(), etc.
The main modifications are:
1)In order to cooperate with single variable transformations like WOE which are positively correlated with
P(Y=1), the class PosLassoClassifier and PosLassoClassifierCV offers logistic regression with constraint
that all the coefficients are positive.
2)They adopt SGD with choice of start point of ridge regression estimator or random normal.
3) To deal with outliers, the loss of each batch can exclude the largest (1-Q0)*100% elements with label
Y=0 and (1-Q1)*100% elements with label Y=1, before taking the mean, which makes the model nonsensitive
of cost.
4) The final estimator can be set to be the mean of estimated values of 100 iterations after converge,
in order to get a robust estimation and variable selection, which makes the model nonsensitive to
randomness of sampling.
Version info:
sklearn 0.20.1
tensorflow 1.13.1
matplotlib 3.0.2
python 3.7.1
**This demo uses breast cancer data offered by sklearn to demonstrate how to use MonoLogitTrans.py to preform single variable transformation, as well as to use LinearModel.py to build binary classifier after that**
The main parts are:
1. Get data
2. Perform single varaible transformation with MonoLogitTrans
3. Modeling and comparision
4. Demonstrate the choice of start point of LinearModel
5. Demonstrate the choice of Q0 and Q1 of LinearModel
First, install the package [easylogistic](https://github.com/ModelerGuanxuSu/EasyLogistic/raw/master/easylogistic-1.0.1.tar.gz)
```
import easylogistic
```
import other packages
```
import tensorflow as tf
import pandas as pd
import numpy as np
import matplotlib.pyplot as plt
import importlib
import sklearn
from sklearn.linear_model import LogisticRegressionCV
import time
```
### Get Data
```
# Load three explanatory variables (train/test splits) plus binary labels,
# then scatter each variable against y to eyeball its relationship.
x1,x2,x3,x1_test,x2_test,x3_test,y,y_test = easylogistic.get_data()
plt.subplot(1,3,1)
plt.plot(x1,y,'o',alpha=0.1)
plt.subplot(1,3,2)
plt.plot(x2,y,'o',alpha=0.1)
plt.subplot(1,3,3)
plt.plot(x3,y,'o',alpha=0.1)
plt.show()
```
### Perform single varaible transformation with MonoLogitTrans
```
# One transformer per variable: 'strict' guarantees a monotone transform,
# 'wide' only encourages monotonicity via a penalty term.
MonoLogitTrans1 = easylogistic.MonoLogitTrans(method='strict')
MonoLogitTrans2 = easylogistic.MonoLogitTrans(method='wide')
MonoLogitTrans3 = easylogistic.MonoLogitTrans(method='wide',num_hidden=30)
# Time the three fits (wall-clock seconds).
time1 = time.time()
MonoLogitTrans1.fit(x1,y)
MonoLogitTrans2.fit(x2,y)
MonoLogitTrans3.fit(x3,y)
time2 = time.time()
print(time2-time1)
# Apply the fitted transforms to the held-out test data.
x1_trans = MonoLogitTrans1.transform(x1_test)
x2_trans = MonoLogitTrans2.transform(x2_test)
x3_trans = MonoLogitTrans3.transform(x3_test)
```
**Save and load parameters**
```
# save parameters of each fitted transformer to disk
MonoLogitTrans1.save_parameter('./Docs/MonoLogitTrans1.txt')
MonoLogitTrans2.save_parameter('./Docs/MonoLogitTrans2.txt')
MonoLogitTrans3.save_parameter('./Docs/MonoLogitTrans3.txt')
# load parameters into fresh transformer objects to show round-tripping
MonoLogitTrans1_ = easylogistic.MonoLogitTrans()
MonoLogitTrans2_ = easylogistic.MonoLogitTrans()
MonoLogitTrans3_ = easylogistic.MonoLogitTrans()
MonoLogitTrans1_.load_parameter('./Docs/MonoLogitTrans1.txt')
MonoLogitTrans2_.load_parameter('./Docs/MonoLogitTrans2.txt')
MonoLogitTrans3_.load_parameter('./Docs/MonoLogitTrans3.txt')
# Re-transform with the reloaded objects (overwrites the earlier results).
x1_trans = MonoLogitTrans1_.transform(x1_test)
x2_trans = MonoLogitTrans2_.transform(x2_test)
x3_trans = MonoLogitTrans3_.transform(x3_test)
```
#### Make sure converge by looking at trace plot of loss
```
# Trace plots of the training loss for each transformer, to check convergence.
plt.subplot(1,3,1)
plt.plot(MonoLogitTrans1.Loss)
plt.subplot(1,3,2)
plt.plot(MonoLogitTrans2.Loss)
plt.subplot(1,3,3)
plt.plot(MonoLogitTrans3.Loss)
plt.show()
```
#### Comparision between original data (horizontal ordinate) and transformed data (vertical ordinate)
If method='wide', such as the last two varaibles, then the value of lambda_monotonous controls monotonicity.
The greater lambda_monotonous is, the more likely that the tranformation is monotone.
If method='strict', such as the first varaible, the tranformation would be guaranteed to be monotone.
```
# Original value (x-axis) vs transformed value (y-axis) for each variable;
# a monotone transform shows up as a monotone point cloud.
plt.subplot(1,3,1)
plt.plot(x1_test,x1_trans,'o',alpha=0.2)
plt.subplot(1,3,2)
plt.plot(x2_test,x2_trans,'o',alpha=0.2)
plt.subplot(1,3,3)
plt.plot(x3_test,x3_trans,'o',alpha=0.2)
plt.show()
# Per-class histograms: transformed vs original, for each of the 3 variables.
easylogistic.PlotComparableHistogram(variable_=pd.Series(x1_trans),lable_=pd.Series(y_test))
easylogistic.PlotComparableHistogram(variable_=pd.Series(x1_test),lable_=pd.Series(y_test))
easylogistic.PlotComparableHistogram(variable_=pd.Series(x2_trans),lable_=pd.Series(y_test))
easylogistic.PlotComparableHistogram(variable_=pd.Series(x2_test),lable_=pd.Series(y_test))
easylogistic.PlotComparableHistogram(variable_=pd.Series(x3_trans),lable_=pd.Series(y_test))
easylogistic.PlotComparableHistogram(variable_=pd.Series(x3_test),lable_=pd.Series(y_test))
```
#### Obtain the test set of original data and transformed data
```
# Reshape each 1-D array into a column vector, then stack the three columns
# into (n, 3) design matrices: X_origin from raw data, X_trans from the
# monotone-transformed data.
x1_test = np.reshape(x1_test, (-1, 1))
x2_test = np.reshape(x2_test, (-1, 1))
x3_test = np.reshape(x3_test, (-1, 1))
x1_trans = np.reshape(x1_trans, (-1, 1))
x2_trans = np.reshape(x2_trans, (-1, 1))
x3_trans = np.reshape(x3_trans, (-1, 1))
X_origin = np.concatenate((x1_test, x2_test, x3_test), axis=1)
X_trans = np.concatenate((x1_trans, x2_trans, x3_trans), axis=1)
```
### Modeling and comparision
Because all the transformed variables are positively correlated with P(y=1), it would be better to use PosLassoClassifierCV of LinearModel.
For the original data, LogisticRegressionCV of sklearn is applied.
```
# Transformed features are positively correlated with P(y=1), so use the
# positivity-constrained lasso; plain sklearn logistic CV on the raw data.
model1 = easylogistic.PosLassoClassifierCV(beta_mean=True)
model1.fit(X_trans,y_test)
model2 = LogisticRegressionCV(cv=5)
model2.fit(X_origin,y_test)
```
#### Coefficients
```
model1.coef_
model2.coef_[0]
```
#### Prediction
```
# Predicted P(y=1) from both models; compare via AUC, scatter and KS plots.
y_hat_trans = model1.predict_proba(X_trans)[:,1]
y_hat_origin = model2.predict_proba(X_origin)[:,1]
from sklearn.metrics import roc_auc_score
roc_auc_score(y_test,y_hat_trans)
roc_auc_score(y_test,y_hat_origin)
plt.subplot(1,2,1)
plt.plot(y_hat_trans,y_test,'bo',alpha=0.1)
plt.subplot(1,2,2)
plt.plot(y_hat_origin,y_test,'bo',alpha=0.1)
plt.show()
easylogistic.PlotKS(y_hat_trans,y_test)
easylogistic.PlotKS(y_hat_origin,y_test)
```
## Demonstrate the choice of start point of LinearModel
```
# Compare SGD start points: ridge estimator vs random normal initialization.
model1 = easylogistic.LassoClassifierCV(beta_mean=True,start_point='ridge')
model1.fit(X_origin,y_test)
model2 = easylogistic.LassoClassifierCV(beta_mean=True,start_point=None)
model2.fit(X_origin,y_test)
y_hat_1 = model1.predict_proba(X_origin)[:,1]
y_hat_2 = model2.predict_proba(X_origin)[:,1]
```
#### Ridge estimator as the start point
```
roc_auc_score(y_test,y_hat_1)
model1.plotLoss()
```
#### Random normal as the start point
```
roc_auc_score(y_test,y_hat_2)
model2.plotLoss()
```
## Demonstrate the choice of Q0 and Q1 of LinearModel
```
# Compare loss trimming: Q0/Q1 < 1 drops the largest 5% of per-class losses
# in each batch (outlier-robust) vs no trimming with Q0 = Q1 = 1.
model1 = easylogistic.LassoClassifierCV(beta_mean=True,Q0=0.95,Q1=0.95)
model1.fit(X_origin,y_test)
model2 = easylogistic.LassoClassifierCV(beta_mean=True,Q0=1,Q1=1)
model2.fit(X_origin,y_test)
y_hat_1 = model1.predict_proba(X_origin)[:,1]
y_hat_2 = model2.predict_proba(X_origin)[:,1]
```
#### Q0 and Q1 less than 1
which means the largest (1-Q0)*100% elements with label Y=0 and (1-Q1)*100% elements with label Y=1, before taking the mean
```
roc_auc_score(y_test,y_hat_1)
model1.plotLoss()
```
#### Q0 and Q1 equal to 1
```
roc_auc_score(y_test,y_hat_2)
model2.plotLoss()
```
| github_jupyter |
```
import os
import io
import detectron2
# import some common detectron2 utilities
from detectron2.engine import DefaultPredictor
from detectron2.config import get_cfg
from detectron2.utils.visualizer import Visualizer
from detectron2.data import MetadataCatalog
# import some common libraries
import numpy as np
import cv2
import torch
# Show the image in ipynb
from IPython.display import clear_output, Image, display
import PIL.Image
def showarray(a, fmt='jpeg'):
    """Display a numpy image array inline in the notebook.

    Values are clipped to [0, 255] and cast to uint8, then encoded with
    PIL in the given format (default JPEG) and shown via IPython display.
    """
    pixels = np.uint8(np.clip(a, 0, 255))
    buffer = io.BytesIO()
    PIL.Image.fromarray(pixels).save(buffer, fmt)
    display(Image(data=buffer.getvalue()))
# Build a Faster R-CNN (R50-C4) predictor from the repo config, with custom
# NMS / score thresholds and pretrained COCO weights fetched by URL.
cfg = get_cfg()
cfg.merge_from_file("../configs/COCO-Detection/faster_rcnn_R_50_C4_3x.yaml")
cfg.MODEL.RPN.POST_NMS_TOPK_TEST = 300
cfg.MODEL.ROI_HEADS.NMS_THRESH_TEST = 0.6
cfg.MODEL.ROI_HEADS.SCORE_THRESH_TEST = 0.2
cfg.MODEL.RPN.NMS_THRESH = 0.7
cfg.MODEL.WEIGHTS = "https://dl.fbaipublicfiles.com/detectron2/COCO-Detection/faster_rcnn_R_50_C4_3x/137849393/model_final_f97cb7.pkl"
predictor = DefaultPredictor(cfg)
# Load the input image (OpenCV reads BGR) and preview it as RGB.
im = cv2.imread("data/images/input.jpg")
im_rgb = cv2.cvtColor(im, cv2.COLOR_BGR2RGB)
showarray(im_rgb)
# Target number of detected objects (and RoI features) to keep per image.
NUM_OBJECTS = 100
from detectron2.modeling.postprocessing import detector_postprocess
from detectron2.modeling.roi_heads.fast_rcnn import FastRCNNOutputLayers, FastRCNNOutputs, fast_rcnn_inference_single_image
def doit(raw_image):
    """Run the Faster R-CNN pipeline stage by stage on one BGR image.

    Returns (instances, roi_features): postprocessed detections in the
    original image coordinates, and the pooled RoI feature vector for
    each kept detection. Relies on the module-level `predictor` and
    `NUM_OBJECTS` defined in earlier cells.
    """
    with torch.no_grad():
        raw_height, raw_width = raw_image.shape[:2]
        print("Original image size: ", (raw_height, raw_width))
        # Preprocessing: resize per the predictor's transform, then convert
        # HWC uint8 -> CHW float32 tensor.
        image = predictor.transform_gen.get_transform(raw_image).apply_image(raw_image)
        print("Transformed image size: ", image.shape[:2])
        image = torch.as_tensor(image.astype("float32").transpose(2, 0, 1))
        inputs = [{"image": image, "height": raw_height, "width": raw_width}]
        images = predictor.model.preprocess_image(inputs)
        # Run Backbone Res1-Res4
        features = predictor.model.backbone(images.tensor)
        # Generate proposals with RPN
        proposals, _ = predictor.model.proposal_generator(images, features, None)
        proposal = proposals[0]
        print('Proposal Boxes size:', proposal.proposal_boxes.tensor.shape)
        # Run RoI head for each proposal (RoI Pooling + Res5)
        proposal_boxes = [x.proposal_boxes for x in proposals]
        features = [features[f] for f in predictor.model.roi_heads.in_features]
        box_features = predictor.model.roi_heads._shared_roi_transform(
            features, proposal_boxes
        )
        feature_pooled = box_features.mean(dim=[2, 3])  # pooled to 1x1
        print('Pooled features size:', feature_pooled.shape)
        # Predict classes and boxes for each proposal.
        pred_class_logits, pred_proposal_deltas = predictor.model.roi_heads.box_predictor(feature_pooled)
        outputs = FastRCNNOutputs(
            predictor.model.roi_heads.box2box_transform,
            pred_class_logits,
            pred_proposal_deltas,
            proposals,
            predictor.model.roi_heads.smooth_l1_beta,
        )
        probs = outputs.predict_probs()[0]
        boxes = outputs.predict_boxes()[0]
        # Note: BUTD uses raw RoI predictions,
        # we use the predicted boxes instead.
        # boxes = proposal_boxes[0].tensor
        # NMS: raise the threshold in 0.1 steps until exactly NUM_OBJECTS
        # detections survive (a higher threshold suppresses fewer boxes).
        for nms_thresh in np.arange(0.5, 1.0, 0.1):
            instances, ids = fast_rcnn_inference_single_image(
                boxes, probs, image.shape[1:],
                score_thresh=0.2, nms_thresh=nms_thresh, topk_per_image=NUM_OBJECTS
            )
            if len(ids) == NUM_OBJECTS:
                break
        # Rescale detections back to the original image size, and keep the
        # pooled feature vector of each surviving proposal.
        instances = detector_postprocess(instances, raw_height, raw_width)
        roi_features = feature_pooled[ids].detach()
        print(instances)
        return instances, roi_features
# Run the staged pipeline on the loaded image and inspect the outputs.
instances, features = doit(im)
print(instances.pred_boxes)
print(instances.scores)
print(instances.pred_classes)
# Show the boxes, labels, and features
pred = instances.to('cpu')
v = Visualizer(im[:, :, :], MetadataCatalog.get("coco_2014_train"), scale=1.2)
v = v.draw_instance_predictions(pred)
# Visualizer returns RGB; flip channels back for display.
showarray(v.get_image()[:, :, ::-1])
print('instances:\n', instances)
print()
print('boxes:\n', instances.pred_boxes)
print()
print('Shape of features:\n', features.shape)
| github_jupyter |
# 統計学の簡単な復習
If you come here without expecting Japanese, please click [Google translated version](https://translate.google.com/translate?hl=&sl=ja&tl=en&u=https%3A%2F%2Fpy4etrics.github.io%2F7_Review_of_Statistics.html) in English or the language of your choice.
---
## 確率変数
**定義**
確率変数(random variables)とは,無作為のプロセスの結果として実数をとる変数であり,実現し観察されるまで値が未知の変数である。実現した値を実現値もしくは観察値と呼ぶ。次の記号を使って例を考える。
* $X$:確率変数自体を示す記号
* $x$:実現値
例1(離散型確率変数)
* サイコロの目:$X$
* 実現可能な値の集合:$x\in\{1, 2, 3, 4, 5, 6\}$
* 実現値:$x=3$
例2(連続型確率変数)
* ランダムに選んだ経済学部の学生の身長:$X$
* 実現可能な値の集合:$\{x\;|\;0< x<\infty\}$
* 実現値:175.920483.....cm
---
**確率変数のある値が発生する確率**
* 例1:サイコロ
* $X=3$の確率は確率質量関数 $f(3)=1/6$で表される。
* 実現可能な値の確率の合計=1,即ち $\displaystyle\sum_{x=1}^6f(x)=1$
* 例2:ランダムに選んだ経済学部の学生の身長
* $X=175.92$の確率は確率密度関数 $f(175.92)=0.0204$で表される。
* 実現可能な値の確率の合計=1,即ち $\displaystyle\int_0^{\infty}f(x)dx=1$
## 確率変数の特徴を示す尺度
(以下で使う記号)
$X,Y$:確率変数
$x,y$:確率変数の実現値
---
**期待値(expected value)= 平均(average or mean)**
$\text{E}(X)=\mu_X$
(性質)
* $\text{E}(X)\gtreqqless 0$
* $\text{E}(aX)=a\text{E}(X)$
* $\text{E}(X+Y)=\text{E}(X)+\text{E}(Y)$
* $\text{E}(XY)=\text{E}(X)\cdot\text{E}(Y)+\text{Cov}(X,Y)$
* $X$の単位に依存
---
**分散(variance)**
$\sigma_X^2\equiv\text{Var}(X)\equiv\text{E}\left[(X-\mu_X)^2\right]=\text{E}\left[X^2\right]-\mu_X^2$
(性質)
* $\text{Var}(X)\geq 0$
* $\text{Var}(X+a)=\text{Var}(X)$
* $\text{Var}(aX)=a^2\text{Var}(X)$
* $\text{Var}(aX+bY)=a^2\text{Var}(X)+b^2\text{Var}(Y)+2ab\cdot\text{Cov}(X,Y)$
* $\text{Var}(aX-bY)=a^2\text{Var}(X)+b^2\text{Var}(Y)-2ab\cdot\text{Cov}(X,Y)$
* $X$の単位に依存
---
**標準偏差(standard deviation)**
$\sigma_X\equiv\sqrt{\text{Var}(X)}$
(性質)
* $X$の単位に依存
---
**共分散(covariance)**
$\sigma_{XY}\equiv\text{Cov}(X,Y)=\text{E}\left[(X-\mu_X)(Y-\mu_Y)\right]$
(性質)
* $\text{Cov}(X,Y)\lesseqqgtr 0$
* $\text{Cov}(X,X)=\text{Var}(X)$
* $X$と$Y$の単位に依存
---
**相関係数(correlation coefficient)**
$\rho_{XY}\equiv\text{Corr}(X,Y)=\dfrac{\sigma_{XY}}{\sigma_X\cdot\sigma_Y}
=\dfrac{\text{Cov}(X,Y)}{\sqrt{\text{Var}(X)\cdot\text{Var}(Y)}}$
(性質)
* $-1\leq\rho_{XY}\leq 1$
* $X$と$Y$の単位に依存しない
## 正規分布(Normal Distribution)
別名:ガウス分布(Gausian Distribution)
### 確率密度関数と累積分布関数
**確率密度関数**
$$
\phi(x)=\dfrac{1}{\sqrt{2\pi\sigma_X}}e^{-\frac{1}{2}\left(\frac{x-\mu_X}{\sigma_X}\right)^2}
$$
* 2つのパラメータ:平均($\mu_X$)と分散($\sigma_X^2$)
* 左右対称
* 「$X$は平均$\mu_X$,分散$\sigma_X^2$の正規分布に従う」を記号で表現
$$X\sim N\left(\mu_X,\sigma_X^2\right)$$
**確率分布関数**
$$F(x)=\int_{-\infty}^x\phi(s)ds$$
### 標準正規分布
正規分布の変数$X$を次式
$$
Z=\dfrac{X-\mu_X}{\sigma_X}
$$
で変換すると,$Z$の分布は標準正規分布に従う。
$$Z\sim N(0,1)$$
### 多変量正規分布
* 2つの確率変数$X_1$と$X_2$を考えよう。
$$
X_1\sim N\left(\mu_1,\sigma_1^2\right),\qquad\qquad
X_2\sim N\left(\mu_2,\sigma_2^2\right)
$$
* この表記からは2つの確率変数に何らかの関係性が存在するかどうか不明であるが,通常この場合,$X_1$と$X_2$は「独立」と受け取られる。
* この2つの変数に「何らかの関係性」を明確にするために,次のようにまとめて書く:
$$
\begin{bmatrix}
X_1\\X_2
\end{bmatrix}
\sim
N\left(
\begin{bmatrix}
\mu_1\\ \mu_2
\end{bmatrix}
,
\begin{bmatrix}
\sigma_1^2,&\sigma_{12}\\
\sigma_{21},& \sigma_2^2
\end{bmatrix}
\right)
$$
もしくは
$$
X\sim N\left(\mu_X,\Sigma_X\right)
$$
* $X$:確率変数のベクトル $\left(X_1,X_2\right)^T$($T$は「置換する」という意味で,列ベクトルにしている)
* $\mu_X$:平均のベクトル $\left(\mu_1,\mu_2\right)^T$
* $\Sigma_X$:分散共分散行列
$$
\Sigma_X=
\begin{pmatrix}
\sigma_1^2,&\sigma_{12}\\
\sigma_{21},& \sigma_2^2
\end{pmatrix}
$$
* $\sigma_{12}=\sigma_{21}$は$X_1$と$X_2$の共分散
* 上で「何らかの関連性」と書いたが,それを$\sigma_{12}$が捉えている。
(共分散の解釈)
* $\sigma_{12}=0$:$X_1$と$X_2$は独立であり何の関係もない。即ち,
$X_1\sim N\left(\mu_1,\sigma_1^2\right),\;X_2\sim N\left(\mu_2,\sigma_2^2\right)$
と別々に書いて何の問題もない。
* $\sigma_{12}>0$:$X_1$と$X_2$は「同じ方向」の値が抽出される傾向にある。例えば,両辺数ともプラスの値,もしくはマイナスの値。$\sigma_{12}$が大きくなれば,その傾向はより強くなる。(注意:これは傾向であり,必ずそうはならない)
* $\sigma_{12}<0$:$X_1$と$X_2$は「逆方向」の値が抽出される傾向にある。例えば,$X_1$はプラスの値で$X_2$はマイナスの値,もしくはその反対。$\sigma_{12}$の絶対値が大きくなれば,その傾向はより強くなる。(注意:これは傾向であり,必ずそうはならない)
## 標本の特徴を示す数値的尺度
母集団から標本を無作為に1つデータを抽出するとしよう。その場合,
* 母集団 = 実現可能な値の集合
* 抽出するデータ = 確率変数
* 抽出後の値 = 実現値
この場合,標本の大きさは母集団より小さい(母集団の大きさが2以上と仮定)。
上では1つのデータだけを抽出を考えたが,通常実証分析では複数のデータを扱い,データの種類によって母集団の大きさと標本の大きさを以下のように解釈することが可能である。
* 時系列データ
* 時間は無限に続くため,無限の母集団からの標本抽出 $\Rightarrow$ 標本の大きさは母集団より小さい。
* 横断面データ
* 多くの場合,費用対効果から母集団から一部を標本を収集する $\Rightarrow$ 標本の大きさは母集団より小さい。
* 母集団の大きさが小さい場合,標本の大きさは母集団の大きさと「等しい」ケースがある。
* 例えば,2018年神戸大学経済学部の中級マクロ経済学I定期試験の点数の場合,約300のデータ。
* この場合でも,標本の大きさは母集団より小さいと考えることができる。
* ある学生$i$さんの点数は確率変数と解釈できる。その場合,実現可能な値の集合(小数点は無視)は
$\left\{0,1,2,3,....,97,98,99,100\right\}$であり,点数の種類は101ある。この中なら1つの値だけが実現値として観察されている。更に,約300名の学生が試験を受けたので,母集団の大きさは約$101\times 300=20200$となる。
>**「標本の大きさは母集団より小さい」の含意**
>
>母集団のパラメータを推定するための標本の統計量には必ず**誤差**が存在する。
```{note}
標本のそれぞれの観測値が,同じ母集団から独立に(他の観測値との何の関連性もなく)抽出された場合,それらは**独立同一分布**(Idependently Identically Distributed; 略して IID)に従うという。
```
---
以下で使う記号
* 標本の大きさ:$n$
* $i$番目の確率変数:$X_i$
---
**標本平均(sample mean)**
* 確率変数の標本平均:$\bar{X}=\dfrac{1}{n}\displaystyle\sum_{i=1}^nX_i$
* 標本平均の実現値:$\bar{x}=\dfrac{1}{n}\displaystyle\sum_{i=1}^nx_i$
(特徴)
* $\bar{X}$は母集団平均の不偏推定量
$$\text{E}(\bar{X})=\mu_X$$
* $\bar{x}$はその推定値。
* $X_i$がIIDの場合の$\bar{X}$の分散
$$
\text{Var}(\bar{X})=\dfrac{1}{n}\sigma_{X}^2
$$
* $n\rightarrow\infty\;\Rightarrow\;\text{Var}(\bar{X})=0$
---
**標本分散(sample variance)**
* 確率変数の標本分散:$\hat{\sigma}_X^2=\dfrac{1}{n-1}\displaystyle\sum_{i=1}^n\left(X_i-\bar{X}\right)^2$
* 標本分散の実現値:$\hat{\sigma}_x^2=\dfrac{1}{n-1}\displaystyle\sum_{i=1}^n\left(x_i-\bar{x}\right)^2$
(特徴)
* $\hat{\sigma}_X^2$は母集団分散の不偏推定量
* $\hat{\sigma}_x^2$はその推定値
(注意)
* 分母は $n-1$であり,これにより$\hat{\sigma}_X^2$は母集団分散の不偏推定量となる。
---
**標本平均の分散**
* 確率変数の標本平均の分散$\text{Var}(\bar{X})=\dfrac{1}{n}\sigma_X^2$にある$\sigma_X^2$は母集団の分散であり観測不可能。従って,推定する必要がある。その推定量として$\hat{\sigma}_X$を使う。
$$
\widehat{\text{Var}(\bar{X})}=\frac{1}{n}\hat{\sigma}_X^2
$$
* 以下を標準誤差と呼ぶ
$$
\text{SE}(\bar{X})=\sqrt{\widehat{\text{Var}(\bar{X})}}=\frac{\hat{\sigma}_X}{\sqrt{n}}
$$
* 母集団平均の推定量$\bar{X}$には誤差があり,その正確性を示す。
---
**標本標準偏差**
* 確率変数の標本標準偏差:$\hat{\sigma}_X$
* 標本標準偏差の実現値:$\hat{\sigma}_x$
(注意)
* $\hat{\sigma}_X$は母集団標準偏差の不偏推定量では**ない**(直感的に、$\hat{\sigma}_X^2$のルートは非線形変換になるため)
---
**標本共分散**
* 確率変数の共分散
$$
\hat{\sigma}_{XY}=\frac{1}{n-1}\sum_{i=1}^{n}\left(X_i-\bar{X}\right)\left(Y_i-\bar{Y}\right)
$$
* 共分散の実現値
$$
\hat{\sigma}_{xy}=\frac{1}{n-1}\sum_{i=1}^{n}\left(x_i-\bar{x}\right)\left(y_i-\bar{y}\right)
$$
(注意)
* 分母は $n-1$であり,これにより$\hat{\sigma}_X$は母集団共分散の不偏推定量となる。
---
**標本相関係数**
* 確率変数の相関係数
$$r_{XY}=\dfrac{\hat{\sigma}_{XY}}{\hat{\sigma}_X\cdot\hat{\sigma}_Y}$$
* 相関係数の実現値
$$r_{xy}=\dfrac{\hat{\sigma}_{xy}}{\hat{\sigma}_x\cdot\hat{\sigma}_y}$$
(注意)
* $r_{XY}$は母集団相関係数の不偏推定量では**ない**(直感的に、不偏推定量の非線形変換になるため)
| github_jupyter |
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.