code stringlengths 38 801k | repo_path stringlengths 6 263 |
|---|---|
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# language: python
# name: python3
# ---
# ## Data Cleaning of the 'dsm-beuth-edl-demodata-dirty' data set
import pandas as pd

# Read the remote csv file which is cleaned in the following steps.
df = pd.read_csv(
    'https://raw.githubusercontent.com/edlich/eternalrepo/master/DS-WAHLFACH/dsm-beuth-edl-demodata-dirty.csv',
)
df
# ### Step #1: Calculate a summary on missing values
# First of all, we will analyze the data set for missing values which result in NaN values
df.isnull()
# The number of NaN occurrences per column are as follows:
nans_per_column = df.isnull().sum()
nans_per_column
# Hence, we have a total of NaN occurrences:
# FIX: the original iterated nans_per_column with Series.iteritems(), which was
# removed in pandas 2.0; Series.sum() is the idiomatic (and faster) total.
total_nans = int(nans_per_column.sum())
total_nans
# ## Task 2: Cleaning values
# ### Task 2.1: Clean rows that contain NaN values
# Rows which contain just NaN values can be removed:
df2 = df.dropna()
df2
# ### Task 2.2: Clean ages given as text
# Some data rows might have words set as ages instead of numbers, which are invalid, because we can't determine the real age. Hence, we're going to filter out those rows as follows:
# +
import re
REGEX = re.compile('^[-+]?([0-9]+)$')  # regular expression matching integers (negative AND positive)
df3 = df2[df2['age'].astype(str).str.match(REGEX)]
df3
# -
# ### Task 2.3: Clean invalid ages
# Some ages are not reliable, like people who already have an email address but are under 6 years old. To make this data set more trustful, we're going to define a minimum age of 10 years which should be plausible. Furthermore, some given ages might have negative values which should be "normalized" by multiplying them by -1.
# The regex filter above guarantees every remaining age is an integer string,
# so the cast is safe; .abs() normalizes negative ages in one step.
df4 = df3.copy()
df4['age'] = df4['age'].astype(int).abs()
df4
# ### Task 2.4: Clean duplicates
# Some persons with the same name and the same e-mail address exist multiple times, hence all duplicates can be removed.
# BUG FIX: drop_duplicates() returns a new DataFrame; the original discarded
# the result, so the duplicates were never actually removed.
df5 = df4.drop_duplicates(subset=['full_name', 'first_name', 'last_name', 'email'], keep='first')
df5
# ### Task 2.5: Drop unnecessary columns
# The column 'id' is not necessary any longer, because its values have gaps after the cleaning process. But pandas already created its own index column, so we can get rid of the previous broken id column:
df6 = df5.drop(columns=['id'])
df6
| assessment_06/clean_demodata-dirty.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# language: python
# name: python3
# ---
# + [markdown] toc=true
# <h1>Índice<span class="tocSkip"></span></h1>
# <div class="toc"><ul class="toc-item"><li><span><a href="#Justificativa" data-toc-modified-id="Justificativa-1"><span class="toc-item-num">1 </span>Justificativa</a></span></li><li><span><a href="#Testes-estatísticos" data-toc-modified-id="Testes-estatísticos-2"><span class="toc-item-num">2 </span>Testes estatísticos</a></span><ul class="toc-item"><li><span><a href="#Cohen's-D" data-toc-modified-id="Cohen's-D-2.1"><span class="toc-item-num">2.1 </span>Cohen's D</a></span></li></ul></li><li><span><a href="#Tentando-inferir-causalidade" data-toc-modified-id="Tentando-inferir-causalidade-3"><span class="toc-item-num">3 </span>Tentando inferir causalidade</a></span><ul class="toc-item"><li><span><a href="#Regressões-lineares" data-toc-modified-id="Regressões-lineares-3.1"><span class="toc-item-num">3.1 </span>Regressões lineares</a></span></li><li><span><a href="#Testes-pareados" data-toc-modified-id="Testes-pareados-3.2"><span class="toc-item-num">3.2 </span>Testes pareados</a></span></li></ul></li></ul></div>
# -
# # Justificativa
#
# A literatura indica que o fator mais importante para o desempenho das escolas é o nível sócio econômico dos alunos. Estamos pressupondo que escolas próximas possuem alunos de nível sócio econômico próximo, mas isso precisa ser testado. Usei os dados do [INSE](http://portal.inep.gov.br/web/guest/indicadores-educacionais) para medir qual era o nível sócio econômico dos alunos de cada escola em 2015.
# +
import sen_plots as sen
import pandas as pd
import numpy as np
from scipy.stats import ttest_ind as ttest
# -
# 2015 socio-economic level (INSE) per school, keyed by the INEP school code.
inse = pd.read_excel("INSE_2015.xlsx")
inse.rename(columns={"CO_ESCOLA" : "cod_inep"}, inplace=True)
inse.head()
# import the 3 datasets whose INSE I want to compare
ruim_pra_bom = pd.read_csv("referencias_ruim_bom.csv")
pessimo_pra_bom = pd.read_csv("referencias_pessimo_bom.csv")
risco = pd.read_csv("risco_map.csv")
ruim_pra_bom["tipo_especifico"] = "Ruim para bom"
pessimo_pra_bom["tipo_especifico"] = "Muito ruim para bom"
risco["tipo_especifico"] = "Desempenho abaixo\ndo esperado"
# FIX: DataFrame.append() was deprecated and removed in pandas 2.0;
# pd.concat preserves the same index-keeping behaviour.
referencias = pd.concat([ruim_pra_bom, pessimo_pra_bom])
referencias.head()
# Attach each school's name, INSE score and INSE classification.
referencias = pd.merge(referencias, inse[["cod_inep", "NOME_ESCOLA", "INSE_VALOR_ABSOLUTO", "INSE_CLASSIFICACAO"]], how = "left", on = "cod_inep")
risco = pd.merge(risco, inse[["cod_inep", "NOME_ESCOLA", "INSE_VALOR_ABSOLUTO", "INSE_CLASSIFICACAO"]], how="left", on="cod_inep")
referencias.INSE_VALOR_ABSOLUTO.describe()
risco.INSE_VALOR_ABSOLUTO.describe()
risco["tipo"] = "Escolas com desempenho abaixo do esperado"
referencias["tipo"] = "Escolas-referência"
# FIX: same append() -> concat() replacement as above.
df = pd.concat([risco, referencias])
df.to_csv("risco_referencia_inse.csv", index = False)
# +
# Boxplot comparing the two broad groups (below-expected vs. reference schools)
# on their 2015 school-level INSE score; figure saved to inse_op1.png.
df = pd.read_csv("risco_referencia_inse.csv")
sen.sen_boxplot(x = "tipo", y = "INSE_VALOR_ABSOLUTO", y_label = "INSE (2015) médio da escola", x_label = " ",
                plot_title = "Comparação do nível sócio-econômico das escolas selecionadas",
                palette = {"Escolas com desempenho abaixo do esperado" : "indianred",
                           "Escolas-referência" : "skyblue"},
                data = df, output_path = "inse_op1.png")
# +
# Same comparison, but split by the finer-grained group label
# (tipo_especifico); figure saved to inse_op2.png.
df = pd.read_csv("risco_referencia_inse.csv")
sen.sen_boxplot(x = "tipo_especifico", y = "INSE_VALOR_ABSOLUTO", y_label = "INSE (2015) médio da escola", x_label = " ",
                plot_title = "Comparação do nível sócio-econômico das escolas selecionadas",
                palette = {"Desempenho abaixo\ndo esperado" : "indianred",
                           "Ruim para bom" : "skyblue",
                           "Muito ruim para bom" : "lightblue"},
                data = df, output_path = "inse_op2.png")
# -
# # Testes estatísticos
# ## Cohen's D
#
# Minha métrica preferida de tamanho de efeito é o Cohen's D, mas aparentemente não tem nenhuma implementação canônica dele. Vou usar a que eu encontrei [nesse site](https://machinelearningmastery.com/effect-size-measures-in-python/).
# +
from numpy.random import randn
from numpy.random import seed
from numpy import mean
from numpy import var
from math import sqrt

# function to calculate Cohen's d for independent samples
def cohend(d1, d2):
    """Cohen's d effect size for two independent samples, using the pooled SD."""
    n1 = len(d1)
    n2 = len(d2)
    var1 = var(d1, ddof=1)
    var2 = var(d2, ddof=1)
    # pooled standard deviation of the two samples
    pooled_sd = sqrt(((n1 - 1) * var1 + (n2 - 1) * var2) / (n1 + n2 - 2))
    # standardized difference of the sample means
    return (mean(d1) - mean(d2)) / pooled_sd
# -
# All reference schools vs. below-expected ("risk") schools:
# Welch-style t-test (NaNs omitted) followed by the Cohen's d effect size.
ttest(risco["INSE_VALOR_ABSOLUTO"], referencias["INSE_VALOR_ABSOLUTO"], nan_policy="omit")
cohend(referencias["INSE_VALOR_ABSOLUTO"], risco["INSE_VALOR_ABSOLUTO"])
# Only the "very bad to good" reference schools vs. below-expected schools.
ttest(risco["INSE_VALOR_ABSOLUTO"], referencias.query("tipo_especifico == 'Muito ruim para bom'")["INSE_VALOR_ABSOLUTO"], nan_policy="omit")
cohend(referencias.query("tipo_especifico == 'Muito ruim para bom'")["INSE_VALOR_ABSOLUTO"], risco["INSE_VALOR_ABSOLUTO"])
# # Tentando inferir causalidade
#
# Sabemos que existe uma diferença significativa entre os níveis sócio econômicos dos 2 grupos. Mas até que ponto essa diferença no INSE é capaz de explicar a diferença no IDEB? Será que resta algum efeito que pode ser atribuído às práticas de gestão? Esses testes buscam encontrar uma resposta para essa pergunta.
# ## Regressões lineares
# Get the 2017 IDEB grade to serve as the dependent variable.
ideb = pd.read_csv("./pr-educacao/data/output/ideb_merged_kepler.csv")
# "ano" appears to start with a 4-digit year — TODO confirm the format.
ideb["ano_true"] = ideb["ano"].apply(lambda x: int(x[0:4]))
ideb = ideb.query("ano_true == 2017").copy()
nota_ideb = ideb[["cod_inep", "ideb"]]
df = pd.merge(df, nota_ideb, how = "left", on = "cod_inep")
# Schools without an INSE score cannot be used in the regressions below.
df.dropna(subset=["INSE_VALOR_ABSOLUTO"], inplace = True)
# Binary indicator: 1 for reference schools, 0 for below-expected schools.
df["tipo_bin"] = np.where(df["tipo"] == "Escolas-referência", 1, 0)
# +
from statsmodels.regression.linear_model import OLS as ols_py
from statsmodels.tools.tools import add_constant

# Multiple regression: ideb ~ const + tipo_bin + INSE.
ivs_multi = add_constant(df[["tipo_bin", "INSE_VALOR_ABSOLUTO"]])
modelo_multi = ols_py(df[["ideb"]], ivs_multi).fit()
print(modelo_multi.summary())
# -
# The problem with running the regression as set up above is that tipo_bin was
# created partially as a function of IDEB (see the histograms below), so it is
# not a truly independent variable. One strategy is to compare simple models
# using only INSE and only tipo_bin.
df.ideb.hist()
df.query("tipo_bin == 0").ideb.hist()
df.query("tipo_bin == 1").ideb.hist()
# +
# simple correlation
from scipy.stats import pearsonr
# NOTE(review): single-column DataFrames are passed here; newer SciPy versions
# expect 1-D inputs — confirm the installed version accepts this.
pearsonr(df[["ideb"]], df[["INSE_VALOR_ABSOLUTO"]])
# +
# Two single-predictor models, to compare against the multiple regression.
iv_inse = add_constant(df[["INSE_VALOR_ABSOLUTO"]])
iv_ideb = add_constant(df[["tipo_bin"]])
modelo_inse = ols_py(df[["ideb"]], iv_inse).fit()
modelo_tipo = ols_py(df[["ideb"]], iv_ideb).fit()
print(modelo_inse.summary())
print("-----------------------------------------------------------")
print(modelo_tipo.summary())
# -
# ## Testes pareados
#
# Nossa unidade de observação, na verdade, não deveria ser uma escola, mas sim um par de escolas. Abaixo, tento fazer as análises levando em consideração o delta de INSE e o delta de IDEB para cada par de escolas. Isso é importante: sabemos que o INSE faz a diferença no IDEB geral, mas a pergunta é se ele consegue explicar as diferenças na performance dentro de cada par.
# Pairs of nearest schools (one below-expected, one reference each).
pairs = pd.read_csv("sponsors_mais_proximos.csv")
pairs.head()
pairs.shape
# +
# Build INSE lookup tables for each side of the pair.
# NOTE(review): assigning .columns on a sliced frame may raise a
# SettingWithCopyWarning on some pandas versions — consider .copy().
inse_risco = inse[["cod_inep", "INSE_VALOR_ABSOLUTO"]]
inse_risco.columns = ["cod_inep_risco","inse_risco"]
inse_ref = inse[["cod_inep", "INSE_VALOR_ABSOLUTO"]]
inse_ref.columns = ["cod_inep_referencia","inse_referencia"]
# -
pairs = pd.merge(pairs, inse_risco, how = "left", on = "cod_inep_risco")
pairs = pd.merge(pairs, inse_ref, how = "left", on = "cod_inep_referencia")
# compute the deltas (reference minus risk) within each pair
pairs["delta_inse"] = pairs["inse_referencia"] - pairs["inse_risco"]
pairs["delta_ideb"] = pairs["ideb_referencia"] - pairs["ideb_risco"]
pairs["delta_inse"].describe()
pairs["delta_inse"].hist()
pairs["delta_ideb"].describe()
pairs["delta_ideb"].hist()
# Pairs with a missing INSE delta are inspected, then dropped.
pairs[pairs["delta_inse"].isnull()]
clean_pairs = pairs.dropna(subset = ["delta_inse"])
# +
import seaborn as sns
import matplotlib.pyplot as plt

# Scatter + regression of delta IDEB on delta INSE across pairs.
# NOTE(review): positional x/y arguments to regplot are keyword-only in
# seaborn >= 0.12 — confirm the pinned seaborn version.
plt.figure(figsize = sen.aspect_ratio_locker([16, 9], 0.6))
inse_plot = sns.regplot("delta_inse", "delta_ideb", data = clean_pairs)
plt.title("Correlação entre as diferenças do IDEB (2017) e do INSE (2015)\npara cada par de escolas mais próximas")
plt.xlabel("$INSE_{referência} - INSE_{desempenho\,abaixo\,do\,esperado}$", fontsize = 12)
plt.ylabel("$IDEB_{referência} - IDEB_{desempenh\,abaixo\,do\,esperado}$", fontsize = 12)
inse_plot.get_figure().savefig("delta_inse.png", dpi = 600)
# -
pearsonr(clean_pairs[["delta_ideb"]], clean_pairs[["delta_inse"]])
# +
# OLS of delta IDEB on delta INSE: does INSE explain within-pair differences?
X = add_constant(clean_pairs[["delta_inse"]])
modelo_pairs = ols_py(clean_pairs[["delta_ideb"]], X).fit()
print(modelo_pairs.summary())
# -
# Testing the assumption that physical distance correlates with INSE distance.
pairs.head()
sns.regplot("distancia", "delta_inse", data = clean_pairs.query("distancia < 4000"))
# +
multi_iv = add_constant(clean_pairs[["distancia", "delta_inse"]])
modelo_ze = ols_py(clean_pairs[["delta_ideb"]], multi_iv).fit()
print(modelo_ze.summary())
| notebooks/base codes/2_Nível sócio econômico dos pares de escolas.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .jl
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Julia 1.0.0
# language: julia
# name: julia-1.0
# ---
using SpecialFunctions, LinearAlgebra, PyPlot

# Current working directory (where Hello.png is expected to live).
pwd()
# Load the image and keep only the first (red) channel as a Float matrix.
# NOTE(review): imread is re-exported by PyPlot (matplotlib) — confirm.
X=imread("Hello.png");
A=float(X[:,:,1]);
size(A)
maximum(A)
# Low-rank image compression via the singular value decomposition.
U,s,V = svd(A);
semilogy(s,"o")
# Rank-k approximation built from the top k singular triplets.
Ak(k) = U[:,1:k]*Diagonal(s[1:k])*V[:,1:k]'
imshow(Ak(8),cmap="gray")
# Relative error of the rank-8 approximation, two different ways.
norm(A-Ak(8))/norm(A)
(norm(A)-norm(Ak(8)))/norm(A)
# Least-squares fit of f(x)=1/x on [1, 2] in the basis {exp, sin, gamma}.
m = 1000
f = x->1/x
g = [ exp, sin, gamma ]
x = 1 .+ (0:m-1)/(m-1)
A = [ g[j](x[i]) for i=1:m, j=1:3 ]
size(A)
plot(x,A)
b = f.(x)
c = A\b
plot(x,A*c-b)
# Same fit on (0, 1]; the basis behaves differently near 0.
m = 1000
f = x->1/x
g = [ exp, sin, gamma ]
x = (1:m)/(m)
A = [ g[j](x[i]) for i=1:m, j=1:3 ]
b = f.(x)
c = A\b
plot(x,b-A*c)
# NOTE(review): the two bare numbers below look like pasted REPL output rather
# than intentional code; they simply evaluate to themselves.
0.6261437336323719
-1.8182329520772051
| daily/daily09-25.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# language: python
# name: python3
# ---
# # Part 1: Stock Data Analyzation and Preprocessing
# This jupyter notebook is the first part of the series of notebooks required.
# It deals with data collection, filtering and cleaning techniques using the Pandas and Numpy libraries of Python Programming language.
# #### NOTE: If you want to directly skip to part 4: Modelling building and Evaluation please skip to the 3rd Notebook.
# All the stock data found in this project has been directly retrieved from finance.yahoo.com
# Please make sure you clone the entire library and all the path variables are rectified to avoid errors.
# ### Walkthrough:
# ##### 1. Importing the libraries, and reading the dataset(.csv):
# Importing the pandas and numpy library. Scikit-learn is not required in this part of the series.
# ##### 2. Clearing all the columns with Null Values:
# This is important as the stock exchange is closed during public holidays, and there might also be days when a certain column's data wasn't fed into the dataset at Yahoo Finance, as they aren't 100% reliable. We can do this using Numpy and Pandas.
# ##### 3. Correcting the data type for the date values:
# It is important to convert the dates to a datetime variable and normalize it to 00:00:00 UTC to keep things simple and free from complications. This can be done using the to_datetime function in Pandas.
# ##### 4. Sorting and Exporting:
# The final section in this notebook involves exporting our data after we have sorted them according to the increasing order of our dates.
#
#
# +
# Import libraries
import os
import sys
import numpy as np
import pandas as pd
import matplotlib.pyplot as plt
# NOTE(review): the scikit-learn imports below are unused in this notebook
# (the walkthrough above says scikit-learn is not required in this part);
# kept in case later notebooks were copied from this template.
from sklearn.ensemble import RandomForestClassifier
from sklearn.model_selection import train_test_split
from sklearn.model_selection import RandomizedSearchCV
from sklearn.metrics import plot_roc_curve
from sklearn.metrics import accuracy_score, classification_report
# -
# ### Reading the CSV data from Yahoo Finance
data = pd.read_csv('BSESN.csv')
data
# ### Clearing all the columns with NULL values or lack of data
#
# This is important as the stock exchange is closed during public holidays, and there might also be days when a certain column's data wasn't fed into the dataset at Yahoo Finance, as they aren't 100% reliable. We can do this using Numpy and Pandas.
data = data.dropna()
na_data = np.where(data.isna().any(axis=1))
na_data
# A volume of 0 means no trading was recorded; treat those rows as missing.
data['Volume'] = np.where(data['Volume']==0, np.nan, data['Volume'])
data
print(data.dropna())
# Erase all the rows where the volume wasn't given
data = data.dropna()
data
# ### Correcting the data type for the date values
#
# It is important to convert the dates to a datetime variable to keep things simple and free from complications. This can be done using the to_datetime function in Pandas.
# making the date into a datetime format
data['Date'] = pd.to_datetime(data['Date'])
data
data.info()
# ### Sorting the data using date in ascending order
#
# BUG FIX: sort_values() returns a new DataFrame; the original discarded the
# result, so the data was never actually sorted.
data = data.sort_values(by = ['Date'])
data["symbol"]="BSESN"
data
data.columns = ['datetime','open', 'high', 'low', 'close','adj','volume','symbol']
data
data= data.drop(['adj'], axis=1)
data
data['change_in_price'] = data['close'].diff()
data
data = data[['symbol','datetime','close','high','low','open','volume', 'change_in_price']]
data
# BUG FIX: to_csv() returns None when given a path, so the original
# `print(result)` just printed "None"; confirm the write explicitly instead.
data.to_csv('price_data_pre_indicator.csv', index = False)
print('wrote price_data_pre_indicator.csv')
| Part01-stock-data-preprocessing.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: 'Python 3.9.7 64-bit (''tectosaur2'': conda)'
# language: python
# name: python3
# ---
import numpy as np
import sympy as sp
import scipy.spatial
from tectosaur2.mesh import build_interp_matrix, gauss_rule, refine_surfaces, concat_meshes, build_interpolator

# 6-point Gauss quadrature rule on [-1, 1]: nodes qx, weights qw.
qx, qw = gauss_rule(6)
# +
sp_t = sp.var('t')

def sp_line(start, end):
    # Symbolic parametrization of the segment start -> end over t in [-1, 1].
    t01 = (sp_t + 1) * 0.5
    xv = start[0] + t01 * (end[0] - start[0])
    yv = start[1] + t01 * (end[1] - start[1])
    return sp_t, xv, yv

# Two line surfaces meeting at (0, 0); refinement is driven by the control
# point placed at that shared corner.
A, B = refine_surfaces(
    [
        sp_line([0, 0], [10, -1]),
        sp_line([0, 0], [13, 0]),
    ],
    (qx, qw),
    control_points=[(0, 0, 1.0, 1.0)],
)
# +
# QBX (quadrature-by-expansion) setup: for each observation point, find the
# closest source panel and place an expansion center offset along the normal.
obs_pts = A.pts
safety_mode = True
combined_src = concat_meshes((A,B))
singularities= np.array([[0,0]])
if singularities is not None:
    singularity_tree = scipy.spatial.KDTree(
        np.asarray(singularities, dtype=np.float64)
    )

src_tree = scipy.spatial.KDTree(combined_src.pts)
closest_dist, closest_idx = src_tree.query(obs_pts)
closest_panel_length = combined_src.panel_length[
    closest_idx // combined_src.panel_order
]

# Every observation point uses QBX here.
use_qbx = np.ones((obs_pts.shape[0]), dtype=bool)
n_qbx = obs_pts.shape[0]
qbx_obs_pts = obs_pts[use_qbx]
qbx_src_pt_indices = closest_idx[use_qbx]
qbx_closest_pts = combined_src.pts[qbx_src_pt_indices]
qbx_normals = combined_src.normals[qbx_src_pt_indices]
qbx_panel_L = closest_panel_length[use_qbx]

# step 3: find expansion centers
# Initial expansion radius: half the closest panel's length.
exp_rs = qbx_panel_L * 0.5
# The sign of this dot product picks which side of the surface to expand on.
direction_dot = (
    np.sum(qbx_normals * (qbx_obs_pts - qbx_closest_pts), axis=1) / exp_rs
)
direction = np.sign(direction_dot)
on_surface = np.abs(direction) < 1e-13
# TODO: it would be possible to implement a limit_direction='best'
# option that chooses the side that allows the expansion point to be
# further from the source surfaces and then returns the side used. then,
# external knowledge of the integral equation could be used to handle
# the jump relation and gather the value on the side the user cares
# about
direction[on_surface] = 1.0

# NOTE(review): with the violation handling below commented out, this loop
# always exits on its first iteration via the unconditional break.
for j in range(30):
    which_violations = np.zeros(n_qbx, dtype=bool)
    exp_centers = (
        qbx_obs_pts + direction[:, None] * qbx_normals * exp_rs[:, None]
    )
    ## flip the question. if the distance from a panel center to an expansion center is less than half the panel length then it's
    dist_to_nearest_panel, nearest_idx = src_tree.query(exp_centers, k=2)
    ## TODO: this can be decreased from 4.0 to ~2.0 once the distance to
    # nearest panel algorithm is improved.
    nearby_surface_ratio = 1.5 if safety_mode else 1.0001
    which_violations = dist_to_nearest_panel[
        :, 1
    ] < nearby_surface_ratio * np.abs(exp_rs)
    nearest_not_owner = np.where(nearest_idx[:, 0] != qbx_src_pt_indices)[0]
    break
    # which_violations[nearest_not_owner] = True
    # # if singularities is not None:
    # #     singularity_dist_ratio = 3.0
    # #     dist_to_singularity, _ = singularity_tree.query(exp_centers)
    # #     which_violations |= (
    # #         dist_to_singularity <= singularity_dist_ratio * np.abs(exp_rs)
    # #     )
    # if not which_violations.any():
    #     break
    # exp_rs[which_violations] *= 0.75
# +
from tectosaur2._ext import identify_nearfield_panels

# Source points within (3 + 0.5) panel lengths of each observation point.
qbx_panel_src_pts = src_tree.query_ball_point(
    qbx_obs_pts, (3 + 0.5) * qbx_panel_L, return_sorted=True
)
(
    qbx_panels,
    qbx_panel_starts,
    qbx_panel_obs_pts,
    qbx_panel_obs_pt_starts,
) = identify_nearfield_panels(
    n_qbx,
    qbx_panel_src_pts,
    combined_src.n_panels,
    combined_src.panel_order,
)
# -
# Singularities within 4.5 panel lengths of each observation point.
# nearby_singularities_starts holds CSR-style offsets (cumulative lengths)
# into a flattened view of the per-point candidate lists.
nearby_singularities = singularity_tree.query_ball_point(qbx_obs_pts, 4.5*qbx_panel_L)
nearby_singularities_starts = np.zeros(n_qbx+1, dtype=int)
nearby_singularities_starts[1:] = np.cumsum([len(ns) for ns in nearby_singularities])

# Interpolation matrix from the panel quadrature nodes onto 1000 dense points,
# used below to measure distances to panel geometry.
interpolator = build_interpolator(combined_src.qx)
Im = build_interp_matrix(interpolator, np.linspace(-1, 1, 1000))
# +
# Scratch check of a point/segment distance identity.
a = np.array([0,0])
b = np.array([1.1,0.1])
c = np.array([1.2,0.05])
R = np.linalg.norm(b-a)
v = (b-a) / R;
d = np.linalg.norm(b-c)
S = np.linalg.norm(c-a)
S**2 + R**2 + 2*R*(a-c).dot(v)
# -
# -
# Shrink each expansion radius until the center is far enough from both the
# nearby panel geometry and the nearby singularities (at most 30 halvings).
for i in range(n_qbx):
    for j in range(30):
        violation = False
        # Distance from this expansion center to densely interpolated panel
        # geometry.
        # NOTE(review): `qbx_panels[:6]` takes the first six entries of the
        # global nearfield panel list, not the panels belonging to point i
        # (which would use qbx_panel_starts[i]); kept as-is pending
        # confirmation of intent.
        for p in qbx_panels[:6]:
            nq = combined_src.panel_order
            panel_pts = combined_src.pts[p * nq : (p + 1) * nq]
            interp_pts = Im.dot(panel_pts)
            dist = np.linalg.norm(exp_centers[i, None] - interp_pts, axis=1)
            if np.min(dist) < 2 * exp_rs[i]:
                violation = True
                break
        # BUG FIX: the original sliced the list-of-lists
        # `nearby_singularities` with flattened (cumulative) offsets and then
        # measured `sing_dist` against `interp_pts` — a leftover from the
        # panel loop — leaving `sing_locs` unused. Use point i's own candidate
        # list and guard against it being empty.
        sing_idx = np.asarray(nearby_singularities[i], dtype=int)
        if sing_idx.size > 0:
            sing_locs = singularities[sing_idx]
            sing_dist = np.linalg.norm(exp_centers[i, None] - sing_locs, axis=1)
            if np.min(sing_dist) < 3 * exp_rs[i]:
                violation = True
        print(i, violation, j, exp_rs[i])
        if violation:
            # Shrink the radius and move the center closer to the surface.
            exp_rs[i] *= 0.75
            exp_centers[i] = (
                qbx_obs_pts[i]
                + direction[i, None] * qbx_normals[i] * exp_rs[i, None]
            )
        else:
            break
import matplotlib.pyplot as plt

# Visual check: the two surfaces (red), the expansion centers (black dots)
# and a circle of radius exp_rs around each center.
cs = exp_centers
rs = exp_rs
for s in [A,B]:
    plt.plot(s.pts[:, 0], s.pts[:, 1], "r-o")
plt.plot(cs[:, 0], cs[:, 1], "k.", markersize=10)
for i in range(cs.shape[0]):
    plt.gca().add_patch(plt.Circle(cs[i], rs[i], color="k", fill=False))
plt.axis("scaled")
# Zoom into the corner region near the shared endpoint.
plt.xlim([-0.02,0.2])
plt.ylim([-0.03,0.02])
plt.show()
| experiments/acute_expansions.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# language: python
# name: python3
# ---
# ## Rating Table to Scorecard
# **Authors**: <NAME>
#
# In this tutorial, you are going to learn how to take a rating table from a `Generalized Additive 2 Model`, also known as `GA2M` and transform it into a scorecard.
#
# **There are a few things that you need for this exercise:**
#
# 1. Your DataRobot API Key
# 2. A trained `Generalized Additive 2 Model` (on any project, with any dataset)
# ### Import Libraries
import csv
import pandas as pd
import numpy as np
import datarobot as dr
# ### Connect to DataRobot
# Replace the placeholders below with your own API token and endpoint URL.
dr.Client(token = 'YOUR_API_TOKEN',
          endpoint = 'YOUR_ENDPOINT')
# ### Define Project and Model ID's.
#
# The project and model ID's can be found in the url (when you use the UI to navigate). Make sure that the GA2M model does not have **any text features as input**.
pid = 'YOUR_PROJECT_ID'
mid = 'YOUR_MODEL_ID'
# ### Define Functions that download and transform the rating table to scorecard
# +
def download_rating_table(pid, mid):
    """Download the rating table for model `mid` in project `pid`.

    :param pid: DataRobot project id.
    :param mid: DataRobot model id (must be a GA2M with a rating table).
    :returns: local path of the downloaded CSV file.
    """
    project = dr.Project.get(pid)
    # BUG FIX: the original had a duplicated assignment
    # (`rating_tables = rating_tables = project.get_rating_tables()`).
    rating_tables = project.get_rating_tables()
    rating_table = [rt for rt in rating_tables if rt.model_id == mid][0]
    # Build the destination path once and reuse it for download and return.
    filepath = './my_rating_table_' + mid + '.csv'
    rating_table.download(filepath)
    return filepath
def csv_after_emptylines(filepath, bl_group_n=1, dtype=str):
    """ Read a .CSV into a Pandas DataFrame, but only after at least one blank line has been skipped.
    bl_group_n is the expected number of distinct blocks of blank lines (of any number of rows each) to skip before reading data.
    The first non-blank row after the bl_group_n-th blank block becomes the
    header; every row after that becomes data.
    NB: E.g. pd.read_csv(filepath, skiprows=[0, 1, 2]) works if you know the number of rows to be skipped. Use this function if you have a variable / unknown number of filled rows (to be skipped / ignored) before the empty rows.
    """
    headers = None
    data_rows = []
    pending_blanks = 0   # blank lines seen since the last non-blank row
    groups_seen = 0      # completed blank-line groups so far
    with open(filepath, newline='') as handle:
        for row in csv.reader(handle):
            if groups_seen >= bl_group_n:
                # Past the header: everything (even blank rows) is data.
                data_rows.append(row)
                continue
            if not row:
                pending_blanks += 1
            elif pending_blanks:
                # First non-blank row after a blank block: new header.
                groups_seen += 1
                pending_blanks = 0
                headers = row
            # non-blank rows before any blank line are ignored
    return pd.DataFrame(data=data_rows, columns=headers, dtype=dtype)
def csv_until_emptyline(filepath, dtype=str):
    """ Read a .CSV into a Pandas DataFrame until a blank line is found, then stop.

    The first row of the file is treated as a header row and skipped; the
    returned frame keeps default integer column labels (extract_intercept
    relies on this behaviour).
    NOTE(review): the `dtype` parameter was never forwarded to the DataFrame
    in the original either; kept for interface compatibility.
    """
    # BUG FIX (dead code): the original captured the first row into a
    # `headers` variable that was never used; the row is still skipped to
    # preserve the existing behaviour.
    with open(filepath, newline='') as f:
        contents = []
        r = csv.reader(f)
        for i, l in enumerate(r):
            if not l:
                break
            if i == 0:
                continue
            contents.append(l)
    return pd.DataFrame(data=contents)
def extract_intercept(filepath):
    """Return the intercept stored in the pre-blank-line block of the file.

    Reads the rows before the first blank line, splits each "name: value"
    entry, and returns the numeric value of the 'Intercept' row.
    """
    head = csv_until_emptyline(filepath)
    head.rename(columns={head.columns[0]: "raw"}, inplace=True)
    head[['name', 'value']] = head['raw'].str.split(":", expand=True)
    raw_value = head.loc[head.name == 'Intercept', 'value'].values[0]
    return pd.to_numeric(raw_value)
def invert_coefficients(intercept, rating_table):
    """Flip the sign of the intercept and of every coefficient (in place).

    This ensures high-risk profiles receive low scores: we model log-odds,
    and negating the coefficients turns log(p/(1-p)) into log((1-p)/p),
    i.e. the log-odds of a non-risky profile.
    """
    flipped = rating_table['Coefficient'].astype(float) * -1
    rating_table.loc[:, 'Coefficient'] = flipped
    return -intercept, rating_table
def convert_rating_table_to_scores(intercept, rating_table, min_score=300, max_score=850):
    """Rescale rating-table coefficients into scorecard points.

    Each feature's coefficients are shifted so their minimum is zero, the
    shifted minima are folded into the baseline, and the whole range is
    mapped linearly onto [min_score, max_score]. Returns the baseline score
    and the scorecard (without the raw coefficient columns).
    """
    rating_table['Rel_Coefficient'] = rating_table['Coefficient']
    baseline = intercept
    min_sum_coef = 0
    max_sum_coef = 0
    for feat in rating_table['Feature Name'].unique():
        feat_mask = rating_table['Feature Name'] == feat
        min_feat_coef = rating_table.loc[feat_mask]['Coefficient'].min()
        print('Minimum coefficient for feature ' + feat + ' ' + str(min_feat_coef))
        # Shift this feature's coefficients so the smallest becomes zero.
        rating_table.loc[feat_mask, 'Rel_Coefficient'] = rating_table['Coefficient'] - min_feat_coef
        baseline += min_feat_coef
        min_sum_coef = min_sum_coef + rating_table.loc[feat_mask]['Rel_Coefficient'].min()
        max_sum_coef = max_sum_coef + rating_table.loc[feat_mask]['Rel_Coefficient'].max()
    min_sum_coef = min_sum_coef + baseline
    max_sum_coef = max_sum_coef + baseline
    # Linear map from the coefficient range onto the score range.
    rating_table.loc[:, 'Variable Score'] = rating_table['Rel_Coefficient']*((max_score-min_score)/(max_sum_coef - min_sum_coef))
    baseline_score = (((baseline-min_sum_coef)/(max_sum_coef-min_sum_coef))*(max_score-min_score))+min_score
    return baseline_score, rating_table.drop(columns=['Coefficient','Rel_Coefficient'])
def get_scorecard(pid, mid, min_score=300, max_score=850):
    """Download the rating table for (pid, mid) and turn it into a scorecard.

    Pipeline: download CSV -> parse table and intercept -> invert signs ->
    rescale to points. Returns (intercept score, scorecard frame).
    """
    table_path = download_rating_table(pid, mid)
    raw_table = csv_after_emptylines(table_path)
    raw_intercept = extract_intercept(table_path)
    intercept, table = invert_coefficients(raw_intercept, raw_table)
    return convert_rating_table_to_scores(intercept, table, min_score, max_score)
def get_score_from_prob(prob, min_score, max_score, min_sum_coef, max_sum_coef):
    """Map a probability onto the scorecard's scale (useful for thresholds).

    Converts the probability to log-odds, then linearly rescales the
    log-odds range [min_sum_coef, max_sum_coef] onto [min_score, max_score].
    """
    log_odds = np.log(prob / (1 - prob))
    normalized = (log_odds - min_sum_coef) / (max_sum_coef - min_sum_coef)
    return normalized * (max_score - min_score) + min_score
# -
# ### Create Scorecard and Intercept Score
# Runs the full pipeline for the configured project/model: download rating
# table -> invert signs -> rescale to scorecard points in [300, 850].
intercept_score, scorecard = get_scorecard(pid,mid, min_score=300, max_score=850)
print(intercept_score)
scorecard
| Rating Tables/Rating_Table_to_Scorecard.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# language: python
# name: python3
# ---
# + run_control={"frozen": false, "read_only": false} slideshow={"slide_type": "skip"}
#setup
data_dir='../../Data/Weather'
file_index='BBSBSBSB'
m='SNWD'
# + [markdown] run_control={"frozen": false, "read_only": false} slideshow={"slide_type": "slide"} variables={"m": "SNWD"}
# ## Reconstruction using top eigen-vectors
# For measurement = {{m}}
# + [markdown] run_control={"frozen": false, "read_only": false} slideshow={"slide_type": "skip"}
# ## Load the required libraries
# + run_control={"frozen": false, "read_only": false} slideshow={"slide_type": "skip"}
# Enable automiatic reload of libraries
# #%load_ext autoreload
# #%autoreload 2 # means that all modules are reloaded before every command
# + run_control={"frozen": false, "read_only": false} slideshow={"slide_type": "skip"}
# #%matplotlib inline
# %pylab inline
import numpy as np

# findspark locates the local Spark installation before pyspark is imported.
import findspark
findspark.init()

import sys
sys.path.append('./lib')

# Helper modules shipped alongside the notebook in ./lib.
from numpy_pack import packArray,unpackArray
from Eigen_decomp import Eigen_decomp
from YearPlotter import YearPlotter
from recon_plot import recon_plot
from import_modules import import_modules,modules
import_modules(modules)
from ipywidgets import interactive,widgets
# + run_control={"frozen": false, "read_only": false} slideshow={"slide_type": "skip"}
from pyspark import SparkContext
#sc.stop()
# Local Spark context with 3 worker threads; the ./lib helpers are shipped
# to the workers via pyFiles.
sc = SparkContext(master="local[3]",pyFiles=['lib/numpy_pack.py','lib/spark_PCA.py','lib/computeStats.py','lib/recon_plot.py','lib/Eigen_decomp.py'])
from pyspark import SparkContext
from pyspark.sql import *
sqlContext = SQLContext(sc)
# + [markdown] run_control={"frozen": false, "read_only": false} slideshow={"slide_type": "slide"}
# ## Read Statistics File
# + run_control={"frozen": false, "read_only": false} slideshow={"slide_type": "subslide"}
from pickle import load

# read the precomputed statistics (per-measurement mean, eigenvectors, ...)
filename=data_dir+'/STAT_%s.pickle'%file_index
STAT,STAT_Descriptions = load(open(filename,'rb'))
measurements=STAT.keys()
# NOTE(review): Python 2 print statement — this notebook targets a Python 2
# Spark kernel and will not run under Python 3 as-is.
print 'keys from STAT=',measurements
# + [markdown] run_control={"frozen": false, "read_only": false} slideshow={"slide_type": "slide"}
# ## Read data file into a spark DataFrame
# We focus on the snow-depth records, because the eigen-vectors for them make sense.
# + run_control={"frozen": false, "read_only": false} slideshow={"slide_type": "subslide"}
# read the weather data and keep only rows for the chosen measurement m
filename=data_dir+'/US_Weather_%s.parquet'%file_index
df_in=sqlContext.read.parquet(filename)
#filter in
df=df_in.filter(df_in.measurement==m)
df.show(5)
# + [markdown] run_control={"frozen": false, "read_only": false} slideshow={"slide_type": "slide"}
# ### Plot Reconstructions
#
# Construct approximations of a time series using the mean and the $k$ top eigen-vectors
# First, we plot the mean and the top $k$ eigenvectors
# + run_control={"frozen": false, "read_only": false} slideshow={"slide_type": "subslide"}
import pylab as plt

# Plot the mean (top) and the top-k eigenvectors (bottom) of the chosen
# measurement across the days of a year.
fig,axes=plt.subplots(2,1, sharex='col', sharey='row',figsize=(10,6));
k=3
EigVec=np.matrix(STAT[m]['eigvec'][:,:k])
Mean=STAT[m]['Mean']
YearPlotter().plot(Mean,fig,axes[0],label='Mean',title=m+' Mean')
YearPlotter().plot(EigVec,fig,axes[1],title=m+' Eigs',labels=['eig'+str(i+1) for i in range(k)])
# + run_control={"frozen": false, "read_only": false}
# Unpack the eigenvector matrix into a list of flat 1-D arrays (one per
# eigenvector); used by the decomposition below.
v=[np.array(EigVec[:,i]).flatten() for i in range(np.shape(EigVec)[1])]
# + [markdown] run_control={"frozen": false, "read_only": false} slideshow={"slide_type": "subslide"}
# ### plot the percent of residual variance on average
# + run_control={"frozen": false, "read_only": false}
# x=0 in the graphs below correspond to the fraction of the variance explained by the mean alone
# x=1,2,3,... are the residuals for eig1, eig1+eig2, eig1+eig2+eig3 ...
fig,ax=plt.subplots(1,1);
eigvals=STAT[m]['eigval']; eigvals/=sum(eigvals); cumvar=np.cumsum(eigvals); cumvar=100*np.insert(cumvar,0,0)
ax.plot(cumvar[:10]);
ax.grid();
ax.set_ylabel('Percent of variance explained')
ax.set_xlabel('number of eigenvectors')
ax.set_title('Percent of variance explained');
# + [markdown] run_control={"frozen": false, "read_only": false} slideshow={"slide_type": "slide"}
# ## Process whole dataframe to find best and worse residuals
# + [markdown] run_control={"frozen": false, "read_only": false} slideshow={"slide_type": "subslide"}
# ### Add to each row in the dataframe a residual values
# Residuals are after subtracting in sequence: the mean, the projection on the first eigen-vector the projection on the second eigen-vector etc.
#
# `decompose(row)` axtracts the series from the row, computes the residuals and constructs a new row that is reassembled into a dataframe.
#
# + code_folding=[] run_control={"frozen": false, "read_only": false}
def decompose(row):
    """compute residual and coefficients for decomposition
    :param row: SparkSQL Row that contains the measurements for a particular station, year and measurement.
    :returns: the input row with additional information from the eigen-decomposition.
    :rtype: SparkSQL Row
    Note that Decompose is designed to run inside a spark "map()" command.
    Mean and v are sent to the workers as local variables of "Decompose"
    """
    # Rebuild the float series from the packed float16 bytes stored in the row.
    Series=np.array(unpackArray(row.vector,np.float16),dtype=np.float64)
    recon=Eigen_decomp(None,Series,Mean,v);
    total_var,residuals,reductions,coeff=recon.compute_var_explained()
    # residuals/coeff/total_var appear to be pairs whose [1] element holds the
    # actual values — TODO confirm against Eigen_decomp.compute_var_explained.
    residuals=[float(r) for r in residuals[1]]
    coeff=[float(r) for r in coeff[1]]
    D=row.asDict()
    D['total_var']=float(total_var[1])
    # res_mean = residual after subtracting the mean; res_i = residual after
    # also removing the first i eigenvectors; coeff_i = coefficient of eig i.
    D['res_mean']=residuals[0]
    for i in range(1,len(residuals)):
        D['res_'+str(i)]=residuals[i]
        D['coeff_'+str(i)]=coeff[i-1]
    return Row(**D)
# + run_control={"frozen": false, "read_only": false}
# Apply decompose() to every row, adding total_var, res_mean, res_i and
# coeff_i columns, then rebuild a dataframe from the resulting RDD.
rdd2=df.rdd.map(decompose)
df2=sqlContext.createDataFrame(rdd2)
row,=df2.take(1)
#filter out vectors for which the mean is a worse approximation than zero.
print 'before filter',df2.count()
df3=df2.filter(df2.res_mean<1)
print 'after filter',df3.count()
# + run_control={"frozen": false, "read_only": false}
# Sort entries by increasing values of res_3
df3=df3.sort(df3.res_3,ascending=True)
# + code_folding=[] run_control={"frozen": false, "read_only": false}
def plot_decomp(row,Mean,v,fig=None,ax=None,Title=None,interactive=False):
    """Reconstruct one measurement series and plot it with an informative title.

    :param row: SparkSQL Row that contains the measurements for a particular station, year and measurement.
    :param Mean: mean vector of all measurements of the given type.
    :param v: eigen-vectors for the distribution of measurements.
    :param fig: matplotlib figure to draw into (optional).
    :param ax: matplotlib axis to draw into (optional).
    :param Title: explicit plot title; defaults to "station / year measurement".
    :param interactive: whether the plot is widget-driven.
    :returns: a plotter returned by recon_plot initialization
    :rtype: recon_plot
    """
    series = np.array(unpackArray(row.vector, np.float16), dtype=np.float64)
    # Fall back to a descriptive default title when none was supplied.
    label = Title if Title is not None else '%s / %d %s' % (row['station'], row['year'], row['measurement'])
    decomp = Eigen_decomp(range(1, 366), series, Mean, v)
    return recon_plot(decomp, year_axis=True, fig=fig, ax=ax, interactive=interactive, Title=label)
def plot_recon_grid(rows,column_n=4, row_n=3, figsize=(15,10)):
    """Plot a row_n x column_n grid of reconstruction plots.

    :param rows: data rows (as extracted from the measurements data-frame).
    :param column_n: number of grid columns.
    :param row_n: number of grid rows.
    :param figsize: overall figure size.
    :returns: None
    :rtype:
    """
    fig, axes = plt.subplots(row_n, column_n, sharex='col', sharey='row', figsize=figsize)
    for r in range(row_n):
        for c in range(column_n):
            # Consume `rows` in reading order: left-to-right, top-to-bottom.
            plot_decomp(rows[r * column_n + c], Mean, v,
                        fig=fig, ax=axes[r, c], interactive=False)
    return None
# + [markdown] run_control={"frozen": false, "read_only": false}
# #### Different things to try
# The best/worst rows in terms of res_mean,res_1, res_2, res_3
#
# The rows with the highest / lowest levels of coeff1, coeff2, coeff3, when the corresponding residue is small.
# + run_control={"frozen": false, "read_only": false}
df4=df3.filter(df3.res_2<0.4).sort(df3.coeff_2)
rows=df4.take(12)
df4.select('coeff_2','res_2').show(4)
# + run_control={"frozen": false, "read_only": false}
plot_recon_grid(rows)
# + run_control={"frozen": false, "read_only": false}
# df3.sort?
# + run_control={"frozen": false, "read_only": false}
df5=df3.filter(df3.res_2<0.4).sort(df3.coeff_2,ascending=False)
rows=df5.take(12)
df5.select('coeff_2','res_2').show(4)
# + run_control={"frozen": false, "read_only": false}
plot_recon_grid(rows)
# + [markdown] run_control={"frozen": false, "read_only": false} slideshow={"slide_type": "slide"}
# ## Interactive plot of reconstruction
#
# Following is an interactive widget which lets you change the coefficients of the eigen-vectors to see the effect on the approximation.
# The initial state of the sliders (in the middle) corresponds to the optimal setting. You can zero a positive coefficient by moving the slider all the way down, zero a negative coefficient by moving it all the way up.
# + run_control={"frozen": false, "read_only": false}
# Decompose a single row, report its normalized residual norms, then build the
# interactive widget (sliders = eigen-coefficients; middle = optimal values).
row=rows[0]
target=np.array(unpackArray(row.vector,np.float16),dtype=np.float64)
eigen_decomp=Eigen_decomp(None,target,Mean,v)
total_var,residuals,reductions,coeff=eigen_decomp.compute_var_explained()
res=residuals[1]  # [1] appears to hold the list of residual values — TODO confirm
print 'residual normalized norm after mean:',res[0]
print 'residual normalized norm after mean + top eigs:',res[1:]
plotter=recon_plot(eigen_decomp,year_axis=True,interactive=True)
display(plotter.get_Interactive())
# + [markdown] run_control={"frozen": false, "read_only": false}
# ### What is the distribution of the residuals and the coefficients?
#
# To answer this question we extract all of the values of `res_3` which is the residual variance after the Mean and the
# first three Eigen-vectors have been subtracted out. We rely here on the fact that `df3` is already sorted according to `res_3`
# + run_control={"frozen": false, "read_only": false}
# A function for plotting the empirical CDF of a given feature
def plot_CDF(feat, df=None):
    """Plot the empirical cumulative distribution of column `feat`.

    :param feat: name of a numeric column.
    :param df: Spark dataframe to read from; defaults to the global `df3`
        (kept as default for backward compatibility with existing calls).
    """
    if df is None:
        df = df3
    rows=df.select(feat).sort(feat).collect()
    vals=[r[feat] for r in rows]
    # Cumulative fraction: len(vals) evenly spaced points in [0, 1).
    P=np.arange(0,1,1./(len(vals)))
    # Shift values right by one (repeat the first, drop the last) so the curve
    # starts at P=0 while keeping len(vals) == len(P).
    vals=([vals[0]]+vals)[:-1]
    plot(vals,P)
    title('cumulative distribution of '+feat)
    # Fix: the y-axis shows a cumulative *fraction* (0..1), not a count.
    ylabel('cumulative fraction of instances')
    xlabel(feat)
    grid()
# + run_control={"frozen": false, "read_only": false}
plot_CDF('res_2')
# + run_control={"frozen": false, "read_only": false}
plot_CDF('coeff_2')
# + run_control={"frozen": false, "read_only": false}
# Persist the decomposed dataframe; remove any previous output first because
# parquet write fails on an existing directory by default.
filename=data_dir+'/decon_'+file_index+'_'+m+'.parquet'
# !rm -rf $filename
df3.write.parquet(filename)
# + run_control={"frozen": false, "read_only": false}
# !du -sh $data_dir/*.parquet
# + run_control={"frozen": false, "read_only": false}
| tutorials/spark-da-cse255/009_3_weather_analysis_reconstruction_SNWD.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# language: python
# name: python3
# ---
# + [markdown] azdata_cell_guid="1f608a1d-2436-4b48-80d4-5c4d2f8ca7d0"
# # Dynamics 365 Business Central Trouble Shooting Guide (TSG) - Web services
#
# This notebook contains Kusto queries that can help getting to the root cause of an issue with web services for an environment.
#
# Each section in the notebook contains links to relevant documentation from the performance tuning guide [aka.ms/bcperformance](https://aka.ms/bcperformance), telemetry documentation in [aka.ms/bctelemetry](https://aka.ms/bctelemetry), as well as Kusto queries that help dive into a specific area.
#
# NB! Some of the signal used in this notebook is only available in newer versions of Business Central, so check the version of your environment if some sections do not return any data. The signal documentation states in which version a given signal was introduced.
#
# **NB!** Telemetry for SOAP endpoints does not emit HTTP status code. So the sections that query for different values of HTTP status will not show results for these requests.
# + [markdown] azdata_cell_guid="f103fae9-cf6d-40f7-9062-11ce50691046"
# ## 1\. Get setup: Load up Python libraries and connect to Application Insights
#
# First you need to set the notebook Kernel to Python3, load the KQLmagic module (did you install it? Install instructions: https://github.com/microsoft/BCTech/tree/master/samples/AppInsights/TroubleShootingGuides) and connect to your Application Insights resource (get appid and appkey from the API access page in the Application Insights portal)
# + azdata_cell_guid="a253fa8e-6ac2-4722-a00a-1c52aedab4ed" tags=[]
# load the KQLmagic module
# %reload_ext Kqlmagic
# + azdata_cell_guid="0a7aed5f-11b3-43bf-b141-ae1a5d7adf3c"
# Connect to the Application Insights API
# #%kql appinsights://appid='<add app id from the Application Insights portal>';appkey='<add API key from the Application Insights portal>'
# %kql appinsights://appid='962fbf96-f15e-4d37-8f91-13cae96f4b3e';appkey='<KEY>'
# + [markdown] azdata_cell_guid="9ef1220c-d9cc-4552-9297-1428efcafb32"
# ## 2\. Define filters
#
# This workbook is designed for troubleshooting a single environment. Please provide values for aadTenantId and environmentName (or use a config file).
# + azdata_cell_guid="0a0785f7-a85e-4ccf-9020-732e1d4c058a" tags=["hide_input"]
# Analysis filters: AAD tenant id, environment name, extension id, and a date
# range. Values can come from an INI config file; when the file exists, its
# DEFAULT section takes precedence over the manually set values below.
# It is possible to leave environmentName blank (to analyze across all environments).
# config file name and directory (full path)
configFile = "c:/tmp/notebook.ini"
# Manually set filter values (used when the config file is absent or empty)
aadTenantId = "MyaaDtenantId"
environmentName = ""
#extensionId = "MyExtensionId"
extensionId = ""
# date filters for the analysis, in ISO 8601 (YYYY-MM-DD) format
startDate = "2020-11-20"
endDate = "2020-11-24"
# Do not edit this code section
import configparser
config = configparser.ConfigParser()
config.read(configFile)
if config.defaults():
    # Override each manually set value with the config-file value when present;
    # SectionProxy.get falls back to the current value otherwise.
    defaults = config['DEFAULT']
    aadTenantId = defaults.get('aadTenantId', aadTenantId)
    environmentName = defaults.get('environmentName', environmentName)
    extensionId = defaults.get('extensionId', extensionId)
    startDate = defaults.get('startDate', startDate)
    endDate = defaults.get('endDate', endDate)
print("Using these parameters for the analysis:")
print("----------------------------------------")
print("aadTenantId " + aadTenantId)
print("environmentName " + environmentName)
print("startDate " + startDate)
print("endDate " + endDate)
# + [markdown] azdata_cell_guid="5f9b698d-8a7e-4757-b27d-02f219d6c589"
# # Analyze web service usage
# Now you can run Kusto queries to look for possible root causes for issues about web services.
#
# Either click **Run All** above to run all sections, or scroll down to the type of analysis you want to do and manually run queries
# + [markdown] azdata_cell_guid="2f9c2d0d-df3c-482b-af58-48416a517117"
# ## Incoming Web service requests overview
#
# Performance tuning guide: [https://docs.microsoft.com/en-us/dynamics365/business-central/dev-itpro/performance/performance-developer#writing-efficient-web-services](https://docs.microsoft.com/en-us/dynamics365/business-central/dev-itpro/performance/performance-developer#writing-efficient-web-services)
#
# Incoming Web service telemetry docs: [https://docs.microsoft.com/en-us/dynamics365/business-central/dev-itpro/administration/telemetry-webservices-trace](https://docs.microsoft.com/en-us/dynamics365/business-central/dev-itpro/administration/telemetry-webservices-trace)
#
# KQL sample: [https://github.com/microsoft/BCTech/blob/master/samples/AppInsights/KQL/RawData/WebServiceCalls.kql](https://github.com/microsoft/BCTech/blob/master/samples/AppInsights/KQL/RawData/WebServiceCalls.kql)
# + azdata_cell_guid="a9e923e9-1d05-4acf-a230-4c5142bc3582" tags=[]
# %%kql
let _aadTenantId = aadTenantId;
let _environmentName = environmentName;
let _startDate = startDate;
let _endDate = endDate;
traces
| where 1==1
and timestamp >= todatetime(_startDate)
and timestamp <= todatetime(_endDate) + totimespan(24h) - totimespan(1ms)
and customDimensions.aadTenantId == _aadTenantId
and (_environmentName == '' or customDimensions.environmentName == _environmentName )
and customDimensions.eventId == 'RT0008'
| extend category = tostring( customDimensions.category )
| summarize request_count=count() by category, bin(timestamp, 1d)
| render timechart title= 'Number of incoming web service requests by category'
# + azdata_cell_guid="e4e56e1a-ab5d-427a-bc49-747e6ae34a75"
# %%kql
// Distribution of execution times of incoming web service requests, by category.
let _aadTenantId = aadTenantId;
let _environmentName = environmentName;
let _startDate = startDate;
let _endDate = endDate;
traces
| where 1==1
    and timestamp >= todatetime(_startDate)
    and timestamp <= todatetime(_endDate) + totimespan(24h) - totimespan(1ms)
    and customDimensions.aadTenantId == _aadTenantId
    and (_environmentName == '' or customDimensions.environmentName == _environmentName )
    and customDimensions.eventId == 'RT0008'
// Fix: the extend stage was missing its leading pipe '|', which makes the query invalid.
| extend category = tostring( customDimensions.category )
, executionTimeInSec = toreal(totimespan(customDimensions.serverExecutionTime))/10000 /1000 //the datatype for executionTime is timespan
| summarize count() by executionTime_sec = bin(executionTimeInSec, 10), category
| extend log_count = log10( count_ )
| order by category, executionTime_sec asc
| render columnchart with (ycolumns = log_count, ytitle='log(count)', series = category, title= 'Execution time (in seconds) of incoming ws requests by category' )
# + [markdown] azdata_cell_guid="2f7e604a-0d02-484e-9bcb-a6aa148d5f0b"
# ## Incoming Web service throttling
#
# Operational Limits for Business Central Online:
#
# - [https://docs.microsoft.com/en-us/dynamics365/business-central/dev-itpro/administration/operational-limits-online#query-limits](https://docs.microsoft.com/en-us/dynamics365/business-central/dev-itpro/administration/operational-limits-online#query-limits)
#
# Telemetry docs:
#
# - [https://docs.microsoft.com/en-us/dynamics365/business-central/dev-itpro/administration/telemetry-webservices-trace](https://docs.microsoft.com/en-us/dynamics365/business-central/dev-itpro/administration/telemetry-webservices-trace)
#
#
#
# Note that SOAP endpoints do not have http status code in telemetry, so they will show with empty values in that dimension.
# + azdata_cell_guid="9ec9b678-7d66-4758-9101-4e9e5025dfcf" tags=[]
# %%kql
let _aadTenantId = aadTenantId;
let _environmentName = environmentName;
let _startDate = startDate;
let _endDate = endDate;
traces
| where 1==1
and timestamp >= todatetime(_startDate)
and timestamp <= todatetime(_endDate) + totimespan(24h) - totimespan(1ms)
and customDimensions.aadTenantId == _aadTenantId
and (_environmentName == '' or customDimensions.environmentName == _environmentName )
and customDimensions.eventId == 'RT0008'
| extend httpStatusCode = case(
isempty(customDimensions.httpStatusCode), 'SOAP (no http status available)'
, tostring( customDimensions.httpStatusCode )
)
| summarize count() by bin(timestamp, 1d), httpStatusCode
| render timechart title= 'Number of incoming web service requests by http status code'
# + [markdown] azdata_cell_guid="d640208b-ec3b-4575-876e-e37954d2d035"
# ## Incoming Web service requests (400 Bad Request)
#
# The server cannot or will not process the request due to an apparent client error (e.g., malformed request syntax, size too large, invalid request message framing, or deceptive request routing) ( see [https://en.wikipedia.org/wiki/List_of_HTTP_status_codes](https://en.wikipedia.org/wiki/List_of_HTTP_status_codes) )
#
# Telemetry docs:
#
# - [https://docs.microsoft.com/en-us/dynamics365/business-central/dev-itpro/administration/telemetry-webservices-trace](https://docs.microsoft.com/en-us/dynamics365/business-central/dev-itpro/administration/telemetry-webservices-trace)
# + azdata_cell_guid="f1614df3-74e6-4e64-b647-ac18fd5ea57c"
# %%kql
//
// Top 10 endpoint requests with 400 Bad Request
//
let _aadTenantId = aadTenantId;
let _environmentName = environmentName;
let _startDate = startDate;
let _endDate = endDate;
traces
| where 1==1
and timestamp >= todatetime(_startDate)
and timestamp <= todatetime(_endDate) + totimespan(24h) - totimespan(1ms)
and customDimensions.aadTenantId == _aadTenantId
and (_environmentName == '' or customDimensions.environmentName == _environmentName )
and customDimensions.eventId == 'RT0008'
and customDimensions.httpStatusCode == '400'
| summarize number_of_requests=count() by endpoint = tostring( customDimensions.endpoint ), alObjectName = tostring( customDimensions.alObjectName ), alObjectId = tostring( customDimensions.alObjectId )
| order by number_of_requests desc
| limit 10
# + [markdown] azdata_cell_guid="f6a9d2d3-26b9-4536-b279-d126e5cd5609"
# ## Incoming Web service requests (Access denied)
#
# The user who made the request doesn't have proper permissions. For more information, see
#
# - [https://docs.microsoft.com/en-us/dynamics365/business-central/dev-itpro/webservices/web-services-authentication](https://docs.microsoft.com/en-us/dynamics365/business-central/dev-itpro/webservices/web-services-authentication)
# - [https://docs.microsoft.com/en-us/dynamics365/business-central/ui-define-granular-permissions](https://docs.microsoft.com/en-us/dynamics365/business-central/ui-define-granular-permissions)
#
# Telemetry docs:
#
# - [https://docs.microsoft.com/en-us/dynamics365/business-central/dev-itpro/administration/telemetry-webservices-trace](https://docs.microsoft.com/en-us/dynamics365/business-central/dev-itpro/administration/telemetry-webservices-trace)
# + azdata_cell_guid="ef3d4e9f-42bb-4492-bc3b-f88b33dcbdea"
# %%kql
//
// Top 10 endpoint requests with access denied
//
let _aadTenantId = aadTenantId;
let _environmentName = environmentName;
let _startDate = startDate;
let _endDate = endDate;
traces
| where 1==1
and timestamp >= todatetime(_startDate)
and timestamp <= todatetime(_endDate) + totimespan(24h) - totimespan(1ms)
and customDimensions.aadTenantId == _aadTenantId
and (_environmentName == '' or customDimensions.environmentName == _environmentName )
and customDimensions.eventId == 'RT0008'
and customDimensions.httpStatusCode == '401'
| limit 10
# + [markdown] azdata_cell_guid="ffc66241-e49a-46c3-953c-edb1e3d1ef75"
# ## Incoming Web service requests (Not found)
#
# The given endpoint was not valid
#
# See
#
# - [https://docs.microsoft.com/en-us/dynamics365/business-central/dev-itpro/webservices/publish-web-service](https://docs.microsoft.com/en-us/dynamics365/business-central/dev-itpro/webservices/publish-web-service)
#
# Telemetry docs:
#
# - [https://docs.microsoft.com/en-us/dynamics365/business-central/dev-itpro/administration/telemetry-webservices-trace](https://docs.microsoft.com/en-us/dynamics365/business-central/dev-itpro/administration/telemetry-webservices-trace)
# + azdata_cell_guid="09e649eb-d8bb-43e8-8f8b-ff07c8cda005"
# %%kql
//
// Top 10 non-valid endpoints called
//
let _aadTenantId = aadTenantId;
let _environmentName = environmentName;
let _startDate = startDate;
let _endDate = endDate;
traces
| where 1==1
and timestamp >= todatetime(_startDate)
and timestamp <= todatetime(_endDate) + totimespan(24h) - totimespan(1ms)
and customDimensions.aadTenantId == _aadTenantId
and (_environmentName == '' or customDimensions.environmentName == _environmentName )
and customDimensions.eventId == 'RT0008'
and customDimensions.httpStatusCode == '404'
| summarize number_of_requests=count() by endpoint = tostring( customDimensions.endpoint ), alObjectName = tostring( customDimensions.alObjectName ), alObjectId = tostring( customDimensions.alObjectId )
| order by number_of_requests desc
| limit 10
# + [markdown] azdata_cell_guid="66ab172d-9d99-4228-98c7-68a4113a91a0"
# ## Incoming Web service requests (Request timed out)
#
# The request took longer to complete than the threshold configured for the service
#
# See
#
# - [https://docs.microsoft.com/en-us/dynamics365/business-central/dev-itpro/administration/operational-limits-online#ODataServices](https://docs.microsoft.com/en-us/dynamics365/business-central/dev-itpro/administration/operational-limits-online#ODataServices)
#
# Telemetry docs:
#
# - [https://docs.microsoft.com/en-us/dynamics365/business-central/dev-itpro/administration/telemetry-webservices-trace](https://docs.microsoft.com/en-us/dynamics365/business-central/dev-itpro/administration/telemetry-webservices-trace)
#
# Performance tuning guide (you need to tune these endpoints to make them go faster)
#
# - [https://docs.microsoft.com/en-us/dynamics365/business-central/dev-itpro/performance/performance-developer#writing-efficient-web-services](https://docs.microsoft.com/en-us/dynamics365/business-central/dev-itpro/performance/performance-developer#writing-efficient-web-services)
# + azdata_cell_guid="f29afa7f-6408-4e85-a613-605d9898574d"
# %%kql
//
// Top 10 endpoints that time out
//
let _aadTenantId = aadTenantId;
let _environmentName = environmentName;
let _startDate = startDate;
let _endDate = endDate;
traces
| where 1==1
and timestamp >= todatetime(_startDate)
and timestamp <= todatetime(_endDate) + totimespan(24h) - totimespan(1ms)
and customDimensions.aadTenantId == _aadTenantId
and (_environmentName == '' or customDimensions.environmentName == _environmentName )
and customDimensions.eventId == 'RT0008'
and customDimensions.httpStatusCode == '408'
| summarize number_of_requests=count() by endpoint = tostring( customDimensions.endpoint ), alObjectName = tostring( customDimensions.alObjectName ), alObjectId = tostring( customDimensions.alObjectId )
| order by number_of_requests desc
| limit 10
# + [markdown] azdata_cell_guid="68241327-780a-4766-9e51-b37f90d595dc"
# ## Incoming Web service requests (Too Many Requests)
#
# The request exceeded the maximum simultaneous requests allowed on the service.
#
# See
#
# - [https://docs.microsoft.com/en-us/dynamics365/business-central/dev-itpro/administration/operational-limits-online#ODataServices](https://docs.microsoft.com/en-us/dynamics365/business-central/dev-itpro/administration/operational-limits-online#ODataServices)
#
# Telemetry docs:
#
# - [https://docs.microsoft.com/en-us/dynamics365/business-central/dev-itpro/administration/telemetry-webservices-trace](https://docs.microsoft.com/en-us/dynamics365/business-central/dev-itpro/administration/telemetry-webservices-trace)
#
# Performance tuning guide (you need to make your web service client back-off and retry)
#
# - [https://docs.microsoft.com/en-us/dynamics365/business-central/dev-itpro/performance/performance-developer#writing-efficient-web-services](https://docs.microsoft.com/en-us/dynamics365/business-central/dev-itpro/performance/performance-developer#writing-efficient-web-services)
# + azdata_cell_guid="2c9888bb-6306-4b67-a545-a40ea5f97f60"
# %%kql
//
// Top 10 endpoints that get throttled
//
let _aadTenantId = aadTenantId;
let _environmentName = environmentName;
let _startDate = startDate;
let _endDate = endDate;
traces
| where 1==1
and timestamp >= todatetime(_startDate)
and timestamp <= todatetime(_endDate) + totimespan(24h) - totimespan(1ms)
and customDimensions.aadTenantId == _aadTenantId
and (_environmentName == '' or customDimensions.environmentName == _environmentName )
and customDimensions.eventId == 'RT0008'
and customDimensions.httpStatusCode == '426'
| summarize number_of_requests=count() by endpoint = tostring( customDimensions.endpoint ), alObjectName = tostring( customDimensions.alObjectName ), alObjectId = tostring( customDimensions.alObjectId )
| order by number_of_requests desc
| limit 10
# + [markdown] azdata_cell_guid="8cf2c901-ced8-46f3-93ef-4ccb3eaad891"
# ## (Outgoing) web service requests overview
#
# Performance tuning guide: [https://docs.microsoft.com/en-us/dynamics365/business-central/dev-itpro/performance/performance-developer#writing-efficient-web-services](https://docs.microsoft.com/en-us/dynamics365/business-central/dev-itpro/performance/performance-developer#writing-efficient-web-services)
#
# Outgoing Web service telemetry docs: [https://docs.microsoft.com/en-us/dynamics365/business-central/dev-itpro/administration/telemetry-webservices-outgoing-trace](https://docs.microsoft.com/en-us/dynamics365/business-central/dev-itpro/administration/telemetry-webservices-outgoing-trace)
#
# KQL sample: [https://github.com/microsoft/BCTech/blob/master/samples/AppInsights/KQL/RawData/OutGoingWebServiceCalls.kql](https://github.com/microsoft/BCTech/blob/master/samples/AppInsights/KQL/RawData/OutGoingWebServiceCalls.kql)
#
# Explanation of different HTTP status codes: [https://en.wikipedia.org/wiki/List_of_HTTP_status_codes](https://en.wikipedia.org/wiki/List_of_HTTP_status_codes)
# + azdata_cell_guid="5eb83b4b-9d27-46a7-b715-a9b2511a132d"
# %%kql
let _aadTenantId = aadTenantId;
let _environmentName = environmentName;
let _startDate = startDate;
let _endDate = endDate;
traces
| where 1==1
and timestamp >= todatetime(_startDate)
and timestamp <= todatetime(_endDate) + totimespan(24h) - totimespan(1ms)
and customDimensions.aadTenantId == _aadTenantId
and (_environmentName == '' or customDimensions.environmentName == _environmentName )
and customDimensions.eventId == 'RT0019'
|extend httpStatusCode = tostring( customDimensions.httpReturnCode )
| summarize request_count=count() by httpStatusCode, bin(timestamp, 1d)
| order by httpStatusCode asc
| render timechart title= 'Number of outgoing web service requests by HTTP status code'
# + azdata_cell_guid="0ee463d9-4e13-4254-a338-cf0103d7e911"
# %%kql
let _aadTenantId = aadTenantId;
let _environmentName = environmentName;
let _startDate = startDate;
let _endDate = endDate;
traces
| where 1==1
and timestamp >= todatetime(_startDate)
and timestamp <= todatetime(_endDate) + totimespan(24h) - totimespan(1ms)
and customDimensions.aadTenantId == _aadTenantId
and (_environmentName == '' or customDimensions.environmentName == _environmentName )
and customDimensions.eventId == 'RT0019'
| extend httpMethod = tostring( toupper( customDimensions.httpMethod ) )
, executionTimeInSec = toreal(totimespan(customDimensions.serverExecutionTime))/10000 /1000 //the datatype for executionTime is timespan
| where executionTimeInSec <= 10
| summarize count() by executionTime_sec = bin(executionTimeInSec, 1), httpMethod
| extend log_count = log10( count_ )
| order by httpMethod, executionTime_sec asc
| render columnchart with (ycolumns = log_count, ytitle='log10(count)', series = httpMethod, title= 'Execution time (seconds) of "normal" outgoing ws requests by method' )
# + azdata_cell_guid="256174e8-1bc2-443b-808f-792db77637a4"
# %%kql
let _aadTenantId = aadTenantId;
let _environmentName = environmentName;
let _startDate = startDate;
let _endDate = endDate;
traces
| where 1==1
and timestamp >= todatetime(_startDate)
and timestamp <= todatetime(_endDate) + totimespan(24h) - totimespan(1ms)
and customDimensions.aadTenantId == _aadTenantId
and (_environmentName == '' or customDimensions.environmentName == _environmentName )
and customDimensions.eventId == 'RT0019'
| extend httpMethod = tostring( toupper( customDimensions.httpMethod ) )
, executionTimeInSec = toreal(totimespan(customDimensions.serverExecutionTime))/10000 /1000 //the datatype for executionTime is timespan
| where executionTimeInSec > 10
| summarize count() by executionTime_sec = bin(executionTimeInSec, 10), httpMethod
| extend log_count = log10( count_ )
| order by httpMethod, executionTime_sec asc
| render columnchart with (ycolumns = log_count, ytitle='log10(count)', series = httpMethod, title= 'Execution time (seconds) of slow outgoing ws requests by method' )
# + azdata_cell_guid="7d0df460-9d0c-4ff7-87a6-87a44b99b22d"
# %%kql
//
// Top 20 most expensive outgoing calls
//
// Includes descriptive statistics avg, min, max, and 95 percentile for execution time and number of calls
//
let _aadTenantId = aadTenantId;
let _environmentName = environmentName;
let _startDate = startDate;
let _endDate = endDate;
traces
| where 1==1
and timestamp >= todatetime(_startDate)
and timestamp <= todatetime(_endDate) + totimespan(24h) - totimespan(1ms)
and customDimensions.aadTenantId == _aadTenantId
and (_environmentName == '' or customDimensions.environmentName == _environmentName )
and customDimensions.eventId == 'RT0019'
| extend httpMethod = tostring( toupper( customDimensions.httpMethod ) )
, executionTimeInMS = toreal(totimespan(customDimensions.serverExecutionTime))/10000 //the datatype for executionTime is timespan
, alObjectId = tostring( customDimensions.alObjectId )
, alObjectName = tostring( customDimensions.alObjectName )
, alObjectType = tostring( customDimensions.alObjectType )
, endpoint = tostring( customDimensions.endpoint )
, extensionId = tostring( customDimensions.extensionId )
, extensionName = tostring( customDimensions.extensionName )
, extensionVersion = tostring( customDimensions.extensionVersion )
| summarize avg(executionTimeInMS), min(executionTimeInMS), max(executionTimeInMS),percentile(executionTimeInMS,95), count() by
httpMethod
, alObjectId, alObjectName, alObjectType
, extensionId, extensionName, extensionVersion
, endpoint
| order by avg_executionTimeInMS desc
| project avg_timeInSec=round(avg_executionTimeInMS/1000,2)
, min_timeInSec=round(min_executionTimeInMS/1000,2)
, max_timeInSec=round(max_executionTimeInMS/1000,2)
, perc_95InSec=round(percentile_executionTimeInMS_95/1000,2), count_, alObjectId, alObjectName, alObjectType, extensionId, extensionName, extensionVersion, httpMethod, endpoint
| limit 20
| samples/AppInsights/TroubleShootingGuides/D365BC Troubleshooting Guides (TSG)/content/Web-services-TSG.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 2
# language: python
# name: python2
# ---
# +
from __future__ import print_function, division
import numpy as np
import gzip
from sklearn.preprocessing import LabelBinarizer
# Load the validation labels (second tab-separated field of each line) and
# one-hot encode them; columns are the label classes (5 of them, per the
# zeros(5) counters below — TODO confirm).
valid_labels = LabelBinarizer().fit_transform(
    np.array([float(x.decode("utf-8").split("\t")[1])
              for x in gzip.open("valid_labels.txt.gz",'rb')]))
# +
# Prediction folders, one per flip x rotation combination — presumably
# test-time-augmentation variants of the same validation set; TODO confirm.
parent_folders = ["flip-False_rotamt-0",
                  "flip-True_rotamt-0",
                  "flip-False_rotamt-90",
                  "flip-True_rotamt-90",
                  "flip-False_rotamt-180",
                  "flip-True_rotamt-180",]
# Map each folder to its deterministic predictions: one row per example, the
# tab-separated fields after the first parsed as per-class scores.
parent_folder_to_det_pred = {}
for parent_folder in parent_folders:
    det_preds = np.array([
        [float(y) for y in x.decode("utf-8").split("\t")[1:]]
        for x in gzip.open(parent_folder+"/deterministic_preds.txt.gz", 'rb')])
    parent_folder_to_det_pred[parent_folder] = det_preds
# +
def inverse_softmax(preds):
    """Recover logits (up to an additive constant) from softmax probabilities.

    Returns per-row log-probabilities centered so each row's mean is zero.
    """
    log_preds = np.log(preds)
    return log_preds - log_preds.mean(axis=1, keepdims=True)
# Split the eye pairs into class-balanced "pseudo-validation" and "pseudo-test"
# sets: each pair is assigned greedily to whichever set currently has fewer
# examples of the pair's (most diseased) class.
pseudovalid_predictions = []
pseudovalid_labels = []
pseudovalid_label_counts = np.zeros(5)
pseudotest_predictions = []
pseudotest_labels = []
pseudotest_label_counts = np.zeros(5)
for i in range(int(0.5*len(valid_labels))):
    # Consecutive rows 2i and 2i+1 appear to be the two eyes of one subject —
    # TODO confirm against the label-file layout.
    left_eye_label = valid_labels[2*i]
    right_eye_label = valid_labels[(2*i)+1]
    most_diseased_label = max(np.argmax(left_eye_label),
                              np.argmax(right_eye_label))
    if (pseudovalid_label_counts[most_diseased_label] <
        pseudotest_label_counts[most_diseased_label]):
        append_to_predictions = pseudovalid_predictions
        append_to_labels = pseudovalid_labels
        append_to_label_counts = pseudovalid_label_counts
    else:
        append_to_predictions = pseudotest_predictions
        append_to_labels = pseudotest_labels
        append_to_label_counts = pseudotest_label_counts
    # One entry per augmentation folder: labels (and counts) are duplicated so
    # they stay aligned one-to-one with the per-folder predictions.
    for parent_folder in parent_folders:
        append_to_labels.append(valid_labels[2*i])
        append_to_labels.append(valid_labels[(2*i)+1])
        append_to_label_counts += valid_labels[2*i]
        append_to_label_counts += valid_labels[(2*i)+1]
        append_to_predictions.append(
            parent_folder_to_det_pred[parent_folder][2*i])
        append_to_predictions.append(
            parent_folder_to_det_pred[parent_folder][(2*i)+1])
pseudovalid_predictions = np.array(pseudovalid_predictions)
pseudovalid_pred_logits = inverse_softmax(pseudovalid_predictions)
pseudovalid_labels = np.array(pseudovalid_labels)
pseudotest_predictions = np.array(pseudotest_predictions)
pseudotest_pred_logits = inverse_softmax(pseudotest_predictions)
pseudotest_labels = np.array(pseudotest_labels)
# Sanity check: the two sets should have near-identical class counts.
print(pseudovalid_label_counts,
      pseudotest_label_counts)
# -
import abstention
# +
from abstention.calibration import compute_ece, TempScaling
# Expected calibration error before temperature scaling, on both splits.
print("ece before temp scale - valid",
      compute_ece(softmax_out=pseudovalid_predictions,
                  labels=pseudovalid_labels,
                  bins=15))
print("ece before temp scale - test",
      compute_ece(softmax_out=pseudotest_predictions,
                  labels=pseudotest_labels,
                  bins=15))
# Fit a single temperature on the pseudo-validation logits...
temp_scaler = TempScaling(ece_bins=15)(
    valid_preacts=pseudovalid_pred_logits,
    valid_labels=pseudovalid_labels)
# ...then apply it to both splits; ECE should drop on both.
temp_scaled_valid = temp_scaler(pseudovalid_pred_logits)
temp_scaled_test = temp_scaler(pseudotest_pred_logits)
print("ece after temp scale - valid",
      compute_ece(softmax_out=temp_scaled_valid,
                  labels=pseudovalid_labels,
                  bins=15))
print("ece after temp scale - test",
      compute_ece(softmax_out=temp_scaled_test,
                  labels=pseudotest_labels,
                  bins=15))
# +
import abstention.abstention
# BUG FIX: `reload` is not a builtin in Python 3 -- it must be imported from
# importlib, otherwise this cell raises NameError.
from importlib import reload
reload(abstention.abstention)
from abstention.abstention import (weighted_kappa_metric,
                                   WeightedKappa, DistMaxClassProbFromOne,
                                   Entropy)
# Quadratic (Cohen) kappa weight matrix: penalty grows with (i - j)^2.
quadratic_weights = np.array([[(i-j)**2 for i in range(5)]
                              for j in range(5)])
# "Est" perf feeds the model's own calibrated probabilities back in as soft
# labels -- the performance the model *expects* of itself; "Actual" uses the
# held-out labels. Close agreement suggests good calibration.
print("Est valid perf",weighted_kappa_metric(predprobs=temp_scaled_valid,
                                             true_labels=temp_scaled_valid,
                                             weights=quadratic_weights))
print("Actual valid perf",weighted_kappa_metric(predprobs=temp_scaled_valid,
                                                true_labels=pseudovalid_labels,
                                                weights=quadratic_weights))
print("Est test perf",weighted_kappa_metric(predprobs=temp_scaled_test,
                                            true_labels=temp_scaled_test,
                                            weights=quadratic_weights))
print("Actual test perf",weighted_kappa_metric(predprobs=temp_scaled_test,
                                               true_labels=pseudotest_labels,
                                               weights=quadratic_weights))
# Abstention criteria to compare: each factory builds a scorer that assigns
# every example an abstention priority.
abstainer_factories = [
    ("expected_weighted_kappa", WeightedKappa(weights=quadratic_weights)),
    ("dist_maxclass_prob_from_one", DistMaxClassProbFromOne()),
    ("entropy", Entropy())
]
for abstention_fraction in [0.05, 0.1, 0.15, 0.2]:
    print("\nabstention fraction:",abstention_fraction)
    for abstainer_name, abstainer_factory in abstainer_factories:
        abstainer = abstainer_factory(valid_labels=pseudovalid_labels,
                                      valid_posterior=temp_scaled_valid)
        abstainer_priorities = abstainer(temp_scaled_test)
        # Retain the (1 - abstention_fraction) examples with the *lowest*
        # abstention priority; the rest are abstained on.
        indices_to_retain = ([y[0] for y in sorted(enumerate(abstainer_priorities),
                             key=lambda x: x[1])][:int(len(abstainer_priorities)*
                             (1-abstention_fraction))])
        retained_temp_scaled_test = np.array([temp_scaled_test[i] for i in indices_to_retain])
        retained_pseudotest_labels = np.array([pseudotest_labels[i] for i in indices_to_retain])
        # Metrics on the retained subset only.
        print("\nAbstention criterion:",abstainer_name)
        print("weighted kappa", weighted_kappa_metric(predprobs=retained_temp_scaled_test,
                                                      true_labels=retained_pseudotest_labels,
                                                      weights=quadratic_weights))
        print("accuracy", (np.sum(np.argmax(retained_temp_scaled_test,axis=-1)
                           ==np.argmax(retained_pseudotest_labels,axis=-1))/
                           len(retained_pseudotest_labels)))
# +
# %matplotlib inline
# BUG FIX: in the original cell, `inverse_sigmoid`, `calibration_curve`,
# `plt`, and `n_bins` were defined *after* the Platt-scaling plots that use
# them, raising NameError when the cell runs top-to-bottom. Definitions and
# imports now precede first use.
from sklearn.calibration import calibration_curve
from matplotlib import pyplot as plt

def inverse_sigmoid(vals):
    """Logit transform: map probabilities in (0, 1) to log-odds."""
    return np.log(vals/(1-vals))

n_bins = 10

# Platt-scale the binary (class-0 vs rest) logits on the pseudo-validation
# split, then plot reliability curves in logit space: test first, then valid,
# plus the y=x reference line.
platt_scaler = abstention.calibration.PlattScaling()(
    valid_preacts=pseudovalid_pred_logits[:,0],
    valid_labels=pseudovalid_labels[:,0])
platt_scaled_binary_valid = platt_scaler(pseudovalid_pred_logits[:,0])
platt_scaled_binary_test = platt_scaler(pseudotest_pred_logits[:,0])
fracpos, mpv = calibration_curve(
    y_true=1-pseudotest_labels[:,0],
    y_prob=1-platt_scaled_binary_test,
    n_bins=n_bins)
plt.plot(inverse_sigmoid(mpv), inverse_sigmoid(fracpos))
fracpos, mpv = calibration_curve(
    y_true=1-pseudovalid_labels[:,0],
    y_prob=1-platt_scaled_binary_valid,
    n_bins=n_bins)
plt.plot(inverse_sigmoid(mpv), inverse_sigmoid(fracpos))
plt.plot([-3,3],[-3,3])
plt.show()

# Reliability curves for each of the 5 classes after temperature scaling.
for i in range(5):
    print("Class", i)
    print(np.sum(temp_scaled_test[:,i]))
    print(np.sum(temp_scaled_valid[:,i]))
    fracpos, mpv = calibration_curve(
        y_true=pseudotest_labels[:,i],
        y_prob=temp_scaled_test[:,i],
        n_bins=n_bins)
    plt.plot(inverse_sigmoid(mpv), inverse_sigmoid(fracpos))
    fracpos, mpv = calibration_curve(
        y_true=pseudovalid_labels[:,i],
        y_prob=temp_scaled_valid[:,i],
        n_bins=n_bins)
    plt.plot(inverse_sigmoid(mpv), inverse_sigmoid(fracpos))
    plt.plot([-3,3],[-3,3])
    plt.show()
# +
from sklearn.metrics import roc_auc_score
from sklearn.metrics import average_precision_score
# Binary "any disease" screening metrics: positive = label > 0, scored as
# 1 - P(class 0).
# NOTE(review): `augmented_labels` / `augmented_predictions` are defined in
# an earlier cell not shown here -- presumably the concatenation across
# augmentation folders; confirm before reusing this cell standalone.
print("auROC",roc_auc_score(y_true=1.0*(augmented_labels > 0),
                            y_score=1.0-augmented_predictions[:,0]))
print("auPRC",average_precision_score(y_true=1.0*(augmented_labels > 0),
                                      y_score=1.0-augmented_predictions[:,0]))
# Same metrics per augmentation folder against the raw validation labels.
for parent_folder in parent_folders:
    print(parent_folder)
    det_preds = parent_folder_to_det_pred[parent_folder]
    print("auROC",roc_auc_score(y_true=1.0*(valid_labels > 0),
                                y_score=1.0-det_preds[:,0]))
    print("auPRC",average_precision_score(y_true=1.0*(valid_labels > 0),
                                          y_score=1.0-det_preds[:,0]))
# -
# Average 100 MC-dropout sampling runs and score the ensembled prediction.
mc_preds = []
for i in range(100):
    nondet_preds = np.array([
        # BUG FIX: gzip.open(..., 'rb') yields bytes; decode before calling
        # str.split -- bytes.split("\t") raises TypeError. This matches the
        # deterministic-predictions loader above.
        [float(y) for y in x.decode("utf-8").split("\t")[1:]]
        for x in gzip.open("flip-False_rotamt-0/nondeterministic_preds_"+str(i)+".txt.gz", 'rb')])
    mc_preds.append(nondet_preds)
mc_preds = np.array(mc_preds)
print(roc_auc_score(y_true=1.0*(valid_labels > 0),
                    y_score=1.0-np.mean(mc_preds,axis=0)[:,0]))
print(average_precision_score(y_true=1.0*(valid_labels > 0),
                              y_score=1.0-np.mean(mc_preds,axis=0)[:,0]))
# +
# %matplotlib inline
from matplotlib import pyplot as plt
import seaborn as sns
for parent_folder in parent_folders:
    print(parent_folder)
    preds = parent_folder_to_det_pred[parent_folder]
    # Recover zero-mean logits from the stored softmax probabilities.
    pred_logits = np.log(preds)
    pred_logits = pred_logits-np.mean(pred_logits,axis=1)[:,None]
    # One figure per output class: distribution of that class's logit,
    # grouped by true label. (Replaces five copy-pasted loops that differed
    # only in the hard-coded column index.)
    # NOTE: sns.distplot is deprecated in seaborn >= 0.11; histplot/displot
    # are the modern replacements.
    for class_idx in range(5):
        for i in range(5):
            sns.distplot(pred_logits[:,class_idx][valid_labels==i],
                         bins=20)
        plt.show()
# -
| notebooks/StudyValidSetPredictions.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# language: python
# name: python3
# ---
# +
from numpy import mean
from sklearn.datasets import make_classification
from sklearn.model_selection import cross_val_score
from sklearn.model_selection import RepeatedStratifiedKFold
from sklearn.ensemble import RandomForestClassifier
from imblearn.ensemble import BalancedRandomForestClassifier
# Synthetic binary problem: 10,000 samples with ~99:1 class imbalance
# (weights=[0.99]) and no label noise (flip_y=0); fixed seed for repeatability.
X, y = make_classification(n_samples=10000, n_features=2, n_redundant=0,
	n_clusters_per_class=1, weights=[0.99], flip_y=0, random_state=4)
# -
# # Synthetic Dataset
# Compare four random-forest variants for class imbalance under an identical
# repeated stratified 10-fold CV protocol. Output order matches the original
# one-cell-per-model layout:
#   1. standard random forest
#   2. class weighting while calculating impurity ('balanced')
#   3. class weighting per bootstrap sample ('balanced_subsample')
#   4. balanced random forest (random undersampling in bootstrap samples)
models = [
    RandomForestClassifier(n_estimators=10),
    RandomForestClassifier(n_estimators=10, class_weight='balanced'),
    RandomForestClassifier(n_estimators=10, class_weight='balanced_subsample'),
    BalancedRandomForestClassifier(n_estimators=10),
]
for model in models:
    cv = RepeatedStratifiedKFold(n_splits=10, n_repeats=3, random_state=1)
    scores = cross_val_score(model, X, y, scoring='roc_auc', cv=cv, n_jobs=-1)
    print('Mean ROC AUC: %.3f' % mean(scores))
# # SatImage Dataset
# +
from imblearn.datasets import fetch_datasets
satimage = fetch_datasets()["satimage"]
X, y = satimage.data, satimage.target
# -
# Evaluate the same four imbalance-handling variants with identical CV
# (standard RF; impurity class weighting; per-bootstrap class weighting;
# balanced RF with random undersampling), in that order.
models = [
    RandomForestClassifier(n_estimators=10),
    RandomForestClassifier(n_estimators=10, class_weight='balanced'),
    RandomForestClassifier(n_estimators=10, class_weight='balanced_subsample'),
    BalancedRandomForestClassifier(n_estimators=10),
]
for model in models:
    cv = RepeatedStratifiedKFold(n_splits=10, n_repeats=3, random_state=1)
    scores = cross_val_score(model, X, y, scoring='roc_auc', cv=cv, n_jobs=-1)
    print('Mean ROC AUC: %.3f' % mean(scores))
# # US Crime Dataset
# +
from imblearn.datasets import fetch_datasets
us_crime = fetch_datasets()["us_crime"]
X, y = us_crime.data, us_crime.target
# -
# Evaluate the same four imbalance-handling variants with identical CV
# (standard RF; impurity class weighting; per-bootstrap class weighting;
# balanced RF with random undersampling), in that order.
models = [
    RandomForestClassifier(n_estimators=10),
    RandomForestClassifier(n_estimators=10, class_weight='balanced'),
    RandomForestClassifier(n_estimators=10, class_weight='balanced_subsample'),
    BalancedRandomForestClassifier(n_estimators=10),
]
for model in models:
    cv = RepeatedStratifiedKFold(n_splits=10, n_repeats=3, random_state=1)
    scores = cross_val_score(model, X, y, scoring='roc_auc', cv=cv, n_jobs=-1)
    print('Mean ROC AUC: %.3f' % mean(scores))
# # Oil Dataset
# +
from imblearn.datasets import fetch_datasets
oil = fetch_datasets()["oil"]
X, y = oil.data, oil.target
# -
# Evaluate the same four imbalance-handling variants with identical CV
# (standard RF; impurity class weighting; per-bootstrap class weighting;
# balanced RF with random undersampling), in that order.
models = [
    RandomForestClassifier(n_estimators=10),
    RandomForestClassifier(n_estimators=10, class_weight='balanced'),
    RandomForestClassifier(n_estimators=10, class_weight='balanced_subsample'),
    BalancedRandomForestClassifier(n_estimators=10),
]
for model in models:
    cv = RepeatedStratifiedKFold(n_splits=10, n_repeats=3, random_state=1)
    scores = cross_val_score(model, X, y, scoring='roc_auc', cv=cv, n_jobs=-1)
    print('Mean ROC AUC: %.3f' % mean(scores))
| code/balanced random forest/balanced_random_forest.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python [conda env:python3]
# language: python
# name: conda-env-python3-py
# ---
# # Save NIDM-Results packs to NiMARE dataset
import json
from glob import glob
import nibabel as nib
import pandas as pd
import numpy as np
from scipy import ndimage
from os.path import basename, join, isfile
# +
def _local_max(data, affine, min_distance):
"""Find all local maxima of the array, separated by at least min_distance.
Adapted from https://stackoverflow.com/a/22631583/2589328
Parameters
----------
data : array_like
3D array of with masked values for cluster.
min_distance : :obj:`int`
Minimum distance between local maxima in ``data``, in terms of mm.
Returns
-------
ijk : :obj:`numpy.ndarray`
(n_foci, 3) array of local maxima indices for cluster.
vals : :obj:`numpy.ndarray`
(n_foci,) array of values from data at ijk.
"""
# Initial identification of subpeaks with minimal minimum distance
data_max = ndimage.filters.maximum_filter(data, 3)
maxima = (data == data_max)
data_min = ndimage.filters.minimum_filter(data, 3)
diff = ((data_max - data_min) > 0)
maxima[diff == 0] = 0
labeled, n_subpeaks = ndimage.label(maxima)
ijk = np.array(ndimage.center_of_mass(data, labeled,
range(1, n_subpeaks + 1)))
ijk = np.round(ijk).astype(int)
vals = np.apply_along_axis(arr=ijk, axis=1, func1d=_get_val,
input_arr=data)
# Sort subpeaks in cluster in descending order of stat value
order = (-vals).argsort()
vals = vals[order]
ijk = ijk[order, :]
xyz = nib.affines.apply_affine(affine, ijk) # Convert to xyz in mm
# Reduce list of subpeaks based on distance
keep_idx = np.ones(xyz.shape[0]).astype(bool)
for i in range(xyz.shape[0]):
for j in range(i + 1, xyz.shape[0]):
if keep_idx[i] == 1:
dist = np.linalg.norm(xyz[i, :] - xyz[j, :])
keep_idx[j] = dist > min_distance
ijk = ijk[keep_idx, :]
vals = vals[keep_idx]
return ijk, vals
def _get_val(row, input_arr):
"""Small function for extracting values from array based on index.
"""
i, j, k = row
return input_arr[i, j, k]
# -
# Output paths for the two dataset JSONs: cluster peaks only (f1), and
# peaks plus subpeaks (f2).
f1 = '/Users/tsalo/Documents/tsalo/NiMARE/nimare/tests/data/nidm_pain_dset.json'
f2 = '/Users/tsalo/Documents/tsalo/NiMARE/nimare/tests/data/nidm_pain_dset_with_subpeaks.json'
# +
def _first_sorted(files):
    """Return the alphabetically first path in *files*, or None if empty."""
    return sorted(files)[0] if files else None

# Build a NiMARE-style dataset dict -- one entry per NIDM-Results pack --
# reporting a single peak coordinate (maximum voxel) per positive cluster.
ddict = {}
folders = sorted(glob('/Users/tsalo/Downloads/nidm-pain-results/pain_*.nidm'))
for folder in folders:
    name = basename(folder)
    contrast = {'coords': {'space': 'MNI'},
                'images': {'space': 'MNI_2mm'}}
    ddict[name] = {'contrasts': {'1': contrast}}
    # Statistical maps: contrast estimate (excluding its standard-error map),
    # standard error, z-statistic, and t-statistic images.
    contrast['images']['con'] = _first_sorted(
        [f for f in glob(join(folder, 'Contrast*.nii.gz'))
         if 'StandardError' not in basename(f)])
    contrast['images']['se'] = _first_sorted(
        glob(join(folder, 'ContrastStandardError*.nii.gz')))
    contrast['images']['z'] = _first_sorted(
        glob(join(folder, 'ZStatistic*.nii.gz')))
    contrast['images']['t'] = _first_sorted(
        glob(join(folder, 'TStatistic*.nii.gz')))
    # Sample size = number of rows in the design matrix, when present.
    f = join(folder, 'DesignMatrix.csv')
    if isfile(f):
        contrast['sample_sizes'] = [pd.read_csv(f, header=None).shape[0]]
    else:
        contrast['sample_sizes'] = None
    # Foci: the maximum voxel of each positive cluster in the excursion set.
    f = sorted(glob(join(folder, 'ExcursionSet*.nii.gz')))[0]
    img = nib.load(f)
    # get_fdata() replaces get_data(), which was removed in nibabel 5.0.
    data = np.nan_to_num(img.get_fdata())
    binarized = (data > 0).astype(int)
    # 26-connectivity clustering; ndimage.label replaces the deprecated
    # ndimage.measurements.label alias.
    labeled = ndimage.label(binarized, np.ones((3, 3, 3)))[0]
    clust_ids = sorted(np.unique(labeled)[1:])
    ijk = np.hstack([np.where(data * (labeled == c) == np.max(data * (labeled == c)))
                     for c in clust_ids]).T
    xyz = nib.affines.apply_affine(img.affine, ijk)
    contrast['coords']['x'] = list(xyz[:, 0])
    contrast['coords']['y'] = list(xyz[:, 1])
    contrast['coords']['z'] = list(xyz[:, 2])
with open(f1, 'w') as fo:
    json.dump(ddict, fo, sort_keys=True, indent=4)
# +
def _first_sorted(files):
    """Return the alphabetically first path in *files*, or None if empty."""
    return sorted(files)[0] if files else None

# Same dataset construction as above, but reporting each cluster's peak plus
# up to three subpeaks (found with _local_max) instead of one peak per cluster.
ddict = {}
folders = sorted(glob('/Users/tsalo/Downloads/nidm-pain-results/pain_*.nidm'))
for folder in folders:
    name = basename(folder)
    contrast = {'coords': {'space': 'MNI'},
                'images': {'space': 'MNI_2mm'}}
    ddict[name] = {'contrasts': {'1': contrast}}
    # Statistical maps (the contrast estimate excludes its standard-error map).
    contrast['images']['con'] = _first_sorted(
        [f for f in glob(join(folder, 'Contrast*.nii.gz'))
         if 'StandardError' not in basename(f)])
    contrast['images']['se'] = _first_sorted(
        glob(join(folder, 'ContrastStandardError*.nii.gz')))
    contrast['images']['z'] = _first_sorted(
        glob(join(folder, 'ZStatistic*.nii.gz')))
    contrast['images']['t'] = _first_sorted(
        glob(join(folder, 'TStatistic*.nii.gz')))
    # Sample size = number of rows in the design matrix, when present.
    f = join(folder, 'DesignMatrix.csv')
    if isfile(f):
        contrast['sample_sizes'] = [pd.read_csv(f, header=None).shape[0]]
    else:
        contrast['sample_sizes'] = None
    # Foci from the excursion set image.
    f = sorted(glob(join(folder, 'ExcursionSet*.nii.gz')))[0]
    img = nib.load(f)
    # get_fdata() replaces get_data(), which was removed in nibabel 5.0.
    data = np.nan_to_num(img.get_fdata())
    binarized = (data > 0).astype(int)
    # ndimage.label replaces the deprecated ndimage.measurements.label alias.
    labeled = ndimage.label(binarized, np.ones((3, 3, 3)))[0]
    clust_ids = sorted(np.unique(labeled)[1:])
    # Visit clusters in descending order of their maximum statistic value.
    peak_vals = np.array([np.max(data * (labeled == c)) for c in clust_ids])
    clust_ids = [clust_ids[c] for c in (-peak_vals).argsort()]
    ijk = []
    for c_val in clust_ids:
        masked_data = data * (labeled == c_val)
        # Get peaks, subpeaks, and associated statistics for this cluster.
        subpeak_ijk, subpeak_vals = _local_max(masked_data, img.affine,
                                               min_distance=8)
        # Only report the peak and, at most, the top 3 subpeaks.
        ijk.append(subpeak_ijk[:min(len(subpeak_vals), 4), :])
    ijk = np.vstack(ijk)
    xyz = nib.affines.apply_affine(img.affine, ijk)
    contrast['coords']['x'] = list(xyz[:, 0])
    contrast['coords']['y'] = list(xyz[:, 1])
    contrast['coords']['z'] = list(xyz[:, 2])
with open(f2, 'w') as fo:
    json.dump(ddict, fo, sort_keys=True, indent=4)
| examples/save_nidm_to_dset.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# language: python
# name: python3
# ---
# **1.** Run the following cells to setup the environment
import requests, pandas as pd, json, copy
from IPython.display import JSON, Image
from requests.auth import HTTPBasicAuth
# Base URL of the semantic-modeling API server.
URL = 'http://mira.isi.edu:8000'
# NOTE(review): credentials are hard-coded in source; '<PASSWORD>!' looks
# like a scrubbed placeholder -- supply the real password via an environment
# variable or config file rather than committing it.
auth = HTTPBasicAuth('mint', '<PASSWORD>!')
# **2.** Load a dataset that we want to model, and show the content of the dataset.
#
# The dataset is supposed to be a [relational table](https://www.ibm.com/support/knowledgecenter/SSWU4L/Data/imc_Data/Data_q_a_watson_assistant/Relational_Tables371.html). If the data format is CSV, the first row is the header.
# Relational table to annotate; the first CSV row is the column header.
df = pd.read_csv('./examples/ethiopia_dashboard.csv', header=0)
df.head(5)
# **3.** To send the table to the API for annotation, we need to convert the original table into a JSON format below:
# ```json
# {
# "ontologies": ["http://wikiba.se/ontology#"],
# "source": [
# {"name": "<column name>", "values": ["<value of the column in first row>", "<value of the column in second row>", "..."]
# ]
# }
# ```
# and send it in the body of a `POST` request to `/v1.1/annotate`
# Build the relational-table payload: one {name, values} entry per column.
# Columns pandas auto-named ("Unnamed: N") had a blank header in the CSV,
# so they are sent with an empty name.
source_columns = []
for col in df.columns:
    header = "" if col.startswith("Unnamed:") else col
    source_columns.append({"name": header, "values": df[col].tolist()})
payload = {
    "ontologies": ["http://datamart.isi.edu/ontology#"],
    "source": source_columns,
}
# POST the table to the annotation endpoint and require success.
resp = requests.post(URL + "/v1.1/annotate", json=payload, auth=auth)
assert resp.status_code == 200
# **4.** The JSON response that the server returns are in the following format:
# ```json
# {
# "sms": [{
# "<ont_class_id>": {
# "iri": "<uri of the ontology class>",
# "properties": [
# ["<ontology predicate 1>", "<column_index>"],
# ["<ontology predicate 2>", "<column_index>"]
# ],
# "links": [
# ["<ontology predicate 1>", "<ont_class_id>"]
# ]
# }
# }],
# "semantic_types": [
# [{
# "class": "<ontology class uri>",
# "predicate": "<ontology predicate uri>",
# "prob": "<probability of this semantic type (float)>"
# }]
# ],
# "prefixes": {
# "<prefix>": "<url>"
# }
# }
# ```
# where:
# * `sms` is a list of predicted semantic models, sorted by the likelihood
# * `semantic_types` is a list containing predicted semantic types for each column. In particular, an ith item in the list is predicted semantic types of the ith column in the data source.
# Pretty-print the raw annotation response for inspection.
JSON(resp.json(), indent=4)
# Run the following cell to draw the most probable semantic model
# Draw the most probable semantic model (sms[0]) as an image.
data = resp.json()
data = {
    "prefixes": data['prefixes'],
    "sm": data['sms'][0],
    "columns": df.columns.tolist()
}
# BUG FIX: the original passed stream=all -- the *builtin* `all` function,
# which only worked because any function object is truthy. The intended
# keyword value is True (stream the binary image body).
resp2 = requests.post(URL + "/v1/draw", json=data, auth=auth, stream=True)
assert resp2.status_code == 200
display(Image(resp2.content))
| demo.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# language: python
# name: python3
# ---
# +
import pandas as pd
import pymongo
from bs4 import BeautifulSoup as bs
import requests
from splinter import Browser
# define a placeholder for final dictionary to pass into mars_app.py
mars_dict = {}
# -
# function to get headlines, paragraphs, not images
def scraping_func(url):
    """Open *url* in a Chrome browser and return the page as a BeautifulSoup
    object.

    The browser is always closed via try/finally -- the original leaked the
    browser process whenever visiting or parsing raised.
    """
    # Path to chromedriver
    executable_path = {'executable_path': 'chromedriver.exe'}
    browser = Browser('chrome', **executable_path, headless=False)
    try:
        # Go to website and wait briefly for the news list to render.
        browser.visit(url)
        browser.is_element_present_by_css("ul.item_list li.slide", wait_time=1)
        # Convert the browser html to a soup object.
        return bs(browser.html, 'html.parser')
    finally:
        # Close the browser even if scraping failed.
        browser.quit()
# function to get Featured image
def get_featured_img_func(url):
    """Scrape JPL's featured Mars image page.

    Returns a one-element list [{'Title': ..., 'URL': ...}] for the current
    featured image. The browser is always closed via try/finally (the
    original leaked it on any scraping error).
    """
    # Path to chromedriver
    executable_path = {'executable_path': 'chromedriver.exe'}
    browser = Browser('chrome', **executable_path, headless=False)
    try:
        browser.visit(url)
        # Click through "Full Image" and then "more info" to reach the
        # image detail page.
        browser.find_by_id("full_image").click()
        browser.is_element_present_by_text('more info', wait_time=1)
        browser.find_link_by_partial_text('more info').click()
        soup = bs(browser.html, 'html.parser')
        # HACK: the image href is located by fixed position among the page's
        # <a> tags (index 58) -- brittle; breaks if the page layout changes.
        relative_image_path = browser.find_by_tag("a")[58]["href"]
        # The title is in <h1 class="article_title">, padded with tab
        # characters; field 4 of the tab-split is the title text
        # (layout-dependent -- confirm if the page template changes).
        title_text = soup.find('h1', class_='article_title').get_text()
        title = title_text.split('\t')[4]
        return [{'Title': title, 'URL': relative_image_path}]
    finally:
        # Close the browser even if scraping failed.
        browser.quit()
# function to get Hemis images
def get_hemis_img(url):
    """Scrape the USGS Astrogeology search page for Mars hemisphere images.

    Returns a list of {'title': ..., 'img_url': ...} dicts, one per
    hemisphere. Each browser instance is closed via try/finally (the
    original leaked browsers whenever a scrape raised).
    """
    # Path to chromedriver
    executable_path = {'executable_path': 'chromedriver.exe'}
    browser = Browser('chrome', **executable_path, headless=False)
    try:
        browser.visit(url)
        result = browser.find_by_tag("a")
        # Collect the deduplicated links to the hemisphere detail pages;
        # candidate links start at offset 4 and each page is linked twice
        # (thumbnail and text link).
        hemis_image_path_list = []
        for i in range(8):
            href = result[i + 4]["href"]
            if href in hemis_image_path_list:
                print('')  # duplicate link; blank line kept from original output
            else:
                hemis_image_path_list.append(href)
    finally:
        browser.quit()
    final_hemis_img_url_list = []
    for hemi_url in hemis_image_path_list:
        browser = Browser('chrome', **executable_path, headless=False)
        try:
            browser.visit(hemi_url)
            soup = bs(browser.html, 'html.parser')
            # Title of this hemisphere page.
            result_title = soup.find('h2', class_='title').get_text()
            # Relative src of the full-resolution image; prepend the host.
            result = soup.find('img', class_='wide-image')["src"]
            final_url = 'https://astrogeology.usgs.gov' + result
            final_hemis_img_url_list.append({"title": result_title,
                                             "img_url": final_url})
        finally:
            browser.quit()
    return final_hemis_img_url_list
# +
### NASA Mars News
# * Scrape the [NASA Mars News Site](https://mars.nasa.gov/news/) and collect the latest News Title and Paragraph Text.
# Assign the text to variables that you can reference later.
# Set Website URL to scrape
url = 'https://mars.nasa.gov/news/'
# Call scrape function and pass in url
scrape_soup = scraping_func(url)
# Get latest title
news_title_find = scrape_soup.find('div', class_='content_title')
# Get title from scrape
news_title = news_title_find.get_text()
news_title
# -
# Get latest title's paragraph
news_p_find = scrape_soup.find('div', class_='article_teaser_body')
news_p = news_p_find.get_text()
news_p
# +
### JPL Mars Space Images - Featured Image
#* Use splinter to navigate the site and find the image url for the current Featured Mars Image and
# assign the url string to a variable called `featured_image_url`.
# Set Website URL to scrape
url = 'https://www.jpl.nasa.gov/spaceimages/?search=&category=Mars/'
# call function to get the URL
final_title_feature_img = get_featured_img_func(url)
final_title_feature_img
# +
### Mars Weather
#* Visit the Mars Weather twitter account [here](https://twitter.com/marswxreport?lang=en) and
# scrape the latest Mars weather tweet from the page.
#Save the tweet text for the weather report as a variable called `mars_weather`
# Set Website URL to scrape
url = 'https://twitter.com/marswxreport?lang=en'
# Call scrape function and pass in url
scrape_soup = scraping_func(url)
# Get latest weather tweet
mars_weather_find = scrape_soup.find('p', class_='TweetTextSize TweetTextSize--normal js-tweet-text tweet-text')
mars_weather = mars_weather_find.get_text()
mars_weather
# Split the tweet into its newline-separated lines.
mars_weather_splits = mars_weather.split('\n')
mars_weather_splits
# NOTE(review): this hard-codes one specific tweet's pic.twitter.com suffix;
# it will not strip the image URL from any other tweet -- confirm intent.
temp = mars_weather_splits[2].split('pic.twitter.com/MhPPOHJg3m')
temp[0]
# Replace the third line (which had the image URL glued on) with the
# cleaned-up text.
del mars_weather_splits[2]
mars_weather_splits.append(temp[0])
mars_weather_splits
# Package the three weather lines as labelled dicts for the web app.
final_mars_weather = []
temperature_list = ['Temperature', 'Wind', 'Pressure']
for i in range(3):
    mars_weather_splits_dict = {'weather': temperature_list[i], 'value': mars_weather_splits[i]}
    final_mars_weather.append(mars_weather_splits_dict)
final_mars_weather
# +
### Mars Facts
#* Visit the Mars Facts webpage [here](https://space-facts.com/mars/) and
#use Pandas to scrape the table containing facts about the planet including Diameter, Mass, etc.
#* Use Pandas to convert the data to a HTML table string.
# Set Website URL to scrape
url = 'https://space-facts.com/mars/'
# pd.read_html returns every <table> on the page as a DataFrame.
tables = pd.read_html(url)
comp_table = tables[0]
comp_table
# -
# Mars-vs-Earth comparison table -> list of record dicts for the web app.
mars_earth_comp_df = tables[0]
mars_earth_comp_df.columns = ['Mars-Earth Comparison', 'Mars', 'Earth']
mars_earth_comp_df
# The original used a pointless inner `for j in range(1)` loop (it only ever
# ran with j=0) and chained .iloc[i][j] indexing; build the records directly.
mars_comparison = [
    {"description": mars_earth_comp_df.iloc[i, 0],
     "mars": mars_earth_comp_df.iloc[i, 1],
     "earth": mars_earth_comp_df.iloc[i, 2]}
    for i in range(len(mars_earth_comp_df))
]
mars_comparison
# Mars planet-profile table -> list of {description, value} record dicts.
mars_planet_profile_df = tables[1]
mars_planet_profile_df.columns = ['Mars Planet Profile', 'Measurement']
mars_planet_profile_df
# Direct record construction (drops the original's no-op `for j in range(1)`
# inner loop and chained .iloc[i][j] indexing).
mars_profile = [
    {"description": mars_planet_profile_df.iloc[i, 0],
     "value": mars_planet_profile_df.iloc[i, 1]}
    for i in range(len(mars_planet_profile_df))
]
mars_profile
# +
#* Visit the USGS Astrogeology site [here](https://astrogeology.usgs.gov/search/results?q=hemisphere+enhanced&k1=target&v1=Mars)
#to obtain high resolution images for each of Mar's hemispheres.
#* You will need to click each of the links to the hemispheres in order to find the image url to the full resolution image.
#* Save both the image url string for the full resolution hemisphere image, and the Hemisphere title containing
#the hemisphere name. Use a Python dictionary to store the data using the keys `img_url` and `title`.
#* Append the dictionary with the image url string and the hemisphere title to a list.
# This list will contain one dictionary for each hemisphere.
# Set Website URL to scrape
url = 'https://astrogeology.usgs.gov/search/results?q=hemisphere+enhanced&k1=target&v1=Mars'
# call function to get Hemis Images URL
final_hemis_img_url_list = get_hemis_img(url)
final_hemis_img_url_list
# -
# +
# Assemble the final document handed to mars_app.py.
mars_dict["mars_title"] = news_title
mars_dict["mars_news"] = news_p
mars_dict["mars_image"] = final_title_feature_img
# NOTE(review): this stores the raw `mars_weather` string rather than the
# parsed `final_mars_weather` list built above -- confirm which is intended.
mars_dict["mars_currentweather"] = mars_weather
mars_dict["mars_comparison"] = mars_comparison
mars_dict["mars_profile"] = mars_profile
mars_dict["mars_image_urls"] = final_hemis_img_url_list
mars_dict
# -
# Spot-check one section of the assembled document.
mars_dict['mars_profile']
| Instructions/mission_to_mars.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# language: python
# name: python3
# ---
# # Chapter 8: Restructuring Data into Tidy Form
# ## Recipes
# * [Tidying variable values as column names with stack](#Tidying-variable-values-as-column-names-with-stack)
# * [Tidying variable values as column names with melt](#Tidying-variable-values-as-column-names-with-melt)
# * [Stacking multiple groups of variables simultaneously](#Stacking-multiple-groups-of-variables-simultaneously)
# * [Inverting stacked data](#Inverting-stacked-data)
# * [Unstacking after a groupby aggregation](#Unstacking-after-a-groupby-aggregation)
# * [Replicating pivot_table with a groupby aggregation](#Replicating-pivot_table-with-a-groupby-aggregation)
# * [Renaming axis levels for easy reshaping](#Renaming-axis-levels-for-easy-reshaping)
# * [Tidying when multiple variables are stored as column names](#Tidying-when-multiple-variables-are-stored-as-column-names)
# * [Tidying when multiple variables are stored as column values](#Tidying-when-multiple-variables-are-stored-as-column-values)
# * [Tidying when two or more values are stored in the same cell](#Tidying-when-two-or-more-values-are-stored-in-the-same-cell)
# * [Tidying when variables are stored in column names and values](#Tidying-when-variables-are-stored-in-column-names-and-values)
# * [Tidying when multiple observational units are stored in the same table](#Tidying-when-multiple-observational-units-are-stored-in-the-same-table)
import pandas as pd
import numpy as np
# # Tidying variable values as column names with stack
state_fruit = pd.read_csv('data/state_fruit.csv', index_col=0)
state_fruit
state_fruit.stack()
state_fruit_tidy = state_fruit.stack().reset_index()
state_fruit_tidy
state_fruit_tidy.columns = ['state', 'fruit', 'weight']
state_fruit_tidy
# BUG FIX: the original ended the second line of the first statement with a
# stray backslash, splicing two separate notebook cells into one invalid
# logical line (a SyntaxError in script form). Restored as two statements.
state_fruit.stack()\
           .rename_axis(['state', 'fruit'])
state_fruit.stack()\
           .rename_axis(['state', 'fruit'])\
           .reset_index(name='weight')
# ## There's more...
state_fruit2 = pd.read_csv('data/state_fruit2.csv')
state_fruit2
state_fruit2.stack()
state_fruit2.set_index('State').stack()
# # Tidying variable values as column names with melt
state_fruit2 = pd.read_csv('data/state_fruit2.csv')
state_fruit2
state_fruit2.melt(id_vars=['State'],
value_vars=['Apple', 'Orange', 'Banana'])
state_fruit2.index=list('abc')
state_fruit2.index.name = 'letter'
state_fruit2
state_fruit2.melt(id_vars=['State'],
value_vars=['Apple', 'Orange', 'Banana'],
var_name='Fruit',
value_name='Weight')
# ## There's more...
state_fruit2.melt()
state_fruit2.melt(id_vars='State')
# # Stacking multiple groups of variables simultaneously
movie = pd.read_csv('data/movie.csv')
actor = movie[['movie_title', 'actor_1_name', 'actor_2_name', 'actor_3_name',
'actor_1_facebook_likes', 'actor_2_facebook_likes', 'actor_3_facebook_likes']]
actor.head()
def change_col_name(col_name):
    """Normalize an actor column name: drop '_name' and move the actor
    number to the end of facebook-likes columns.

    'actor_1_name'           -> 'actor_1'
    'actor_1_facebook_likes' -> 'actor_facebook_likes_1'
    """
    cleaned = col_name.replace('_name', '')
    if 'facebook' not in cleaned:
        return cleaned
    fb_idx = cleaned.find('facebook')
    # 'actor' + '_facebook_likes' + '_<num>'
    return cleaned[:5] + cleaned[fb_idx - 1:] + cleaned[5:fb_idx - 1]
actor2 = actor.rename(columns=change_col_name)
actor2.head()
stubs = ['actor', 'actor_facebook_likes']
actor2_tidy = pd.wide_to_long(actor2,
stubnames=stubs,
i=['movie_title'],
j='actor_num',
sep='_').reset_index()
actor2_tidy.head()
# ## There's more...
df = pd.read_csv('data/stackme.csv')
df
df2 = df.rename(columns = {'a1':'group1_a1', 'b2':'group1_b2',
'd':'group2_a1', 'e':'group2_b2'})
df2
pd.wide_to_long(df2,
stubnames=['group1', 'group2'],
i=['State', 'Country', 'Test'],
j='Label',
suffix='.+',
sep='_')
# # Inverting stacked data
usecol_func = lambda x: 'UGDS_' in x or x == 'INSTNM'
college = pd.read_csv('data/college.csv',
index_col='INSTNM',
usecols=usecol_func)
college.head()
college_stacked = college.stack()
college_stacked.head(18)
college_stacked.unstack().head()
college2 = pd.read_csv('data/college.csv',
usecols=usecol_func)
college2.head()
college_melted = college2.melt(id_vars='INSTNM',
var_name='Race',
value_name='Percentage')
college_melted.head()
melted_inv = college_melted.pivot(index='INSTNM',
columns='Race',
values='Percentage')
melted_inv.head()
college2_replication = melted_inv.loc[college2['INSTNM'],
college2.columns[1:]]\
.reset_index()
college2.equals(college2_replication)
# ## There's more...
college.stack().unstack(0)
college.T
# # Unstacking after a groupby aggregation
employee = pd.read_csv('data/employee.csv')
employee.groupby('RACE')['BASE_SALARY'].mean().astype(int)
agg = employee.groupby(['RACE', 'GENDER'])['BASE_SALARY'].mean().astype(int)
agg
agg.unstack('GENDER')
agg.unstack('RACE')
# ## There's more...
agg2 = employee.groupby(['RACE', 'GENDER'])['BASE_SALARY'].agg(['mean', 'max', 'min']).astype(int)
agg2
# # Replicating pivot_table with a groupby aggregation
flights = pd.read_csv('data/flights.csv')
flights.head()
fp = flights.pivot_table(index='AIRLINE',
columns='ORG_AIR',
values='CANCELLED',
aggfunc='sum',
fill_value=0).round(2)
fp.head()
fg = flights.groupby(['AIRLINE', 'ORG_AIR'])['CANCELLED'].sum()
fg.head()
fg_unstack = fg.unstack('ORG_AIR', fill_value=0)
fg_unstack.head()
fp.equals(fg_unstack)
# ## There's more...
fp2 = flights.pivot_table(index=['AIRLINE', 'MONTH'],
columns=['ORG_AIR', 'CANCELLED'],
values=['DEP_DELAY', 'DIST'],
aggfunc=[np.mean, np.sum],
fill_value=0)
fp2.head()
flights.groupby(['AIRLINE', 'MONTH', 'ORG_AIR', 'CANCELLED'])['DEP_DELAY', 'DIST'] \
.agg(['mean', 'sum']) \
.unstack(['ORG_AIR', 'CANCELLED'], fill_value=0) \
.swaplevel(0, 1, axis='columns') \
.head()
# # Renaming axis levels for easy reshaping
college = pd.read_csv('data/college.csv')
cg = college.groupby(['STABBR', 'RELAFFIL'])['UGDS', 'SATMTMID'] \
.agg(['count', 'min', 'max']).head(6)
cg
cg = cg.rename_axis(['AGG_COLS', 'AGG_FUNCS'], axis='columns')
cg
cg.stack('AGG_FUNCS').head()
cg.stack('AGG_FUNCS').swaplevel('AGG_FUNCS', 'STABBR', axis='index').head()
cg.stack('AGG_FUNCS') \
.swaplevel('AGG_FUNCS', 'STABBR', axis='index') \
.sort_index(level='RELAFFIL', axis='index') \
.sort_index(level='AGG_COLS', axis='columns').head(6)
cg.stack('AGG_FUNCS').unstack(['RELAFFIL', 'STABBR'])
cg.stack(['AGG_FUNCS', 'AGG_COLS']).head(12)
# # There's more...
cg.rename_axis([None, None], axis='index').rename_axis([None, None], axis='columns')
# # Tidying when multiple variables are stored as column names
weightlifting = pd.read_csv('data/weightlifting_men.csv')
weightlifting
wl_melt = weightlifting.melt(id_vars='Weight Category',
var_name='sex_age',
value_name='Qual Total')
wl_melt.head()
sex_age = wl_melt['sex_age'].str.split(expand=True)
sex_age.head()
sex_age.columns = ['Sex', 'Age Group']
sex_age.head()
sex_age['Sex'] = sex_age['Sex'].str[0]
sex_age.head()
wl_cat_total = wl_melt[['Weight Category', 'Qual Total']]
wl_tidy = pd.concat([sex_age, wl_cat_total], axis='columns')
wl_tidy.head()
cols = ['Weight Category', 'Qual Total']
sex_age[cols] = wl_melt[cols]
# ## There's more...
age_group = wl_melt.sex_age.str.extract('(\d{2}[-+](?:\d{2})?)', expand=False)
sex = wl_melt.sex_age.str[0]
new_cols = {'Sex':sex,
'Age Group': age_group}
wl_tidy2 = wl_melt.assign(**new_cols).drop('sex_age', axis='columns')
wl_tidy2.head()
wl_tidy2.sort_index(axis=1).equals(wl_tidy.sort_index(axis=1))
# # Tidying when multiple variables are stored as column values
inspections = pd.read_csv('data/restaurant_inspections.csv', parse_dates=['Date'])
inspections.head(10)
inspections.pivot(index=['Name', 'Date'], columns='Info', values='Value')
inspections.set_index(['Name','Date', 'Info']).head(10)
inspections.set_index(['Name','Date', 'Info']).unstack('Info').head()
insp_tidy = inspections.set_index(['Name','Date', 'Info']) \
.unstack('Info') \
.reset_index(col_level=-1)
insp_tidy.head()
insp_tidy.columns = insp_tidy.columns.droplevel(0).rename(None)
insp_tidy.head()
inspections.set_index(['Name','Date', 'Info']) \
.squeeze() \
.unstack('Info') \
.reset_index() \
.rename_axis(None, axis='columns')
# ## There's more...
inspections.pivot_table(index=['Name', 'Date'],
columns='Info',
values='Value',
aggfunc='first') \
.reset_index()\
.rename_axis(None, axis='columns')
# # Tidying when two or more values are stored in the same cell
cities = pd.read_csv('data/texas_cities.csv')
cities
geolocations = cities.Geolocation.str.split(pat='. ', expand=True)
geolocations.columns = ['latitude', 'latitude direction', 'longitude', 'longitude direction']
geolocations
geolocations = geolocations.astype({'latitude':'float', 'longitude':'float'})
geolocations.dtypes
cities_tidy = pd.concat([cities['City'], geolocations], axis='columns')
cities_tidy
pd.concat([cities['City'], geolocations], axis='columns')
# ## How it works...
temp = geolocations.apply(pd.to_numeric, errors='ignore')
temp
temp.dtypes
# ## There's more...
cities.Geolocation.str.split(pat='° |, ', expand=True)
cities.Geolocation.str.extract('([0-9.]+). (N|S), ([0-9.]+). (E|W)', expand=True)
# # Tidying when variables are stored in column names and values
sensors = pd.read_csv('data/sensors.csv')
sensors
sensors.melt(id_vars=['Group', 'Property'], var_name='Year').head(6)
sensors.melt(id_vars=['Group', 'Property'], var_name='Year') \
.pivot_table(index=['Group', 'Year'], columns='Property', values='value') \
.reset_index() \
.rename_axis(None, axis='columns')
# ## There's more...
sensors.set_index(['Group', 'Property']) \
.stack() \
.unstack('Property') \
.rename_axis(['Group', 'Year'], axis='index') \
.rename_axis(None, axis='columns') \
.reset_index()
# # Tidying when multiple observational units are stored in the same table
movie = pd.read_csv('data/movie_altered.csv')
movie.head()
movie.insert(0, 'id', np.arange(len(movie)))
movie.head()
stubnames = ['director', 'director_fb_likes', 'actor', 'actor_fb_likes']
movie_long = pd.wide_to_long(movie,
stubnames=stubnames,
i='id',
j='num',
sep='_').reset_index()
movie_long['num'] = movie_long['num'].astype(int)
movie_long.head(9)
movie_table = movie_long[['id','title', 'year', 'duration', 'rating']]
director_table = movie_long[['id', 'director', 'num', 'director_fb_likes']]
actor_table = movie_long[['id', 'actor', 'num', 'actor_fb_likes']]
movie_table.head(9)
director_table.head(9)
actor_table.head(9)
movie_table = movie_table.drop_duplicates().reset_index(drop=True)
director_table = director_table.dropna().reset_index(drop=True)
actor_table = actor_table.dropna().reset_index(drop=True)
movie_table.head()
director_table.head()
movie.memory_usage(deep=True).sum()
movie_table.memory_usage(deep=True).sum() + \
director_table.memory_usage(deep=True).sum() + \
actor_table.memory_usage(deep=True).sum()
# +
director_cat = pd.Categorical(director_table['director'])
director_table.insert(1, 'director_id', director_cat.codes)
actor_cat = pd.Categorical(actor_table['actor'])
actor_table.insert(1, 'actor_id', actor_cat.codes)
director_table.head()
# -
actor_table.head()
director_associative = director_table[['id', 'director_id', 'num']]
dcols = ['director_id', 'director', 'director_fb_likes']
director_unique = director_table[dcols].drop_duplicates().reset_index(drop=True)
director_associative.head()
director_unique.head()
actor_associative = actor_table[['id', 'actor_id', 'num']]
acols = ['actor_id', 'actor', 'actor_fb_likes']
actor_unique = actor_table[acols].drop_duplicates().reset_index(drop=True)
actor_associative.head()
actor_unique.head()
movie_table.memory_usage(deep=True).sum() + \
director_associative.memory_usage(deep=True).sum() + \
director_unique.memory_usage(deep=True).sum() + \
actor_associative.memory_usage(deep=True).sum() + \
actor_unique.memory_usage(deep=True).sum()
movie_table.head()
# +
actors = actor_associative.merge(actor_unique, on='actor_id') \
.drop('actor_id', 1) \
.pivot_table(index='id', columns='num', aggfunc='first')
actors.columns = actors.columns.get_level_values(0) + '_' + \
actors.columns.get_level_values(1).astype(str)
directors = director_associative.merge(director_unique, on='director_id') \
.drop('director_id', 1) \
.pivot_table(index='id', columns='num', aggfunc='first')
directors.columns = directors.columns.get_level_values(0) + '_' + \
directors.columns.get_level_values(1).astype(str)
# -
actors.head()
directors.head()
movie2 = movie_table.merge(directors.reset_index(), on='id', how='left') \
.merge(actors.reset_index(), on='id', how='left')
movie2.head()
movie.equals(movie2[movie.columns])
| Pandas-CookBook/Chapter 08 Restructuring Data into Tidy Form.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# name: python3
# ---
# + [markdown] id="I6qdbBpaloCv"
# # 第8章 ベイズ推定:データを元に「確信」を高める手法
# + [markdown] id="_435d-6Gl0_I"
# ## 8.2 ベイズ推定の回帰分析への応用
# + [markdown] id="D8U1MtXimB7E"
# **[08BR-01]**
#
# 必要なモジュールをインポートします。
# + id="dmy0vrOT76h9"
import numpy as np
import matplotlib.pyplot as plt
from pandas import Series, DataFrame
from numpy.random import normal, multivariate_normal
# + [markdown] id="_caVniSmmFqP"
# **[08BR-02]**
#
# 正弦関数 $y=\sin(2\pi x)$ に、平均 0、標準偏差 0.3 の正規分布のノイズを載せたデータセットを生成する関数を定義します。
#
# これは、$0\le x\le 1$ の区間を等分した `num` 個の点 $\{x_n\}_{n=1}^N$ に対して、対応する $\{t_n\}_{n=1}^N$ の値を生成します。
# + id="ULN9rPu-77um"
def create_dataset(num):
    """Return (xs, ts): `num` evenly spaced points on [0, 1] and targets
    sin(2*pi*x) plus Gaussian noise (mean 0, std 0.3)."""
    xs = np.linspace(0, 1, num)
    noise = normal(loc=0, scale=0.3, size=num)
    return xs, np.sin(2 * np.pi * xs) + noise
# + [markdown] id="qX9Dz9_7g-8x"
# **[08BR-03]**
#
# $N=4, 6, 10, 100$ の 4 種類のトレーニングセットを生成します。
# + id="jw-GySqjHOXn"
trainset = []
for num in [4, 6, 10, 100]:
xs, ts = create_dataset(num)
trainset.append((xs, ts))
# + [markdown] id="20jO99OxmMhc"
# **[08BR-04]**
#
# ベイズ推定の計算結果を返す関数を用意します。
#
# これは、以下の値を返します。
#
# * 点 $x$ の予測分布(正規分布)の平均 $m(x)$
#
# $$
# \displaystyle m(x) = \beta\boldsymbol\phi(x)^{\rm T}\mathbf S\sum_{n=1}^Nt_n\boldsymbol\phi(x_n) = \boldsymbol\phi(x)^{\rm T}\overline{\mathbf w}
# $$
#
# * 点 $x$ の予測分布(正規分布)の標準偏差 $s(x)$
#
# $$
# s(x) = \beta^{-1} + \boldsymbol\phi(x)^{\rm T}\mathbf S\boldsymbol\phi(x)
# $$
#
# * 係数 $w$ の事後分布(多次元正規分布)の平均ベクトル $\overline{\mathbf w}$
#
# $$
# \displaystyle \overline{\mathbf w} = \beta\mathbf S\sum_{n=1}^Nt_n\boldsymbol\phi(x_n)
# $$
#
# * 係数 $w$ の事後分布(多次元正規分布)の分散共分散行列 $\mathbf S$
#
# $$
# \displaystyle \mathbf S^{-1} = \alpha\mathbf I + \beta\sum_{n=1}^N\boldsymbol\phi(x_n)\boldsymbol\phi(x)^{\rm T}
# $$
#
# $\alpha^{-1}$ はパラメーター $\mathbf w$ の事前分布の分散、$\beta^{-1}$ は観測データの分散を表します。ここでは、次の値を使用します。
#
# $$
# \alpha^{-1} = 100^2,\ \beta^{-1} = 0.3^2
# $$
# + id="gWIXlJhN79la"
def resolve(xs, ts, m):
    """Bayesian regression with an order-m polynomial basis over (xs, ts).

    Returns a tuple (mean_fun, deviation_fun, w_mean, s):
      mean_fun(xs)      -- predictive mean m(x) at each point of xs
      deviation_fun(xs) -- predictive standard deviation sqrt(s(x))
      w_mean            -- posterior mean vector of the weights
      s                 -- posterior covariance matrix S of the weights
    """
    beta = 1.0/0.3**2   # observation-noise precision (1 / variance)
    alpha = 1.0/80**2   # prior precision of the weights
    # NOTE(review): the markdown above states alpha^{-1} = 100^2, but the
    # code uses 80^2 -- confirm which value is intended.
    def phi(x):
        # polynomial basis (1, x, x^2, ..., x^m) as an (m+1, 1) column vector
        return np.array([[x**k] for k in range(m+1)])
    # posterior covariance: S^{-1} = alpha*I + beta * sum_n phi(x_n) phi(x_n)^T
    tmp = np.sum([np.dot(phi(x), phi(x).T) for x in xs], axis=0)
    s_inv = alpha * np.eye(m+1) + beta * tmp
    s = np.linalg.inv(s_inv)
    # posterior mean of the weights: w_mean = beta * S * sum_n t_n phi(x_n)
    tmp = np.sum([ts[n] * phi(xs[n]) for n in range(len(ts))], axis=0)
    w_mean = beta * np.dot(s, tmp)
    # predictive mean m(x) = phi(x)^T w_mean
    def mean_fun(xs): # universal function
        tmp = [np.dot(phi(x).T, w_mean)[0][0] for x in xs]
        return np.array(tmp)
    # predictive std sqrt(s(x)) with s(x) = 1/beta + phi(x)^T S phi(x)
    def deviation_fun(xs): # universal function
        tmp = [1.0/beta + np.dot(phi(x).T, np.dot(s, phi(x)))[0][0] for x in xs]
        return np.sqrt(np.array(tmp))
    return mean_fun, deviation_fun, w_mean, s
# + [markdown] id="-xqU8_eIhNfW"
# **[08BR-05]**
#
# 先ほどの関数を利用して、ベイズ推定の結果から予測されるデータ分布(次に観測されるデータの分布)をグラフ表示する関数を用意します。
# + id="NXWKRGMnOpPa"
def show_estimation(subplot, xs, ts, m):
    """Plot the Bayesian predictive distribution for an order-m polynomial fit.

    Draws on `subplot`: the training points (xs, ts), the true
    sin(2*pi*x) curve, the predictive mean, and the +/- one standard
    deviation band.
    """
    mean_fun, deviation_fun, _, _ = resolve(xs, ts, m)
    subplot.tick_params(axis='x', labelsize=12)
    subplot.tick_params(axis='y', labelsize=12)
    subplot.set_xlim(-0.05, 1.05)
    subplot.set_ylim(-2.0, 2.0)
    subplot.set_title('N={}'.format(len(ts)), fontsize=14)
    # training set
    subplot.scatter(xs, ts, marker='o', color='blue', label=None)
    # true curve
    linex = np.linspace(0, 1, 100)
    liney = np.sin(2*np.pi*linex)
    subplot.plot(linex, liney, color='green', linestyle='--')
    # predictive mean and +/- one standard deviation band
    # (FIX: renamed locals from `m`/`d` so the polynomial-degree
    # parameter `m` is no longer shadowed)
    mean_line = mean_fun(linex)
    dev_line = deviation_fun(linex)
    subplot.plot(linex, mean_line, color='red', label='mean')
    subplot.plot(linex, mean_line - dev_line, color='black', linestyle='--')
    subplot.plot(linex, mean_line + dev_line, color='black', linestyle='--')
    subplot.legend(loc=1, fontsize=14)
# + [markdown] id="H-lUvoSuhg0i"
# **[08BR-06]**
#
# 予測分布の平均 $m(x)$、および、標準偏差の幅を加えた値 $m(x)\pm \sqrt{s(x)}$ のグラフを描きます。
#
# 多項式の次数は $M=9$ を指定します。
# + colab={"base_uri": "https://localhost:8080/", "height": 530} id="xaIrkIQLOt8v" outputId="030812b4-dd0e-4506-fe74-887f94f16e71"
fig = plt.figure(figsize=(12, 8.5))
fig.subplots_adjust(wspace=0.3, hspace=0.3)
for i, (xs, ts) in enumerate(trainset):
subplot = fig.add_subplot(2, 2, i+1)
show_estimation(subplot, xs, ts, 9)
# + [markdown] id="iUWuXKgWiAi7"
# **[08BR-07]**
#
# 事後分布に従って得られる係数のサンプルを取得して、多項式のサンプルを表示する関数を用意します。
#
# この関数では、5種類のサンプルを取得してグラフを描きます。
# + id="wsKg_8gJbP6W"
def show_samples(subplot, xs, ts, m):
    """Plot 5 polynomial curves sampled from the posterior over the weights
    of an order-m Bayesian polynomial fit, plus the predictive mean."""
    mean_fun, _, w_mean, sigma = resolve(xs, ts, m)
    # draw 5 weight vectors from the posterior N(w_mean, sigma)
    ws_samples = multivariate_normal(w_mean.flatten(), sigma, 5)
    subplot.tick_params(axis='x', labelsize=12)
    subplot.tick_params(axis='y', labelsize=12)
    subplot.set_xlim(-0.05, 1.05)
    subplot.set_ylim(-2.0, 2.0)
    subplot.set_title('N={}'.format(len(ts)), fontsize=14)
    # training set
    subplot.scatter(xs, ts, marker='o', color='blue', label=None)
    # predictive mean (FIX: renamed local from `m` so the degree parameter
    # is no longer shadowed; also dropped the unused `num = len(ts)` local)
    linex = np.linspace(0, 1, 100)
    mean_line = mean_fun(linex)
    subplot.plot(linex, mean_line, color='red', label='mean')
    subplot.legend(loc=1, fontsize=14)
    # sampled polynomials: y = sum_i w_i * x^i
    def f(x, ws):
        y = 0
        for i, w in enumerate(ws):
            y += w * x**i
        return y
    for ws in ws_samples:
        liney = f(linex, ws)
        subplot.plot(linex, liney, color='red', alpha=0.5, linestyle='--')
# + [markdown] id="KB2TvIvJjAFx"
# **[08BR-08]**
#
# 実際に多項式のサンプルを描きます。
# + colab={"base_uri": "https://localhost:8080/", "height": 530} id="6Uri_Y9zcQYg" outputId="06affbbf-8b8b-4b24-b102-d7476f2c7a71"
fig = plt.figure(figsize=(12, 8.5))
fig.subplots_adjust(wspace=0.3, hspace=0.3)
for i, (xs, ts) in enumerate(trainset):
subplot = fig.add_subplot(2, 2, i+1)
show_samples(subplot, xs, ts, 9)
| 08-bayes_regression.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# language: python
# name: python3
# ---
from flask import Flask, render_template, request
import pandas as pd
import os
import shutil
import torch
from IPython.display import Image, clear_output # to display images
import cv2
import tqdm
import numpy as np
import glob
import sys
sys.path.insert(0, './yolov5')
from utils.datasets import *
from utils.utils import *
from fastai import *
from fastai.vision import *
from script import predict_plates,get_string_from_image
import sqlite3
from sqlite3 import Error
import warnings
warnings.filterwarnings("ignore")
def detect():
    """Run YOLOv5 detection on the CCTV video and publish the annotated clip.

    Pipeline: dump every video frame to JPEG, shell out to yolov5's
    detect.py over those frames, stitch the annotated frames back into an
    H264 mp4, and copy the result into Flask's static folder.

    NOTE(review): relies on a hard-coded absolute Windows path and on
    os.chdir side effects. If the video yields no frames, `size` is never
    assigned and VideoWriter raises NameError -- confirm inputs exist.
    """
    os.chdir('C:\\Users\\<NAME>\\Desktop\\DS\\Hackathon\\')
    vidcap = cv2.VideoCapture('yolov5/video/HD CCTV Traffic.mp4')
    success,image = vidcap.read()
    count = 0
    # 1) explode the video into individual JPEG frames
    while success:
        cv2.imwrite("yolov5/converted/frame%d.jpg" % count, image) # save frame as JPEG file
        success,image = vidcap.read()
        count += 1
    # 2) run the YOLOv5 detector over the extracted frames
    os.chdir("yolov5")
    os.system('python detect.py --weights last_yolov5s_results.pt --img 416 --conf 0.4 --source converted')
    # 3) collect the annotated frames and order them by frame number
    list_frames = pd.DataFrame(columns=['Path','Number'])
    list_frames['Path'] = sorted(glob.glob('inference/output/*.jpg'))
    list_frames['Number'] = list_frames['Path'].apply(lambda x: x.split('frame')[1].split('.')[0]).astype('int')
    list_frames = list_frames.sort_values('Number').reset_index(drop = True)
    img_array = []
    for filename in list_frames['Path']:
        img = cv2.imread(filename)
        height, width, layers = img.shape
        size = (width,height)
        img_array.append(img)
    # 4) re-encode the ordered frames as a 25 fps H264 mp4
    out = cv2.VideoWriter('Extracted/project.mp4',cv2.VideoWriter_fourcc(*'H264'), 25,size)
    for i in range(len(img_array)):
        out.write(img_array[i])
    out.release()
    owd = os.getcwd()  # NOTE(review): saved but never restored or used
    os.chdir('C:\\Users\\<NAME>\\Desktop\\DS\\Hackathon\\')
    shutil.copy('yolov5/Extracted/project.mp4', 'static/Extracted/project.mp4')
def extract():
    """Crop every detected vehicle out of the video into static/cropped/.

    Runs the YOLOv5 model frame-by-frame, keeps bounding boxes with a
    confidence score >= 0.05, and writes each crop as imgN.png.

    NOTE(review): detections come back as (x1, y1, x2, y2) per the
    scale_coords call, but below they are unpacked as (x, y, w, h) and
    sliced frame[y:y+h, x:x+w] -- this over-extends the crop (slicing
    merely clamps at the frame edge); confirm the intended box format.
    The bare `except` silently stops cropping on the first failure, and
    the `labels` DataFrame is built but never returned or saved.
    """
    os.chdir('C:\\Users\\<NAME>\\Desktop\\DS\\Hackathon\\')
    vs = cv2.VideoCapture('yolov5/video/HD CCTV Traffic.mp4')
    device = torch.device('cuda') if torch.cuda.is_available() else torch.device('cpu')
    # Load model
    model = torch.load('yolov5/last_yolov5s_results.pt', map_location=device)['model'].float() # load to FP32
    model.to(device).eval()
    labels = pd.DataFrame(columns=['image','label'])
    path = 'static/cropped/'
    j = 0
    while True:
        (grabbed,frame) = vs.read()
        if not grabbed:
            break
        # preprocess: resize to the network input and convert to a CHW tensor
        img = cv2.resize(frame,(416,416))
        img = img[:, :, ::-1].transpose(2, 0, 1) # BGR to RGB, to 3x416x416
        img = np.ascontiguousarray(img)
        img = torch.from_numpy(img).to(device)
        img = img.float() # uint8 to fp16/32
        img /= 255.0
        if img.ndimension() == 3:
            img = img.unsqueeze(0)
        # inference + NMS (conf 0.5, IoU 0.6)
        pred = model(img, augment=False)[0]
        pred = non_max_suppression(pred, 0.5, 0.6)
        boxes = []
        scores = []
        lb = []
        for i, det in enumerate(pred): # detections per image
            # save_path = 'draw/' + image_id + '.jpg'
            if det is not None and len(det):
                # Rescale boxes from img_size to im0 size
                det[:, :4] = scale_coords(img.shape[2:], det[:, :4], frame.shape)
                # Write results
                for *xyxy, conf, cls in det:
                    boxes.append([int(xyxy[0]), int(xyxy[1]), int(xyxy[2]), int(xyxy[3])])
                    scores.append(conf)
                    lb.append(cls)
        # keep only detections scoring at least 0.05
        boxes = np.array(boxes)
        scores = np.array(scores)
        lb = np.array(lb)
        boxes = boxes[scores >= 0.05].astype(np.int32)
        lb = lb[scores >=float(0.05)]
        scores = scores[scores >=float(0.05)]
        # crop each surviving box out of the original frame and save it
        for i in range(len(boxes)):
            j += 1
            x = boxes[i][0]
            y = boxes[i][1]
            w = boxes[i][2]
            h = boxes[i][3]
            imgg = frame[y:y+h, x:x+w]
            try:
                labels.at[j,'image'] = 'img' + str(j)
                labels.at[j,'label'] = lb[i]
                fname = 'img'+str(j)+'.png'
                cv2.imwrite(path+fname,imgg)
            except:
                break
def run():
    """Classify each cropped vehicle image and OCR its license plate.

    NOTE(review): several defects to confirm with the author:
      * `l` (used as l[no][:-2] in the results row) is never defined, so a
        NameError is raised and swallowed by the bare `except` on every
        iteration -- `results` is most likely never populated;
      * the outer loop iterates once per entry of static/cropped but the
        inner loop re-scans the whole directory each time, re-processing
        every image len(dir) times (O(n^2));
      * `test_db.csv` is opened for append as `f` but never written to;
      * `results` is built but never returned or saved.
    """
    os.chdir('C:\\Users\\<NAME>\\Desktop\\DS\\Hackathon\\')
    model = load_learner('','vehicle_classifier.pkl')   # vehicle-type classifier (fastai)
    model1=load_learner('','emergency_classifier.pkl')  # emergency / non-emergency classifier
    count=-1
    import cv2,random, tqdm
    results = pd.DataFrame(columns = ['S No','Name','License_No','Vehicle Type','Emergency','License Plate'])
    for files in os.listdir('static/cropped'):
        sub_path=os.path.join('static/cropped/','')
        for sub_files in tqdm.tqdm(os.listdir(sub_path)):
            count+=1
            img=open_image(os.path.join(sub_path,sub_files))
            dataset=['Ambulance', 'Bus', 'Car', 'Limousine', 'Motorcycle', 'Taxi', 'Truck', 'Van']
            emerge=['Non-Emergency','Emergency']
            tens=model.predict(img)[-1].numpy()   # vehicle-type probabilities
            tens1=model1.predict(img)[-1].numpy() # emergency probabilities
            with open('test_db.csv','a') as f:
                try:
                    gsfi = get_string_from_image(os.path.join(sub_path,sub_files))
                    if len(gsfi)>2:
                        no=random.randint(1,50000)
                        lic="IND_{}".format(no)
                        #sql_insert(count,l[no][:-2],dataset[np.argmax(tens)],lic,emerge[np.argmax(tens1)],get_string_from_image(os.path.join(sub_path,sub_files)))
                        results.at[count,:] = [count,l[no][:-2],dataset[np.argmax(tens)],lic,emerge[np.argmax(tens1)],get_string_from_image(os.path.join(sub_path,sub_files))]
                except:
                    continue
def create_connection(db_file):
    """Open a connection to the SQLite database at `db_file`.

    :param db_file: database file path
    :return: Connection object, or None when the connection fails
    """
    try:
        return sqlite3.connect(db_file)
    except Error as e:
        print(e)
    return None
def create_project(conn, project):
    """Insert a new user row into the Data table.

    :param conn: open sqlite3 Connection
    :param project: tuple (Name, Email, Password, Adhaar_Card_Number)
    :return: rowid of the inserted row, or None when the insert fails
    """
    sql = ''' INSERT INTO Data(Name,Email,Password,Adhaar_Card_Number)
              VALUES(?,?,?,?) '''
    try:
        cur = conn.cursor()
        cur.execute(sql, project)
        conn.commit()
    except Error as e:
        # BUG FIX: the original fell through to `return cur.lastrowid`
        # after printing, which raises NameError when cursor() itself
        # failed; report failure as None instead.
        print(e)
        return None
    return cur.lastrowid
def create_project_2(conn, project, password_entered):
    """Return True iff a Data row with Email == `project` exists and its
    stored password equals `password_entered`.

    :param conn: open sqlite3 Connection
    :param project: email address to look up
    :param password_entered: candidate password
    """
    cur = conn.cursor()
    cur.execute("SELECT Password FROM Data WHERE Email=?", (project,))
    rows = cur.fetchall()
    # BUG FIX: the original indexed rows[0][0] unconditionally and raised
    # IndexError for unknown emails; treat "no such user" as a failed login.
    return bool(rows) and rows[0][0] == password_entered
def create_project_3(conn, project):
    """Fetch all Fine values recorded for the given email in the Fines table.

    :param conn: open sqlite3 Connection
    :param project: email address to look up
    :return: list of single-element tuples, one per fine (possibly empty)
    """
    rows = conn.cursor().execute(
        "SELECT Fine FROM Fines WHERE Email=?", (project,)
    ).fetchall()
    return rows
# Flask application serving the license-plate dashboard.
server = Flask(__name__)
# +
@server.route('/')
def home():
    """Landing page (login / registration entry point)."""
    return render_template('main.html')
@server.route('/index.html', methods=['GET', 'POST'])
def admin():
    """Admin dashboard page."""
    return render_template('index.html')
@server.route('/blank.html', methods=['GET', 'POST'])
def blank():
    """Workspace page used as the target of the processing routes."""
    return render_template('blank.html')
@server.route('/login', methods=['GET','POST'])
def login():
    """Validate a login attempt; admins go to the dashboard, regular
    users to their fines page, failures back to the landing page."""
    email = request.form['email']
    password = request.form['Password']
    conn = create_connection('UsersData.db')
    if create_project_2(conn, email, password):
        if email == '<EMAIL>':
            # the admin account goes straight to the dashboard
            return render_template('index.html')
        payment = create_project_3(conn, email)
        return render_template('payment.html', data=payment)
    # failed login
    return render_template('main.html')
@server.route('/register', methods=['GET','POST'])
def registerDone():
    """Persist a newly registered user into the Data table, then return home."""
    os.chdir('C:\\Users\\<NAME>\\Desktop\\DS\\Hackathon\\')
    name = request.form['name']
    email = request.form['email']
    password = request.form['password']  # NOTE(review): stored in plaintext
    number = request.form['Number']
    conn = create_connection('UsersData.db')
    task_1 = (name, email, password, number)
    create_project(conn, task_1)
    return render_template('main.html')
@server.route('/register.html', methods=['GET', 'POST'])
def register():
    """Render the registration form."""
    return render_template('register.html')
@server.route('/extract_vehicles')
def extract_func():
    """Run frame extraction + YOLO detection, then show the workspace page."""
    # NOTE(review): despite the route name this calls detect(), while
    # /extract_vehicles_1 below calls extract() -- confirm the naming.
    detect()
    return render_template('blank.html')
@server.route('/extract_vehicles_1')
def extract_vehicles():
    """Crop detected vehicles out of the video, then show the workspace page."""
    extract()
    message = request.args.get("Vehicles Extracted")
    return render_template('blank.html', msg=message)
@server.route('/run_operations')
def run_operations():
    """Run the classification/OCR pipeline, then show the success page.

    BUG FIX: the original returned render_template(..., msg=message) with
    `message` never defined, raising NameError on every request, and never
    invoked run() despite the route name (mirroring how /extract_vehicles
    invokes detect()).
    """
    run()
    message = "Operations Completed"
    return render_template('success.html', msg=message)
# -
if __name__ == "__main__":
server.static_folder = 'static'
server.run()
conn = create_connection('UsersData.db')
payment = create_project_3(conn, '<EMAIL>')
payment
| index.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# language: python
# name: python3
# ---
# # Data Sufficiency and Selection
#
# In this notebook we will be covering the following
#
# - Feature Importance
# - Single Factor Analysis
# - Learning Curves
# all imports at the top!
import pandas as pd
from sklearn.tree import DecisionTreeClassifier
from sklearn.linear_model import LogisticRegression
from sklearn.feature_selection import (
SelectFromModel,
SelectKBest,
mutual_info_classif,
chi2,
f_classif
)
import numpy as np
from matplotlib import pyplot as plt
from sklearn.model_selection import learning_curve
from IPython.display import Image
df = pd.read_csv('data/titanic.csv')
df.head()
# # Single Factor Analysis on Categoricals
#
#
# ## nunique
#
# count how many unique values exist for a categorical
df.Ticket.nunique()
df.PassengerId.nunique()
# ## value_counts
#
# count the occurrences of each category for a feature
df.Ticket.value_counts().head()
df.Cabin.value_counts().head()
# ## plot the value counts for all categoricals
categorical_columns = df.select_dtypes(include='object').columns
pd.Series({
colname: df[colname].nunique()
for colname in categorical_columns
}, index=categorical_columns).plot.barh();
# # Learning curve
#
# Here is the function that is taken from the sklearn page on learning curves:
def plot_learning_curve(estimator, title, X, y, ylim=None, cv=None,
                        n_jobs=1, train_sizes=np.linspace(.1, 1.0, 5)):
    """
    Generate a simple plot of the test and training learning curve.
    Parameters
    ----------
    estimator : object type that implements the "fit" and "predict" methods
        An object of that type which is cloned for each validation.
    title : string
        Title for the chart.
    X : array-like, shape (n_samples, n_features)
        Training vector, where n_samples is the number of samples and
        n_features is the number of features.
    y : array-like, shape (n_samples) or (n_samples, n_features), optional
        Target relative to X for classification or regression;
        None for unsupervised learning.
    ylim : tuple, shape (ymin, ymax), optional
        Defines minimum and maximum yvalues plotted.
    cv : int, cross-validation generator or an iterable, optional
        Determines the cross-validation splitting strategy.
        Possible inputs for cv are:
        - None, to use the default 3-fold cross-validation,
        - integer, to specify the number of folds.
        - An object to be used as a cross-validation generator.
        - An iterable yielding train/test splits.
        For integer/None inputs, if ``y`` is binary or multiclass,
        :class:`StratifiedKFold` used. If the estimator is not a classifier
        or if ``y`` is neither binary nor multiclass, :class:`KFold` is used.
        Refer :ref:`User Guide <cross_validation>` for the various
        cross-validators that can be used here.
    n_jobs : integer, optional
        Number of jobs to run in parallel (default 1).
    train_sizes : array-like, optional
        Fractions (or absolute counts) of the training set to evaluate.
    Returns
    -------
    The matplotlib.pyplot module with the learning-curve figure drawn.
    """
    plt.figure()
    plt.title(title)
    if ylim is not None:
        plt.ylim(*ylim)
    plt.xlabel("Training examples")
    plt.ylabel("Score")
    # evaluate the estimator at each training-set size across CV folds
    train_sizes, train_scores, test_scores = learning_curve(
        estimator, X, y, cv=cv, n_jobs=n_jobs, train_sizes=train_sizes)
    train_scores_mean = np.mean(train_scores, axis=1)
    train_scores_std = np.std(train_scores, axis=1)
    test_scores_mean = np.mean(test_scores, axis=1)
    test_scores_std = np.std(test_scores, axis=1)
    plt.grid()
    # shaded bands: +/- one standard deviation across the CV folds
    plt.fill_between(train_sizes, train_scores_mean - train_scores_std,
                     train_scores_mean + train_scores_std, alpha=0.1,
                     color="r")
    plt.fill_between(train_sizes, test_scores_mean - test_scores_std,
                     test_scores_mean + test_scores_std, alpha=0.1, color="g")
    plt.plot(train_sizes, train_scores_mean, 'o-', color="r",
             label="Training score")
    plt.plot(train_sizes, test_scores_mean, 'o-', color="g",
             label="Test Set score")
    plt.legend(loc="best")
    return plt
# +
# and this is how we used it
X = df.select_dtypes(exclude='object').fillna(-1).drop('Survived', axis=1)
y = df.Survived
clf = DecisionTreeClassifier(random_state=1, max_depth=5)
plot_learning_curve(X=X, y=y, estimator=clf, title='DecisionTreeClassifier');
# -
# And remember the internals of what this function is actually doing by knowing how to use the
# output of the scikit [learning_curve](http://scikit-learn.org/stable/modules/generated/sklearn.model_selection.learning_curve.html) function
# +
# here's where the magic happens! The learning curve function is going
# to take your classifier and your training data and subset the data
train_sizes, train_scores, test_scores = learning_curve(clf, X, y)
# 5 different training set sizes have been selected
# with the smallest being 59 and the largest being 594
# the remaining is used for testing
print('train set sizes', train_sizes)
print('test set sizes', X.shape[0] - train_sizes)
# -
# each row corresponds to a training set size
# each column corresponds to a cross validation fold
# the first row is the highest because it corresponds
# to the smallest training set which means that it's very
# easy for the classifier to overfit and have perfect
# test set predictions while as the test set grows it
# becomes a bit more difficult for this to happen.
train_scores
# The test set scores where again, each row corresponds
# to a train / test set size and each column is a differet
# run with the same train / test sizes
test_scores
# Let's average the scores across each fold so that we can plot them
train_scores_mean = np.mean(train_scores, axis=1)
test_scores_mean = np.mean(test_scores, axis=1)
# +
# this one isn't quite as cool as the other because it doesn't show the variance
# but the fundamentals are still here and it's a much simpler one to understand
learning_curve_df = pd.DataFrame({
'Training score': train_scores_mean,
'Test Set score': test_scores_mean
}, index=train_sizes)
learning_curve_df.plot.line(title='Learning Curve');
# -
# # Feature importance
#
# Using any tree-based estimator, you can get feature importances on a model that has already been fitted
# model must be fitted
clf.fit(X, y)
feature_importances = clf.feature_importances_
# +
# now you can plot the importances by creating a pandas Series with the correct index
pd.Series(clf.feature_importances_, index=X.columns).plot.barh(title='feature importances');
# -
# # Feature importance
#
# Using linear models that fit coefficients
clf = LogisticRegression(random_state=1, solver='lbfgs')
clf.fit(X, y)
abs_coefs = pd.Series(clf.coef_[0, :], index=X.columns).abs()
abs_coefs.plot.barh();
# ## Correlations
#
# Computing correlations is pure pandas. Remember that there are a few
X.corr()
# ## SelectFromModel
#
# Using a previously trained model that is either a linear or tree-based model, we can select use SelectFromModel to select the top k features without having to know which type it is.
clf_linear = LogisticRegression(random_state=1, solver='lbfgs')
clf_linear.fit(X, y);
clf_tree = DecisionTreeClassifier(random_state=1, max_depth=5)
clf_tree.fit(X, y);
def select_from_model(clf, X_train):
    """Return up to 4 column names chosen by SelectFromModel from a
    pre-fitted linear or tree-based estimator."""
    selector = SelectFromModel(clf, prefit=True, max_features=4)
    mask = selector.get_support()
    return [col for col, keep in zip(X_train.columns, mask) if keep]
select_from_model(clf_linear, X)
select_from_model(clf_tree, X)
# ## SelectKBest
#
# Using functions that take measures between individual features and the target column, you can also select the k with the highest scores
def select_k_best(method, X, y, k=4):
    """Score every feature with SelectKBest and return a DataFrame sorted
    by score (descending), flagging the k selected features.

    p-values are reported only for the statistical tests; mutual
    information does not produce them.
    """
    selector = SelectKBest(method, k=k)
    selector.fit(X, y)
    if method is mutual_info_classif:
        p_values = None
    else:
        p_values = selector.pvalues_.round(4)
    frame = pd.DataFrame({
        'column': X.columns,
        'p_values': p_values,
        'scores': selector.scores_,
        'selected': selector.get_support()
    })
    return frame.sort_values(by='scores', ascending=False)
select_k_best(mutual_info_classif, X, y)
# chi2 cannot handle negative numbers so quick hack to make them all positive
# DON'T DO THIS BLINDLY! THIS IS JUST A HACK FOR DEMONSTRATION PURPOSES!
select_k_best(chi2, X.abs(), y)
select_k_best(f_classif, X, y)
| S01 - Bootcamp and Binary Classification/SLU16 - Data Sufficiency and Selection/Examples Notebook.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# language: python
# name: python3
# ---
## The premise of this project is the implementation of a CNN with VGG-16 as a feature selector
import matplotlib.pyplot as plt
# %matplotlib inline
# +
#Create an ImageGenerator object that is used to randomize and make certain small transformations to the image
#to build better and robust networks
from keras.preprocessing.image import ImageDataGenerator
# -
# Data-augmentation pipeline: random rotations, shifts, zoom and shear make
# the trained network more robust, and rescaling maps 8-bit pixel values
# into [0, 1].
image_gen = ImageDataGenerator(rotation_range=30,       # rotate up to +/-30 degrees
                               width_shift_range=0.1,   # horizontal shift, fraction of width
                               height_shift_range=0.1,  # vertical shift, fraction of height
                               rescale=1/255,           # normalize pixels to [0, 1]
                               zoom_range=0.2,
                               shear_range=0.2,
                               fill_mode='nearest')     # fill pixels exposed by the transforms
# ## Model:
#
from keras.applications import vgg16
from keras.models import Sequential
from keras.layers import Dense,Dropout,Flatten,Conv2D,MaxPooling2D
from keras import optimizers
# +
# Load VGG-16 pre-trained on ImageNet without its fully-connected head.
# pooling="max" ends the network with a global max-pool, so its output is a
# flat feature vector that Dense layers can be stacked onto directly.
model = vgg16.VGG16(weights='imagenet', include_top=False,
                    input_shape=(150,150,3), pooling="max")
# Freeze every layer except the last five, so only the deepest blocks are
# fine-tuned while the earlier, generic filters keep their ImageNet weights.
for layer in model.layers[:-5]:
    layer.trainable = False
# Show which layers will actually be trained.
for layer in model.layers:
    print(layer, layer.trainable)
# Rebuild the (partially frozen) VGG-16 inside a Sequential model and append
# a small classification head for the 10 output classes.
transfer_model = Sequential()
for layer in model.layers:
    transfer_model.add(layer)
transfer_model.add(Dense(128, activation="relu"))
transfer_model.add(Dropout(0.5))                      # regularization against overfitting
transfer_model.add(Dense(10, activation="softmax"))   # 10-class probability output
# NOTE(review): `lr` and `decay` are deprecated in newer Keras releases in
# favour of `learning_rate` and schedules — confirm against the installed version.
adam = optimizers.Adam(lr=0.0001, beta_1=0.9, beta_2=0.999, epsilon=1e-08, decay=0.00001)
transfer_model.compile(loss="categorical_crossentropy",
                       optimizer=adam,
                       metrics=["accuracy"])
# -
transfer_model.summary()
train_directory = '/home/amoghavarsha/Project_CNN/my_project_env/Dataset/training/training'
validation_directory = '/home/amoghavarsha/Project_CNN/my_project_env/Dataset/validation/validation'
## Getting the training and the validation sets
batch_size = 16
# Stream batches from the class-per-subdirectory layout; labels are one-hot
# ('categorical') to match the categorical_crossentropy loss above.
# NOTE(review): the augmenting `image_gen` is reused for the validation
# directory, so validation images are randomly transformed too — confirm
# this is intended (a plain rescale-only generator is more conventional).
train_gen = image_gen.flow_from_directory(train_directory,target_size=(150,150),batch_size=batch_size,
                                          class_mode='categorical')
validation_gen = image_gen.flow_from_directory(validation_directory,target_size=(150,150),batch_size=batch_size,
                                               class_mode='categorical')
# 1097 and 272 are presumably the training/validation image counts — TODO confirm.
# NOTE(review): fit_generator is deprecated in newer Keras; Model.fit accepts generators.
results = transfer_model.fit_generator(train_gen,epochs=30,steps_per_epoch=1097//batch_size,
                                       validation_data=validation_gen,validation_steps=272//batch_size)
plt.figure(figsize=(15,5))
plt.plot(results.history['accuracy'],'r',label='Training Accuracy')
plt.plot(results.history['val_accuracy'],'b',label='Validation Accuracy')
plt.xlabel('epochs')
plt.ylabel('accuracy')
plt.legend(loc='lower right')
plt.title('Training Vs Validation')
plt.figure(figsize=(15,5))
plt.plot(results.history['loss'],'r',label='Training Loss')
plt.plot(results.history['val_loss'],'b',label='Validation loss')
plt.xlabel('epochs')
plt.ylabel('loss')
plt.legend(loc='lower right')
plt.title('Training Vs Validation')
# +
#transfer_model.save('TL_CNN_Monkey.h5')
# -
_, acc = transfer_model.evaluate_generator(validation_gen, steps=272 //batch_size)
print('The testing accuracy for the CNN with the 10-Species-Monkey dataset is : %.3f' % (acc * 100.0))
# +
# from tensorflow import keras
# x = keras.models.load_model('TL_CNN_Monkey.h5')
| CNN_Monkey_Dataset/Transfer Learning.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python [conda env:unidata-python-workshop]
# language: python
# name: conda-env-unidata-python-workshop-py
# ---
# <div style="width:1000 px">
#
# <div style="float:right; width:98 px; height:98px;"><img src="https://pbs.twimg.com/profile_images/1187259618/unidata_logo_rgb_sm_400x400.png" alt="Unidata Logo" style="height: 98px;"></div>
#
# <h1>Making an Interactive GOES Plot</h1>
# <h3>Unidata Python Workshop</h3>
#
# <div style="clear:both"></div>
# </div>
#
# <hr style="height:2px;">
from datetime import datetime
import cartopy.crs as ccrs
import cartopy.feature as cfeature
from IPython.display import display
from ipywidgets import Dropdown, interact, fixed, Select
from matplotlib import patheffects
import matplotlib.pyplot as plt
import metpy
from metpy.plots import add_timestamp
import numpy as np
from siphon.catalog import TDSCatalog
def open_dataset(date, channel, idx, region):
    """
    Open and return a netCDF Dataset object for a given date, channel, and
    image index of GOES-16 data from the Unidata THREDDS server.
    """
    # Build the catalog URL for the requested sector/channel/day.
    catalog_url = (
        'https://thredds.ucar.edu/thredds/catalog/satellite/goes/east/products/'
        f'CloudAndMoistureImagery/{region}/Channel{channel:02d}/{date:%Y%m%d}/catalog.xml'
    )
    catalog = TDSCatalog(catalog_url)
    # Pick the dataset at position `idx` and open it remotely as xarray.
    return catalog.datasets[idx].remote_access(use_xarray=True)
def plot_GOES16_channel(date, idx, channel, region):
    """
    Get and plot a GOES 16 data band from the ABI.

    Parameters mirror ``open_dataset``: catalog date, dataset index within
    that day's catalog, ABI channel number, and sector/region name. The
    finished figure is displayed and then closed so that repeated
    interactive calls do not accumulate open matplotlib figures.
    """
    ds = open_dataset(date, channel, idx, region)
    # Parse CF metadata so the projection and coordinates are attached.
    dat = ds.metpy.parse_cf('Sectorized_CMI')
    proj = dat.metpy.cartopy_crs
    x = dat['x']
    y = dat['y']
    fig = plt.figure(figsize=(10, 10))
    ax = fig.add_subplot(1, 1, 1, projection=proj)
    ax.add_feature(cfeature.COASTLINE, linewidth=2)
    ax.add_feature(cfeature.STATES, linestyle=':', edgecolor='black')
    ax.add_feature(cfeature.BORDERS, linewidth=2, edgecolor='black')
    # Remove any image already on the axes before drawing the new one.
    for im in ax.images:
        im.remove()
    im = ax.imshow(dat, extent=(x.min(), x.max(), y.min(), y.max()), origin='upper')
    # Start time is encoded as year + day-of-year (%j), hours/minutes/seconds.
    timestamp = datetime.strptime(ds.start_date_time, '%Y%j%H%M%S')
    add_timestamp(ax, time=timestamp, high_contrast=True,
                  pretext=f'GOES 16 Ch.{channel} - ',
                  time_format='%d %B %Y %H%MZ', y=0.01,
                  fontsize=18)
    display(fig)
    plt.close()
# +
channel_list = {u'1 - Blue Band 0.47 \u03BCm': 1,
u'2 - Red Band 0.64 \u03BCm': 2,
u'3 - Veggie Band 0.86 \u03BCm': 3,
u'4 - Cirrus Band 1.37 \u03BCm': 4,
u'5 - Snow/Ice Band 1.6 \u03BCm': 5,
u'6 - Cloud Particle Size Band 2.2 \u03BCm': 6,
u'7 - Shortwave Window Band 3.9 \u03BCm': 7,
u'8 - Upper-Level Tropo. WV Band 6.2 \u03BCm': 8,
u'9 - Mid-Level Tropo. WV Band 6.9 \u03BCm': 9,
u'10 - Low-Level WV Band 7.3 \u03BCm': 10,
u'11 - Cloud-Top Phase Band 8.4 \u03BCm': 11,
u'12 - Ozone Band 9.6 \u03BCm': 12,
u'13 - Clean IR Longwave Band 10.3 \u03BCm': 13,
u'14 - IR Longwave Band 11.2 \u03BCm': 14,
u'15 - Dirty Longwave Band 12.3 \u03BCm': 15,
u'16 - CO2 Longwave IR 13.3 \u03BCm': 16}
region = Select(
options=['Mesoscale-1', 'Mesoscale-2', 'CONUS', 'PuertoRico', 'FullDisk'],
description='Region:',
)
channel = Dropdown(
options=channel_list,
value=9,
description='Channel:',
)
date = datetime.utcnow()
interact(plot_GOES16_channel, date=fixed(date), idx=fixed(-2),
channel=channel, region=region)
# -
| notebooks/Satellite_Data/GOES_Interactive_Plot.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# language: python
# name: python3
# ---
# Logistic Regression supports only solvers in ['liblinear', 'newton-cg', 'lbfgs', 'sag', 'saga']
#
# titles_options = [("Confusion matrix, with Logistic regression", None),] --> withoutLin & withoutLin (before loop)
#
# https://scikit-learn.org/stable/auto_examples/neighbors/plot_classification.html
#
# https://scikit-learn.org/stable/auto_examples/model_selection/plot_confusion_matrix.html
#
# https://machinelearningmastery.com/difference-test-validation-datasets/
def knnConfusion ():
    """Fit a logistic-regression classifier and plot its confusion matrix.

    NOTE(review): despite the name there is no KNN here — the classifier is
    LogisticRegression throughout; the final plot title repeats that mix-up.
    Relies on module-level helpers ``scaler()`` and ``setup()`` and the
    label list ``names``, none of which are defined in this cell.
    """
    # NOTE(review): x_train/x_test are never used below — the model is both
    # fitted and evaluated on the full (X, y) returned by setup(), so the
    # confusion matrix reflects training performance, not held-out accuracy.
    x_train, x_test = scaler()
    classifier = LogisticRegression(random_state = 0, solver='lbfgs', multi_class='auto')
    X, y = setup()
    classifier.fit(X, y)
    title = 'Confusion matrix, with Logistic regression'
    # normalize='true' scales each row (true class) to sum to 1.
    display = plot_confusion_matrix(classifier, X, y,
                                    display_labels=names, cmap=plt.cm.Blues, normalize='true')
    display.ax_.set_title(title)
    print(title)
    print(display.confusion_matrix)
    plt.title('Confusion matrix, with Logistic regression KNN')
    plt.show()
# +
classifier = svm.SVC(kernel='linear', C=0.01).fit(x_train, y_train)
titles_options = [("Confusion matrix, without normalization", None),]
for title, normalize in titles_options:
display = plot_confusion_matrix(classifier, x_test, y_test,
display_labels=names, cmap=plt.cm.Blues, normalize=normalize)
display.ax_.set_title(title)
print(title)
#print(display.confusion_matrix)
plt.show()
| data science/DIT852 - intro/assignment_2/dump.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: seaborn-py38-latest
# language: python
# name: seaborn-py38-latest
# ---
# + tags=["hide"]
import seaborn as sns
sns.set_theme(style="ticks")
# -
# Calling the constructor requires a long-form data object. This initializes the grid, but doesn't plot anything on it:
tips = sns.load_dataset("tips")
sns.FacetGrid(tips)
# + active=""
# Assign column and/or row variables to add more subplots to the figure:
# -
sns.FacetGrid(tips, col="time", row="sex")
# + active=""
# To draw a plot on every facet, pass a function and the name of one or more columns in the dataframe to :meth:`FacetGrid.map`:
# -
g = sns.FacetGrid(tips, col="time", row="sex")
g.map(sns.scatterplot, "total_bill", "tip")
# + active=""
# The variable specification in :meth:`FacetGrid.map` requires a positional argument mapping, but if the function has a ``data`` parameter and accepts named variable assignments, you can also use :meth:`FacetGrid.map_dataframe`:
# -
g = sns.FacetGrid(tips, col="time", row="sex")
g.map_dataframe(sns.histplot, x="total_bill")
# + active=""
# One difference between the two methods is that :meth:`FacetGrid.map_dataframe` does not add axis labels. There is a dedicated method to do that:
# -
g = sns.FacetGrid(tips, col="time", row="sex")
g.map_dataframe(sns.histplot, x="total_bill")
g.set_axis_labels("Total bill", "Count")
# + active=""
# Notice how the bins have different widths in each facet. A separate plot is drawn on each facet, so if the plotting function derives any parameters from the data, they may not be shared across facets. You can pass additional keyword arguments to synchronize them. But when possible, using a figure-level function like :func:`displot` will take care of this bookkeeping for you:
# -
g = sns.FacetGrid(tips, col="time", row="sex")
g.map_dataframe(sns.histplot, x="total_bill", binwidth=2)
g.set_axis_labels("Total bill", "Count")
# + active=""
# The :class:`FacetGrid` constructor accepts a ``hue`` parameter. Setting this will condition the data on another variable and make multiple plots in different colors. Where possible, label information is tracked so that a single legend can be drawn:
# -
g = sns.FacetGrid(tips, col="time", hue="sex")
g.map_dataframe(sns.scatterplot, x="total_bill", y="tip")
g.set_axis_labels("Total bill", "Tip")
g.add_legend()
# + active=""
# When ``hue`` is set on the :class:`FacetGrid`, however, a separate plot is drawn for each level of the variable. If the plotting function understands ``hue``, it is better to let it handle that logic. It is important, however, to ensure that each facet will use the same hue mapping. In the sample ``tips`` data, the ``sex`` column has a categorical datatype, which ensures this:
# -
g = sns.FacetGrid(tips, col="time")
g.map_dataframe(sns.scatterplot, x="total_bill", y="tip", hue="sex")
g.set_axis_labels("Total bill", "Tip")
g.add_legend()
# + active=""
# The size and shape of the plot is specified at the level of each subplot using the ``height`` and ``aspect`` parameters:
# -
# Change the height and aspect ratio of each facet:
g = sns.FacetGrid(tips, col="day", height=3.5, aspect=.65)
g.map(sns.histplot, "total_bill")
# + active=""
# If the variable assigned to ``col`` has many levels, it is possible to "wrap" it so that it spans multiple rows:
# -
g = sns.FacetGrid(tips, col="size", height=2, col_wrap=3)
g.map(sns.histplot, "total_bill")
# You can pass custom functions to plot with, or to annotate each facet. Your custom function must use the matplotlib state-machine interface to plot on the "current" axes, and it should catch additional keyword arguments:
# +
import matplotlib.pyplot as plt
def annotate(data, **kws):
    """Write the facet's row count onto the current axes.

    Meant for FacetGrid.map_dataframe: receives the facet's subset of the
    dataframe plus any extra keyword arguments, and draws on the "current"
    matplotlib axes using axes-fraction coordinates.
    """
    ax = plt.gca()
    ax.text(.1, .6, f"N = {len(data)}", transform=ax.transAxes)
g = sns.FacetGrid(tips, col="time")
g.map_dataframe(sns.scatterplot, x="total_bill", y="tip")
g.set_axis_labels("Total bill", "Tip")
g.map_dataframe(annotate)
# + active=""
# The :class:`FacetGrid` object has some other useful parameters and methods for tweaking the plot:
# -
g = sns.FacetGrid(tips, col="sex", row="time", margin_titles=True)
g.map_dataframe(sns.scatterplot, x="total_bill", y="tip")
g.set_axis_labels("Total bill", "Tip")
g.set_titles(col_template="{col_name} patrons", row_template="{row_name}")
g.set(xlim=(0, 60), ylim=(0, 12), xticks=[10, 30, 50], yticks=[2, 6, 10])
g.tight_layout()
g.savefig("facet_plot.png")
# + tags=["hide"]
import os
if os.path.exists("facet_plot.png"):
os.remove("facet_plot.png")
# + active=""
# You also have access to the underlying matplotlib objects for additional tweaking:
# -
g = sns.FacetGrid(tips, col="sex", row="time", margin_titles=True, despine=False)
g.map_dataframe(sns.scatterplot, x="total_bill", y="tip")
g.set_axis_labels("Total bill", "Tip")
g.fig.subplots_adjust(wspace=0, hspace=0)
for (row_val, col_val), ax in g.axes_dict.items():
if row_val == "Lunch" and col_val == "Female":
ax.set_facecolor(".95")
else:
ax.set_facecolor((0, 0, 0, 0))
| doc/docstrings/FacetGrid.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3 (ipykernel)
# language: python
# name: python3
# ---
# # Evaluation
# The `Evaluation` class is the actual interface for the user with the
# functionalities of the `pyEvalData` package. It provides access to the raw
# data via the `Source` object, given on initialization.
# The main features of the `Evaluation` class are the definition of counter
# aliases as well as new counters by simple algebraic expressions.
# At the same time pre- and post-filters can be applied to the raw and
# evaluated data, respectively.
# Much efforts have been put into the binning, averaging, and error calculation
# of the raw data.
# In addition to the evaluation of a list of scans or scan sequence of one or
# multiple scans in dependence of an external paramter, the `Evaluation` class
# also provides high-level helper functions for plotting and fitting the
# according results.
# + [markdown] tags=[]
# ## Setup
#
# Here we do the necessary import for this example
# +
# %load_ext autoreload
# %autoreload 2
import matplotlib.pyplot as plt
import numpy as np
import pyEvalData as ped
# import lmfit for fitting
import lmfit as lf
# import some useful fit functions
import ultrafastFitFunctions as ufff
# define the path for the example data
example_data_path = '../../../example_data/'
# + [markdown] tags=[]
# ## Source
#
# Here we initialize the `Source` for the current evaluation. It is based on raw
# data in a [SPEC file](https://certif.com/content/spec) which was generated by
# the open-source software [Sardana](https://sardana-controls.org).
# -
spec = ped.io.Spec(file_name='sardana_spec.spec',
file_path=example_data_path,
use_nexus=True,
force_overwrite=False,
update_before_read=False,
read_and_forget=True)
# ## The `Evaluation` class
#
# For the most basic example we just have to provide a `Source` on initialization:
ev = ped.Evaluation(spec)
# Now it is possible to check the available attributes of the `Evaluation`
# object, which will be explained step-by-step in the upcoming sections.
print(ev.__doc__)
# Most of the attributes of the `Evaluation` class are well explained in the `docstring` above
# and described in more details below.
# The `custom_counters` attribute might soon be deprecated.
# The `t0` attribute is used for easy normalization to 1 by
# dividing the data by the mean of all values which are `xcol < t0`.
# This is typically useful for time-resolved delay scans but might be renamed
# for a more general approach in the future.
# The `statistics_type` attribute allows to switch between *gaussian* statistics,
# which calculates the error from the standard derivation, and *poisson* statistics,
# which calculates the error from $1/\sqrt N$ with $N$ being the total number of
# photons in the according bin.
# ## Simple plot example
#
# To plot data, the `Evaluation` object only needs to know the `xcol` as
# horizontal axis as well a list of *counters* to plot, which is called `clist`.
#
# First we can check the available scan numbers in the source:
spec.get_all_scan_numbers()
# Now we can check for the available data for a specific scan
spec.scan1.data.dtype.names
# So let's try to plot the counters `Pumped` and `Unpumped` vs the motor `delay` for scan #1:
# +
ev.xcol = 'delay'
ev.clist = ['Pumped', 'Unpumped']
plt.figure()
ev.plot_scans([1])
plt.xlim(-10, 50)
plt.xlabel('delay (ps)')
plt.show()
# -
# ## Algebraic expressions
# For now, we only see a lot of noise. So let's work a bit further on the data we
# plot. The experiment was an ultrafast [MOKE](https://en.wikipedia.org/wiki/Magneto-optic_Kerr_effect)
# measurement, which followed the polarization rotation of the probe pulse after
# a certain `delay` in respect to the pump pulse. Typically, this magnetic contrast
# is improved by subtracting the measured signal for two opposite magnetization
# directions of the sample, as the MOKE effect depends on the sample's magnetization.
#
# In our example, we have two additional *counters* available, which contain the
# data for negative magnetic fields (*M* - minus): `PumpedM` and `UnpumpedM`
# While the two former *counters* were acquired for positive fields.
#
# Let's plot the difference signal for the *pumped* and *unpumped* signals:
# +
ev.xcol = 'delay'
ev.clist = ['Pumped-PumpedM', 'Unpumped-UnpumpedM']
plt.figure()
ev.plot_scans([1])
plt.xlim(-10, 50)
plt.xlabel('delay (ps)')
plt.show()
# -
# The new plot already shows much more dynamics in the *pumped* vs. the *unpumped*
# signal. However, we can still improve that, by normalizing one by the other:
# +
ev.xcol = 'delay'
ev.clist = ['(Pumped-PumpedM)/(Unpumped-UnpumpedM)']
plt.figure()
ev.plot_scans([1])
plt.xlim(-10, 50)
plt.xlabel('delay (ps)')
plt.show()
# -
# This does look much better but we lost the absolute value of the contrast.
# Let's simply multiply the trace with the average of the *unpumped* magnetic
# contrast:
# +
ev.xcol = 'delay'
ev.clist = ['(Pumped-PumpedM)/(Unpumped-UnpumpedM)*mean(Unpumped-UnpumpedM)']
plt.figure()
ev.plot_scans([1])
plt.xlim(-10, 50)
plt.xlabel('delay (ps)')
plt.show()
# -
# So besides simple operations such as `+. -. *, /` we can also use some basic
# `numpy` functionalities. You can check the available functions by inspection
# of the attribute `math_keys`:
ev.math_keys
# But of course our current *counter* name is rather bulky. So lets define some
# aliases using the attribute `cdef`:
ev.cdef['pumped_mag'] = 'Pumped-PumpedM'
ev.cdef['unpumped_mag'] = 'Unpumped-UnpumpedM'
ev.cdef['rel_mag'] = 'pumped_mag/unpumped_mag'
ev.cdef['abs_mag'] = 'pumped_mag/unpumped_mag*mean(unpumped_mag)'
# +
ev.xcol = 'delay'
ev.clist = ['abs_mag']
plt.figure()
ev.plot_scans([1])
plt.xlim(-10, 50)
plt.xlabel('delay (ps)')
plt.show()
# -
# ## Binning
# In many situations it is desirable to reduce the data density or to plot the
# data on a new grid. This can be easily achieved by the `xgrid` keyword of the
# `plot_scans` method.
#
# Here we plot the same data as before on a three reduced grids with 0.1, 1, and
# 5 ps step width. Please note that the errorbars appear due to the averaging of
# multiple point in the bins of the grid. The errorbars are vertical and horizontal.
# We can also skip the `xlim` setting here, as our grid is in the same range as
# before.
# +
ev.xcol = 'delay'
ev.clist = ['abs_mag']
plt.figure()
ev.plot_scans([1], xgrid=np.r_[-10:50:0.1])
ev.plot_scans([1], xgrid=np.r_[-10:50:1])
ev.plot_scans([1], xgrid=np.r_[-10:50:5])
plt.xlabel('delay (ps)')
plt.show()
# -
# ## Averaging & error propagation
# In order to improve statistics even further, scans are often repeated an averaged.
# This was also done for this experimental example and all scans #1-6 were done
# with the same settings.
#
# We can simply average them by providing all scans of interest to the `plot_scans`
# method:
# +
ev.xcol = 'delay'
ev.clist = ['abs_mag']
plt.figure()
ev.plot_scans([1, 2, 3, 4, 5, 6], xgrid=np.r_[-10:50:0.1])
plt.xlabel('delay (ps)')
plt.show()
# -
# Hm, somehow this did not really do the job, right?
# Although the scattering of the circle symbols has decreased, the errorbars
# are much larger than for the single scan before.
#
# Let's check the individual scans to see what happened:
# +
ev.xcol = 'delay'
ev.clist = ['abs_mag']
plt.figure()
ev.plot_scans([1], xgrid=np.r_[-10:50:0.1])
ev.plot_scans([2], xgrid=np.r_[-10:50:0.1])
ev.plot_scans([3], xgrid=np.r_[-10:50:0.1])
ev.plot_scans([4], xgrid=np.r_[-10:50:0.1])
ev.plot_scans([5], xgrid=np.r_[-10:50:0.1])
ev.plot_scans([6], xgrid=np.r_[-10:50:0.1])
plt.xlabel('delay (ps)')
plt.show()
# -
# Individually all scans look very much the same, with very small errorbars.
# So why do we get so large errorbars when we average them?
#
# Let's go one more step back and plot the `Unpumped` signal for all scans with a
# large grid of 5 ps for clarity:
# +
ev.xcol = 'delay'
ev.clist = ['Unpumped']
plt.figure()
ev.plot_scans([1], xgrid=np.r_[-10:50:5])
ev.plot_scans([2], xgrid=np.r_[-10:50:5])
ev.plot_scans([3], xgrid=np.r_[-10:50:5])
ev.plot_scans([4], xgrid=np.r_[-10:50:5])
ev.plot_scans([5], xgrid=np.r_[-10:50:5])
ev.plot_scans([6], xgrid=np.r_[-10:50:5])
plt.xlabel('delay (ps)')
plt.show()
# -
# We can observe a significant drift of the raw data which results in deviations
# that are not statistically distributed anymore.
#
# This essentially means, that it makes a difference if we
# 1. evaluate the expression `abs_mag` for every scan individually and
# eventually average the resulting traces
# 2. first average the raw data (`Pumped, PumpedM, Unpumped, UnpumpedM`)
# and then calculate final trace for `abs_mag` using the averaged raw data.
# In the later case we need to carry out a proper error propagation to
# determine the errors for `abs_mag`.
#
# The `Evaluation` class allows to switch between both cases by the attribute flag
# `propagate_errors` which is `True` by default and handles the error propagation
# automatically using the [uncertainties](https://pythonhosted.org/uncertainties) package.
# For our last example we were following option 2. as described above.
# Accordingly, rather large errors from the drifting of the raw signals were propagated.
#
# Now lets compare to option 1. without error propagation:
# +
ev.xcol = 'delay'
ev.clist = ['abs_mag']
plt.figure()
ev.propagate_errors = True
ev.plot_scans([1, 2, 3, 4, 5, 6], xgrid=np.r_[-10:50:0.1])
ev.propagate_errors = False
ev.plot_scans([1, 2, 3, 4, 5, 6], xgrid=np.r_[-10:50:0.1])
plt.xlabel('delay (ps)')
plt.legend(['error propagation', 'no error propagation'])
plt.show()
# -
# The application of the both options strongly depends on the type of noise and
# drifts of the acquired data.
# ## `plot_scans()` options
#
# Let's check all arguments of the `plot_scans()` method by simply calling:
help(ev.plot_scans)
# Most of the above arguments are *plotting options* and will be changed/simplified
# in a future release.
# The `xerr` and `yerr` arguments allow to change the type of errorbars in `x `and `y`
# direction between *standard error, standard derivation* and *no error*.
# The `norm2one` flag allows to normalize the data to 1 for all data which is before
# `Evaluation.t0` on the `xcol`.
# The `skip_plot` option disables plotting at all and can be handy if only access to
# the return values is desired.
# The returned data contains the `xcol` and according error as `ndarray` named `x2plot`
# and `xerr2plot`, while the according counters and errors from the `clist` are given
# as `OrderedDict`s `y2plot` and `yerr2plot`. The `keys` of these dictionaries
# correspond to the elements in the `clist`.
# ## Scan sequences
#
# Experimentally it is common to repeat similar scans while varying an external parameter,
# such as the sample environment (temperature, external fields, etc.).
# For this common task, the `Evaluation` class provides a method named
# `plot_scan_sequence()` which wraps around the `plot_scans()` method.
#
# First, we have to define the `scan_sequence` as a nested list to be correctly parsed
# by the `plot_scan_sequence()` method.
# For that, we use the day time of the scans as an external parameter.
# We can access such meta information directly from the `Source` object as follows:
print(spec.scan1.time)
print(spec.scan2.time)
print(spec.scan3.time)
print(spec.scan4.time)
print(spec.scan5.time)
print(spec.scan6.time)
# Now we create the `scan_sequence` as a `list`, which contains one or multiple
# entries. Each entry can be a `list` or `tuple` which contains two elements:
# The scan list containing one or multiple scan numbers, and a string or number
# describing the external parameter.
scan_sequence = [
# ([scan numbers], parameter)
([1], spec.scan1.time), # first entry
([2], spec.scan2.time),
([3], spec.scan3.time),
([4], spec.scan4.time),
([5], spec.scan5.time),
([6], spec.scan6.time), # last entry
]
# The minimum example below does not differ too much from plotting all six scans
# manually by the `plot_scans()` method:
plt.figure()
ev.plot_scan_sequence(scan_sequence)
plt.show()
# Obviously, the legend did not take the scan time into account. Let's check the
# documentation for some details:
help(ev.plot_scan_sequence)
# In the docstring we can find many arguments already known from the `plot_scans()`
# method. In order to fix the legend labels we need to tell the method about the
# `sequence_type` as an argument. Otherwise it will enumerate the scans by default.
# In our case we provided a `text` type label:
plt.figure()
sequence_data, parameters, names, label_texts = \
ev.plot_scan_sequence(scan_sequence, sequence_type='text')
plt.show()
# In the example above we also catched the return values. Here the `parameters`
# correspond exactly to the data we provided in the `scan_sequence` while the
# `label_texts` are the formatted string as written in the legend.
# The `names` correspond to the auto-generated name of each scan as given by
# the `plot_scans()` method.
#
# The actual `sequence_data` is again an `OrderedDict` where the `keys` are given
# by the strings in the `xcol` and `clist` attributes of the `Evaluation` object.
# Each value for a given `key` is a `list` of `ndarray`s that hold the data for
# every parameter.
print(sequence_data.keys())
# Let's make an 2D plot from the `scan_sequence`:
# +
x = sequence_data['delay'][0]
y = np.arange(6)
z = sequence_data['abs_mag']
plt.figure()
plt.pcolormesh(x, y, z, shading='auto')
plt.xlim(-10, 50)
plt.xlabel('delay (ps)')
plt.yticks(y, label_texts, rotation='horizontal')
plt.ylabel('scan time')
plt.colorbar()
plt.show()
# -
# ## Fit scan sequences
#
# Finally, we want to fit the `scan_sequence` and extract the according fit parameters
# for every trace. Here we use the `fit_scan_sequence()` method. So let's check
# the documentation first:
help(ev.fit_scan_sequence)
# Again we find a lot of previously defined arguments which we are already familiar with.
# For the fitting, we need to provide first of all a proper fitting model `mod` and the
# according fit parameters `pars` with *initial* and *boundary* conditions.
# Here we rely on the [`lmfit`](https://lmfit.github.io/lmfit-py/) package.
# So please dive into its great documentation before continuing here.
#
# In order to describe our data best, we would like to use a double-exponential function for
# the initial decrease and subsequent increase of the magnetization. Moreover, we have to
# take into account the *step-like* behaviour before and after the excitation at *delay=0* ps
# as well as the temporal resolution of our setup as mimicked by a convolution with a Gaussian
# function.
#
# Such rather complex fitting function is provided by the
# [ultrafastFitFunctions](https://github.com/EmCeBeh/ultrafastFitFunctions) package
# which as been already imported in the [Setup](#setup).
help(ufff.doubleDecayConvScale)
# The documentation for the fitting functions is hopefully coming soon :)
#
# Now lets create the model and parameters:
# +
mod = lf.Model(ufff.doubleDecayConvScale)
pars = lf.Parameters()
pars.add('mu', value=0)
pars.add('tau1', value=0.2)
pars.add('tau2', value=10)
pars.add('A', value=0.5)
pars.add('q', value=1)
pars.add('alpha', value=1, vary=False)
pars.add('sigS', value=0.05, vary=False)
pars.add('sigH', value=0, vary=False)
pars.add('I0', value=0.002)
# -
# We are not going too much in to detail of the fitting function, so let's do
# the actual fit.
# For that, we limit the data again on a reduced grid given by the `xgrid` argument
# and provide the correct `sequence_type` for the label generation.
plt.figure()
ev.fit_scan_sequence(scan_sequence, mod, pars, xgrid=np.r_[-10:50:0.01],
sequence_type='text')
plt.show()
# The results does already look very good, but lets access some more information:
plt.figure()
res, parameters, sequence_data = ev.fit_scan_sequence(scan_sequence, mod, pars,
xgrid=np.r_[-10:50:0.01],
sequence_type='text',
show_single=True,
fit_report=1)
plt.show()
# ### Access fit results
#
# The results of the fits are given in the `res` dictionary. Here the keys correspond
# again to the elements in the `clist`:
print(res.keys())
# For every counter in the `clist` we have a nested dictionary with all best values
# and errors for the individual fit parameters. Moreover, we can access some general
# parameters such as the *center of mass* (`CoM`) or *integral* (`int`), as well as the
# `fit` objects themselves.
print(res['abs_mag'].keys())
# So let's plot the decay amplitude `A` for the different parameters in the
# `scan_sequence`:
plt.figure()
plt.errorbar(parameters, res['abs_mag']['A'], yerr=res['abs_mag']['AErr'], fmt='-o')
plt.xlabel('scan time')
plt.ylabel('decay amplitude')
plt.show()
# ## Filters
# +
# to be done
| docs/source/examples/evaluation.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# language: python
# name: python3
# ---
# # IEEE European Low Voltage Test Feeder:
#
# http://sites.ieee.org/pes-testfeeders/resources/
#
# The current IEEE test cases are focused on North American style systems; however it is common outside of North America to see low-voltage distribution systems, both radial and meshed. It is important to make sure that tools support both dominant styles of distribution system configuration. This test case seeks to fill a benchmark gap by presenting a number of common low-voltage configurations. This circuit also introduces quasi-static time series simulations.
#
# IEEE European LV network is a generic 0.416 kV network serviced by one 0.8 MVA MV/LV transformer and a 11kV external grid. The network supplies 906 LV buses and 55 single phase loads.
#
# # Snapshot of Time series data
#
# In the benchmark document, there are three snapshots taken from a time series data.
#
# - 12:01 AM : Off Peak(1 min)
# - 09:26 AM : On Peak (566 min)
# - 12:00 AM : Off Peak (1440 min)
#
# All the three networks have been saved into pandapower.networks
# We can select them using :
#
# - 'off_peak_1',
# - 'on_peak_566',
# - 'off_peak_1440'
# +
import pandapower as pp
import pandapower.networks as nw
net = nw.ieee_european_lv_asymmetric('on_peak_566')
# -
# # Plotting the network
#
# - 11 KV External Grid ( cyan triangle)
# - 0.8 MVA 11/0.416 kV Transformer ( Intersecting Circles)
# - Loads
# - Phase A: red triangles,
# - Phase B: yellow box
# - Phase C: blue circle
#
# **PS:**
#
# **Maximum unbalanced node 0.74% ( Black rectangle in the fig)**
#
# **Max Line Loading 33.10 % ( Black line in the fig)**
# +
import pandapower.plotting as plot
import numpy as np
try:
import seaborn
colors = seaborn.color_palette()
except:
colors = ["b", "g", "r", "c", "y"]
# %matplotlib inline
# Plot all the buses
# Buses as small dots (zorder 10 keeps them above the grey line layer).
bc = plot.create_bus_collection(net, net.bus.index, size=.2, color=colors[0], zorder=10)
# Transformers: one collection for the connecting lines, one for the patches.
tlc, tpc = plot.create_trafo_collection(net, net.trafo.index, color="g",size =1.5 )
# All lines, drawn from bus geodata rather than per-line geodata.
lcd = plot.create_line_collection(net, net.line.index, color="grey", linewidths=0.5, use_bus_geodata=True)
# External grid connection (cyan triangle).
sc = plot.create_bus_collection(net, net.ext_grid.bus.values, patch_type="poly3", size=1, color="c", zorder=11)
# Single-phase loads, one collection per phase:
# A = red triangles, B = yellow boxes, C = blue circles.
ldA = plot.create_bus_collection(net, net.asymmetric_load.bus.values[np.where(net.asymmetric_load.p_a_mw >0)], patch_type="poly3", size=.5, color="r", zorder=11)
ldB = plot.create_bus_collection(net, net.asymmetric_load.bus.values[np.where(net.asymmetric_load.p_b_mw >0)], patch_type="rect", size=.5, color="y", zorder=11)
ldC = plot.create_bus_collection(net, net.asymmetric_load.bus.values[np.where(net.asymmetric_load.p_c_mw >0)], patch_type="circle", size=.5, color="b", zorder=11)
# Highlight the most loaded line and the most unbalanced bus in black.
max_load = plot.create_line_collection(net, np.array([net.res_line_3ph.loading_percent.idxmax()]), color="black", linewidths=15, use_bus_geodata=True)
max_unbal = plot.create_bus_collection(net, np.array([net.res_bus_3ph.unbalance_percent.idxmax()]), patch_type="rect", size=1, color="black", zorder=11)
# Render every collection onto a single 20x20-inch figure.
plot.draw_collections([lcd, bc, tlc, tpc, sc,ldA,ldB,ldC,max_load,max_unbal], figsize=(20,20))
# -
# # Sample Result Values
#
# Max loaded line and most unbalanced load has been marked in black in the plot.
#
# The exact values are provided below:
# - Maximum unbalance %
# - Max Line Loading %
# Highest voltage-unbalance percentage across all buses
net.res_bus_3ph.unbalance_percent.max()
# Highest loading percentage across all lines
net.res_line_3ph.loading_percent.max()
| tutorials/ieee_european_lv_asymmetric.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# language: python
# name: python3
# ---
# # Building a Spam Filter with Naive Bayes
#
# 
#
# In this project, I'm going to build a spam filter for SMS messages using the multinomial Naive Bayes algorithm. The goal of this project is to write a program that classifies new messages with an accuracy greater than 80% — so I expect that more than 80% of the new messages will be classified correctly as spam or ham (non-spam).
#
# For training the algorithm, I using a dataset of 5,572 SMS messages that are already classified by humans. The dataset was put together by <NAME> and <NAME>, and it can be <a href="https://archive.ics.uci.edu/ml/machine-learning-databases/00228/smsspamcollection.zip" target="_blank">downloaded</a> from <a href="https://archive.ics.uci.edu/ml/index.php" target="_blank"> The UCI Machine Learning Repository</a>. The data collection process is described in more details on <a href="https://archive.ics.uci.edu/ml/datasets/sms+spam+collection" target="_blank">this page</a>, where you can also find some of the papers authored by <NAME> and <NAME>.
#
# ## Exploring the Dataset
#
# Import required library and open dataset:
# +
from IPython.display import display
import matplotlib.pyplot as plt
import pandas as pd
import re
# Load the raw SMS collection (tab-separated, no header row):
# 'cat' holds the label (`ham`/`spam`), 'text' holds the message body.
sms_raw = pd.read_csv('data/SMSSpamCollection', sep='\t',
                      header=None, names=['cat', 'text'])
# View head and tail, and general info for dataset
display(sms_raw.head())
display(sms_raw.tail())
display("Numbers messages by category ",sms_raw.cat.value_counts())
display("Share messages by category in %",sms_raw.cat.value_counts(normalize = True)*100)
# Plot messages by category
fig, ax = plt.subplots(figsize = (12, 8))
ax = sms_raw.cat.value_counts().plot.bar(rot = 0, color=['C0', 'C3'])
ax.set_title("The category messages for sms_raw dataset.", fontsize=14,
             fontweight="bold")
plt.show()
# Check nan values
sms_raw.info()
# -
# Write function `word_stat(df)` and collect more detailed statical information about words in the `sms_raw` dataset :
# +
def word_stat(df):
    """Count word occurrences per message category.

    Parameters
    ----------
    df : pandas.DataFrame
        SMS dataset whose first column is the category (``'ham'``/``'spam'``)
        and whose second column is the message text.

    Returns
    -------
    tuple of pandas.DataFrame
        ``(ham_word, spam_word, total_word)`` — each with a ``word`` column
        and a ``times`` column holding that word's occurrence count in ham
        messages, spam messages, and the whole dataset respectively.
    """
    ham_counts = {}
    spam_counts = {}
    total_counts = {}
    # Address columns positionally (as the original row[0]/row[1] did) so the
    # function works for any frame laid out as (category, text); iterrows()
    # integer indexing on a label-indexed Series is deprecated in pandas.
    for category, text in zip(df.iloc[:, 0], df.iloc[:, 1]):
        # Normalize: replace non-word runs with spaces, lowercase, split.
        words = re.sub(r"(\W+)", " ", text).lower().split()
        # Ham words accumulate in ham_counts, anything else in spam_counts.
        target = ham_counts if category == "ham" else spam_counts
        for word in words:
            total_counts[word] = total_counts.get(word, 0) + 1
            target[word] = target.get(word, 0) + 1

    def _to_frame(counts):
        # Dict -> two-column DataFrame with columns 'word' / 'times'.
        frame = pd.DataFrame.from_dict(counts, orient="index").reset_index()
        return frame.rename(columns={"index": "word", 0: "times"})

    return _to_frame(ham_counts), _to_frame(spam_counts), _to_frame(total_counts)
ham_raw, spam_raw, total_raw = word_stat(sms_raw)
# Unique-word counts (rows) and total word counts per category.
# NOTE(review): "spam_inique"/"total_inique" look like typos for "unique";
# the names are kept because the surrounding narrative refers to them.
ham_unique = ham_raw.shape[0]
ham_count = ham_raw.times.sum()
spam_inique = spam_raw.shape[0]
spam_count = spam_raw.times.sum()
total_inique = total_raw.shape[0]
total_count = total_raw.times.sum()
print (f"The ham category in the sms_raw datset, total unique words: {ham_unique:,}, \
total words: {ham_count:,}.")
print (f"\nThe spam category in the sms_raw datset, total unique words:\
{spam_inique:,}, total words: {spam_count:,}.")
print (f"\nThe sms_raw datset - total unique words: {total_inique:,}, total words: \
{total_count:,}.")
print(f"\nThe share spam for unique total words : {spam_inique/total_inique:.3%}, \
\nthe share spam word numbers for total word numbers : {spam_count/total_count:.3%}")
# -
# Note that the number of unique words overall (`total_unique` = 8,753) is less than the sum of `ham_unique` and `spam_unique`; hence some words occur in both the `ham` and the `spam` messages — see below for the words shared between the `ham_raw` and `spam_raw` datasets.
# Ten most frequent ham words
ham_raw.sort_values("times", ascending = False).head(10)
# Ten most frequent spam words
spam_raw.sort_values("times", ascending = False).head(10)
# ## Split `sms_raw` dataset into training and test datasets
# +
# Randomize dataset (fixed seed for reproducibility)
sms_rand = sms_raw.copy().sample(frac = 1, random_state = 1)
# Calculate index - the 80% length of sms_raw dataset
train_index = int(round(len(sms_rand) * 0.8))
# Split for train and test df
sms_train = sms_rand.iloc[:train_index,:].reset_index(drop = True)
sms_test = sms_rand.iloc[train_index:,:].reset_index(drop = True)
# Display head and tail and general info for train set
print("1. sms_train info")
display(sms_train.head())
display(sms_train.tail())
display(sms_train.info())
# Show margin values for train set
print("sms_train margin category")
display(sms_train.cat.value_counts())
# Show in percent train set
print("sms_train share % category")
display(sms_train.cat.value_counts(normalize=True)*100.0)
# Display head and tail and general info for test set
print("\n2. sms_test info")
display(sms_test.head())
display(sms_test.tail())
display(sms_test.info())
# Show margin values for test set
print("sms_test margin category")
display(sms_test.cat.value_counts())
# Show in percent test set
# BUG FIX: this label previously said "sms_train" although the numbers
# displayed are for sms_test.
print("sms_test share % category")
display(sms_test.cat.value_counts(normalize=True)*100.0)
# -
# We can see that `sms_raw` was split correctly: the integer percentages of spam/ham messages in `sms_train` and `sms_test` match each other and the parent `sms_raw` dataset.
#
# Extract the statical information from `sms_train` dataset using `word_stat(df)` function:
# +
# Extract and display information about train dataset
train_ham, train_spam, train_total = word_stat(sms_train)
# Unique-word counts (rows) and total word counts per training category.
# NOTE(review): "inique" looks like a typo for "unique"; names kept because
# later cells (the Laplace-smoothing denominators) reference them.
train_ham_unique = train_ham.shape[0]
train_ham_count = train_ham.times.sum()
train_spam_inique = train_spam.shape[0]
train_spam_count = train_spam.times.sum()
train_total_inique = train_total.shape[0]
train_total_count = train_total.times.sum()
print (f"\nThe ham category in the sms_train datset, total unique words: {train_ham_unique:,}, \
total words: {train_ham_count:,}.")
print (f"\nThe spam category in the sms_train datset, total unique words:\
{train_spam_inique:,}, total words: {train_spam_count:,}.")
print (f"\nThe train_total datset - total unique words: {train_total_inique:,}, total words: \
{train_total_count:,}.")
print(f"\nThe share spam unique words : {train_spam_inique/train_total_inique:.3%}, \
\nthe share spam word numbers to total word numbers : {train_spam_count/train_total_count:.3%}")
# -
# # Define general statistical and probability values and constants.
#
# I now have a clean training set and almost all the information about it, so I can begin creating the spam filter. The Naive Bayes algorithm needs to answer these two probability questions to be able to classify new messages:
#
# $$
# P(Spam | w_1,w_2, ..., w_n) \propto P(Spam) \cdot \prod_{i=1}^{n}P(w_i|Spam)
# $$
#
# $$
# P(Ham | w_1,w_2, ..., w_n) \propto P(Ham) \cdot \prod_{i=1}^{n}P(w_i|Ham)
# $$
#
# Also, to calculate P(wi|Spam) and P(wi|Ham) inside the formulas above, we'll need to use these equations:
#
# $$
# P(w_i|Spam) = \frac{N_{w_i|Spam} + \alpha}{N_{Spam} + \alpha \cdot N_{Vocabulary}}
# $$
#
# $$
# P(w_i|Ham) = \frac{N_{w_i|Ham} + \alpha}{N_{Ham} + \alpha \cdot N_{Vocabulary}}
# $$
#
# Some of the terms in the four equations above will have the same value for every new message. Calculating these terms once lets us avoid repeating the computations each time a new message comes in. Below we calculate the following values for the training set:
#
# - *N<sub>w<sub>i</sub>|Spam</sub>* — the number of times the word *w<sub>i</sub>* occurs in spam messages,
# - *N<sub>w<sub>i</sub>|Ham</sub>* — the number of times the word *w<sub>i</sub>* occurs in ham messages,
# - *N<sub>Spam</sub>* — total number of words in spam messages,
# - *N<sub>Ham</sub>* — total number of words in ham messages,
# - *N<sub>Vocabulary</sub>* — total number of unique words in the vocabulary,
# - *α* — a smoothing parameter.
#
# In fact, it is required to create a pivot probability matrix based of existing data in the next datasets - `train_ham`, `train_spam`, and `train_total`.
#
# Calculate marginal probability for ham - `p_ham` and for spam - `p_spam` categories and create `word_pm` dataframe with values of times word in the `ham` and `spam` categories from `train_ham` and `train_spam` and calculate and filling values of conditional probability for `word(i)|Spam`as `p_spam` and `word(i)|Ham` as `p_sam`:
# +
# Rename columns for further creating word probability matrix
train_total = train_total.rename(columns = {"times":"n_total"})
train_ham = train_ham.rename(columns = {"times":"n_ham"})
train_spam = train_spam.rename(columns = {"times":"n_spam"})
# Create words probability matrix - further as `word_pm`
# Add ham values from train_ham
word_pm = train_total.copy().merge(train_ham, how = 'left', on = 'word')
# Add spam values from train_spam
word_pm = word_pm.copy().merge(train_spam, how = 'left', on = 'word')
# Fill NaN with 0 *before* computing the probabilities: a word that never
# occurs in one category has no row after the left merges, and a NaN count
# would poison the corresponding p_ham/p_spam value.
word_pm.fillna(0, inplace=True)
# p_ham = (N(word|ham) + alpha) / (N_ham + alpha * N_vocabulary),
# i.e. Laplace smoothing with alpha = 1.
word_pm.insert(3, "p_ham", ((word_pm.n_ham + 1.0)/
                            (train_ham_count + 1.0 * train_total_inique)))
# p_spam computed analogously for the spam category.
word_pm.insert(5, "p_spam", ((word_pm.n_spam + 1.0)/
                             (train_spam_count + 1.0 * train_total_inique)))
# Display head for ham and spam
print("1. Top 10 popular ham words")
display(word_pm.sort_values("p_ham", ascending = False).head(10))
print("\n2. Top 10 popular spam words")
display(word_pm.sort_values("p_spam", ascending = False).head(10))
display(word_pm.info())
# -
# ## Define function for classify SMS sentences
#
# Now I am ready to create the spam filter as a function that can:
# - Takes in as input a new message.
# - Calculates *P(Spam|message)* and *P(Ham|message)* using the following formulas:
# $$
# P(Spam | w_1,w_2, ..., w_n) \propto P(Spam) \cdot \prod_{i=1}^{n}P(w_i|Spam)
# $$
#
# $$
# P(Ham | w_1,w_2, ..., w_n) \propto P(Ham) \cdot \prod_{i=1}^{n}P(w_i|Ham)
# $$
#
# - Compares both values and:
# - if *P(Ham|message)* > *P(Spam|message)*, then the message is classified as ham,
# - if *P(Ham|message)* < *P(Spam|message)*, then the message is classified as spam,
# - if *P(Ham|message)* = *P(Spam|message)*, then the algorithm may request human help.
#
# If a new message contains some words that are not in the vocabulary, these words will be simply ignored for calculating the probabilities.
#
# Let's create this function and check it with a three messages that are obviously spam or ham:
# +
# Prior (marginal) probabilities of each category in the training set.
# Index by label rather than integer position: positional __getitem__ on a
# string-indexed Series is deprecated in pandas, and relying on value_counts()
# frequency order would silently swap the priors if spam ever outnumbered ham.
p_ham = sms_train.cat.value_counts(normalize = True)['ham']
p_spam = sms_train.cat.value_counts(normalize = True)['spam']
def bayes_classify_draft(sentence):
    '''Classify an SMS message as 'ham' or 'spam' (verbose draft version).

    The message is stripped of punctuation, lower-cased and split into words;
    P(Spam|sentence) and P(Ham|sentence) are accumulated from the priors
    (globals `p_spam`/`p_ham`) and the per-word conditional probabilities in
    the global `word_pm` matrix. Words missing from the vocabulary are simply
    ignored. Prints both posteriors before returning the verdict.
    '''
    # Raw string avoids the invalid '\W' escape warning of the original.
    words = re.sub(r'(\W+)', ' ', sentence).lower().split()
    # Build the vocabulary lookup once: the original rebuilt
    # list(word_pm.word.values) — an O(vocabulary) scan — for every word.
    vocabulary = set(word_pm.word.values)
    p_spam_given_sentence = p_spam
    p_ham_given_sentence = p_ham
    for word in words:
        if word in vocabulary:
            p_ham_given_sentence *= word_pm[word_pm.word == word].p_ham.values[0]
            p_spam_given_sentence *= word_pm[word_pm.word == word].p_spam.values[0]
    print("P(Ham|message): ", p_ham_given_sentence)
    print("P(Spam|message): ", p_spam_given_sentence)
    if p_ham_given_sentence > p_spam_given_sentence:
        return 'ham'
    elif p_spam_given_sentence > p_ham_given_sentence:
        return 'spam'
    else:
        return 'needs human classification!'
# Three quick smoke checks: a spam-looking offer, a friendly ham message,
# and a longer ham message.
sentence_1 = "Vadim, do you want to win an amazing super-prize today?"
print(sentence_1)
print("Type of sentence_1 :", bayes_classify_draft(sentence_1), "\n")
sentence_2 = "Hello, you a look super-amazing today!"
print(sentence_2)
print("Type of sentence_2 :", bayes_classify_draft(sentence_2),"\n" )
sentence_3 = "Vadim, it seems you are writing overly complex and incomprehensible programs!"
print(sentence_3)
print("Type of sentence_3 :", bayes_classify_draft(sentence_3))
# -
# In two cases - `1`and `2` - the word `Vadim` doesn't exist in `word_pm` dataframe.
# NOTE(review): vocabulary words are lower-cased by word_stat, so the
# capitalised probe "Vadim" can never match — checking "vadim" would be the
# meaningful membership test.
"Vadim" in list(word_pm.word.values)
# ## Classifying a new sentences in `sms_test` and measuring the spam filter's accuracy
#
# Little modify `bayes_classify_draft` - remove print messages and save it as `bayes_classify`:
# +
# Prior (marginal) probabilities of each category in the training set.
# Label-based lookup avoids deprecated positional indexing on a
# string-indexed Series and is robust to value_counts() ordering.
p_ham = sms_train.cat.value_counts(normalize = True)['ham']
p_spam = sms_train.cat.value_counts(normalize = True)['spam']
def bayes_classify(sentence):
    '''Classify an SMS message as 'ham' or 'spam'.

    Same as `bayes_classify_draft` but silent (no prints): strips
    punctuation, lower-cases and splits the message, accumulates
    P(Spam|sentence) and P(Ham|sentence) from the priors and the per-word
    probabilities in the global `word_pm` matrix, and returns the verdict.
    Out-of-vocabulary words are ignored.
    '''
    # Raw string avoids the invalid '\W' escape warning of the original.
    words = re.sub(r'(\W+)', ' ', sentence).lower().split()
    # Build the vocabulary lookup once instead of rebuilding a list for
    # every word (O(1) membership tests instead of O(vocabulary) scans).
    vocabulary = set(word_pm.word.values)
    p_spam_given_sentence = p_spam
    p_ham_given_sentence = p_ham
    for word in words:
        if word in vocabulary:
            p_ham_given_sentence *= word_pm[word_pm.word == word].p_ham.values[0]
            p_spam_given_sentence *= word_pm[word_pm.word == word].p_spam.values[0]
    if p_ham_given_sentence > p_spam_given_sentence:
        return 'ham'
    elif p_spam_given_sentence > p_ham_given_sentence:
        return 'spam'
    else:
        return 'needs human classification!'
# -
# and create new columns `predicted` using `bayes_classify()` function
# +
# Add new column with the filter's verdict for every test message
sms_test["predict"] = sms_test.text.apply(bayes_classify)
# Change order of columns
sms_test = sms_test.reindex(['cat', 'predict', 'text'], axis = 1)
# Display head
display(sms_test.head(10))
# -
# Measure spam filter accuracy:
# Calculating the accuracy of the spam filter.
# A vectorised comparison replaces the original iterrows() loop: same
# result, one C-level pass instead of a Python-level iteration per message.
total = len(sms_test)  # number of sms in the test set
correct = int((sms_test['cat'] == sms_test['predict']).sum())
accuracy = correct/total*100
print(f"Total messages: {total:,}")
print(f"Correct messages: {correct:,}")
print(f"Incorrect messages: {(total - correct):,}")
print(f'The accuracy of the spam filter: {accuracy:.3f}%')
# Error analysis: false positives (ham flagged as spam), false negatives
# (spam passed as ham), and messages the filter could not decide.
false_spam = sms_test[(sms_test['predict']=='spam')&(sms_test['cat']=='ham')].reset_index(drop=True)
false_ham = sms_test[(sms_test['predict']=='ham')&(sms_test['cat']=='spam')].reset_index(drop=True)
ambiguous = sms_test[sms_test['predict']=='needs human classification!'].reset_index(drop=True)
print("1. False Spam")
display(false_spam)
print("\n2. False Ham")
display(false_ham)
print("\n3. Ambiguous messages ")
display(ambiguous)
# ## Conclusion
# In this project created a highly accurate spam filter based on the multinomial Naive Bayes algorithm and a dataset of labeled 5,572 SMS. The spam filter takes in a new message and classifies it as spam or ham with reaching accuracy of 98.74%, which is almost 20% higher than our initial focus.
# Notice, that `sms_train`dataset contains only 6,137 unique words or 78.85% from 7,783 total unique words
# and nevertheless the spam filter provided the relative high level of recognition sentences.
#
# For example you can move and place this code line
#
# `word_pm.fillna(0, inplace=True)`
#
# after this code line
#
# `word_pm.insert(5, "p_spam", ((word_pm.n_spam + 1.0)/
# (train_spam_count + 1.0 * train_total_inique)))`
#
# in section [7] - as a result you get spam filter accuracy equal 92.011% and 89 incorrect messages because p_ham and p_spam values became wrong all along of that the Nan value was not removed in time - be attentive to the little things.
#
# The most of popular meaningful spam-prone words revealed the following patterns: encouraging people to do further actions, promising them something alluring, urging them, having sexual context, inviting to visit some web resources, advertising various digital devices and products.
#
# Created on June 01, 2021
#
# @author: <NAME>, used some ideas from public Internet resources.
#
# © 3-clause BSD License
#
# Software environment:
#
# Debian 10
#
# Python 3.8.7
#
# required next imported python modules:
#
# IPython
#
# matplotlib
#
# pandas
| 15_Building_a_Spam_Filter_with_Naive_Bayes/Building_a_Spam_Filter_with_Naive_Bayes_m433.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python [default]
# language: python
# name: python3
# ---
# + [markdown] id="PiOlL-AnUTSG" colab_type="text"
# # Batch Normalization
#
# Batch normalization was introduced in <NAME> and <NAME>'s 2015 paper [Batch Normalization: Accelerating Deep Network Training by Reducing Internal Covariate Shift](https://arxiv.org/pdf/1502.03167.pdf). The idea is that, instead of just normalizing the inputs to the network, we normalize the inputs to _layers within_ the network.
# > It's called **batch** normalization because during training, we normalize each layer's inputs by using the mean and variance of the values in the current *batch*.
# + [markdown] id="L3LBRgMnUTSJ" colab_type="text"
# ## Batch Normalization in PyTorch<a id="implementation_1"></a>
#
# This section of the notebook shows you one way to add batch normalization to a neural network built in PyTorch.
#
# The following cells import the packages we need in the notebook and load the MNIST dataset to use in our experiments.
# + id="EDrcWIxLUTSK" colab_type="code" colab={}
# %matplotlib inline
import numpy as np
import torch
import matplotlib.pyplot as plt
# + id="rO5ZECfMUTSP" colab_type="code" colab={"base_uri": "https://localhost:8080/", "height": 275} outputId="4941b909-4fe0-4652-ef76-8b66ba59127a" executionInfo={"status": "ok", "timestamp": 1577466417898, "user_tz": -60, "elapsed": 2503, "user": {"displayName": "<NAME>", "photoUrl": "", "userId": "08573171651197416845"}}
from torchvision import datasets
import torchvision.transforms as transforms
# number of subprocesses to use for data loading
num_workers = 0
# how many samples per batch to load
batch_size = 64
# convert data to torch.FloatTensor
transform = transforms.ToTensor()
# get the training and test datasets
train_data = datasets.MNIST(root='data', train=True,
                            download=True, transform=transform)
test_data = datasets.MNIST(root='data', train=False,
                           download=True, transform=transform)
# prepare data loaders
# NOTE(review): shuffle is not enabled, so batches arrive in dataset order
# every epoch — confirm this is intentional for the comparison.
train_loader = torch.utils.data.DataLoader(train_data, batch_size=batch_size,
                                           num_workers=num_workers)
test_loader = torch.utils.data.DataLoader(test_data, batch_size=batch_size,
                                          num_workers=num_workers)
# + [markdown] id="8f_pCQ56UTST" colab_type="text"
# ### Visualize the data
# + id="pVXzhCTvUTSe" colab_type="code" colab={"base_uri": "https://localhost:8080/", "height": 229} outputId="ad7a74fb-efca-40a3-8799-2d2129a74488" executionInfo={"status": "ok", "timestamp": 1577466446656, "user_tz": -60, "elapsed": 805, "user": {"displayName": "<NAME>", "photoUrl": "", "userId": "08573171651197416845"}}
# obtain one batch of training images
dataiter = iter(train_loader)
# Use the builtin next(): the iterator's .next() method was a legacy alias
# and no longer exists on modern PyTorch DataLoader iterators.
images, labels = next(dataiter)
images = images.numpy()
# get one image from the batch
img = np.squeeze(images[0])
fig = plt.figure(figsize = (3,3))
ax = fig.add_subplot(111)
ax.imshow(img, cmap='gray')
# + [markdown] id="QT8W-sESUTSg" colab_type="text"
# ## Neural network classes for testing
#
# The following class, `NeuralNet`, allows us to create identical neural networks **with and without batch normalization** to compare. The code is heavily documented, but there is also some additional discussion later. You do not need to read through it all before going through the rest of the notebook, but the comments within the code blocks may answer some of your questions.
#
# *About the code:*
# >We are defining a simple MLP for classification; this design choice was made to support the discussion related to batch normalization and not to get the best classification accuracy.
#
# ### (Important) Model Details
#
# There are quite a few comments in the code, so those should answer most of your questions. However, let's take a look at the most important lines.
#
# We add batch normalization to layers inside the `__init__` function. Here are some important points about that code:
# 1. Layers with batch normalization do **not** include a bias term.
# 2. We use PyTorch's [BatchNorm1d](https://pytorch.org/docs/stable/nn.html#batchnorm1d) function to handle the math. This is the function you use to operate on linear layer outputs; you'll use [BatchNorm2d](https://pytorch.org/docs/stable/nn.html#batchnorm2d) for 2D outputs like filtered images from convolutional layers.
# 3. We add the batch normalization layer **before** calling the activation function.
#
# + id="34LZo55nUTSh" colab_type="code" colab={}
import torch.nn as nn
import torch.nn.functional as F
class NeuralNet(nn.Module):
    """MLP for MNIST classification with optional batch normalization.

    Architecture: input_size -> 2*hidden_dim -> hidden_dim -> output_size,
    ReLU activations; when use_batch_norm is True each hidden linear layer
    drops its bias and is followed by BatchNorm1d before the activation.
    """

    def __init__(self, use_batch_norm, input_size=784, hidden_dim=256, output_size=10):
        """
        Creates a PyTorch net using the given parameters.

        :param use_batch_norm: bool
            Pass True to create a network that uses batch normalization; False otherwise
            Note: this network will not use batch normalization on layers that do not have an
            activation function.
        :param input_size: number of input features (28*28 images)
        :param hidden_dim: base width of the hidden layers
        :param output_size: number of classes
        """
        super(NeuralNet, self).__init__() # init super
        # Default layer sizes
        self.input_size = input_size # (28*28 images)
        self.hidden_dim = hidden_dim
        self.output_size = output_size # (number of classes)
        # Keep track of whether or not this network uses batch normalization.
        self.use_batch_norm = use_batch_norm
        # define hidden linear layers, with optional batch norm on their outputs
        # layers with batch_norm applied have no bias term
        if use_batch_norm:
            self.fc1 = nn.Linear(input_size, hidden_dim*2, bias=False)
            self.batch_norm1 = nn.BatchNorm1d(hidden_dim*2)
        else:
            self.fc1 = nn.Linear(input_size, hidden_dim*2)
        # define *second* hidden linear layers, with optional batch norm on their outputs
        if use_batch_norm:
            self.fc2 = nn.Linear(hidden_dim*2, hidden_dim, bias=False)
            self.batch_norm2 = nn.BatchNorm1d(hidden_dim)
        else:
            self.fc2 = nn.Linear(hidden_dim*2, hidden_dim)
        # third and final, fully-connected layer
        self.fc3 = nn.Linear(hidden_dim, output_size)

    def forward(self, x):
        """Forward pass; returns raw class logits of shape (batch, output_size)."""
        # flatten image
        x = x.view(-1, 28*28)
        # all hidden layers + optional batch norm + relu activation
        x = self.fc1(x)
        if self.use_batch_norm:
            x = self.batch_norm1(x)
        x = F.relu(x)
        # second layer
        x = self.fc2(x)
        if self.use_batch_norm:
            x = self.batch_norm2(x)
        x = F.relu(x)
        # third layer, no batch norm or activation
        x = self.fc3(x)
        return x
# + [markdown] id="iavYZMjUUTSm" colab_type="text"
# ### Create two different models for testing
#
# * `net_batchnorm` is a linear classification model **with** batch normalization applied to the output of its hidden layers
# * `net_no_norm` is a plain MLP, without batch normalization
#
# Besides the normalization layers, everything about these models is the same.
# + id="Q74lntKSUTSn" colab_type="code" colab={"base_uri": "https://localhost:8080/", "height": 238} outputId="47bfbc8a-eb5b-49dd-c7b5-f9532ce40077" executionInfo={"status": "ok", "timestamp": 1577466517900, "user_tz": -60, "elapsed": 355, "user": {"displayName": "<NAME>", "photoUrl": "", "userId": "08573171651197416845"}}
# Two identical architectures, differing only in the use of batch norm
net_batchnorm = NeuralNet(use_batch_norm=True)
net_no_norm = NeuralNet(use_batch_norm=False)
print(net_batchnorm)
print()
print(net_no_norm)
# + [markdown] id="qFsSZ5DHUTSp" colab_type="text"
# ---
# ## Training
#
# The below `train` function will take in a model and some number of epochs. We'll use cross entropy loss and stochastic gradient descent for optimization. This function returns the losses, recorded after each epoch, so that we can display and compare the behavior of different models.
#
# #### `.train()` mode
# Note that we tell our model whether or not it should be in training mode, `model.train()`. This is an important step because batch normalization has different behavior during training on a batch or testing/evaluating on a larger dataset.
# + id="DGkqf15tUTSq" colab_type="code" colab={}
def train(model, n_epochs=10):
    """Train `model` on the global `train_loader`.

    Uses cross-entropy loss and SGD (lr=0.01). The model is put into
    training mode so batch-norm layers use per-batch statistics.

    :param model: network to optimise (updated in place)
    :param n_epochs: number of passes over the training data
    :return: list with the average training loss of each epoch
    """
    # (The original's redundant `n_epochs = n_epochs` self-assignment removed.)
    # track losses
    losses = []
    # specify loss function (categorical cross-entropy)
    criterion = nn.CrossEntropyLoss()
    # specify optimizer (stochastic gradient descent) and learning rate = 0.01
    optimizer = torch.optim.SGD(model.parameters(), lr=0.01)
    # set the model to training mode
    model.train()
    for epoch in range(1, n_epochs+1):
        # monitor training loss
        train_loss = 0.0
        ###################
        # train the model #
        ###################
        batch_count = 0
        for batch_idx, (data, target) in enumerate(train_loader):
            # clear the gradients of all optimized variables
            optimizer.zero_grad()
            # forward pass: compute predicted outputs by passing inputs to the model
            output = model(data)
            # calculate the loss
            loss = criterion(output, target)
            # backward pass: compute gradient of the loss with respect to model parameters
            loss.backward()
            # perform a single optimization step (parameter update)
            optimizer.step()
            # update average training loss
            train_loss += loss.item() # add up avg batch loss
            batch_count += 1
        # record and report the epoch's mean batch loss
        losses.append(train_loss/batch_count)
        print('Epoch: {} \tTraining Loss: {:.6f}'.format(
            epoch,
            train_loss/batch_count))
    # return all recorded epoch losses
    return losses
# + [markdown] id="CTP0XD1PUTSs" colab_type="text"
# ### Comparing Models
#
# In the below cells, we train our two different models and compare their trainining loss over time.
# + id="BdXf3T5YUTSt" colab_type="code" colab={"base_uri": "https://localhost:8080/", "height": 187} outputId="13c793fc-3371-48d7-9622-31e9f47f2a4b" executionInfo={"status": "ok", "timestamp": 1577466711911, "user_tz": -60, "elapsed": 167988, "user": {"displayName": "<NAME>", "photoUrl": "", "userId": "08573171651197416845"}}
# Train the batch-norm model and record its per-epoch losses.
# this may take some time to train
losses_batchnorm = train(net_batchnorm)
# + id="wQ-SIjdbUTSv" colab_type="code" colab={"base_uri": "https://localhost:8080/", "height": 187} outputId="0009b6bb-6a25-4d12-d67a-a3e129d046ae" executionInfo={"status": "ok", "timestamp": 1577466923566, "user_tz": -60, "elapsed": 108993, "user": {"displayName": "<NAME>", "photoUrl": "", "userId": "08573171651197416845"}}
# Train the *no*-norm model and record its per-epoch losses.
# you should already start to see a difference in training losses
losses_no_norm = train(net_no_norm)
# + id="nIQN4-tBUTS0" colab_type="code" colab={"base_uri": "https://localhost:8080/", "height": 516} outputId="f3fed169-7a5f-409b-844f-12f3647a4dce" executionInfo={"status": "ok", "timestamp": 1577466935209, "user_tz": -60, "elapsed": 754, "user": {"displayName": "<NAME>", "photoUrl": "", "userId": "08573171651197416845"}}
# compare the two training-loss curves on one figure
fig, ax = plt.subplots(figsize=(12,8))
plt.plot(losses_batchnorm, label='Using batchnorm', alpha=0.5)
plt.plot(losses_no_norm, label='No norm', alpha=0.5)
plt.title("Training Losses")
plt.legend()
# + [markdown] id="5ehXXlfdUTS3" colab_type="text"
# ---
# ## Testing
#
# You should see that the model with batch normalization, starts off with a lower training loss and, over ten epochs of training, gets to a training loss that is noticeably lower than our model without normalization.
#
# Next, let's see how both these models perform on our test data! Below, we have a function `test` that takes in a model and a parameter `train` (True or False) which indicates whether the model should be in training or evaluation mode. This is for comparison purposes later. This function will calculate some test statistics, including the overall test accuracy of the passed-in model.
#
# + id="wOZh2198UTS4" colab_type="code" colab={}
def test(model, train):
    """Evaluate `model` on the global `test_loader`.

    Prints the average test loss, per-digit accuracy, and overall accuracy.

    :param model: network to evaluate
    :param train: if True, evaluate in .train() mode (per-batch statistics);
        otherwise in .eval() mode (running population statistics). Kept as a
        parameter to demonstrate the batch-norm difference.
    """
    # initialize vars to monitor test loss and accuracy
    class_correct = list(0. for i in range(10))
    class_total = list(0. for i in range(10))
    test_loss = 0.0
    # set model to train or evaluation mode just to see the difference in behavior
    if train:
        model.train()
    else:
        model.eval()
    # loss criterion
    criterion = nn.CrossEntropyLoss()
    for batch_idx, (data, target) in enumerate(test_loader):
        batch_size = data.size(0)
        # forward pass: compute predicted outputs by passing inputs to the model
        output = model(data)
        # calculate the loss
        loss = criterion(output, target)
        # update average test loss
        test_loss += loss.item()*batch_size
        # convert output probabilities to predicted class
        _, pred = torch.max(output, 1)
        # compare predictions to true label
        correct = np.squeeze(pred.eq(target.data.view_as(pred)))
        # calculate test accuracy for each object class
        for i in range(batch_size):
            label = target.data[i]
            class_correct[label] += correct[i].item()
            class_total[label] += 1
    print('Test Loss: {:.6f}\n'.format(test_loss/len(test_loader.dataset)))
    for i in range(10):
        if class_total[i] > 0:
            print('Test Accuracy of %5s: %2d%% (%2d/%2d)' % (
                str(i), 100 * class_correct[i] / class_total[i],
                np.sum(class_correct[i]), np.sum(class_total[i])))
        else:
            # BUG FIX: the original referenced an undefined name `classes[i]`
            # here, which would raise NameError if a digit had no samples.
            print('Test Accuracy of %5s: N/A (no training examples)' % (str(i)))
    print('\nTest Accuracy (Overall): %2d%% (%2d/%2d)' % (
        100. * np.sum(class_correct) / np.sum(class_total),
        np.sum(class_correct), np.sum(class_total)))
# + [markdown] id="5OER-nDCUTS7" colab_type="text"
# ### Training and Evaluation Mode
#
# Setting a model to evaluation mode is important for models with batch normalization layers!
#
# >* Training mode means that the batch normalization layers will use **batch** statistics to calculate the batch norm.
# * Evaluation mode, on the other hand, uses the estimated **population** mean and variance from the entire training set, which should give us increased performance on this test data!
# + id="IjP3Lod0UTS8" colab_type="code" colab={"base_uri": "https://localhost:8080/", "height": 255} outputId="e08ee1d2-e942-406d-cf21-f0c28d72ecd3" executionInfo={"status": "ok", "timestamp": 1577466952403, "user_tz": -60, "elapsed": 2171, "user": {"displayName": "<NAME>", "photoUrl": "", "userId": "08573171651197416845"}}
# test batchnorm case, in *train* mode (per-batch statistics used at test time)
test(net_batchnorm, train=True)
# + id="5V3Uq5cQUTS_" colab_type="code" colab={"base_uri": "https://localhost:8080/", "height": 255} outputId="75c413d6-7a82-48ff-c553-7a86c987b994" executionInfo={"status": "ok", "timestamp": 1577466961711, "user_tz": -60, "elapsed": 1884, "user": {"displayName": "<NAME>", "photoUrl": "", "userId": "08573171651197416845"}}
# test batchnorm case, in *evaluation* mode (running population statistics)
test(net_batchnorm, train=False)
# + id="_9o1qqkwUTTB" colab_type="code" colab={"base_uri": "https://localhost:8080/", "height": 255} outputId="8f58a53a-75c2-499d-de90-e31c648e815c" executionInfo={"status": "ok", "timestamp": 1577466993393, "user_tz": -60, "elapsed": 1762, "user": {"displayName": "<NAME>", "photoUrl": "", "userId": "08573171651197416845"}}
# for posterity, test no norm case in eval mode
test(net_no_norm, train=False)
# + [markdown] id="PQ3V6vbBUTTG" colab_type="text"
# ### Which model has the highest accuracy?
#
# You should see a small improvement when comparing the batch norm model's accuracy in training and evaluation mode; **evaluation mode** should give a small improvement!
#
# You should also see that the model that uses batch norm layers shows a marked improvement in overall accuracy when compared with the no-normalization model.
# + [markdown] id="FA9d6IiMUTTH" colab_type="text"
# ---
# # Considerations for other network types
#
# This notebook demonstrates batch normalization in a standard neural network with fully connected layers. You can also use batch normalization in other types of networks, but there are some special considerations.
#
# ### ConvNets
#
# Convolution layers consist of multiple feature maps. (Remember, the depth of a convolutional layer refers to its number of feature maps.) And the weights for each feature map are shared across all the inputs that feed into the layer. Because of these differences, batch normalizing convolutional layers requires batch/population mean and variance per feature map rather than per node in the layer.
#
# > To apply batch normalization on the outputs of convolutional layers, we use [BatchNorm2d](https://pytorch.org/docs/stable/nn.html#batchnorm2d)
#
#
# ### RNNs
#
# Batch normalization can work with recurrent neural networks, too, as shown in the 2016 paper [Recurrent Batch Normalization](https://arxiv.org/abs/1603.09025). It's a bit more work to implement, but basically involves calculating the means and variances per time step instead of per layer. You can find an example where someone implemented recurrent batch normalization in PyTorch, in [this GitHub repo](https://github.com/jihunchoi/recurrent-batch-normalization-pytorch).
| batch-norm/Batch_Normalization.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# language: python
# name: python3
# ---
import pandas as pd
import seaborn as sns
from matplotlib import pyplot as plt
# Load the positive COVID-19 cases for Zacatecas; the first row holds the column names.
df = pd.read_csv("positivosZac.csv",header=0)
df
# ## Comparison between men and women aged 40 and over
#
# Split by sex: SEXO == 2 is treated as male, SEXO == 1 as female.
# NOTE(review): the encoding is assumed from the variable names below —
# confirm against the dataset's data dictionary.
dfhombres=df[(df['EDAD']>=40) & (df['SEXO']==2)]
dfmujeres=df[(df['EDAD']>=40) & (df['SEXO']==1)]
print("Hombres:", len(dfhombres),"| Mujeres:", len(dfmujeres))
# Proportion of deaths among hospitalized patients, men vs. women
# +
# Hospitalized men and women (TIPO_PACIENTE == 2 marks a hospitalized case).
dfHH=dfhombres[dfhombres['TIPO_PACIENTE']==2]
dfMH=dfmujeres[dfmujeres['TIPO_PACIENTE']==2]
# Hospitalized men and women who died: a real death date in FECHA_DEF starts
# with "2" (e.g. "2020-05-01"); other rows fail the match (NaN rows compare
# unequal to True).  Raw strings avoid the invalid "\S" escape warning.
dfHHF=dfHH[dfHH['FECHA_DEF'].str.match(r"([2])\S+")== True]
dfMHF=dfMH[dfMH['FECHA_DEF'].str.match(r"([2])\S+")== True]
# Hospitalized men and women with at least one risk condition: a row
# qualifies when any comorbidity column equals 1 ("yes").  This replaces the
# former ten-term chained-OR expression with an equivalent row-wise any().
RISK_COLUMNS = ['DIABETES', 'EPOC', 'ASMA', 'INMUSUPR', 'HIPERTENSION', 'OTRA_COM',
                'CARDIOVASCULAR', 'OBESIDAD', 'TABAQUISMO', 'RENAL_CRONICA']
dfHHC=dfHH[(dfHH[RISK_COLUMNS]==1).any(axis=1)]
dfMHC=dfMH[(dfMH[RISK_COLUMNS]==1).any(axis=1)]
# Percentage of hospitalized men and women who died.
porcentajeH=len(dfHHF)/len(dfHH)*100
porcentajeM=len(dfMHF)/len(dfMH)*100
# Percentage of hospitalized men and women with at least one risk condition.
porcentajeHC=len(dfHHC)/len(dfHH)*100
porcentajeMC=len(dfMHC)/len(dfMH)*100
# Paired bar charts; identical axes keep the two figures directly comparable.
plt.ylim(0,100)
plt.xlim(1.25-1,1.25+1)
plt.xticks([])
plt.title("Porcentaje de hombres y mujeres hospitalizados con al menos una enfermedad de riesgo")
plt.bar(1,porcentajeHC,width=0.5, label="Hombres")
plt.bar(1.5,porcentajeMC,width=0.5, label="Mujeres")
plt.legend()
plt.show()
plt.ylim(0,100)
plt.xlim(1.25-1,1.25+1)
plt.xticks([])
# Chart-title typo fixed ("falllecieron" -> "fallecieron").
plt.title("Porcentaje de hombres y mujeres hospitalizados que fallecieron")
plt.bar(1,porcentajeH,width=0.5, label="Hombres")
plt.bar(1.5,porcentajeM,width=0.5, label="Mujeres")
plt.legend()
plt.show()
print(porcentajeHC,porcentajeMC)
print("{:.2f}% de los hombres hospitalizados fallecieron".format(porcentajeH))
print("{:.2f}% de las mujeres hospitalizadas fallecieron".format(porcentajeM))
# -
# ## Comparison of deaths between public and private hospitals
#
# SECTOR == 9 identifies private institutions; every other code is a
# public-sector provider.  NOTE(review): assumed from the variable names —
# confirm against the dataset's data dictionary.
dfprivados=df[df['SECTOR']==9]
dfpublicos=df[df['SECTOR']!=9]
print("Pacientes atendidos en hospitales","\nPrivados:",len(dfprivados),"| Públicos",len(dfpublicos))
# +
# Patients hospitalized in private and in public institutions
# (TIPO_PACIENTE == 2 marks a hospitalized case).
dfprivadosH=dfprivados[dfprivados['TIPO_PACIENTE']==2]
dfpublicosH=dfpublicos[dfpublicos['TIPO_PACIENTE']==2]
# Hospitalized patients who died: a real death date in FECHA_DEF starts with "2".
dfprivadosHF=dfprivadosH[dfprivadosH['FECHA_DEF'].str.match(r"([2])\S+")== True]
dfpublicosHF=dfpublicosH[dfpublicosH['FECHA_DEF'].str.match(r"([2])\S+")== True]
# Hospitalized patients with at least one risk condition: any comorbidity
# column equal to 1 ("yes"); equivalent to the former ten-term OR chain.
RISK_COLUMNS = ['DIABETES', 'EPOC', 'ASMA', 'INMUSUPR', 'HIPERTENSION', 'OTRA_COM',
                'CARDIOVASCULAR', 'OBESIDAD', 'TABAQUISMO', 'RENAL_CRONICA']
dfprivadosC=dfprivadosH[(dfprivadosH[RISK_COLUMNS]==1).any(axis=1)]
dfpublicosC=dfpublicosH[(dfpublicosH[RISK_COLUMNS]==1).any(axis=1)]
# Death percentages.
porcentajePriv=len(dfprivadosHF)/len(dfprivadosH)*100
porcentajePubl=len(dfpublicosHF)/len(dfpublicosH)*100
# Risk-condition percentages.
porcentajePrivC=len(dfprivadosC)/len(dfprivadosH)*100
porcentajePublC=len(dfpublicosC)/len(dfpublicosH)*100
# Paired bar charts; identical axes keep the two figures directly comparable.
plt.ylim(0,100)
plt.xlim(1.25-1,1.25+1)
plt.xticks([])
plt.title("Porcentaje de pacientes hospitalizados con al menos una enfermedad de riesgo")
plt.bar(1,porcentajePrivC,width=0.5, label="Privado")
plt.bar(1.5,porcentajePublC,width=0.5, label="Público")
plt.legend()
plt.show()
plt.ylim(0,100)
plt.xlim(1.25-1,1.25+1)
plt.xticks([])
# Chart-title typo fixed ("falllecieron" -> "fallecieron").
plt.title("Porcentaje de pacientes hospitalizados que fallecieron")
plt.bar(1,porcentajePriv,width=0.5, label="Privado")
plt.bar(1.5,porcentajePubl,width=0.5, label="Público")
plt.legend()
plt.show()
# User-facing message typos fixed ("servivios" -> "servicios", "pacinetes" ->
# "pacientes") plus the gender agreement on the last line, which had been
# copy-pasted from the men/women cell.
print("{:.2f}% de los pacientes hospitalizados en servicios privados presentan cuando menos un factor de riesgo".format(porcentajePrivC))
print("{:.2f}% de los pacientes hospitalizados en servicios públicos presentan cuando menos un factor de riesgo".format(porcentajePublC))
print("{:.2f}% de los pacientes hospitalizados en servicios privados fallecieron".format(porcentajePriv))
print("{:.2f}% de los pacientes hospitalizados en servicios públicos fallecieron".format(porcentajePubl))
# -
| datos/Covid-19.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# language: python
# name: python3
# ---
# + [markdown] slideshow={"slide_type": "slide"}
# # Lecture 11: Scalar and Vector Fields, Accumulation
# + [markdown] slideshow={"slide_type": "skip"}
# ## Background
#
# Engineers and scientists are typically interested in how certain quantities vary within space. A scalar field is a mapping of values to positions within some coordinate system. In physical problems it is common to map values to three dimensions or fewer. For example, scalar fields may be generated from temperature measurements within a chemical reactor, pressure data over the Earth's surface, or x-ray intensity from telescope observations. Any mapping of a numerical quantity to a spatial coordinate is a scalar field.
#
# Materials engineers interested in phase transformations typically deal with scalar fields corresponding to concentration as a function of position. This could be indicated using the symbols $c(x,y)$ where the $c$ corresponds to the field of interest and the $(x,y)$ indicating the dimensions of and position within the field.
# + [markdown] slideshow={"slide_type": "skip"}
#
# ## What Skills Will I Learn?
#
# * Define a scalar and vector field
# * Define and use the gradient and divergence operators
# * Develop a geometric intuition for the divergence and gradient
# * Learn or review the definition of the chemical potential and ideal solution models
#
# ## What Steps Should I Take?
#
# 1. Read through all of the activities below. The computer code below is only for visualization and you are not required to understand it all completely before you finish this lecture's assignment.
# 1. Understand how to use the $\vec{\nabla}$ operator to compute the gradients and divergence of scalar and vector fields respectively.
# 1. Use the visualizations to build your geometric understanding for the concepts of a scalar and vector field.
# 1. Compute the chemical potential from the Gibbs free energy and then apply the gradient and divergence operators to that expression and analyze what you get.
#
# ## Reading and Reference
# * Essential Mathematical Methods for Physicists, Hans J. Weber and George B. Arfken, Academic Press, 2003
#
# -
# ## Quick Review: Product Rule and Chain Rule
#
# There are three ways that functions can be combined: addition, multiplication and compostion (one function placed as an argument into another function). The product and chain rule are the result of computing differentials for products and compositions. The sum rule is stated as:
#
# $$
# {\frac {d(af+bg)}{dx}}=a{\frac {df}{dx}}+b{\frac {dg}{dx}}
# $$
#
# The product rule is:
#
# $$
# {\dfrac{d}{dx}}(u\cdot v)={\dfrac {du}{dx}}\cdot v+u\cdot {\dfrac {dv}{dx}}
# $$
#
# Assuming that $z(y)$ and $y(x)$ then the chain rule is stated as:
#
# $$
# \frac{dz}{dx}={\frac{dz}{dy}}\cdot {\frac{dy}{dx}}
# $$
#
# Reviewing these rules in the context of geometry will help build a better intuition for why these are the results.
# ## Scalar Fields
#
# A scalar field is just a number linked to an independent variable or variables. The Python code here is just to help us visualize the field.
# + [markdown] slideshow={"slide_type": "slide"}
# $$
# f(x,y) = \sin(2x) \cdot \sin(y)
# $$
#
# Note that this function takes an $(x,y)$ position as input and returns a single scalar value.
# + slideshow={"slide_type": "slide"}
# %matplotlib notebook
import sympy as sp
from sympy.plotting import plot3d
import numpy as np
import matplotlib.pyplot as plt
# Symbolic coordinates reused by the sympy expressions throughout this lecture.
x, y = sp.symbols('x y')
# + [markdown] slideshow={"slide_type": "fragment"}
# We use `plot3d()` to visualize this field so that we use the height of the surface as a proxy for the magnitude of the scalar field.
# + slideshow={"slide_type": "slide"}
# Surface plot of f(x, y) = sin(2x)*sin(y) over x in [-3, 3], y in [-2, 2];
# the trailing semicolon suppresses the echoed Plot object in the notebook.
plot3d(sp.sin(2*x)*sp.sin(y), (x, -3, 3), (y, -2, 2));
# + [markdown] slideshow={"slide_type": "slide"}
# ### Projections Onto Two Dimensions
# ----
#
# * A field $f(x,y)$ contains three pieces of information: f, x, y
# * Colors and other glyphs can help access the additional information
# * Contours and heatmaps are two such methods
# + [markdown] slideshow={"slide_type": "skip"}
# An alternative to the three dimensional plot is to project the scalar values onto a two dimensional surface. Two common options for this are _contour plots_ and _heat maps_. Rather than using a third dimension to represent the scalar values a contour plot traces single-valued lines through the domain whereas a heat-map uses colors to represent the value of the scalar field.
#
#
# ### Visualizing the Scalar Field by Contours
# ----
#
# As previously stated, _iso-lines_ are plotted within the domain and the color, annotations and position of the lines quantify the scalar field. Topographic maps use this representation to help the reader understand the locations and incline of mountains and valleys. `matplotlib` has a `.contour()` method that will generate contour plots. We demonstrate this next.
# + slideshow={"slide_type": "slide"}
delta = 0.025
xnp = np.arange(-3.0, 3.0, delta)
ynp = np.arange(-2.0, 2.0, delta)
X, Y = np.meshgrid(xnp, ynp)
Z = np.sin(2*X)*np.sin(Y)
# + slideshow={"slide_type": "slide"}
contours = 10
plt.figure()
CS = plt.contour(X, Y, Z, contours)
plt.clabel(CS, inline=1, fontsize=10)
plt.title(r'A Simple Contour Plot of Your Scalar Field')
plt.show()
# + [markdown] slideshow={"slide_type": "skip"}
# ### Visualizing the Scalar Field by Value
# ----
#
# Color choice requires consideration of the medium in which you distribute your data as well as the capabilities of the reader. An [essay](http://blogs.nature.com/onyourwavelength/2018/07/18/heuristics-for-better-figures/) has been written on the topic and provides some guidance for preparation of figures. `matplotlib` has a module called `cm` that provides some standard color maps. User specified maps are possible.
# + [markdown] slideshow={"slide_type": "slide"}
# ### DIY: Using Surface and Contour Plots
# ----
#
# Plot the electric field around a point charge if the potential is given by:
#
# $$
# V(x,y) = \frac{1}{x^2+y^2}
# $$
#
# Use three dimensional surface plots and contour plots to help visualize this potential.
# +
# Your code here.
# + [markdown] slideshow={"slide_type": "skip"}
# ### Gradients
# ----
#
# Scalar fields have associated vector fields. One such vector field is known as the _gradient_ and is indicated with the symbol $\nabla$ and is called [nabla](https://en.wikipedia.org/wiki/Nabla_symbol). The gradient is a type of directional derivative and is a vector quantity. In a physical problem of three dimensions the gradient can be thought of as pointing in the direction of the maximum spatial rate of change. Each basis vector magnitude is multiplied by the partial derivative of the field with respect to the basis vector's coordinate.
# + [markdown] slideshow={"slide_type": "slide"}
# You can visualize the gradient operator as a vector with components:
#
# $$\overrightarrow{\nabla} = \frac{\partial}{\partial x} \hat{i} + \frac{\partial}{\partial y} \hat{j} + \frac{\partial}{\partial z} \hat{k} $$
#
# When applied to a scalar field the result is a vector field - the gradient. Geometrically, the gradient is a vector field that "points uphill". The following illustrations are meant to convey some of that geometric intuition.
# + [markdown] slideshow={"slide_type": "skip"}
# ### Quiver Plots
# ----
#
# One way to visualize a vector field in a physical problem is to use small oriented arrow to indicate direction and length to indicate magnitude. These are known as quiver plots and they can be used in conjunction with heat maps to help visualize vector fields. In the example below the scalar field is sampled with two different point densities. A higher point density (100 points) for the filled contour plot and a lower point density (30 points) for the quivers.
# + slideshow={"slide_type": "slide"}
x0, x1 = (-3,3)
y0, y1 = (-2,2)
# Read the docstrings to understand why the numbers are given
# as complex quantities. use: ?np.mgrid
Y, X = np.mgrid[y0:y1:100j, x0:x1:100j]
Y1, X1 = np.mgrid[y0:y1:25j, x0:x1:25j]
Z = np.sin(2*X)*np.sin(Y)
# u and v here are the results of applying the gradient operation
# to our scalar field. Probably wise to check this in a seperate
# code block.
u = (2*np.sin(Y1)*np.cos(2*X1))
v = (np.sin(2*X1)*np.cos(Y1))
# + slideshow={"slide_type": "slide"}
fig, ax = plt.subplots()
filled_contour = plt.contourf(X,Y,Z,5)
fig.colorbar(filled_contour, ax=ax)
plt.quiver(X1,Y1,u,v, color='white')
plt.show()
# + [markdown] slideshow={"slide_type": "slide"}
# ### Divergence
# ----
#
# If we interpret the gradient field as a measurement of the flow of some quantity through space, then the divergence of that vector field is a measurement of the accumulation of that quantity. To compute the divergence we compute the dot product of _nabla_ and the vector field. The resulting quantity is a scalar quantity:
#
# $$
# \overrightarrow{\nabla} \cdot \overrightarrow{F} = \frac{\partial F_x}{\partial x} + \frac{\partial F_y}{\partial y} + \frac{\partial F_z}{\partial z}
# $$
# + [markdown] slideshow={"slide_type": "skip"}
# ### Accumulation and Diffusion
# ----
#
# One of the key problems in science and engineering is the diffusion of an extensive quantity such as energy, charge, or mass. The gradient of a potential provides a driving force for this diffusion.
# + [markdown] slideshow={"slide_type": "slide"}
# Using the diffusion of mass as an example:
#
# $$\overrightarrow{J} = -M \overrightarrow{\nabla} {\mu} $$
#
# and then computing the accumulation based on the vector field:
#
# $$\frac{\partial X(x,t)}{\partial t} = - \overrightarrow{\nabla} \cdot \overrightarrow{J} $$
#
# where the minus sign indicates that accumulation occurs antiparallel to the gradient. You may have seen this pair of equations combined into Fick's second law of diffusion.
# + [markdown] slideshow={"slide_type": "slide"}
# ### Plotting the Flux (or Accumulation)
# ----
#
# In this section we keep track of the vector components by hand using the mathematical definitions. Writing this out we have a scalar field:
#
# $$
# F(x,y)
# $$
#
# the flux written as minus the gradient of the field (with $M=1$):
#
# $$
# \overrightarrow{J} = -\nabla F(x,y) = -\left(\frac{\partial F}{\partial x} \hat{i} + \frac{\partial F}{\partial y}\hat{j} \right)
# $$
#
# and the accumulation as minus the divergence of the flux:
#
# $$
# A = -\nabla \cdot \overrightarrow{J} = \left(\frac{\partial^2 F}{\partial x^2} + \frac{\partial^2 F}{\partial y^2}\right)
# $$
# + slideshow={"slide_type": "slide"}
x, y = sp.symbols('x y')
# Note NEW field function:
concentrationFunction = sp.sin(sp.pi*x)*sp.cos(sp.pi*y)
fluxX = -sp.diff(concentrationFunction,x)
fluxY = -sp.diff(concentrationFunction,y)
accumulationFunction = sp.diff(concentrationFunction,x,2) + sp.diff(concentrationFunction,y,2)
# We use lambdify to permit the functions to take arguments and vectorize the computations.
myConcentrationFunction = sp.lambdify((x,y), concentrationFunction, 'numpy')
myFluxX = sp.lambdify((x,y), fluxX, 'numpy')
myFluxY = sp.lambdify((x,y), fluxY, 'numpy')
myAccumulationFunction = sp.lambdify((x,y), accumulationFunction, 'numpy')
# -
concentrationFunction
fluxX
fluxY
# + [markdown] slideshow={"slide_type": "slide"}
# We use our quiver plotting capability to plot:
#
# * the scalar value for the concentration as `z`
# * the $\hat{i}$ component of the flux as `u`
# * the $\hat{j}$ component of the flux as `v`
# + slideshow={"slide_type": "slide"}
import numpy as np
x0, x1 = (-1,1)
y0, y1 = (-1,1)
plotResolution = 200
Y, X = np.mgrid[y0:y1:200j, x0:x1:200j]
# Quivers are on a seperate grid since they clutter things up.
Y1, X1 = np.mgrid[y0:y1:20j, x0:x1:20j]
Z = myConcentrationFunction(X,Y)
u = myFluxX(X1,Y1)
v = myFluxY(X1,Y1)
# + slideshow={"slide_type": "slide"}
fig, ax = plt.subplots()
from matplotlib import cm
plt.contourf(X,Y,Z,20, cmap=cm.coolwarm)
plt.colorbar()
plt.quiver(X1,Y1,u,v, color='white')
plt.show()
# -
accumulationFunction
# + [markdown] slideshow={"slide_type": "skip"}
# In this example we use the contour plot to show locations of high (red) and low (blue) concentrations. From this concentration field we compute the flux and show that as a quiver plot overlaid on the contour plot. Examination of this figure confirms our physical understanding that mass flows from high to low concentration areas when the chemical potential is proportional to the concentration of species.
#
# The actual RATE of accumulation at this particular INSTANT in time is given by the `accumulationFunction`. This needs to be recomputed for every increment of time to be practical. I'll show you how to do that in the upcoming lectures.
# + [markdown] slideshow={"slide_type": "slide"}
# ## Homework: Using the ideal solution model for the Gibbs free energy, *derive* the accumulation of species in a diffusive problem.
#
# The Gibbs free energy is an auxiliary function re-cast in variables that are more appropriate for experimental observation.
#
# $$
# G = G(T,P,n_i,...)
# $$
#
# The flux is proportional to the gradient of the chemical potential, and the accumulation is minus the divergence of that flux. The chemical potential is the derivative of the Gibbs free energy (per mole, hence the switch to mole fraction) with respect to composition. Review the chain rule before you start this problem. The ideal solution model in terms of mole fraction for a two component system is:
#
# $$
# G(X_B) = (1-X_B)G_A + X_B G_B + RT(X_B \ln X_B + (1-X_B) \ln (1-X_B))
# $$
#
# The ideal solution model requires that the activity of a species be proportional to the mole fraction of that species in solution and that the heat of mixing be zero.
#
# Compute the divergence of the gradient of the derivative (w.r.t. concentration) of this function. Note that the concentration is a function of spatial coordinates. In this problem assume that the mole fraction $X$ is only a function of $x$, i.e. $X(x)$. $G_A$ and $G_B$ are constants that depend on the melting temperature and heat of fusion, $R$ is the gas constant and $T$ is the absolute temperature.
# -
# # Advanced and Optional Activities
# + [markdown] slideshow={"slide_type": "skip"}
# There are multiple ways to interact with Python and get at the gradient of a function. In the first instance we can use the coordinate system capabilities of `sympy` so that we can access the built in method `.gradient()`. We start by defining a coordinate system and then calling gradient on our scalar function. Scalars and vectors are objects of the coordinate system. [See this page](http://docs.sympy.org/latest/modules/vector/intro.html) for more information on the vector module.
#
# For the purposes of numeric computing, `NumPy` has functions and operators that work as you would expect for vector operations. What is presented below is for convenience, only.
# + slideshow={"slide_type": "slide"}
import sympy as sp
import sympy.vector as spv
# A Cartesian 3-D coordinate system: C.x, C.y, C.z are its scalar coordinates
# and C.i, C.j, C.k its unit vectors.
C = spv.CoordSys3D('C')
spv.gradient(sp.sin(2*C.x)*sp.sin(C.y))
# The gradient function should return something that looks like u+v
# where u is a vector in the x direction and v is a vector in the
# y direction.
# + slideshow={"slide_type": "slide"}
# Define your example scalar field (a concentration-like
# variable C(x,y)).
exampleField = sp.sin(sp.pi*C.x)*sp.cos(sp.pi*C.y)
exampleField
# + [markdown] slideshow={"slide_type": "skip"}
# We then use the built in `sympy` function `gradient` to compute the gradient:
# + slideshow={"slide_type": "slide"}
# Compute the flux J = -grad(field); note the leading minus sign (flow runs
# downhill, mobility M = 1).
gradientOfField = -spv.gradient(exampleField)
gradientOfField
# + [markdown] slideshow={"slide_type": "skip"}
# We then compute the divergence of the gradient. Note the absence of `C.i` and `C.j` in the answer indicating that these are not components of a vector. (Compare this to the last slide.)
# + slideshow={"slide_type": "slide"}
# Accumulation A = -div(J), i.e. the Laplacian of the field.
accumulation = -spv.divergence(gradientOfField)
accumulation
# + [markdown] slideshow={"slide_type": "slide"}
# ## Optional: Read [this](http://blogs.nature.com/onyourwavelength/2018/07/18/heuristics-for-better-figures/) article and write a 200 word essay on choosing colors for heat maps.
| Lecture-11-Scalars-Vectors.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 2
# language: python
# name: python2
# ---
# +
# #!/usr/bin/env python
# coding=utf-8
import pandas as pa
import numpy as np
import json
import os
import networkx as nx
import pygraphviz as gz
from networkx.drawing.nx_pydot import write_dot
import math
from scipy.stats import norm, normaltest, mannwhitneyu, ranksums
import matplotlib
import matplotlib.pyplot as plt
# %matplotlib notebook
import itertools
import csv
from sqlalchemy import exists, func, and_
from database import *
from matplotlib import pylab, pyplot
from matplotlib import dates
import seaborn as sns
sns.set(color_codes=True)
from scipy import stats, integrate
from datetime import datetime, timedelta, date
# Timestamp layouts used by the crawler's two date fields.
date_format = '%Y-%m-%dT%H:%M:%S.%fZ'
date_format2 = '%Y-%m-%d %H:%M:%S'
plt.style.use(['seaborn-paper'])
sns.set_style("whitegrid")
#plt.rc('font', family='serif', serif='Charter')
plt.rc('font', family='serif', serif='DejaVu Serif')
# Font sizes consumed by the rc overrides below.
SMALL_SIZE = 8
MEDIUM_SIZE = 9
BIGGER_SIZE = 13
plt.rc('font', size=MEDIUM_SIZE)          # controls default text sizes
plt.rc('axes', titlesize=MEDIUM_SIZE)     # fontsize of the axes title
plt.rc('axes', labelsize=MEDIUM_SIZE)     # fontsize of the x and y labels
plt.rc('xtick', labelsize=MEDIUM_SIZE)    # fontsize of the tick labels
plt.rc('ytick', labelsize=MEDIUM_SIZE)    # fontsize of the tick labels
plt.rc('legend', fontsize=MEDIUM_SIZE)    # legend fontsize
plt.rc('figure', titlesize=MEDIUM_SIZE)   # fontsize of the figure title
# Figure geometry in inches: full-width (x_*) and single-column (s_*) sizes,
# heights chosen for a golden-ratio (1.618) aspect.  NOTE(review): 6.8898 in
# is presumably the document's text width — confirm against the paper template.
x_width = 6.8898
x_height = x_width / 1.618
s_width = 3.4449
s_height = s_width / 1.618
def save_plot(name, fig, width, height):
    """Resize *fig* to width x height (inches) and write it into CDIR.

    The file name is joined onto the module-level chart directory CDIR and
    the figure is saved with a tight bounding box.
    """
    fig.set_size_inches(width, height)
    fig.savefig(CDIR + '/' + name, bbox_inches="tight")
# +
# Data and chart output directories for the 3-month evaluation window.
DIR = '../../data/data_evaluation_3MONTHS'
CDIR = '../../data/data_evaluation_3MONTHS/charts'
db = YTDatabase()
# +
# Load every video row from the crawler database into a DataFrame.
with db._session_scope(False) as session:
    df_videos = pa.read_sql(session.query(Video).statement, db.engine)
# +
# Parse the two string timestamp columns into datetime objects.
df_videos['dateAdded'] = df_videos['dateAdded'].apply(lambda x: datetime.strptime(x, date_format))
df_videos['crawlTimestamp'] = df_videos['crawlTimestamp'].apply(lambda x: datetime.strptime(x, date_format2))
df_videos.head()
# +
# duration calculation: parse ISO-8601 durations (e.g. "PT4M13S") to seconds.
import isodate
durations = []
for dt in df_videos['duration']:
    dur = isodate.parse_duration(dt)
    durations.append(dur.total_seconds())
df_duration = pa.DataFrame(durations)
# NOTE(review): the 'std' label presumably abbreviates hours (German
# "Stunden") — the printed value is seconds / 3600.
print 'Sum:', df_duration.sum(),'seconds'
print 'Sum:', df_duration.sum()/3600,'std'
print df_duration.describe()
# -
# Same durations, restricted to videos that have extracted features and whose
# category is not 20.  NOTE(review): category 20 is presumably Gaming —
# confirm against categories.txt.
with db._session_scope(False) as session:
    df_feature_videos = pa.read_sql(session.query(VideoFeatures.videoID, Video.duration).filter(and_(VideoFeatures.videoID==Video.id, Video.category != 20)).statement, db.engine)
df_feature_videos.drop_duplicates(inplace=True)
# +
durations_features = []
for dt in df_feature_videos['duration']:
    dur = isodate.parse_duration(dt)
    durations_features.append(dur.total_seconds())
df_durations_features = pa.DataFrame(durations_features)
print 'Sum:', df_durations_features.sum(),'seconds'
print 'Sum:', df_durations_features.sum()/3600,'std'
print df_durations_features.describe()
# +
# Distribution of video durations: log/symlog histogram plus a violin plot.
fig = plt.figure()
ax = sns.distplot(df_duration, kde=False, bins=100)
ax.set_xlabel('Duration (seconds)')
ax.set_ylabel('Videos')
ax.set_xscale('log')
# symlog keeps zero-count bins representable on a log-style axis.
ax.set_yscale('symlog')
#ax.legend()
plt.title('Video Durations')
save_plot('video_durations.pdf', fig, s_width, s_height)
fig = plt.figure()
ax = sns.violinplot(df_duration)
ax.set_xlabel('Duration (seconds)')
ax.set_ylabel('Videos')
#ax.set_xscale('log')
#ax.set_yscale('symlog')
#ax.legend()
plt.title('Video Durations')
# +
# Video topics distribution: map Freebase topic IDs to titles via topics.txt.
topics = [x for x in csv.reader(open('../../data/'+'topics.txt','r'), delimiter='\t')]
topicIDs = []
topicTitles = {}
for t, tt in topics:
    topicIDs.append(t)
    topicTitles[t]=tt
# Two sentinel IDs: '/m/None' for videos without topics, '/m/NaT' for topic
# IDs missing from topics.txt.
topicIDs.append('/m/None')
topicTitles['/m/None'] = 'None'
topicIDs.append('/m/NaT')
topicTitles['/m/NaT'] = 'Unknown ID'
topiclist = []
for ct in df_videos['topicIds']:
    # topicIds is stored as a JSON-encoded list of topic ID strings.
    if len(json.loads(ct))==0:
        topiclist.append('/m/None')
    for t in json.loads(ct):
        if t in topicIDs: # Filter not supported topics (as of 2017, Freebase deprecated)
            topiclist.append(t)
        else:
            topiclist.append('/m/NaT')
df_topics = pa.DataFrame({ 'Topic' : [topicTitles[t] for t in topiclist]})
fig = plt.figure()
ax = df_topics['Topic'].value_counts().sort_values(ascending=True).plot(kind='barh')
ax.set_xlabel('Videos')
ax.set_ylabel('Topic')
ax.set_xscale('symlog', linthreshx=10)
#ax.set_yscale('log')
#ax.legend()
plt.title('Video Topics')
fig.tight_layout()
save_plot('video_topics.pdf', fig, x_width, 1.4*x_height)
# +
print len(df_videos)
print len(df_topics[df_topics.Topic!='None'])
df_topics['Topic'].value_counts()
# +
# Video category distribution: map numeric category IDs to names via categories.txt.
categories = [x for x in csv.reader(open('../../data/'+'categories.txt','r'), delimiter='\t')]
catIDs = []
catTitles = {}
for t, tt in categories:
    #print t, tt
    catIDs.append(int(t))
    catTitles[int(t)]=tt
categorylist = []
for vt in df_videos['category']:
    if int(vt) in catIDs: # Filter not supported
        categorylist.append(int(vt))
df_cats = pa.DataFrame({ 'Category' : [catTitles[t] for t in categorylist]})
fig = plt.figure()
ax = df_cats['Category'].value_counts().sort_values(ascending=True).plot(kind='barh')
ax.set_xlabel('Videos')
ax.set_ylabel('Category')
ax.set_xscale('log')
#ax.set_yscale('log')
#ax.legend()
plt.title('Video Categories')
#fig.tight_layout()
save_plot('video_Categories.pdf', fig, x_width, x_height)
print len(df_videos)
df_cats['Category'].value_counts()
# -
# Videos per channel, most prolific channels first.
channel_groups = df_videos.groupby(by='channelID')
counts = channel_groups['id'].count().sort_values(ascending=False)
print len(channel_groups)
counts.head()
counts.describe()
# Derived upload-time columns.  Note: datetime.weekday() returns
# Monday == 0 .. Sunday == 6.
df_videos['day_added'] = df_videos['dateAdded'].apply(lambda x: x.weekday())
df_videos['time_added'] = df_videos['dateAdded'].apply(lambda x: x.time())
df_videos['min_added'] = df_videos['dateAdded'].apply(lambda x: x.minute)
df_videos['hr_added'] = df_videos['dateAdded'].apply(lambda x: int(x.strftime('%H')))
df_videos['dayn'] = df_videos['dateAdded'].apply(lambda x: x.strftime("%A"))
df_videos.head()
# +
weekdays_dic ={-1.0:'',0.0:'Sun', 1.0:'Mon', 2.0:'Tue',3.0:'Wed',4.0:'Thu',5.0:'Fri',6.0:'Sat',7.0:''}
counts = df_videos['day_added'].value_counts()
print counts.index
fig = plt.figure()
#df_counts = pa.DataFrame(counts, index=[range(len(counts))])
ax = sns.barplot(x=counts.index, y=counts.values, palette=sns.color_palette("Blues_d"))
ax.set_xlabel('Weekday')
ax.set_ylabel('Videos')
ax.set_title('Videos per Weekday')
ax.set_xticklabels(ax.get_xticks())
labels = [item.get_text() for item in ax.get_xticklabels()]
print labels
ax.set_xticklabels([weekdays_dic[float(i)] for i in labels])
save_plot('video_uploads_weekdays.pdf', fig, s_width, s_height)
# +
# plotting upload minutes to hours and day to hour
# scatter plot the points to see the dist? heatplot?
#g = sns.lmplot(x="hr_added", y="min_added", hue='day_added', data=group, fit_reg=False)
from scipy.stats import gaussian_kde
# Calculate the point density
x = df_videos['hr_added']
y = df_videos['min_added']
z = df_videos['day_added']
# Set up the figure
f, ax = plt.subplots()
ax.set_aspect("auto")
# Draw the two density plots
ax = sns.kdeplot(x, y, cmap="Blues", shade=True, shade_lowest=False)
ax.set_xlabel('Hour')
ax.set_ylabel('Minute')
plt.title('Video Upload Times')
save_plot('video_uploads_time_map.pdf', f, x_width, x_height)
weekdays_dic ={-1.0:'',0.0:'Sun', 1.0:'Mon', 2.0:'Tue',3.0:'Wed',4.0:'Thu',5.0:'Fri',6.0:'Sat',7.0:''}
# Set up the figure
f, ax = plt.subplots()
ax.set_aspect("auto")
# Draw the two density plots
ax = sns.kdeplot(x, z, cmap="Blues", shade=True, shade_lowest=False)
ax.set_xlabel('Hour')
ax.set_ylabel('Day')
ax.set_yticklabels(ax.get_yticks())
labels = [item.get_text() for item in ax.get_yticklabels()]
print labels
ax.set_yticklabels([weekdays_dic[float(i)] for i in labels])
plt.title('Video Upload Times')
save_plot('video_uploads_day_map.pdf', f, x_width, x_height)
# +
# same but small
# scatter plot the points to see the dist? heatplot?
#g = sns.lmplot(x="hr_added", y="min_added", hue='day_added', data=group, fit_reg=False)
from scipy.stats import gaussian_kde
# Calculate the point density
x = df_videos['hr_added']
y = df_videos['min_added']
z = df_videos['day_added']
# Set up the figure
f, ax = plt.subplots()
ax.set_aspect("auto")
# Draw the two density plots
ax = sns.kdeplot(x, y, cmap="Blues", shade=True, shade_lowest=False)
ax.set_xlabel('Hour')
ax.set_ylabel('Minute')
plt.title('Video Upload Times')
save_plot('video_uploads_time_map_small.pdf', f, s_width, s_height)
weekdays_dic ={-1.0:'',0.0:'Sun', 1.0:'Mon', 2.0:'Tue',3.0:'Wed',4.0:'Thu',5.0:'Fri',6.0:'Sat',7.0:''}
# Set up the figure
f, ax = plt.subplots()
ax.set_aspect("auto")
# Draw the two density plots
ax = sns.kdeplot(x, z, cmap="Blues", shade=True, shade_lowest=False)
ax.set_xlabel('Hour')
ax.set_ylabel('Day')
ax.set_yticklabels(ax.get_yticks())
labels = [item.get_text() for item in ax.get_yticklabels()]
print labels
ax.set_yticklabels([weekdays_dic[float(i)] for i in labels])
plt.title('Video Upload Times')
save_plot('video_uploads_day_map_small.pdf', f, s_width, s_height)
# +
# Frame-extraction rate: how many frames get sampled from a video of a
# given duration.
min_frames = 600    # floor: every video yields at least this many frames
max_frames = 8000   # ceiling: very long videos are capped here
def index_spread(frames, duration):
    """Return (duration, sampled_frame_count) for a video.

    The count grows at one frame per six seconds of duration on top of the
    min_frames floor and saturates at max_frames.  The `frames` argument is
    unused; it is kept only so existing call sites keep working.
    """
    planned = int(((1.0/6.0)* duration) + min_frames)
    return (duration, min(planned, max_frames))
#slength = int(frame_num / frame_rate)
#metricFn(frame_num, slength)
# Sample durations from 0 up to 63200*24 frames (at 24 fps) in 100-frame
# steps, then plot selected-frame count against duration in hours.
l = [index_spread(i, int(i/24)) for i in range(0, 63200*24, 100)]
dat = pa.DataFrame(l, columns=['time_s', 'sel_frames'])
dat['time_h'] = dat['time_s'].apply(lambda x: float(x)/60.0/60.0)
p = dat[['time_h', 'sel_frames']].plot.line(x='time_h', y='sel_frames')
p.set_ylim([0, 10000])
p.set_xlabel('Duration (hour)')
p.set_ylabel('Frames')
plt.legend(['f(n, r)'])
fig = p.get_figure()
save_plot('video_extraction_rate.pdf', fig, s_width, s_height)
# Spot-check the sampling function at a few canonical durations
# (0 s, 1 min, 10 min, 30 min, 1 h, 2 h, 12 h, 24 h).
d = [0, 60, 600, 1800, 3600, 7200, 43200, 86400]
dl = [index_spread(int(i*24), i) for i in d]
dl
# -
| src/data_evaluation/XX_Video Statistics.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# language: python
# name: python3
# ---
# In working through the SGCN problem with some new knowledge and thinking about interacting with ScienceBase, I'm experimenting here with a way to gather up every species name into one overall table to operate against. I still need to do a little thinking about how to deal with new data coming in over time in terms of how those should be stored in our final index. The process here is pretty straightforward and relies heavily on some Pandas fu. It uses sciencebasepy to grab every item from the source collection and then works them over to retrieve the flagged data files and take some initial cleanup steps.
#
# This produces one overall dataset out of every state list surprisingly quickly. I think the idea will be to run this process periodically, using the file date from the ScienceBase-stored files to see if there are any new state lists that need to be updated. I just haven't figured out what all to store from older data and how to run the API to pull the most current information.
#
# At the end, I dump this dataset to a CSV for later use. I experimented with the Pandas groupby capability which is really cool! Once we set up out clean_scientific_name field here, we can pull the CSV back into a dataframe, groupby that field, and have our unique names to lookup. After running the taxonomic authority consultation against ITIS and WoRMS, we can create an updated dataset with at least the ITIS TSN identifier for cases where it's best to go after related information with the ID vs. the name.
#
# This new process should work in a much more efficient manner compared to the old. We should only be running and storing individual records from each of the sources we consult with (taxonomic authorities, related data systems) tied by name or ID to the SGCN list. We should really be able to cache all of this related information in a data store somewhere that is leveraged and continually updating based on any incoming vector. Species lists can consult with the cache to see what's there, call for an update (earlier than some type of logical schedule) if they want, and pull back whatever they want to work with.
# +
# Third-party clients: sciencebasepy talks to the USGS ScienceBase REST API;
# bispy bundles project helpers (scientific-name cleaning, ITIS lookups).
from sciencebasepy import SbSession
from IPython.display import display
import pandas as pd
import requests
import json
import bispy
bis_utils = bispy.bis.Utils()  # provides clean_scientific_name() used below
itis = bispy.itis.Itis()  # ITIS client; not referenced in this notebook -- presumably kept for later steps
# -
sb = SbSession()  # anonymous session; the SGCN items read here are public
# +
# The SGCN parent item carries a file holding the 2005 SWAP national list;
# find its URL in the item metadata and fetch it as one name per line.
sgcn_base_item = sb.get_item('56d720ece4b015c306f442d5')
historic_national_list_file = next((f["url"] for f in sgcn_base_item["files"] if f["title"] == "Historic 2005 SWAP National List"), None)
if historic_national_list_file is not None:
    # NOTE(review): if this file is ever missing, historic_national_list is
    # never defined and check_historic_list() below raises NameError.
    historic_national_list = requests.get(historic_national_list_file).text.split("\n")
# -
def check_historic_list(scientificname, historic_list=None):
    """Return True if *scientificname* appears on the 2005 SWAP national list.

    Parameters
    ----------
    scientificname : str
        Scientific name exactly as reported in the source record.
    historic_list : iterable of str, optional
        Names to check against. Defaults to the module-level
        ``historic_national_list`` fetched from ScienceBase above.

    Returns
    -------
    bool
    """
    if historic_list is None:
        historic_list = historic_national_list
    # Membership test directly yields the boolean; no if/else needed.
    return scientificname in historic_list
# +
# Page through every child item of the SGCN collection (500 per request),
# accumulating just the metadata fields the processing loop needs.
params = dict(
    parentId="56d720ece4b015c306f442d5",
    fields="title,dates,files,tags",
    max=500,
)
sgcn_items = []
page = sb.find_items(params)
while page and "items" in page:
    sgcn_items.extend(page["items"])
    page = sb.next(page)
# +
# %%time
# Pull each state's tab-delimited "Process File" and normalize it so every
# state list shares a common schema before concatenation.
source_data = list()
for index, item in enumerate(sgcn_items):
    # Each item is expected to carry exactly one file titled "Process File";
    # next() without a default raises StopIteration if one is missing.
    data_file = next(l["url"] for l in item["files"] if l["title"] == "Process File")
    try:
        df_src = pd.read_csv(data_file, delimiter="\t")
    except UnicodeDecodeError:
        # A handful of state files are not UTF-8; latin1 accepts any byte.
        df_src = pd.read_csv(data_file, delimiter="\t", encoding='latin1')
    # Make lower case columns to deal with slight variation in source files
    df_src.columns = map(str.lower, df_src.columns)
    # Set the file updated date from the ScienceBase file to each record in the dataset for future reference
    df_src["source_file_date"] = next(l["dateUploaded"] for l in item["files"] if l["title"] == "Process File")
    # Set the state name from the ScienceBase Item tag if needed
    if "state" not in df_src.columns:
        df_src["state"] = next(t["name"] for t in item["tags"] if t["type"] == "Place")
    # Set the reporting year from the ScienceBase Item date if needed
    if "year" not in df_src.columns:
        df_src["year"] = next(d["dateString"] for d in item["dates"] if d["type"] == "Collected")
    # Get rid of the reported '2005 SWAP' column because we can't count on it and it's too messy
    if "2005 swap" in df_src.columns:
        df_src.drop("2005 swap", axis=1, inplace=True)
    # Standardize naming of the reported taxonomic group column (though we may get rid of this eventually)
    if "taxonomy group" in df_src.columns:
        df_src.rename(columns={"taxonomy group": "taxonomic category"}, inplace=True)
    # Take care of the one weird corner case
    if "taxonomy group (use drop down box)" in df_src.columns:
        df_src.rename(columns={"taxonomy group (use drop down box)": "taxonomic category"}, inplace=True)
    # Clean up the scientific name string for lookup by applying the function from bis_utils
    df_src["clean_scientific_name"] = df_src.apply(lambda x: bis_utils.clean_scientific_name(x["scientific name"]), axis=1)
    # Check the historic list and flag any species names that should be considered part of the 2005 National List
    df_src["historic_list"] = df_src.apply(lambda x: check_historic_list(x["scientific name"]), axis=1)
    source_data.append(df_src)
# Put the individual dataframes together into one overall set
# (sort=True unions the differing column sets; absent columns become NaN)
df_source = pd.concat(source_data, sort=True)
# -
df_source.to_csv("sgcn_source_data.csv", index=False)
| Gather Species.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# language: python
# name: python3
# ---
import numpy as np
import pandas as pd
import matplotlib.pyplot as plt
from sklearn.preprocessing import LabelEncoder
from sklearn.preprocessing import StandardScaler
from sklearn.ensemble import RandomForestRegressor
from sklearn.linear_model import LinearRegression
from sklearn.tree import DecisionTreeRegressor
from sklearn.model_selection import cross_val_score
from sklearn.model_selection import train_test_split
# +
# Column groups for the Boston housing data.
# NOTE(review): NUMERICAL_COLS and NO_TRANSFORM are not referenced anywhere in
# the visible notebook -- they look like leftovers from a planned per-column
# transformation step (StandardScaler is applied to every column below instead).
NUMERICAL_COLS = [
    'crim', # numerical
    'zn', # numerical
    'nonretail', # numerical
    'nox', # numerical
    'rooms', # numerical
    'age', # numerical
    'dis', # numerical
    'rad', # numerical
    'tax', # numerical
    'ptratio', # numerical
    'b', # numerical
    'lstat', # numerical
]
NO_TRANSFORM = ['river']
# -
def get_data():
    """Load the Boston housing dataset into a labeled DataFrame.

    The raw file at ./datasets/housing.data is whitespace-delimited and has
    no header row, so the 14 canonical column names are assigned manually.

    Returns
    -------
    pandas.DataFrame
        One row per census tract; 'medv' (median home value) is the target.
    """
    # sep=r'\s+' replaces delim_whitespace=True, which is deprecated and
    # removed in pandas 2.2; behavior is identical.
    dataframe = pd.read_csv('./datasets/housing.data', header=None, sep=r'\s+')
    # dataset does not have headers so we must manually rename the columns
    dataframe.columns = [
        'crim', # numerical
        'zn', # numerical
        'nonretail', # numerical
        'river', # binary
        'nox', # numerical
        'rooms', # numerical
        'age', # numerical
        'dis', # numerical
        'rad', # numerical
        'tax', # numerical
        'ptratio', # numerical
        'b', # numerical
        'lstat', # numerical
        'medv', # numerical -- this is the target
    ]
    return dataframe
# # Data Cleaning
# +
dataset_df = get_data()
N = len(dataset_df)  # row count; not used further in the visible cells
X = dataset_df.loc[:, dataset_df.columns != 'medv']  # all 13 feature columns
y = dataset_df.loc[:, dataset_df.columns == 'medv']  # target kept as a one-column DataFrame
# Standardize features to zero mean / unit variance. Note this also scales
# the binary 'river' column, despite the NO_TRANSFORM hint above.
scaler = StandardScaler()
X = scaler.fit_transform(X)
y = np.log(y) # Here we care about relative error so we use log of y (being 5k off on 10k price is different than if price is 100k)
# Fixed random_state makes the 70/30 split reproducible.
X_train, X_test, y_train, y_test = train_test_split(X, y, test_size=0.30, random_state=42)
print("X train", X_train)
print("Y train", y_train)
# -
# # Random Forest
# Fit a 100-tree random forest on the standardized features. np.ravel
# flattens the (n, 1) target into the 1-D array sklearn expects.
rf_model = RandomForestRegressor(n_estimators=100)
rf_model.fit(X_train, np.ravel(y_train))
rf_predictions = rf_model.predict(X_test)
# +
# BUG FIX: this cell referenced an undefined name `predictions`; the random
# forest's predictions are stored in `rf_predictions` (previous cell), so
# every plot below raised NameError before this fix.
# Scatter true values vs. predictions; a perfect model would put every
# point on the red 1:1 line.
plt.scatter(y_test, rf_predictions)
plt.xlabel("target")
plt.ylabel("prediction")
# Round the extremes so the 1:1 reference line spans the full data range.
ymin = np.round( min( min(y_test.values), min(rf_predictions) ) )
ymax = np.ceil( max( max(y_test.values), max(rf_predictions) ) )
print("ymin:", ymin, "ymax:", ymax)
r = range(int(ymin), int(ymax) + 1)
plt.plot(r, r, 'r')
plt.show()
# Overlay targets and predictions by test-set index for a visual comparison.
plt.plot(y_test.values, label='targets')
plt.plot(rf_predictions, label='predictions')
plt.legend()
plt.show()
# -
# # Baseline test
# +
# Compare the forest against simpler baselines using 5-fold cross-validation
# on the training split (scores are R^2 for sklearn regressors).
baseline = LinearRegression()
single_tree = DecisionTreeRegressor()
print("Test single tree:", cross_val_score(single_tree, X_train, np.ravel(y_train.values), cv=5).mean())
print("CV baseline:", cross_val_score(baseline, X_train, np.ravel(y_train.values), cv=5).mean())
print("CV forest:", cross_val_score(rf_model, X_train, np.ravel(y_train.values), cv=5).mean())
# Refit on the full training split, then score all three on the held-out test set.
single_tree.fit(X_train, y_train)
baseline.fit(X_train, y_train)
print("test score single tree:", single_tree.score(X_test, y_test.values))
print("test score baseline:", baseline.score(X_test, y_test.values))
print("test score forest:", rf_model.score(X_test, y_test.values))
# -
| RandomForestRegression.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# language: python
# name: python3
# ---
# # PROBLEMAS DIVERSOS
# <h3>1.</h3>
# Realizar una función que permita la carga de n alumnos. Por cada alumno se deberá preguntar el nombre completo y permitir el ingreso de 3 notas. Las notas deben estar comprendidas entre 0 y 10. Devolver el listado de alumnos.
def _pedir_nota(numero):
    """Keep prompting until the user enters an integer nota between 0 and 10."""
    while True:
        nota = int(input("Ingrese la nota %d del alumno " % numero))
        if 0 <= nota <= 10:
            return nota
        print("La nota debe estar entre 0 y 10.")

# The exercise statement above requires notas in [0, 10]; the original code
# accepted any integer. The dead `alumnos=[]` initializer is also removed --
# it was immediately shadowed by the dict built inside the loop.
num = int(input("Ingrese el número de alumnos "))
listado_alumnos = []
for i in range(num):
    nomb = input("Ingrese el nombre completo del alumno: ")
    nota1 = _pedir_nota(1)
    nota2 = _pedir_nota(2)
    nota3 = _pedir_nota(3)
    alumnos = {'nombre': nomb, 'notas': [nota1, nota2, nota3]}
    listado_alumnos.append(alumnos)
# ### 2.
# Definir una función que dado un listado de alumnos evalúe cuántos aprobaron y cuántos desaprobaron, teniendo en cuenta que se aprueba con 4. La nota será el promedio de las 3 notas para cada alumno.
def contar_aprobados(listado):
    """Count passing and failing students in *listado*.

    A student passes when the average of their notas is >= 4.
    Returns a tuple ``(aprobados, desaprobados)``.

    The original cell reset ``listado_alumnos`` to an empty list and only
    printed the promedio of variables leaked from the loading loop, so it
    never implemented this task.
    """
    aprobados = sum(1 for alumno in listado
                    if sum(alumno['notas']) / len(alumno['notas']) >= 4)
    return aprobados, len(listado) - aprobados

try:
    # Preserved from the original cell: promedio of the last student entered
    # (nota1..nota3 leak out of the loading loop above).
    suma = nota1 + nota2 + nota3
    promedio = suma/3
    print("el promedio de notas es: %d"%promedio)
    aprobados, desaprobados = contar_aprobados(listado_alumnos)
    print("aprobaron %d y desaprobaron %d" % (aprobados, desaprobados))
except NameError:
    pass  # the loading cell has not been run yet (out-of-order execution)
# ### 3.
# Informar el promedio de nota del curso total.
# +
### tener en cuenta que se aprueba con nota >=4
def promedio_curso(listado):
    """Return the course-wide average over every nota of every alumno.

    Returns 0.0 for an empty listado. The original cell printed
    aprobado/desaprobado for a single leaked `promedio` variable, which does
    not match this task's statement.
    """
    todas_las_notas = [nota for alumno in listado for nota in alumno['notas']]
    if not todas_las_notas:
        return 0.0
    return sum(todas_las_notas) / len(todas_las_notas)

try:
    print("el promedio del curso es: %.2f" % promedio_curso(listado_alumnos))
except NameError:
    pass  # listado_alumnos has not been loaded yet (out-of-order execution)
# -
# ### 4.
# Realizar una función que indique quién tuvo el promedio más alto y quién tuvo la nota promedio más baja.
# +
def extremos_promedio(listado):
    """Return (name with highest average nota, name with lowest average nota).

    Raises ValueError for an empty listado. The original cell merely
    duplicated the broken promedio print from task 2 and never implemented
    this comparison.
    """
    if not listado:
        raise ValueError("listado vacío: no hay alumnos para comparar")

    def promedio(alumno):
        return sum(alumno['notas']) / len(alumno['notas'])

    mejor = max(listado, key=promedio)
    peor = min(listado, key=promedio)
    return mejor['nombre'], peor['nombre']

try:
    mejor_nombre, peor_nombre = extremos_promedio(listado_alumnos)
    print("Promedio más alto: %s; promedio más bajo: %s" % (mejor_nombre, peor_nombre))
except (NameError, ValueError):
    pass  # listado_alumnos missing or empty (out-of-order execution)
# -
| Modulo3/Ejercicios/Problemas Diversos.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# language: python
# name: python3
# ---
# Import dependencies
from sqlalchemy import create_engine, Column, Integer, String, Float
# NOTE(review): sqlalchemy.ext.declarative.declarative_base is deprecated
# since SQLAlchemy 1.4 in favor of sqlalchemy.orm.declarative_base --
# confirm the installed version before changing the import.
from sqlalchemy.ext.declarative import declarative_base
from sqlalchemy.orm import Session
import pandas as pd
# Create classes
Base = declarative_base()  # shared declarative base; subclasses below become mapped tables
# +
class License(Base):
    """ORM model for one dog-license record (table ``license_records``)."""
    __tablename__ = 'license_records'
    id = Column(Integer, primary_key=True)
    # rownumber preserves the record's position in the source CSV
    rownumber = Column(Integer, nullable=False)
    animalgender = Column(String(1), nullable=False)  # single-char code -- presumably 'M'/'F'; confirm against source data
    breedname = Column(String(255), nullable=False)
    borough = Column(String(255), nullable=False)
    zipcode = Column(Integer, nullable=False)
class Relationship(Base):
    """Lookup table mapping each breed name to its breed group."""
    __tablename__ = 'breed_breed_group_relationship'
    id = Column(Integer, primary_key=True)
    breedname = Column(String(255), nullable=False)
    breedgroup = Column(String(255), nullable=False)
    # assumedflag: integer flag -- presumably 1 when the group was inferred
    # rather than sourced; confirm against the CSV's documentation.
    assumedflag = Column(Integer, nullable=False)
class Stats(Base):
    """Per-breed statistics (table ``breed_stats``)."""
    __tablename__ = 'breed_stats'
    id = Column(Integer, primary_key=True)
    breedname = Column(String(255), nullable=False)
    avgweight = Column(Float, nullable=False)  # average weight -- units not stated in source; verify
    avglife = Column(Float, nullable=False)  # average lifespan -- presumably years; verify
# -
# Create SQLite database and session
engine = create_engine('sqlite:///SQLite/dog_data.sqlite')
Base.metadata.create_all(engine)  # creates the three mapped tables if absent
session = Session(bind=engine)
# Read CSV files into DataFrames
license_records = pd.read_csv('Resources/license_records.csv')
breed_breed_group_relationship = pd.read_csv('Resources/breed_breed_group_relationship.csv')
breed_stats = pd.read_csv('Resources/breed_stats.csv')
# Peek at the first rows of each frame to sanity-check the loads
license_records.head(2)
breed_breed_group_relationship.head(2)
breed_stats.head(2)
| data_wrangling/.ipynb_checkpoints/app_test_notebook-checkpoint.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python [Root]
# language: python
# name: Python [Root]
# ---
# # February 2, 2017 class: Precipitation, continued
# Let's start off with the usual import. The Seaborn package generally makes your plots look nicer. You probably need to install it first. Open up a terminal window and type `pip install seaborn`, and your computer will do the rest! If you don't have time to do this now, your plot will still work but won't look quite as pretty.
# +
# Import numerical tools
import numpy as np
#Import pandas for reading in and managing data
import pandas as pd
#This imports the statistics of the normal distribution
from scipy.stats import norm
# Import pyplot for plotting
import matplotlib.pyplot as plt
#Import seaborn (useful for plotting)
#import seaborn as sns
# Magic function to make matplotlib inline; other style specs must come AFTER
# %matplotlib inline
# %config InlineBackend.figure_formats = {'svg',}
# #%config InlineBackend.figure_formats = {'png', 'retina'}
# -
# We're going to read in [some actual rainfall gauge data](https://drive.google.com/file/d/0BzoZUD3hISA4bE1WbFJocHpSd3c/view?usp=sharing) from Niger. The variable precip6hrly is precipitation data collected every 6 hours for the full years 1990-2012. The variable PrecipDaily is the same dataset, aggregated at the daily time interval.
# Use pd.read_csv() to read in the data and store in a DataFrame
# NOTE(review): absolute path to one user's desktop -- this cell only runs on
# the original author's machine; a relative path would be more portable.
fname = '/Users/lglarsen/Desktop/Laurel Google Drive/Terrestrial hydrology Spr2017/Public/precipAll.csv'
df = pd.read_csv(fname)
df.head()
p6 = df['precip6hrly']  # 6-hourly precipitation series
print(len(p6)) #Print out the length of this variable.
pday = df['precipDaily']  # daily series, NaN-padded to the length of p6
print(len(pday)) #Print out the length of this variable. It should be 4x shorter than p6, but you'll see it's not!
print(pday[len(pday)-1]) #Let's look at the last index of this array. Why do we index pd with len(pd)-1?
# We can see that the pd.read_csv command reads in the maximum number of rows on the spreadsheet. Although the data in the column for daily precipitation do not extend for as many rows as the data in the column for 6-hourly precipitation, the length of both arrays ends up being the same, with the "extra" rows in the daily column being occupied by "nan" (**N**ot **A** **N**umber--coding parlance for blanks here).
#
# So, let's get rid of those blanks.
# Boolean-mask out the padding: keep only rows where pday is a real number.
pday_no_nan = pday[~np.isnan(pday)]
print(len(pday_no_nan))
# Let's break down what we just did. Recall that `np` is how we refer to a command that comes from the `numpy` library (whose commands are actually very similar to Matlab commands, in case you are familiar with those!). `np.isnan` queries each element in an array and assigns a "true" to those elements that are "NaN" and a "false" to elements that are not "NaN." It returns an array that is the same length as the argument in the command. Insert a cell below to print out the length of `np.isnan(pday)`. Do you remember how to look at the first few entries of this array?
#
# Next, the `~` symbol: This inverts the outcome of a logical operation. In other words, it will cause a "true" statement to become "false," and vice-versa.
#
# Last, when you index a variable with a logical array, the "output" will be just those elements of the array (rows) that evaluate as "true."
#
# Putting this all together, we are creating an array, `pday_no_nan`, which is equal to all of the rows of `pday` that are NOT NaNs. In other words, just the actual rows of daily data! We should see that the length is 1/4 that of p6, which is indeed the case!
#
# It's generally good practice not to overwrite a variable with a variable of the same name unless you are **absolutely sure** that you have coded the formula correctly. In this case, our check has shown us that we have, so we can now feel free to rename `pday_no_nan` as `pday` for convenience.
pday = pday_no_nan
# ## Manipulating the precipitation data
# ### Hyetograms
# Let's create a hyetogram of one of the storms that is present in the 6-hourly data. (If you'd like to practice on your own, do this for a different storm!) Browsing the input CSV file in the spreadsheet program of your choice, you see that rows 3801-3816 on the spreadsheet (remember that in python this corresponds to *indices* 3799-3814 (you subtract 2 because recall that the first row is the column headings, and Python indexes starting with 0) constitutes a nice storm. Let's make a hyetogram of it!
# +
# Indices 3799-3814 INCLUSIVE require the slice to end at 3815 because a
# Python slice excludes its upper bound; the original [3799:3814] silently
# dropped the storm's final 6-hour reading.
mystorm = p6[3799:3815] #Grab only the data from that storm
print(mystorm) #Always a good idea to check and make sure you got the right thing.
#We will plot this using a bar plot from matplotlib.
#Bar locations
x = np.arange(len(mystorm)) #Generates a sequential range of numbers up to the length of mystorm.
#Let's label each bar with the median of the time increment that it corresponds to.
bar_labels = range(3, len(mystorm)*6, 6) #Midpoint hour of each 6-hour bin: 3, 9, ..., 6n-3.
#The original stop of len(mystorm)*6-3 produced one label fewer than there
#are bars, which makes plt.xticks fail; stopping at len(mystorm)*6 yields
#exactly one label per bar.
#Set the bar widths
bar_width = 1 #This ensures that the bars are depicted side-by-side, without
#any space between them.
#Now let's generate the plot!
plt.bar(x, mystorm, width=bar_width, align='center')
plt.grid(axis='x') #Generates a grid parallel to the x-axis.
plt.xticks(x, bar_labels)
plt.ylabel('Rainfall amount, mm')
plt.xlabel('Hour')
plt.title('Hyetogram for Niger storm')
# -
# ### Frequency analysis
# Now let's look at the distributions of storms within the daily dataset! Let's first look at how they differ from daily, to monthly, to annually by creating histograms.
print(len(pday)/365)  # number of complete years in the daily record
# `normed` was removed from matplotlib (3.1+); `density=True` is the
# equivalent argument for a probability-density histogram.
plt.hist(pday, 20, density=True) #creates a 20-bin, normalized histogram.
plt.title('Daily rainfall')
plt.xlabel('Rainfall, mm')
plt.ylabel('Probability density')
# Wow! Those zeros are really dominant! What does this distribution look like if we only look at the days when it is raining?
pd_raining = pday[pday>0] #Can you figure out what this means?
print(len(pd_raining)) #This should be shorter than pday!
plt.hist(pd_raining, 20, density=True)  # density replaces the removed `normed`
plt.xlabel('Daily rainfall, mm')
plt.ylabel('Probability density')
plt.title('Daily rainfall, no zeros')
# Still looks pretty extreme!
#
# Now let's aggregate this data into annual data. There is much less data (23 years), but it is still an interesting exercise. To do this, we will first "reshape" the daily array into a **matrix** in which the rows represent day of the year and the columns represent years. Thank god somebody has already taken out the "leap days" every 4 years, or this operation would be a lot trickier!
#
# Once we reshape the array into a matrix, we will simply sum all of the rows in each column to get an annual total, which will leave us with an array that has the total rainfall for each year (1 x 23 in size).
# The daily record is stored year after year (365 consecutive days per year),
# so building a [day-of-year x year] matrix must fill COLUMN-first
# (Fortran order). The original C-order reshape([365, 23]) put the first 23
# days of year 1 into row 0, scattering each year across rows and silently
# producing wrong annual sums.
p_reshaped = np.reshape(pday, [365, 23], order='F')
pAnn = np.sum(p_reshaped,0) #The '0' indicates that we sum over the rows.
print(len(pAnn))
#What will be the output of this box if instead you sum over the columns (1)?
# Now let's generate the histogram of the annual data.
plt.hist(pAnn, density=True)  # `density` replaces the removed `normed` argument
plt.xlabel('Rainfall, mm')
plt.ylabel('Probability density')
plt.title('Annual rainfall')
# With some squinting, you might convince yourself that this is a normal distribution that we just haven't sampled very well. For the sake of this exercise, let us assume that it is. Based on this assumption and the past record of data, what is the probability that the rainfall in any given year exceeds the maximum value in this dataset?
#
# First, we need to figure out the maximum and compute its z-score. Next we need to figure out the cumulative probability of the normal distribution **below** that value, and subtract it from 1. Here is how we do that in Python:
# How unusual is the wettest year on record, assuming annual totals are normal?
record_max = np.max(pAnn)
print(record_max)  # maximum annual rainfall, in mm
# Standardize the record: how many (population, ddof=0) standard deviations
# above the mean annual total is it?
zmax = (record_max - np.mean(pAnn)) / np.std(pAnn)
print(zmax)
# Probability of a year wetter than the record; the bare expression is
# displayed as the cell's value (unlike a 'print' statement).
1 - norm.cdf(zmax)
# Next, instead of dealing with the annual **total** rainfall, we'll do a frequency analysis with the annual **maximum daily** rainfall.
#
# Very similarly to what we just did above to calculate the annual total rainfall, we will generate an array of annual maximum daily rainfall (i.e., the greatest rainfall amount that fell within a single day each year).
# +
pMD = np.max(p_reshaped,0)  # annual maximum daily rainfall (one value per year)
#Now let's look at its distribution!
plt.hist(pMD, density=True)  # `density` replaces the removed `normed` argument
# -
# Kind of messy! Well, it's real data. Let's see what the Gumbel and Weibull methods give us for the probability of exceeding the maximum daily rain event in this record.
# #### Gumbel distribution
# First we need to calculate alpha and beta...
# +
# Gumbel (EV1) fit via the method of moments: alpha is the scale parameter,
# beta the location (mode) of the distribution.
alpha = np.sqrt(6)/np.pi*np.std(pMD) #From the formula
beta = np.mean(pMD)-0.5772*alpha
# 0.5772 above is the Euler-Mascheroni constant.
G_exceedP = 1-np.exp(-np.exp(-(pMD-beta)/alpha)) #exceedence probability
#This is the probability that you will get a storm that delivers this amount
#of rain or more in any given year.
G_T = 1/G_exceedP #Return interval, in years
#Now let's plot the data as a frequency/magnitude curve.
plt.plot(G_exceedP, pMD, 'o')
plt.xlabel('Exceedence probability')
plt.ylabel('Daily rainfall, mm')
plt.title('Gumbel method')
#To answer the original question, we can also query the exceedence probability of the maximum.
print(G_exceedP[pMD==np.max(pMD)])
#This literally means "print the entry of G_exceedP from the same row as where
#pMD equals its maximum value."
# -
# Same Gumbel curve, but with the return period (1/exceedance) on the x-axis.
plt.plot(G_T, pMD, linestyle='', marker='o', color='red')  # red dots, no line
plt.xlabel('Return period, years')
plt.ylabel('Daily rainfall, mm')
plt.title('Gumbel method')
# So is the max daily rainfall in this record more extreme or less extreme than you would expect to see in a data record that is this long?
# #### Weibull distribution
# To implement the Weibull method, we first have to sort the data so that we can rank them. Fortunately, there is an easy command for this in `numpy`.
#
# We also use the `np.arange` command, which generates an array of numbers. It is basically the same as the `range` command, but you cannot further manipulate numbers generated through `range` (by, for instance, adding to, multiplying, or dividing the list). If you want, you can create a new cell to play around with `range` and `arange`. With both of these commands, the last number generated is always one less than the number you specify! (You can see this if you play around with the command.) This is a tricky quirk of Python. Be careful with it.
# Rank the annual-maximum series from largest to smallest (rank 1 = largest).
ranked = np.sort(pMD)[::-1]
# Weibull plotting position: return period T = (n + 1) / rank.
W_T = (len(ranked) + 1.0) / np.arange(1, len(ranked) + 1)
W_exceedP = 1 / W_T  # exceedance probability is the reciprocal of return period
# +
#Now let's plot the data as a frequency/magnitude curve.
plt.plot(W_exceedP, ranked, 'o') #Why do we use 'ranked' instead of pMD?
plt.xlabel('Exceedence probability')
plt.ylabel('Daily rainfall, mm')
plt.title('Weibull method')
#Now let's print the exceedence probability of the maximum. Note the difference
#in how I do this.
# ranked is sorted descending, so index 0 is the record maximum.
print(W_exceedP[0])
# -
#Now let's plot the data as a frequency/magnitude curve.
plt.plot(W_T, ranked, 'ro')  # return period on the x-axis this time
plt.xlabel('Return period, years')
plt.ylabel('Daily rainfall, mm')
plt.title('Weibull method')
# Note that if we plot this on a logarithmic axis, it becomes more linear in appearance (hence easier to fit with a linear regression).
#Now let's plot the data as a frequency/magnitude curve.
plt.semilogx(W_T, ranked, 'ro')  # log-scaled x makes the relationship near-linear
plt.xlabel('Return period, years')
plt.ylabel('Daily rainfall, mm')
# How does the maximum return period compare to the Weibull calculations above?
# Just to take this one step further, we'll fit a linear regression to the data plotted on the log plot above using the `np.polyfit` command. This fits a polynomial of a specified degree to the x and y data that you supply and returns the coefficients. You can then use `np.polyval` to evaluate the fitted polynomial at each point in a list of x-values. Let's take a look...
# +
# Fit daily-rainfall maxima as a linear function of log10(return period).
fitted_coeffs = np.polyfit(np.log10(W_T), ranked, 1) #Returns the slope and
#intercept of the linear regression. Note that we had to first take the
#log-base-10 of the return period (since the relationship is linear in log space)
#Generate a list of years to apply the fitted polynomial to, from the minimum
#return period to the maximum return period (log-transformed, of course)
yrs = np.log10(np.arange(np.min(W_T), np.max(W_T)+1))
yval = np.polyval(fitted_coeffs, yrs) #Solve the linear equation at each return period of interest
#Now let's remake the plot:
#Now let's plot the data as a frequency/magnitude curve.
# Data points in red circles; fitted regression line in solid black.
plt.semilogx(W_T, ranked, 'ro', 10**yrs, yval, 'k-')
#Note that the semilogx command takes the log-base-10 of the x-variable.
#We ALREADY took the log of the return period in computing 'yrs', so
#to avoid taking the log of it again, we raise it to the power of 10 before
#feeding it to the 'semilogx' function.
plt.xlabel('Return period, years')
plt.ylabel('Daily rainfall, mm')
plt.title('Weibull method with fitted regression line')
# -
| PrecipFrequencyAnalyses.ipynb |
# -*- coding: utf-8 -*-
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .sh
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Bash
# language: bash
# name: bash
# ---
# # Shell scripting
#
# ## Introduction
#
# Instead of typing all the UNIX commands we need to perform one after the other, we can save them all in a file (a "script") and execute them all at once. Recall from the [UNIX and Linux Chapter](./01-Unix.ipynb#Meet-the-UNIX-shell) that the bash shell (or terminal) is a text command processor that interfaces with the Operating System. The bash shell provides a computer language that can be used to build scripts (AKA shell scripts) that can be run through the terminal.
# ### What shell scripts are good for
#
# It is possible to write reasonably sophisticated programs with shell scripting, but the bash language is not featured to the extent that it can replace a "proper" language like C, Python, or R. However, you will find that shell scripting is necessary. This is because, as you saw in the previous chapter, UNIX has an incredibly powerful set of tools that can be used through the bash terminal. Shell scripts allow you to automate the usage of these commands and create your own simple utility tools/scripts/programs for tasks such as backups, converting file formats, and handling & manipulating files and directories. This enables you to perform many everyday tasks on your computer without having to invoke another language that might require installation or updating.
#
#
# ## Your first shell script
#
# Let's write our first shell script.
#
# $\star$ Write and save a file called `boilerplate.sh` in `CMEECourseWork/week1/code`, and add the following script to it
# (type it in your code editor):
#
# ```bash
# #!/bin/bash
# # Author: <NAME> <EMAIL>
# # Script: boilerplate.sh
# # Desc: simple boilerplate for shell scripts
# # Arguments: none
# # Date: Oct 2019
#
# echo -e "\nThis is a shell script! \n" #what does -e do?
#
# #exit
#
# ```
# The `.sh` extension is not necessary, but useful for you and your programing IDE (e.g., Visual Studio Code, Emacs, etc) to identifying the file type.
# * The first line is a "shebang" (or sha-bang or hashbang or pound-bang or hash-exclam or hash-pling! – Wikipedia). It can also can be written as `#!/bin/sh`. It tells the bash interpreter that this is a bash script and that it should be interpreted and run as such.
# * The hash marks in the following lines tell the interpreter that it should ignore the lines following them (that's how you put in script documentation (who wrote the script and when, what the script does, etc.) and comments on particular line of script.
# * Note that there is a commented out `exit` command at the end of the script. Uncommenting it will not change the behavior of the script, but will allow you to generate a error code, and if the command is inserted in the middle of the script, to stop the code at that point. To find out more, see [this](https://bash.cyberciti.biz/guide/The_exit_status_of_a_command) and [this](https://stackoverflow.com/questions/1378274/in-a-bash-script-how-can-i-exit-the-entire-script-if-a-certain-condition-occurs) in particular.
# Next, let's run this script.
# ## Running shell scripts
#
# There are two ways of running a script:
#
# 1. Call the interpreter bash to run the file:
#
# ```bash
# bash myscript.sh
# ```
#
# (You can also use ```sh myscript.sh```)
#
# This is the right way if the script does something specific in a given project.
# 2. Make the script executable and execute it:
#
# ```bash
# chmod +x myscript.sh
# ./myscript.sh # the ./ is needed
# ```
# Use this second approach for a script that does something generic, and is likely to be reused again and again (*Can you think of examples?*)
# The generic scripts of type (2) can be saved in `username/bin/`, and made easily accessible by telling UNIX to look in `/home/bin` for commands
#
# ```bash
# mkdir ~/bin
# PATH=$PATH:$HOME/bin
# ```
# So let's run your first shell script.
#
# $\star$ `cd` to your `code` directory, and run it (here I am assuming you are in `sandbox` or `data`, continuing where you [left off](./01-Unix.ipynb#Using-grep) in the Unix and Linux Chapter):
# Move from sandbox/data into the code directory, then run the script
# through the bash interpreter (method 1 above).
cd ../code
bash boilerplate.sh
# ## A useful shell-scripting example
#
# Let's write a shell script to transform comma-separated files (csv) to tab-separated files and vice-versa. This can be handy — for example, in certain computer languages, it is much easier to read tab or space
# separated files than csv (e.g., `C`)
#
# To do this, in the bash we can use `tr` (abbreviation of `tr`anslate or `tr`ansliterate), which deletes or substitute characters. Here are some examples.
echo "Remove excess spaces." | tr -s " "
echo "remove all the a's" | tr -d "a"
echo "set to uppercase" | tr [:lower:] [:upper:]
echo "10.00 only numbers 1.33" | tr -d [:alpha:] | tr -s " " ","
# Now write a shell script to substitute all tabs with commas called `tabtocsv.sh` in `week1/code`:
#
# ```bash
# #!/bin/bash
# # Author: Your name <EMAIL>
# # Script: tabtocsv.sh
# # Description: substitute the tabs in the files with commas
# #
# # Saves the output into a .csv file
# # Arguments: 1 -> tab delimited file
# # Date: Oct 2019
#
# echo "Creating a comma delimited version of $1 ..."
# cat $1 | tr -s "\t" "," >> $1.csv
# echo "Done!"
# exit
# ```
#
# Now test it (note where the output file gets saved and why). First create a text file with tab-separated text:
echo -e "test \t\t test" >> ../sandbox/test.txt # again, note the relative path!
# Now run your script on it
bash tabtocsv.sh ../sandbox/test.txt
# Note that
#
# * `$1` is the way a shell script defines a placeholder for a variable (in this case the filename). See next section for more on variable names in shell scripts.
#
# * The new file gets saved in the same location as the original (*Why is that?*)
#
# * The file got saved with a `.txt.csv` extension. That's not very nice. Later you will get an opportunity to fix this!
# ## Variables in shell scripts
#
# There are three ways to assign values to variables (note lack of spaces!):
#
# 1. Explicit declaration: `MYVAR=myvalue`
# 2. Reading from the user: `read MYVAR`
# 3. Command substitution: `MYVAR=$(ls | wc -l)`
#
# Here are some examples of assignments (try them out and save as a single `week1/code/variables.sh` script):
#
# ```bash
#
# #!/bin/bash
#
# # Shows the use of variables
# MyVar='some string'
# echo 'the current value of the variable is' $MyVar
# echo 'Please enter a new string'
# read MyVar
# echo 'the current value of the variable is' $MyVar
#
# ## Reading multiple values
# echo 'Enter two numbers separated by space(s)'
# read a b
# echo 'you entered' $a 'and' $b '. Their sum is:'
# mysum=`expr $a + $b`
# echo $mysum
# ```
#
# And also (save as `week1/code/MyExampleScript.sh`):
#
# ```bash
# #!/bin/bash
#
# msg1="Hello"
# msg2=$USER
# echo "$msg1 $msg2"
# echo "Hello $USER"
# echo
# ```
# ### Some more examples
#
# Here are a few more illustrative examples (test each one out, save in `week1/code/` with the given name):
#
# #### Count lines in a file
#
# Save this as `CountLines.sh`:
#
# ```bash
# #!/bin/bash
#
# NumLines=`wc -l < $1`
# echo "The file $1 has $NumLines lines"
# echo
# ```
# The `<` redirects the contents of the file to the stdin ([standard input](https://en.wikipedia.org/wiki/Standard_streams)) of the command `wc -l`. It is needed here because without it, you would not be able to catch *just* the numerical output (number of lines). To see this, try deleting `<` from the script and see what the output looks like (it will also print the script name, which you do not want).
#
# #### Concatenate the contents of two files
#
# Save this as `ConcatenateTwoFiles.sh`:
#
# ```bash
# #!/bin/bash
#
# cat $1 > $3
# cat $2 >> $3
# echo "Merged File is"
# cat $3
# ```
#
# #### Convert tiff to png
#
# This assumes you have done `apt install imagemagick` (remember `sudo`!)
#
# Save this as `tiff2png.sh`:
#
# ```bash
# #!/bin/bash
#
# for f in *.tif;
# do
# echo "Converting $f";
# convert "$f" "$(basename "$f" .tif).png";
# done
# ```
# ## Practicals
#
# ### Instructions
#
# * Along with the completeness of the practicals/exercises themselves, you will be marked on the basis of how complete and well-organized your directory structure and content is.
#
# * Review (especially if you got lost along the way) and make sure all the shell scripts you created in this chapter are functional.
#
# * Make sure you have your weekly directory organized with `data`, `sandbox`, `code` with the necessary files, under ` CMEECourseWork/week1`.
#
# * *All scripts should run on any other Unix/Linux machine* — for example, always call data from the `data` directory using relative paths.
#
# * Make sure there is a `readme` file in every week's directory. This file should give an overview of the weekly directory contents, listing all the scripts and what they do. This is different from the `readme` for your overall git repository, of which `Week 1` is a part. You will write a similar ` readme` for each subsequent weekly submission.
#
# * Don't put any scripts that are part of the submission in your `home/bin` directory! You can put a copy there, but a working version should be in your repository.
#
# ### Improving scripts
#
# Note that some of the shell scripts that you have created in this chapter above requires input files. For example, `tabtocsv.sh` needs one input file, and `ConcatenateTwoFiles.sh` needs two. When you run any of these scripts without inputs (e.g., just `bash tabtocsv.sh`), you either get no result, or an error.
#
# * The goal of this exercise is to make each such script robust so that it gives feedback to the user and exits if the right inputs are not provided.
#
# ### A new shell script
#
# * Write a `csvtospace.sh` shell script that takes a `c`omma `s`eparated `v`alues and converts it to a space separated values file. However, it must not change the input file — it should save it as a differently named file.
#
# * This script should be able to handle wrong or missing inputs (similar to the previous exercise (a)).
#
# * Save the script in `CMEECourseWork/week1/code`, and run it on the `csv` data files that are in `Temperatures` in the master repository's `Data` directory.
# ## Readings & Resources
#
# - Plenty of shell scripting resources and tutorials out there; in particular, look up
# [http://www.tutorialspoint.com/unix/unix-using-variables.htm](http://www.tutorialspoint.com/unix/unix-using-variables.htm)
#
# * Some shell scripting [examples](https://linuxhint.com/30_bash_script_examples)
| content/notebooks/02-ShellScripting.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# name: python3
# ---
# + [markdown] id="view-in-github" colab_type="text"
# <a href="https://colab.research.google.com/github/xmpuspus/census-income-webapp/blob/master/TabularClassificationFastAI.ipynb" target="_parent"><img src="https://colab.research.google.com/assets/colab-badge.svg" alt="Open In Colab"/></a>
# + id="0gyTlRdWfE8p" colab_type="code" colab={}
import numpy as np
import pandas as pd
import matplotlib.pyplot as plt
import seaborn as sns
# + id="bl8TikR9JZJC" colab_type="code" outputId="3b9a682c-5df9-45bf-cdcc-53d2bba7941a" colab={"base_uri": "https://localhost:8080/", "height": 34}
import gc
gc.collect()
# + id="7e8PTQ0eMCPL" colab_type="code" colab={}
# Load FastAI
from fastai import *
from fastai.tabular import *
# + id="RCZmpSlnMcob" colab_type="code" colab={}
# train test split
from sklearn.model_selection import train_test_split
# + id="IamjtGvVJbWJ" colab_type="code" outputId="e36388a9-9e13-4ca4-d247-f6bea9aea0df" colab={"base_uri": "https://localhost:8080/", "height": 54}
# Load data from google drive (prompts for authorization when run in Colab)
from google.colab import drive
drive.mount('/content/gdrive')
# + id="SeUlmVM5KmeB" colab_type="code" colab={}
# Path to the census dataset inside the mounted drive
data_repo_url = '/content/gdrive/My Drive/Datasets/'
data_url = data_repo_url + 'Census Income/census.csv'
# + id="Hylyw5QYQoZh" colab_type="code" colab={}
# Load Data
df = pd.read_csv(data_url)
# Derive the boolean target: True when the income column reads '>50K'
df['high_income'] = df['income'] == '>50K'
# + id="zuMeKC_dz6-Z" colab_type="code" outputId="e3639750-c179-42e5-a231-f983778b290b" colab={"base_uri": "https://localhost:8080/", "height": 343}
# Quick look at the raw rows
df.head()
# + id="L2gNes1q4XLw" colab_type="code" outputId="dd52132a-7d9f-4f78-814e-8f7863655cb5" colab={"base_uri": "https://localhost:8080/", "height": 68}
# Class balance of the target, as a fraction of all rows
df.high_income.value_counts()/df.shape[0]
# + id="cmTPhpQNK5lO" colab_type="code" colab={}
# Column roles for the fastai tabular pipeline
dep_var = 'high_income'
cat_names = ['workclass', 'education_level', 'marital-status',
             'occupation', 'relationship', 'race', 'sex',
             'native-country']
cont_names = ['age', 'capital-gain', 'capital-loss', 'hours-per-week']
# + id="8YCAUebKL2iM" colab_type="code" outputId="e0a1fcc8-770b-410f-9c4b-7d690f8a68e5" colab={"base_uri": "https://localhost:8080/", "height": 71}
print("Categorical columns are : ", cat_names)
print('Continuous numerical columns are :', cont_names)
# Per-column preprocessing: impute missing values, encode categories, normalize
procs = [FillMissing, Categorify, Normalize]
# + id="LYQ6qC48Mbu9" colab_type="code" colab={}
# 70/30 train/test split with a fixed seed for reproducibility
df_train, df_test = train_test_split(df, test_size=0.3, random_state=42)
# + id="y6SxWubrOXa6" colab_type="code" outputId="1804e715-b810-45c3-cdb8-7244dfc39ac9" colab={"base_uri": "https://localhost:8080/", "height": 34}
df_train.shape, df_test.shape
# + id="j8d478w4L8H5" colab_type="code" outputId="4b4a9ea8-361d-470b-cb5a-4a5cf5c421e9" colab={"base_uri": "https://localhost:8080/", "height": 600}
# Transformations
procs = [FillMissing, Categorify, Normalize]
# Test Tabular List
test = TabularList.from_df(df_test, cat_names=cat_names, cont_names=cont_names, procs=procs)
# Train Data Bunch; the first 200 rows of df_train are held out as the validation set
data = (TabularList.from_df(df_train, path='.', cat_names=cat_names, cont_names=cont_names, procs=procs)
        .split_by_idx(list(range(0,200)))
        .label_from_df(cols = dep_var)
        .add_test(test, label=0)
        .databunch())
data.show_batch(rows=10)
# + id="SvIhIQeMMMAw" colab_type="code" outputId="389c32ac-f074-4969-aed7-49109283fed0" colab={"base_uri": "https://localhost:8080/", "height": 993}
# Create deep learning model (three dense layers, embedding dropout 0.1)
learn = tabular_learner(data, layers=[15, 100, 200], metrics=accuracy, emb_drop=0.1, callback_fns=ShowGraph)
# select the appropriate learning rate
learn.lr_find()
# we typically find the point where the slope is steepest
learn.recorder.plot()
# Fit the model based on selected learning rate (5 one-cycle epochs)
learn.fit_one_cycle(5, max_lr=1e-3)
# Analyse our model
learn.model
learn.recorder.plot_losses()
# + id="ZJUxfgYOoobB" colab_type="code" colab={}
# Predict our target value on the held-out test set
predictions, *_ = learn.get_preds(DatasetType.Test)
# Hard labels: index of the larger of the two class scores
labels = np.argmax(predictions, 1)
# + id="GAcWSMoP7CBr" colab_type="code" outputId="07502346-76d6-48d1-c611-d5f8e5d1019d" colab={"base_uri": "https://localhost:8080/", "height": 34}
labels
# + id="2X8tjFfy7Cq0" colab_type="code" colab={}
# metrics
from sklearn.metrics import classification_report
# + id="1zC2eY-17JAK" colab_type="code" outputId="2cca5a34-b162-4fe7-c4c2-2cdece0e8374" colab={"base_uri": "https://localhost:8080/", "height": 170}
# Precision / recall / F1 of the predicted labels against the true test targets
report = classification_report(df_test['high_income'], labels)
print(report)
| TabularClassificationFastAI.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# language: python
# name: python3
# ---
# # 7.24 Laminar Flow
# ## Objective: Rate an orifice plate in laminar flow
# Problem statement: calculate the Reynolds number to determine the type of fluid
#
# Given: S.A.E. 10W oil flows through a 3" schedule 40 pipe. It has a measured delta P of 0.4 psi. The orifice plate has a 2.15" diameter bore, and is a standard sharp-edged orifice. Find the flow rate through the orifice in gallons/minute.
# +
from fluids.units import *
from math import pi
# Actual dimensions of a 3" schedule 40 pipe (Di = inner diameter)
NPS, Di, Do, t = nearest_pipe(NPS=3, schedule='40')
A = 0.25*pi*Di*Di  # pipe cross-sectional flow area
D2 = 2.15*u.inch  # orifice bore diameter
mu = 40*u.cP # given
rho = 53.6*u.lb/u.ft**3  # density of the S.A.E. 10W oil
# Assume an absolute pressure of 5 bar.
dP = 0.4*u.psi  # measured pressure drop across the orifice
P1 = 5*u.bar
P2 = P1-dP
k = 1.3 # assumed
# -
# First calculate the orifice with the standard formula
m = differential_pressure_meter_solver(D=Di, rho=rho, mu=mu, k=k, D2=D2, P1=P1, P2=P2,
                                       m=None, meter_type='ISO 5167 orifice',
                                       taps='corner')
# Convert the solved mass flow to volumetric flow
Q = (m/rho).to_base_units()
print('Flow rate is: %s'% Q.to(u.gal/u.min))
# Reynolds number check: is the flow actually laminar?
v = Q/A
Re = rho*v*Di/mu
Re.to_base_units()
# Because the flow rate is laminar, outside the range of the ISO formula, we turn to another set of data - a set of CFD results developed for laminar flow by Hollingshead.
# Solve again using the Hollingshead correlation (CFD-derived), which covers laminar flow
m = differential_pressure_meter_solver(D=Di, rho=rho, mu=mu, k=k, D2=D2, P1=P1, P2=P2,
                                       m=None, meter_type='Hollingshead orifice')
Q = (m/rho).to_base_units()
print('Flow rate is: %s'% Q.to(u.gal/u.min))
# Re-check the Reynolds number with the corrected flow rate
v = Q/A
Re = rho*v*Di/mu
Re.to_base_units()
# The answer given in Crane is that a calibration for the meter must be provided. They assume a `C` of 0.75. The value of `C` according to Hollingshead is below.
differential_pressure_meter_C_epsilon(D=Di, D2=D2, m=m, P1=P1, P2=P2, rho=rho, mu=mu, k=k,
                                      meter_type='Hollingshead orifice')[0]
| docs/Examples/Crane TP 410 Solved Problems/7.24 Laminar Flow.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3.9.5 64-bit
# name: python3
# ---
# # POLINOMIO DE INTERPOLACIÓN DE HERMITE
# Grupo $\iota$ (iota)<br/>
# Importante: Para correr los códigos, es necesario tener instalado lo siguiente:
# - Python 2.7 o superior
# - Numpy (para el manejo de arreglos optimizados)
# - Sympy (para los polinomios)
# - Matplotlib (para las gráficas)
# Consideremos el problema de buscar un polinomio que cumpla con las siguientes características:
# - Sean $x_0, . . . , x_n$ números distintos pertenecientes al intervalo $[a,b]$ de $f$ en $x_i$, un polinomio $p(x_i)$ que aproxime a las mismas imágenes de $f(x_i)$ con $i=0,1,2,...,n$
#
# $$p(x_i)=f(x_i)$$
#
#
# - Además, $p(x_i)$ debe aproximar a las imágenes de la primera derivada de $f(x_i)$
#
# $$p'(x_i)=f'(x_i)$$
#
# El polinomio que cumple con esas características es el **_Polinomio de Interpolación de Hermite_**.
# ## TEOREMA:
#
# Si $f ∈ C^1 [a, b] $ y $x_0, . . . , x_n ∈ [a, b]$ son distintos, el **único** polinomio de menor grado que concuerda con $f$ y $f'$ en $x_0,x_1,...,x_n$ es el polinomio de Hermite de grado a lo sumo $2n + 1$ dado por
#
# $$H_{2n+1}(x) = \sum_{j=0}^{n} f(x_j)H_{n,j}(x) + \sum_{j=0}^{n} f'(x_j)\hat{H}_{n,j}(x)$$
#
# Siendo:
#
# $$H_{n,j}(x) = [1 - 2(x-x_j)L'_{n,j}(x_j)][L_{n,j}(x)]^2$$
# $$\hat{H}_{n,j}(x) = (x-x_j)[L_{n,j}(x)]^2$$
#
# Donde cada $L_{n,j}(x)$ es el $j$-ésimo coeficiente del polinomio de Lagrange de grado $n$.
# ## DEMOSTRACIÓN:
#
# - ### $H_{2n + 1}(x) = f(x)$
#
# Al igual que el _Lagrange_, Hermite es un polinomio $H_{2n + 1}(x)$ que interpola los mismos puntos que una función $f(x)$, por eso se le denomina **_polinomio osculante_**, ya que no tendrá específicamente la misma forma que $f(x)$, si no que solo "besará" los puntos dados de la función.
#
# Por lo tanto, recordemos que en _Lagrange_:
#
# $$L_{n,j}(x_i) = \begin{Bmatrix}0, i \neq j \\1, i=j\end{Bmatrix}$$
#
# Entonces, cuando $i \neq j$:
#
# $$H_{n,j}(x_i) = 0$$
# $$y$$
# $$\hat{H}_{n,j}(x_i)=0$$
#
# Por lo que para cada $i$:
#
# $$H_{n,i}(x_i) = [1 - 2(x_i-x_i)L'_{n,i}(x_i)]\cdot 1^2 = 1$$
# $$y$$
# $$\hat{H}_{n,i}(x_i) = (x_i-x_i)\cdot1^2 = 0$$
#
# Sustituyendo:
#
# $$H_{2n+1}(x_i) = \sum_{\substack{j=0 \\j\neq i}} ^{n} f(x_j) \cdot 0 + f(x_i) \cdot 1 + \sum_{j=0}^{n} f'(x_j) \cdot 0$$
# $$H_{2n+1}(x_i) = f(x_i)$$
# $$lqqd$$
#
# - ### $H'_{2n + 1}(x) = f'(x)$
#
# Realizando las derivadas obtenemos:
#
# $$H'_{n,j}(x_i) = -2L'_{n,j}(x_i)\cdot[L_{n,j}(x_i)]^2 + 2\cdot[1 - 2(x_i-x_j)L'_{n,j}(x_i)]\cdot[L_{n,j}(x_i)]\cdot[L'_{n,j}(x_i)]$$
# $$H'_{n,j}(x_i) = -2L'_{n,j}(x_i) + 2L'_{n,j}(x_i)$$
# $$H'_{n,j}(x_i) = 0$$
#
# Por lo tanto, $H'_{n,j}(x_i) = 0$ para todas las $i$ y $j$, finalmente:
#
# $$\hat{H}'_{n,j}(x_i) = [L_{n,j}(x_i)]^2 + (x_i - x_j)2L_{n,j}(x_i)L'_{n,j}(x_i)$$
# $$\hat{H}'_{n,j}(x_i) = 0$$
#
# Sustituyendo:
#
# $$H'_{2n+1}(x_i) = \sum_{j=0}^{n} f(x_j) \cdot 0 + \sum_{\substack{j=0\\j\neq i}}^{n} f'(x_j) \cdot 0 + f'(x_i) \cdot 1$$
# $$H'_{2n+1}(x_i) = f'(x_i)$$
# $$lqqd$$
# ## VENTAJAS:
# - Además de encontrar con Hermite un polinomio que tenga las mismas imágenes que la función original, sus derivadas también son las mismas imágenes que las derivadas de la función original.
# - Es fácil de calcular ya que garantiza la posición de los frames al ser una interpolación además de brindar la certeza de que la tangente de la curva generada es continua a lo largo de multiples segmentos.
# - La precisión será más exacta, ya que las rectas tangentes serán las mismas que las de la función original.
# ## DESVENTAJAS:
# - Ya que el grado del polinomio que cumple con los requisitos es de grado $2n + 1$, el grado n para obtener el polinomio de Hermite siempre será más alto de lo necesario.
# - Requiere la disponibilidad de las primeras derivadas, lo cual en muchos casos son desconocidas.
# - Similar a Lagrange, Hermite requiere que $f'(x_i)$ esté dentro de $0 < i < 1$, y no todas las funciones son capaces de cumplirlo.
# ## EJERCICIOS:
# - ### Ejercicio 1
#
# Use el polinomio de Hermite que concuerda con los datos listados en la tabla para encontrar una aproximación de $f(1.5)$.
#
#
# |$k$|$x_k$|$f(x_k)$ |$f'(x_k)$ |
# |---|-----|-----------|------------|
# | 0 | 1.3 | 0.6200860 | −0.5220232 |
# | 1 | 1.6 | 0.4554022 | −0.5698959 |
# | 2 | 1.9 | 0.2818186 | −0.5811571 |
#
#
# **Solución:**
# Primero calculamos los polinomios de Lagrange y sus derivadas.
#
# $$L_{2,0}(x)= \frac{(x-x_1)(x-x_2)}{(x_0-x_1)(x_0-x_2)}= \frac{50}{9}x^2-\frac{175}{9}x+\frac{152}{9}, \ \ \ \ \ \ \ \ \ L'_{2,0}(x)=\frac{100}{9}x-\frac{175}{9};$$
#
# $$L_{2,1}(x)= \frac{(x-x_0)(x-x_2)}{(x_1-x_0)(x_1-x_2)}= \frac{-100}{9}x^2-\frac{320}{9}x+\frac{247}{9}, \ \ \ \ \ \ \ \ \ L'_{2,1}(x)=\frac{-200}{9}x-\frac{320}{9};$$
#
# $$L_{2,2}(x)= \frac{(x-x_0)(x-x_1)}{(x_2-x_0)(x_2-x_1)}= \frac{50}{9}x^2-\frac{145}{9}x+\frac{104}{9}, \ \ \ \ \ \ \ \ \ L'_{2,2}(x)=\frac{100}{9}x-\frac{145}{9}.$$
#
# Los polinomios $H_{2,j}(x)$ y $\hat{H}_{2,j}(x)$ son entonces
#
# $$H_{2,0}(x)=[1-2(x-1.3)(-5)](\frac{50}{9}x^2-\frac{175}{9}x+\frac{152}{9})^2 = (10x-12)(\frac{50}{9}x^2-\frac{175}{9}x+\frac{152}{9})^2,$$
#
# $$H_{2,1}(x)=1.(\frac{-100}{9}x^2+\frac{320}{9}x-\frac{247}{9})^2,$$
#
# $$H_{2,2}(x)=10(2-x)(\frac{50}{9}x^2-\frac{145}{9}x+\frac{104}{9})^2,$$
#
# $$\hat{H}_{2,0}(x)=(x-1.3)(\frac{50}{9}x^2-\frac{175}{9}x+\frac{152}{9})^2,$$
#
# $$\hat{H}_{2,1}(x)=(x-1.6)(\frac{-100}{9}x^2+\frac{320}{9}x-\frac{247}{9})^2,$$
#
# $$\hat{H}_{2,2}(x)=(x-1.9)(\frac{50}{9}x^2-\frac{145}{9}x+\frac{104}{9})^2.$$
#
# Finalmente, multiplicamos los $H_{2,j}(x)$ por las imagenes en "y" $f(x_k)$ y los $\hat{H}_{2,j}(x)$ por los valores de las derivadas $f'(x_k)$
#
# $$H_5(x) = 0.6200860 H_{2,0}(x) + 0.4554022 H_{2,1}(x) + 0.2818186 H_{2,2}(x) − 0.5220232 \hat{H}_{2,0}(x) - 0.5698959 \hat{H}_{2,1}(x) - 0.5811571 \hat{H}_{2,2}(x)$$
#
# $$H_5(1.5) = 0.6200860 (\frac{4}{27}) + 0.4554022 (\frac{64}{81}) + 0.2818186 (\frac{5}{81}) − 0.5220232 (\frac{4}{405}) - 0.5698959 (\frac{-32}{405}) - 0.5811571 (\frac{-2}{405})$$
#
# $$H_5(1.5) = {\color{green}{0.5118277}}$$
# - ### Ejercicio 1.1: Realizando el ejercicio anterior con python
# +
from sympy import *
import matplotlib.pyplot as plt
import numpy as np
x = Symbol("x")  # global symbolic variable shared by every routine in this notebook
'''
n : valor del grado del polinomio 2n + 1
xs : valores de x
ys : valores de y
dys : valores de la primera derivada
'''
def hermite(n, xs, ys, dys):
    """Build the Hermite interpolation polynomial of degree n = 2m + 1.

    n   : target polynomial degree (2m + 1, so there are (n + 1) // 2 nodes)
    xs  : node abscissas x_i
    ys  : function values f(x_i)
    dys : first-derivative values f'(x_i)

    Returns a sympy expression in the global symbol ``x``.
    """
    node_count = (n + 1) // 2
    poly = 0
    for i in range(node_count):
        basis = lagrangeFactor(n, xs, i)
        slope = diff(basis).subs(x, xs[i])  # L'_{n,i}(x_i)
        # H_i matches f(x_i); hatH_i matches f'(x_i) (standard Hermite bases)
        H_i = (1 - 2 * (x - xs[i]) * slope) * basis**2
        hatH_i = (x - xs[i]) * basis**2
        poly += ys[i] * H_i + dys[i] * hatH_i
    return poly
'''
n : valor del grado del polinomio 2n + 1
xs : valores de x
k : valor iterativo
'''
def lagrangeFactor(n, xs, k):
    """Return the k-th Lagrange basis polynomial over the first (n + 1) // 2 nodes."""
    basis = 1
    for j in range((n + 1) // 2):
        if j == k:
            continue  # skip the node the basis is centred on
        basis *= (x - xs[j]) / (xs[k] - xs[j])
    return basis
# Exercise-1 data from the table: nodes, f(x_k) and f'(x_k)
xs = [1.3, 1.6, 1.9]
ys = [0.6200860, 0.4554022, 0.2818186]
dys = [-0.5220232, -0.5698959, -0.5811571]
pol = hermite(5, xs, ys, dys)
pol.expand()
# -
# Por lo tanto, al sustituír $H_{2n + 1}(1.5)$ obtenemos:
pol.subs(x, 1.5)  # evaluate H_5(1.5)
# Graficando los puntos y el polinomio obtenemos:
# +
# Datos: sample the polynomial on a grid one unit beyond the node range
a = np.min(xs[0]-1)
b = np.max(xs[len(xs)-1]+1)
hx = np.linspace(a,b,200)
py = lambdify(x,pol)  # compile the sympy expression into a numeric function
hy = py(hx)
# Gráfica
plt.plot(xs, ys, 'o', label='Puntos')
plt.plot(hx,hy,label='Polinomio')
plt.legend()
plt.xlabel('xi')
plt.ylabel('fi')
plt.title('Lagrange')
plt.show()
# -
# ## INTERPOLACIÓN DE HERMITE CON DIFERENCIAS DIVIDIDAS
# Las diferencias divididas generalizadas se construyen de igual manera que las _Diferencias Divididas de Newton_, con la única diferencia que ahora se necesita replicar nuestras muestras $x_i$ tantas veces como las derivadas que poseamos, en este caso se estudiara únicamente cuando se conoce la primera derivada.
#
# De igual manera que en la interpolación de Lagrange la interpolación de Hermite con el grado $2n + 1$ se escribirá inmediatamente calculadas las _Diferencias Divididas_.
#
# $$H_{2n+1}(x) = f[x_0] + f[x_0,x_0](x-x_0)+...+f[x_0,x_0,...,x_n,x_n](x-x_0)^2(x-x_{n-1})^2(x-x_n)$$
#
# Aparentemente se nota que $f[x_i,x_i]$ no estan definida puesto que
#
# $$f[x_i, x_i] = \frac{f[x_i]-f[x_i]}{x_i-x_i} = \frac{0}{0}$$
#
# Sin embargo, podemos tomar un paso infinitisimal $h$ para el termino resultando en
#
# $$f[x_i+h, x_i] = \frac{f[x_i+h]-f[x_i]}{x_i+h-x_i} = \frac{f[x_i+h]-f[x_i]}{h}$$
#
# Si se toma el limite cuando $h \rightarrow 0$ se tendrá
#
# $$f[x_i, x_i] = f'(x_i)$$
#
# Con lo anterior, es posible concluir que la nueva tabla de diferencias divididas es la siguiente:
#
# | $z$ | $f(z)$ | $1^{ra} Diferencia Dividida$ |$2^{da} Diferencia Dividida$ |
# |----------|-------------------|----------------------------------------------|------------------------------------------------------|
# |$z_0=x_0$ |$ f[z_0) = f(x_0)$ | | |
# | | |$f[z_0,z_1] = f'(x_0)$ | |
# |$z_1=x_0$ |$ f[z_1] = f(x_0)$ | |$f[z_0,z_1,z_2]=\frac{f[z_1,z_2]-f[z_0,z_1]}{z_2-z_0}$|
# | | |$f[z_1,z_2] = \frac{f[z_2]-f[z_1]}{z_2-z_1}$ | |
# |$z_2=x_1$ |$ f[z_2] = f(x_1)$ | |$f[z_1,z_2,z_3]=\frac{f[z_2,z_3]-f[z_1,z_2]}{z_3-z_1}$|
# | | |$f[z_2,z_3] = f'(x_1)$ | |
# |$z_3=x_1$ |$ f[z_3] = f(x_1)$ | |$f[z_2,z_3,z_4]=\frac{f[z_3,z_4]-f[z_2,z_3]}{z_4-z_2}$|
# | | |$f[z_3,z_4] = \frac{f[z_4]-f[z_3]}{z_4 - z_5}$| |
# |$z_4=x_2$ |$ f[z_4] = f(x_2)$ | |$f[z_3,z_4,z_5]=\frac{f[z_4,z_5]-f[z_3,z_4]}{z_5-z_3}$|
# | | |$f[z_4,z_5] = f'(x_2)$ | |
# |$z_5=x_2$ |$ f[z_5] = f(x_2)$ | | |
# - ### Ejercicio 2: Realizar el ejercicio 1 utilizando Interpolación de Hermite con Diferencias Divididas
# Usando los datos que se proporcionan en la tabla y el método de diferencias divididas de Newton, determine la aproximación polinomial de Hermite en $x=1.5$.
#
# |$k$|$x_k$|$f(x_k)$ |$f'(x_k)$ |
# |---|-----|-----------|------------|
# | 0 | 1.3 | 0.6200860 | −0.5220232 |
# | 1 | 1.6 | 0.4554022 | −0.5698959 |
# | 2 | 1.9 | 0.2818186 | −0.5811571 |
#
# **Solución:** Primero calcularemos las diferencias divididas:
#
# | $z$ | $f(z)$ |$1^{ra}$ |$2^{da}$ |$3^{ra}$ |$4^{ta}$ |$5^{ta}$ |
# |---------------------|---------------------------|----------------------------|-------------|------------|------------|-------------|
# |${\color{orange}{1.3}}$|${\color{orange}{0.6200860}}$| | | | | |
# | | |${\color{orange}{-0.5220232}}$| | | | |
# |${\color{orange}{1.3}}$|${\color{orange}{0.6200860}}$| |$-0.0897427$ | | | |
# | | |$-0.5489460$ | |$0.0663657$ | | |
# |${\color{orange}{1.6}}$|${\color{orange}{0.4554022}}$| |$-0.0698330$ | |$0.0026663$ | |
# | | |${\color{orange}{−0.5698959}}$| |$0.0679655$ | |$-0.0027738$ |
# |${\color{orange}{1.6}}$|${\color{orange}{0.4554022}}$| |$-0.0290537$ | |$0.0010020$ | |
# | | |$-0.5786120$ | |$0.0685667$ | | |
# |${\color{orange}{1.9}}$|${\color{orange}{0.2818186}}$| |$-0.0084837$ | | | |
# | | |${\color{orange}{−0.5811571}}$| | | | |
# |${\color{orange}{1.9}}$|${\color{orange}{0.2818186}}$| | | | | |
#
#
# Las entradas en color naranja en las primeras tres columnas de la tabla son los datos que se proporcionaron por el ejercicio. Las entradas restantes en esta tabla se generan con la fórmula de diferencias divididas estándar. Por ejemplo, para la segunda entrada en la tercera columna usamos la segunda entrada 1.3 en la segunda columna y la primera entrada 1.6 en esa columna para obtener:
#
# $$ \frac{0.4554022 − 0.6200860}{1.6-1.3} = −0.5489460 $$
#
# Para la primera entrada en la cuarta columna, usamos la primera entrada 1.3 en la tercera columna y la primera entrada 1.6 en esa columna para obtener:
#
# $$ \frac{−0.5489460 − (−0.5220232)}{1.6 − 1.3} = −0.0897427 $$
#
# El valor del polinomio de Hermite en 1.5 es:
#
# $$H_5(1.5) = f[1.3]+f'(1.3)(1.5 − 1.3) + f[1.3, 1.3, 1.6](1.5 − 1.3)^2+f[1.3, 1.3, 1.6, 1.6](1.5 − 1.3)^2(1.5 − 1.6)$$
# $$\ \ \ \ \ \ \ \ \ +f[1.3, 1.3, 1.6, 1.6, 1.9](1.5 − 1.3)^2(1.5 − 1.6)^2+f[1.3, 1.3, 1.6, 1.6, 1.9, 1.9](1.5 − 1.3)^2(1.5 − 1.6)^2(1.5 − 1.9)$$
#
# $$H_5(1.5) =0.6200860 + (−0.5220232)(0.2) + (−0.0897427)(0.2)^2+ 0.0663657(0.2)^2(−0.1) + 0.0026663(0.2)^2(−0.1)^2+ (−0.0027738)(0.2)^2(−0.1)^2(−0.4)$$
# $$H_5(1.5) = {\color{green}{0.5118277}}$$
# - ### Ejercicio 2.1: Realizar el ejercicio 2 utilizando Interpolación de Hermite con Diferencias Dividas
# +
def graphic(pol, xi, fi, x):
    """Plot the interpolation nodes together with the polynomial curve."""
    # compile the symbolic polynomial for fast numeric evaluation
    evaluate = lambdify(x, pol)
    # sample the curve one unit beyond the node range
    n_samples = 200
    lo = np.min(xi - 1)
    hi = np.max(xi + 1)
    grid = np.linspace(lo, hi, n_samples)
    curve = evaluate(grid)
    np.set_printoptions(precision=4)
    # draw the nodes first, then the curve on top
    plt.plot(xi, fi, 'o', label='Puntos')
    plt.plot(grid, curve, label='Polinomio')
    plt.legend()
    plt.xlabel('xi')
    plt.ylabel('fi')
    plt.title('Diferencias Divididas Simple Generalizada')
    plt.show()
def getSimplePolynomial(factors, fi, xi, n, x):
    """Assemble the Newton-form polynomial from divided-difference factors.

    factors : divided-difference coefficients (top row of the table)
    fi      : duplicated ordinates; fi[0] is the constant term
    xi      : duplicated abscissas
    n       : number of terms to accumulate
    x       : sympy symbol the polynomial is expressed in
    """
    pol = fi[0]
    for j in range(1, n):
        # j-th Newton term: factors[j-1] * (x - xi[0]) * ... * (x - xi[j-1])
        term = factors[j - 1]
        for k in range(j):
            term = term * (x - xi[k])
        pol = pol + term
    return pol.expand()
def fillTable(table, n, m, xi, dfi):
    """Fill the generalized (Hermite) divided-difference table in place.

    table : (n x m) array; col 0 = index, col 1 = duplicated nodes z_i,
            col 2 = duplicated f(z_i); columns >= 3 receive the differences.
    n     : number of rows (2 * number of nodes)
    m     : number of columns
    xi    : duplicated node abscissas
    dfi   : first-derivative values, consumed where nodes repeat

    Returns the same array object: ``tmp_table`` aliases ``table``, so the
    caller's array is mutated, not copied.
    """
    d = n-1  # entries remaining to fill in the current column
    j = 3
    tmp_table = table
    # Compute the table, starting at column 3
    while (j < m):
        # walk down the rows of this column
        i = 0
        step = j-2 # starts at 1
        derivate_iterator = 0
        while (i < d):
            denominador = (xi[i+step]-xi[i])
            numerador = tmp_table[i+1,j-1]-tmp_table[i,j-1]
            # 0/0 occurs exactly at a duplicated node in the first-difference
            # column; by the Hermite construction f[z_i, z_i] = f'(x_i).
            if denominador == 0 and numerador == 0:
                tmp_table[i, j] = dfi[derivate_iterator]
                derivate_iterator = derivate_iterator + 1
            else :
                tmp_table[i,j] = numerador/denominador
            i = i+1
        d = d - 1
        j = j+1
    return tmp_table
def duplicateArray(arr):
    """Return a float64 array with every element of *arr* repeated twice, in order.

    Replaces the original O(n^2) per-element ``np.append`` loop with a single
    vectorized ``np.repeat``. The dtype is forced to float to match the old
    behaviour (appending onto an empty float64 array always yielded float64).
    """
    return np.repeat(np.asarray(arr, dtype=float), 2)
def newtonHermite(xi, fi, dfi):
    """Hermite interpolation via generalized Newton divided differences.

    xi  : node abscissas (numpy array)
    fi  : function values f(x_i)
    dfi : first-derivative values f'(x_i)

    Builds the z-table with every node duplicated, fills the divided-difference
    table (fillTable substitutes f'(x_i) at the 0/0 entries), assembles the
    Newton-form polynomial, plots it, and returns the sympy expression.
    """
    duplicate_xi = duplicateArray(xi)
    duplicate_fi = duplicateArray(fi)
    array_length = len(duplicate_xi)
    # columns: running index k, duplicated nodes z, duplicated ordinates f(z)
    ki = np.arange(0, array_length, 1)
    table = np.concatenate(([ki], [duplicate_xi], [duplicate_fi]), axis=0)
    table = np.transpose(table)
    # reserve one column per divided-difference order
    dfinita = np.zeros(shape=(array_length, array_length), dtype=float)
    table = np.concatenate((table, dfinita), axis=1)
    [array_length, m] = np.shape(table)
    table = fillTable(table, array_length, m, duplicate_xi, dfi)
    # the Newton coefficients sit in the first row, starting at column 3
    newton_factors = table[0, 3:]
    array_length = len(dfinita)
    x = Symbol('x')
    polynomial = getSimplePolynomial(newton_factors, duplicate_fi, duplicate_xi, array_length, x)
    graphic(polynomial, duplicate_xi, duplicate_fi, x)
    # (removed a dead bare `polynomial` expression statement that had no effect)
    return polynomial
# Exercise-2 data: same table as exercise 1, solved with divided differences
hPol = newtonHermite(np.array([1.3,1.6, 1.9]), np.array([ 0.6200860, 0.4554022, 0.2818186]),np.array([ -0.5220232, -0.5698959, -0.5811571]))
# -
hPol
# Por lo tanto, al sustituír $H_{2n + 1}(1.5)$ obtenemos:
hPol.subs(x,1.5)
# - ### Ejercicio 3:
#
# Use la interpolación de Hermite en en python que concuerda con los datos listados en la tabla para encontrar una aproximación de $f(1.2)$.
#
#
# |$k$|$x_k$| $f(x_k)$ | $f'(x_k)$ |
# |---|-----|-----------|------------|
# | 0 | -1 | -2 | 18 |
# | 1 | 0 | 2 | -1 |
# | 2 | 1 | 6 | 18 |
# - #### Usando la función propuesta en el ejercicio 1.1
# +
# Exercise-3 data: nodes, f(x_k) and f'(x_k)
xs = [-1, 0, 1]
ys = [-2,2,6]
dys = [18,-1,18]
pol = hermite(5, xs, ys, dys)
pol.expand()
# -
# Por lo tanto, al sustituír $H_{2n + 1}(0.5)$ obtenemos:
pol.subs(x,0.5)
# Graficando los puntos y el polinomio obtenemos:
# +
# Datos: sample the polynomial on a grid one unit beyond the node range
a = np.min(xs[0]-1)
b = np.max(xs[len(xs)-1]+1)
hx = np.linspace(a,b,101)
py = lambdify(x,pol)
hy = py(hx)
# Gráfica
plt.plot(xs, ys, 'o', label='Puntos')
plt.plot(hx,hy,label='Polinomio')
plt.legend()
plt.xlabel('xi')
plt.ylabel('fi')
plt.title('Lagrange')
plt.show()
# -
# - #### Usando la función del ejercicio 2.1
hPol = newtonHermite(np.array([-1,0, 1]), np.array([ -2, 2, 6]),np.array([ 18, -1, 18]))
hPol
# Por lo tanto, al sustituír $H_{2n + 1}(0.5)$ obtenemos:
hPol.subs(x,0.5)
# ## COTA DEL ERROR:
# Sea $f$ una función continua sobre $[a,b]$, tal que las derivadas de $f$ de orden $\leqslant 2n + 2$ existen y son continuas en $[a,b]$. Si $p_{2n+1} \in P_{2n+1}$ es el polinomio que interpola a la función $f$ en los $n +1$ valores distintos $x_0,x_1,...,x_n \in [a,b]$, entonces para todo $x \in [a,b]$, existe $\xi = \xi(x) \in \ ]a,b[$, tal que:
#
# $$f(x) = P_{2n+1}(x) + \frac{f^{(2n+2)}(\xi)}{(2n + 2)!}[\pi_{n+1}(x)]^2$$
#
# Donde:
#
# $$\pi_{n+1}(x) = \prod_{j=0}^{n} (x-x_j) = (x-x_0)(x-x_1)...(x-x_n)$$
#
# Ademas, para $M_{2n+2} = \max_{z \in [a,b]}|f^{(2n+2)}(z)|$, se cumple que:
#
# $$|f(x)- P_{2n+1}(x)| \leqslant \frac{M_{2n+2}}{(2n+2)!}[\pi_{n+1}(x)]^2 \ \ \ \ \ \ \ \ \ \forall x \in [a,b].$$
# - ### Ejemplo
# Considerando $f(x) = ln(x)$ y $p_3 \in P_3$ el polinomio de interporlación de Hermite para los nodos $x_0 = 1$ y $x_1 = 2$.
# Determine una cota de error del polinomio $p_3$.
#
# La cota del error del polinomio de interporlación de Hermite está dado por:
#
# Al tener dos nodos, $n=1$, por lo tanto:
#
# $$|f(x) - p_3 (x)| \leqslant \frac{M_4}{4!}[\pi_4 (x)]^2 = \frac{M_4}{24}(x-1)^2(x-2)^2$$
#
# Observe que:
#
# $$f'(x) = \frac{1}{x}$$
# $$f''(x) = \frac{-1}{x^2}$$
# $$f'''(x) = \frac{2}{x^3}$$
# $$f^{(4)}(x) = \frac{-6}{x^4}$$
#
# Buscando el maximo:
#
# $$M_4 = \max_{x \in [1,2]}|f^{(4)}(x) | = \max_{x \in [1,2]}\{\frac{6}{x^4}\} = \frac{6}{1^4} = 6 $$
#
# Mientras que:
#
# $$[\pi_4(x)]^2 = (x^2-2x+1)(x^2-4x+4)$$
# $$[\pi_4(x)]^2 = x^4-6x^3+13x^2-12x+4$$
#
# Por otro lado:
#
# $$0 = [[\pi_4(x)]^2]'$$
# $$0 = 4x^3 - 18x^2 + 26x - 12$$
# $$0 = 2(x-1)(2x-3)(x-2)$$
#
# Lo que nos permite conocer los puntos criticos que son: $x =1$, $x = \frac{3}{2}$, $ x = 2$
#
# Evaluamos en los extremos y en los puntos críticos:
# - _Observar que el valor máximo siempre será positivo porque está elevado al cuadrado_
#
# $$[\pi_4(1)]^2 = 0$$
# $$[\pi_4(1.5)]^2 = \frac{1}{16}$$
# $$[\pi_4(2)]^2 = 0$$
#
# Se deduce que:
#
# $$[\pi_4(x)]^2 \leqslant \frac{1}{16} \ \ \ \ \ \ \ \ \ \forall x \in [1,2].$$
#
# Por lo tanto se obtiene el valor de la cota:
#
# $$|f(x) - p_3(x)| \leqslant \frac{6}{24} . \frac{1}{16} = {\color{green}{\frac{1}{64}}} \ \ \ \ \ \ \ \ \ \forall x \in [1,2].$$
# ## REFERENCIAS:
# _<NAME> & <NAME> (2010). Hermite Interpolation en Numerical Analysis, novena edición (pp. 136-144). Cengage Learning._ <br/>
# _<NAME>. (2020, 12 octubre). 21. Polinomio de interpolación de Hermite [Vídeo]. YouTube. https://www.youtube.com/watch?v=H57JvLEu8TY&feature=youtu.be_ <br/>
# _<NAME> & <NAME> (2003). Hermite Interpolation en An Introduction to Numerical Analysis, primera edición (pp. 187-191). Cambridge University Pres_
| PolinomioDeInterpolacionDeHermite.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# ---
# # Feature Scaling
# ## What is **Feature Scaling**?
# Feature scaling is a method used to normalize/scale the range of independent variables or features of data. [2]
# The two most discussed scaling methods are Normalization and Standardization. [1]
# + Normalization typically means rescales the values into a range of [0,1].
# + Standardization typically means rescales data to have a mean of 0 and a standard deviation of 1 (unit variance).
# ## Why **Feature Scaling**?
#
# ## Different Scaling Methods
# 1. Standard Scaling
# + Explanation: Subtract the mean from each data point and divide by the standard deviation.
# + Main Effect: Removes the effect of mean and scales the data to unit variance.
# + The scaling shrinks the range of the feature values. The maximum range of the scaled data is **uncertain**.
# + Influence by Outliers: However, the outliers have an influence when computing the empirical mean and standard deviation. Note in particular that because the outliers on each feature have different magnitudes, the spread of the transformed data on each feature could be very different. StandardScaler therefore **cannot guarantee balanced feature scales in the presence of outliers**.
#
# 2. Min-max Scaling
# + Explanation: Subtract the minimum value from each data point and divide by the range.
# + Main Effect: It is the simplest method and consists in rescaling the range of features to scale the range in [0, 1] or [−1, 1]
# + The maximum range of the scaled data is **certain**.
# + Influence by Outliers: Very sensitive to the presence of outliers. For example, when the range is large, the scaled data could shrink a lot more than expectation.
#
# 3. Max-abs Scaling
# + Explanation: Divide each data point by the maximum absolute value of the feature.
# + Main Effect: This estimator scales and translates each feature individually such that the maximal absolute value of each feature in the training set will be 1.0. It does not shift/center the data, and thus does not destroy any sparsity.
# + Similar to min-max scaling. The maximum range of the scaled data is **certain**.
# + Influence by Outliers: Very sensitive to the presence of outliers. For example, when the range is large, the scaled data could shrink a lot more than expectation.
#
# 4. Robust Scaling
# + Explanation: Subtract the median from the data and scale the data according to the **quantile range** (defaults to IQR: Interquartile Range). The IQR is the range between the 1st quartile (25th quantile) and the 3rd quartile (75th quantile).
# + Main Effect: This normalization is based on the fact that for a normal distribution, the interquartile range is approximately 1.349 times the standard deviation. The mean of the scaled data will not be zero, and the information of variance will also be preserved.
# + The maximum range of the scaled data is **uncertain**.
# + Influence by Outliers: The interquartile range is less effected by extremes than the standard deviation. The effect of outliers will not impact the range of the scaled data.
#
# 5. Power Transformation
# + Explanation: In statistics, a power transform is a family of functions applied to create a monotonic transformation of data using power functions. It is a data transformation technique used to stabilize variance, make the data more normal distribution-like, improve the validity of measures of association (such as the Pearson correlation between variables), and for other data stabilization procedures.
# + Main Effect: Applies a power transformation to each feature to make the data more Gaussian-like in order to stabilize variance and minimize skewness.
# + Yeo–Johnson transformation: The Yeo–Johnson transformation also allows for zero and negative values of $y$. $\lambda$ can be any real number, where $\lambda = 1$ produces the identity transformation.
# + Box–Cox transformation:
# + Currently the Yeo-Johnson and Box-Cox transforms are supported in **scikit-learn** and the optimal scaling factor is determined via maximum likelihood estimation in both methods. By default, PowerTransformer applies zero-mean, unit variance normalization. Note that Box-Cox can only be applied to strictly positive data, if negative values are present the Yeo-Johnson transformed is preferred.
#
# ## Reference
#
# 1. [Normalization vs Standardization — Quantitative analysis](https://towardsdatascience.com/normalization-vs-standardization-quantitative-analysis-a91e8a79cebf)
# 2. [Wikipedia - Feature Scaling](https://en.wikipedia.org/wiki/Feature_scaling)
# 3. [Compare the effect of different scalers on data with outliers](https://scikit-learn.org/stable/auto_examples/preprocessing/plot_all_scaling.html#sphx-glr-auto-examples-preprocessing-plot-all-scaling-py)
# 4. [Wikipedia - Power Transform](https://en.wikipedia.org/wiki/Power_transform)
# 5. []()
# +
# Feature Scaling
| Feature Engineering/Feature Scaling.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: citylearn
# language: python
# name: citylearn
# ---
# +
from citylearn import CityLearn
from pathlib import Path
from agent import RL_Agents
import numpy as np
import tensorflow as tf
import gym
import argparse
from collections import deque
import random
from tensorflow.keras.layers import Input, Dense, Flatten, Lambda
from tensorflow.keras.optimizers import Adam
# +
# Select the climate zone and load environment
climate_zone = 1
data_path = Path("data/Climate_Zone_"+str(climate_zone))
building_attributes = data_path / 'building_attributes.json'
weather_file = data_path / 'weather_data.csv'
solar_profile = data_path / 'solar_generation_1kW.csv'
building_state_actions = 'buildings_state_action_space.json'
building_ids = ["Building_1","Building_2","Building_3","Building_4","Building_5","Building_6","Building_7","Building_8","Building_9"]
# Cost components the environment aggregates into its cost signal
objective_function = ['ramping','1-load_factor','average_daily_peak','peak_demand','net_electricity_consumption']
env = CityLearn(data_path, building_attributes, weather_file, solar_profile, building_ids, buildings_states_actions = building_state_actions, cost_function = objective_function)
# Per-building observation and action spaces
observations_spaces, actions_spaces = env.get_state_action_spaces()
# Provides information on Building type, Climate Zone, Annual DHW demand, Annual Cooling Demand, Annual Electricity Demand, Solar Capacity, and correlations among buildings
building_info = env.get_building_information()
# +
tf.keras.backend.set_floatx('float64')  # use double precision throughout Keras
# NOTE(review): parser.parse_args() with no argument reads the Jupyter kernel's
# argv and will normally fail inside a notebook; `args` is also not used by the
# visible code — the literal globals below are. Consider parse_args([]) — TODO confirm.
parser = argparse.ArgumentParser()
parser.add_argument('--gamma', type=float, default=0.95)
parser.add_argument('--lr', type=float, default=0.005)
parser.add_argument('--batch_size', type=int, default=32)
parser.add_argument('--eps', type=float, default=1.0)
parser.add_argument('--eps_decay', type=float, default=0.995)
parser.add_argument('--eps_min', type=float, default=0.01)
args = parser.parse_args()
# -
# Hyperparameters used by the classes below (duplicate the argparse defaults)
gamma = 0.95
lr = 0.005
batch_size = 32
eps = 1.0
eps_decay = 0.995
eps_min = 0.01
class ReplayBuffer:
    """Fixed-capacity FIFO store of (state, action, reward, next_state, done) transitions."""

    def __init__(self, capacity=10000):
        # deque with maxlen silently drops the oldest transition once full
        self.buffer = deque(maxlen=capacity)

    def put(self, state, action, reward, next_state, done):
        """Append one transition to the buffer."""
        self.buffer.append([state, action, reward, next_state, done])

    def sample(self):
        """Draw `batch_size` (module-level) random transitions as stacked arrays."""
        batch = random.sample(self.buffer, batch_size)
        columns = [np.asarray(col) for col in zip(*batch)]
        states, actions, rewards, next_states, done = columns
        # flatten each state to a row vector so the batch is 2-D
        states = np.array(states).reshape(batch_size, -1)
        next_states = np.array(next_states).reshape(batch_size, -1)
        return states, actions, rewards, next_states, done

    def size(self):
        """Number of transitions currently stored."""
        return len(self.buffer)
class ActionStateModel:
    """Q-network wrapper: a small MLP mapping a state vector to one Q-value
    per discrete action, plus an epsilon-greedy action selector."""

    def __init__(self, state_dim, aciton_dim):
        # NOTE(review): parameter name `aciton_dim` is a typo for `action_dim`;
        # kept as-is because renaming would break keyword-argument callers.
        self.state_dim = state_dim
        self.action_dim = aciton_dim
        self.epsilon = eps  # per-instance exploration rate, starts at module-level eps
        self.model = self.create_model()

    def create_model(self):
        """Build and compile the Q-network (state_dim -> 32 -> 16 -> action_dim)."""
        model = tf.keras.Sequential([
            Input((self.state_dim,)),
            Dense(32, activation='relu'),
            Dense(16, activation='relu'),
            Dense(self.action_dim)
        ])
        # NOTE(review): positional Adam(lr) is the legacy TF/Keras signature;
        # newer Keras expects Adam(learning_rate=lr) — confirm against the
        # pinned TensorFlow version.
        model.compile(loss='mse', optimizer=Adam(lr))
        return model

    def predict(self, state):
        """Return the network's Q-value estimates for a batch of states."""
        return self.model.predict(state)

    def get_action(self, state):
        """Epsilon-greedy action for a single state; decays epsilon on every call."""
        state = np.reshape(state, [1, self.state_dim])
        self.epsilon *= eps_decay                  # multiplicative decay per call
        self.epsilon = max(self.epsilon, eps_min)  # clamp to the exploration floor
        q_value = self.predict(state)[0]
        if np.random.random() < self.epsilon:
            # Explore: uniformly random discrete action index.
            return random.randint(0, self.action_dim-1)
        return np.argmax(q_value)  # exploit: greedy action

    def train(self, states, targets):
        """One gradient step fitting the network to the given Q-targets."""
        self.model.fit(states, targets, epochs=1, verbose=0)
class Agent:
    """DQN agent for the CityLearn environment: online + target Q-networks and
    a replay buffer, sized from the environment's observation/action spaces."""

    def __init__(self, building_info, observation_spaces = None, action_spaces = None):
        # NOTE(review): binds the module-level `env`, not a constructor argument.
        self.env = env
        # NOTE(review): the None defaults crash on len(); callers must pass both spaces.
        self.state_dim = len(observation_spaces)
        self.action_dim = len(action_spaces)
        self.model = ActionStateModel(self.state_dim, self.action_dim)         # online network
        self.target_model = ActionStateModel(self.state_dim, self.action_dim)  # target network
        self.target_update()  # start with target weights == online weights
        self.buffer = ReplayBuffer()
        # Parameters
        self.device = "cuda:0"  # NOTE(review): unused — TensorFlow manages devices itself
        self.time_step = 0
        self.building_info = building_info # Can be used to create different RL agents based on basic building attributes or climate zones
        self.observation_spaces = observation_spaces
        self.action_spaces = action_spaces
        self.n_buildings = len(observation_spaces)
        self.networks_initialized = False

    def target_update(self):
        """Copy the online network's weights into the target network."""
        weights = self.model.model.get_weights()
        self.target_model.model.set_weights(weights)

    def replay(self):
        """Run 10 minibatch updates from the replay buffer (standard DQN targets)."""
        for _ in range(10):
            states, actions, rewards, next_states, done = self.buffer.sample()
            targets = self.target_model.predict(states)
            # Bootstrapped target: r + gamma * max_a' Q_target(s', a'), zeroed at episode end.
            next_q_values = self.target_model.predict(next_states).max(axis=1)
            # NOTE(review): assumes `actions` are integer indices — confirm the
            # environment's action space is discrete.
            targets[range(batch_size), actions] = rewards + (1-done) * next_q_values * gamma
            self.model.train(states, targets)

    def train(self, max_episodes=1000):
        """Standard DQN loop: act, store (reward scaled by 0.01), replay, sync target."""
        for ep in range(max_episodes):
            done, total_reward = False, 0
            state = self.env.reset()
            while not done:
                action = self.model.get_action(state)
                # NOTE(review): CityLearn's step() expects one action (list) per
                # building; a single discrete index may only suit single-building
                # setups — verify against the environment version in use.
                next_state, reward, done, _ = self.env.step(action)
                self.buffer.put(state, action, reward*0.01, next_state, done)  # scaled reward for stability
                total_reward += reward
                state = next_state
            if self.buffer.size() >= batch_size:
                self.replay()
            self.target_update()
            print('EP{} EpisodeReward={}'.format(ep, total_reward))
# +
# Select many episodes for training. In the final run we will set this value to 1 (the buildings run for one year)
episodes = 10
k, c = 0, 0                # k: global hour counter; c: print-throttle counter
cost, cum_reward = {}, {}  # per-episode environment cost and cumulative reward
# -
# RL CONTROLLER
# Instantiating the control agent(s)
agents = Agent(building_info, observations_spaces, actions_spaces)
# The number of episodes can be replaced by a stopping criterion (i.e. convergence of the average reward)
# NOTE(review): the loop runs a single episode even though episodes = 10, and it
# only fills the replay buffer — agents.replay()/target_update() are never
# called here, so no learning happens in this cell. Confirm this is intended.
for e in range(1):
    cum_reward[e] = 0
    rewards = []
    state = env.reset()
    done = False
    while not done:
        if k%(1000)==0:
            print('hour: '+str(k)+' of '+str(8760*episodes))
        action = agents.model.get_action(state)
        next_state, reward, done, _ = env.step(action)
        agents.buffer.put(state, action, reward, next_state, done)
        state = next_state
        cum_reward[e] += reward[0]  # NOTE(review): assumes reward is a sequence (one entry per building) — verify
        rewards.append(reward)
        k+=1
    cost[e] = env.cost()  # CityLearn's multi-objective cost for this episode
    if c%20==0:
        print(cost[e])
    c+=1
# --- Scratch / debugging cells ----------------------------------------------
# Sanity checks: dimensionality of the state/action spaces and the last state.
len(observations_spaces)
len(actions_spaces)
state
# NOTE(review): the next four lines were pasted out of
# ActionStateModel.get_action and referenced `self` at module level, which
# raises NameError. Disabled; use agents.model.get_action(state) instead.
# state = np.reshape(state, [1, self.state_dim])
# self.epsilon *= eps_decay
# self.epsilon = max(self.epsilon, eps_min)
# q_value = self.predict(state)[0]
state = np.reshape(state, [1, 9])  # reshape to (1, state_dim); 9 presumably matches the nine buildings — TODO confirm
state
# NOTE(review): predict is an instance method; calling it on the class with the
# state array bound to `self` fails. Disabled — use agents.model.predict(state).
# q=ActionStateModel.predict(state)
action = agents.model.get_action(state)
# NOTE(review): Agent.__init__ expects (building_info, observation_spaces,
# action_spaces); passing a gym env crashes on len(None). Disabled.
# e=gym.make('CartPole-v1')
# agent=Agent(e)
env
env.observation_space
# | Untitled1.ipynb  (stray file-boundary marker from notebook concatenation; commented out so the module parses)
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# language: python
# name: python3
# ---
# # Stock Market Predictions with LSTM
#
# Datacamp Tutorial: https://www.datacamp.com/community/tutorials/lstm-python-stock-market
#
# In this tutorial, you will see how you can use a time-series model known as Long Short-Term Memory (LSTM) models. LSTM models are powerful, especially retaining long-term memory, by their design as you will see later. You'll tackle the following topics in this tutorial:
#
# - Understand why should you need to be able to predict stock prices / movements
# - Download the data - You will be using stock market data gathered from Yahoo finance
# - Split train-test data and data normalization
# - See few averaging techniques that can be used for one-step ahead predictions
# - Motivate and briefly discuss an LSTM model as it allows to predict more than one-step ahead
# - Predict and visualize future stock market with current data
# - Final remarks
#
# ## Why Do You Need Time Series Models?
#
# You would like to model this stock prices correctly, so as a stock buyer you can reasonably decide when to buy stocks and when to sell them to gain profit. This is where time series modelling comes in. You need good machine learning models that can look at the history of a sequence of data and correctly predict the future elements of the sequence are going to be.
#
# **Warning**: Stock market prices are highly unpredictable and volatile. This means that there are no consistent patterns in the data that allows us to model stock prices over time near-perfectly. Don't take it from me, take it from Princeton University economist Burton Malkiel, who argues in his 1973 book, "A Random Walk Down Wall Street," that if the market is truly efficient and a share price reflects all factors immediately as soon as they're made public, a blindfolded monkey throwing darts at a newspaper stock listing should do as well as any investment professional.
#
# However, let's not go all the way believing that this is just a stochastic / random process and no hope for machine learning. Let's see if you can at least model the data, so that the predictions you make correlate with the actual behavior of the data. In other words, you don't need the exact stock values of the future, but the stock price movements (that is, if it is going to rise or fall in the near future).
# Make sure that you have all these libraries available to run the code successfully
from pandas_datareader import data
import matplotlib.pyplot as plt
import pandas as pd
import datetime as dt
import urllib.request, json
import os
import numpy as np
import tensorflow as tf # This code has been tested with TensorFlow 1.6
from sklearn.preprocessing import MinMaxScaler
print (os.path.join('/home/ec2-user/datacamp_tutorials/Reviewed/Stocks','ge.us.txt'))
# +
# Choose the price-data source: the Alpha Vantage REST API, or a local copy of
# the Kaggle "Huge Stock Market Dataset" files.
data_source = 'kaggle' # alphavantage or kaggle
if data_source == 'alphavantage':
    # ====================== Loading Data from Alpha Vantage ==================================
    api_key = '<your API key>'
    # American Airlines stock market prices
    ticker = "AAL"
    # JSON file with all the stock market data for AAL within the last 20 years
    url_string = "https://www.alphavantage.co/query?function=TIME_SERIES_DAILY&symbol=%s&outputsize=full&apikey=%s"%(ticker,api_key)
    # Save data to this file
    file_to_save = 'stock_market_data-%s.csv'%ticker
    # If you haven't already saved data,
    # Go ahead and grab the data from the url
    # And store date, low, high, volume, close, open values to a Pandas dataframe
    if not os.path.exists(file_to_save):
        with urllib.request.urlopen(url_string) as url:
            # NOTE(review): rebinding `data` here shadows the
            # `from pandas_datareader import data` import above.
            data = json.loads(url.read().decode())
            # extract stock market data
            data = data['Time Series (Daily)']
            df = pd.DataFrame(columns=['Date','Low','High','Close','Open'])
            for k,v in data.items():
                date = dt.datetime.strptime(k, '%Y-%m-%d')
                # Alpha Vantage keys its per-day fields "1. open" .. "4. close".
                data_row = [date.date(),float(v['3. low']),float(v['2. high']),
                            float(v['4. close']),float(v['1. open'])]
                # Prepend each row (loc[-1] then shift the index up by one).
                df.loc[-1,:] = data_row
                df.index = df.index + 1
        print('Data saved to : %s'%file_to_save)
        df.to_csv(file_to_save)
    # If the data is already there, just load it from the CSV
    else:
        print('File already exists. Loading data from CSV')
        df = pd.read_csv(file_to_save)
else:
    # ====================== Loading Data from Kaggle ==================================
    # You will be using HP's data. Feel free to experiment with other data.
    # But while doing so, be careful to have a large enough dataset and also pay attention to the data normalization
    # NOTE(review): hard-coded absolute path — adjust to your local dataset location.
    df = pd.read_csv(os.path.join('/home/ec2-user/datacamp_tutorials/Reviewed/Stocks','ge.us.txt'),delimiter=',',usecols=['Date','Open','High','Low','Close'])
    print('Loaded data from the Kaggle repository')
# -
# ### Data Exploration
#
# Here you will print the data you collected in to the dataframe. You also will make sure that the data is sorted by the date. Because the order of data is crucial in time series modelling.
# +
# Sort dataframe by date — time-series order is required by everything below.
df = df.sort_values('Date')
# Double check the result
df.head()
# -
# #### Data Visualization
# Now let's see what sort of data you have at hand. You want data with various patterns occurring over time.
# Plot the mid price (average of daily Low and High) across the whole history.
plt.figure(figsize = (18,9))
plt.plot(range(df.shape[0]),(df['Low']+df['High'])/2.0)
# Label the x axis with every 500th date, rotated for readability.
plt.xticks(range(0,df.shape[0],500),df['Date'].loc[::500],rotation=45)
plt.xlabel('Date',fontsize=18)
plt.ylabel('Mid Price',fontsize=18)
plt.show()
# This graph already says a lot of things. The Specific reason I picked this company over others is that this graph is bursting with different behaviors of stock prices over time. This will make the learning more robust as well as give us a change to test how good the predictions are for a variety of situations.
#
# Another thing to notice is that the values close to 2017 are much higher and highly fluctuating than the values close to 1970s. Therefore you need to make sure that the data behaves in similar value ranges throughout the time frame. You will take care of this during the *data normalization*.
#
# ## Breaking Data to Train and Test and Normalizing Data
#
# You will use the mid price calculated by taking the average of the highest and lowest recorded prices on a day
# First calculate the mid prices from the highest and lowest.
# NOTE: DataFrame/Series .as_matrix() was deprecated in pandas 0.23 and removed
# in pandas 1.0; .to_numpy() is the supported replacement and returns the same
# ndarray of column values here.
high_prices = df.loc[:,'High'].to_numpy()
low_prices = df.loc[:,'Low'].to_numpy()
mid_prices = (high_prices+low_prices)/2.0
# Now you can break the train data and test data. Train data will be the first 11000 data points of the time series and rest will be test data.
train_data = mid_prices[:11000]
test_data = mid_prices[11000:]
# Now you need to define a scaler to normalize data `MinMaxScaler` scales all the data to be in the region of 0 and 1. You also reshape the train and test data to be in the shape `[data_size, num_features]`.
# Scale the data to be between 0 and 1
# When scaling remember! You normalize both test and train data w.r.t training data
# Because you are not supposed to have access to test data
scaler = MinMaxScaler()
# MinMaxScaler expects 2-D input of shape [n_samples, n_features].
train_data = train_data.reshape(-1,1)
test_data = test_data.reshape(-1,1)
# Due to the observation you made earlier, that is, different time periods of data has different value ranges, you normalize data by breaking the full series to windows. If not the earlier data will all be close to 0 and will not add much value to learning. Here you choose a window size of 2500. When choosing the window size make sure it will not be too small. Because when performing windowed-normalization, it can introduce a break at the very end of each window, as each window is normalized independently. In our example, 4 data points will be affected by this. But given you have 11000 data points, 4 points will not cause any issue
# +
# Train the Scaler with training data and smooth data.
# NOTE(review): the hard-coded 10000 and window 2500 mean the loop normalizes
# train_data[0:10000] in four independent windows, and the two lines after the
# loop handle the remaining tail train_data[10000:11000] — these constants are
# tied to the 11000-point train split above.
smoothing_window_size = 2500
for di in range(0,10000,smoothing_window_size):
    scaler.fit(train_data[di:di+smoothing_window_size,:])
    train_data[di:di+smoothing_window_size,:] = scaler.transform(train_data[di:di+smoothing_window_size,:])
# You normalize the last bit of remaining data (reuses `di` left over from the loop)
scaler.fit(train_data[di+smoothing_window_size:,:])
train_data[di+smoothing_window_size:,:] = scaler.transform(train_data[di+smoothing_window_size:,:])
# -
# Reshape data back to the shape of `[data_size]`
# +
# Reshape both train and test data back to 1-D
train_data = train_data.reshape(-1)
# Normalize test data with the scaler last fitted on the final training window
test_data = scaler.transform(test_data).reshape(-1)
# -
# You now smooth data using exponential moving average. This helps us to get rid of the inherent raggedness of the data in stock prices and produce a smoother curve. Note that you should only smooth training data.
# +
# Now perform exponential moving average smoothing
# So the data will have a smoother curve than the original ragged data
EMA = 0.0
gamma = 0.1  # EMA weight: small gamma keeps a long memory of past values
for ti in range(11000):
    EMA = gamma*train_data[ti] + (1-gamma)*EMA
    train_data[ti] = EMA  # smoothing is applied in place, train data only
# Used for visualization and test purposes
all_mid_data = np.concatenate([train_data,test_data],axis=0)
# -
# -
# ## One-Step Ahead Prediction via Averaging
#
# Averaging mechanisms allows us to predict (often one time step ahead) by representing the future stock price as an average of the previously observed stock prices. Doing this for more than one time step can produce quite bad results. You will look at two averaging techniques below; standard averaging and exponential moving average. You will evaluate both qualitatively (visual inspection) and quantitatively (mean squared error) the results produced by two algorithms.
#
# Mean squared error (MSE) can be calculated by taking the mean squared error between the true value at one step ahead and the predicted value and averaging it over all the predictions.
#
# ### Standard Average
#
# Let us understand the difficulty of this problem first by trying to model this as an average calculation problem. That is first you will try to predict the future stock market prices (for example, $x_{t+1}$) as an average of the previously observed stock market prices within a fixed size window (for example, $x_{t-N},\ldots,x_{t}$) (say previous 100 days). Thereafter you will try a bit more fancier "exponential moving average" method and see how well that does. Then you will move on to the "holy-grail" of time-series prediction; Long Short-Term Memory models.
#
# First you will see how normal averaging works. That is you say,
#
# $x_{t+1} = 1/N \sum_{i=t-N}^{t} x_i$
#
# In other words, you say the prediction at $t+1$ is the average value of all the stock prices you observed withing a window of $t$ to $t-N$.
# +
# One-step-ahead prediction by averaging the previous `window_size` prices.
window_size = 100
N = train_data.size
std_avg_predictions = []  # predicted mid price at each step
std_avg_x = []            # matching dates, kept for plotting
mse_errors = []           # squared error of each one-step prediction
for pred_idx in range(window_size,N):
    # pred_idx is always < N inside this range, so the original
    # `if pred_idx >= N:` branch (which also referenced an undefined name `k`)
    # was unreachable dead code and has been removed.
    date = df.loc[pred_idx,'Date']
    std_avg_predictions.append(np.mean(train_data[pred_idx-window_size:pred_idx]))
    mse_errors.append((std_avg_predictions[-1]-train_data[pred_idx])**2)
    std_avg_x.append(date)
print('MSE error for standard averaging: %.5f'%(0.5*np.mean(mse_errors)))
# -
#
# Below you look at the averaged results. Well, it follows the actual behavior of stock quite closely. Next you look at a more accurate one-step prediction method
# +
# Plot the true mid prices against the standard-average one-step predictions
# (predictions only exist from index `window_size` onwards).
plt.figure(figsize = (18,9))
plt.plot(range(df.shape[0]),all_mid_data,color='b',label='True')
plt.plot(range(window_size,N),std_avg_predictions,color='orange',label='Prediction')
#plt.xticks(range(0,df.shape[0],50),df['Date'].loc[::50],rotation=45)
plt.xlabel('Date')
plt.ylabel('Mid Price')
plt.legend(fontsize=18)
plt.show()
# -
# So what does the above graph (and the MSE) say? It seems that it is not too bad of a model for very short predictions (one day ahead). Given that stock prices don't change from 0 to 100 over night, this behavior is sensible. Next you look at a fancier averaging technique known as exponential moving average.
#
# ### Exponential Moving Average (Replicating Some of the so-called amazing stock market prediction learning)
#
# You might have seen some articles in the internet using very complex models and predicting almost the exact behavior of the stock market. But **beware!** all of them I have seen are just optical illusions and not due to learning something useful. You will see below how you can replicate that behavior with a simple averaging method.
#
# In the exponential moving average method, you calculate $x_{t+1}$ as,
#
# * $x_{t+1} = EMA_{t} = \gamma \times EMA_{t-1} + (1-\gamma) x_t$ where $EMA_0 = 0$ and $EMA$ is the exponential moving average value you maintain over time.
#
# The above equation basically calculates the exponential moving average for the $t+1$ time step and uses that as the one step ahead prediction. $\gamma$ decides what is the contribution of the most recent prediction to the EMA. For example, a $\gamma=0.1$ gets only 10% of the current value into the EMA. Because you take only a very small fraction of the most recent, it allows to preserve much older values you saw very early in the average. Let us look how good this looks when used to predict one-step ahead.
#
#
# +
# One-step-ahead prediction with an exponential moving average (decay 0.5).
window_size = 100
N = train_data.size
run_avg_predictions = []  # EMA prediction at each step
run_avg_x = []            # matching dates, kept for plotting
mse_errors = []           # squared error of each one-step prediction
running_mean = 0.0
run_avg_predictions.append(running_mean)  # prediction for index 0 (no history yet)
decay = 0.5
for pred_idx in range(1,N):
    running_mean = running_mean*decay + (1.0-decay)*train_data[pred_idx-1]
    run_avg_predictions.append(running_mean)
    mse_errors.append((run_avg_predictions[-1]-train_data[pred_idx])**2)
    # BUG FIX: the original appended the stale `date` variable left over from
    # the standard-average cell on every iteration; record this step's date.
    run_avg_x.append(df.loc[pred_idx,'Date'])
print('MSE error for EMA averaging: %.5f'%(0.5*np.mean(mse_errors)))
# -
# +
# Plot the true mid prices against the EMA one-step predictions
# (the EMA produces a prediction for every index, starting at 0).
plt.figure(figsize = (18,9))
plt.plot(range(df.shape[0]),all_mid_data,color='b',label='True')
plt.plot(range(0,N),run_avg_predictions,color='orange', label='Prediction')
#plt.xticks(range(0,df.shape[0],50),df['Date'].loc[::50],rotation=45)
plt.xlabel('Date')
plt.ylabel('Mid Price')
plt.legend(fontsize=18)
plt.show()
# -
# ### If Exponential Moving Average is this Good, Why do You Need Better Models?
#
# You see that it fits a perfect line that follows the True distribution (and justified by the very low MSE). Practically speaking, you can't do much with just the stock market value of the next day. Personally what I'd like is not the exact stock market price for the next day, but *would the stock market prices go up or down in the next 30 days*. Try to do this, and you will expose the incapability of EMA method.
#
# Let us try to make predictions in windows (say you predict next 2 days window, instead of just next day). Then you will realize how wrong EMA can go. Let us understand this through an example.
#
# ### Think About What Happens when You Need to Predict More Than One Step into the Future
# To make things concrete, let us assume values, say $x_t=0.4$, $EMA=0.5$ and $\gamma = 0.5$
# * Say you get the output with the following equation
# * $x_{t+1} = EMA_t = \gamma \times EMA_{t-1} + (1-\gamma) x_t$
# * So you have $x_{t+1} = 0.5 \times 0.5 + (1-0.5) \times 0.4 = 0.45$
# * So $x_{t+1} = EMA_t = 0.45$
# * So the next prediction $x_{t+2}$ becomes,
# * $x_{t+2} = \gamma \times EMA_t + (1-\gamma) x_{t+1}$
# * Which is $x_{t+2} = \gamma \times EMA_t + (1-\gamma) EMA_t = EMA_t$
# * Or in this example, $x_{t+2} = x_{t+1} = 0.45 $
#
# So no matter how many steps you predict in to the future, you'll keep getting the same answer for all the future prediction steps.
#
# One solution you have that will output useful information is to look at **momentum-based algorithms**. They make predictions based on whether the past recent values were going up or going down (not the exact values). For example, they will say the next day price is likely to be lower, if the prices have been dropping for that past days. Which sounds reasonable. However you will use a more complex model; an LSTM model. LSTM models have taken the realm of time series prediction by a storm! Because they are so good at modelling time series data. You will see if there actually exists patterns hidden in the data that the LSTM can exploit.
#
# ## Introduction to LSTM: Making Stock Movement Predictions Far into the Future
#
# Long Short-Term Memory (LSTM) models are extremely powerful time-series models. A LSTM can predict an arbitrary number of steps into the future. A LSTM module (or a cell) has 5 essential components which allows them to model both long-term and short-term data.
# * Cell state ($c_t$) - This represents the internal memory of the cell which stores both short term memory and long-term memories
# * Hidden state ($h_t$) - This is output state information calculated w.r.t. current input, previous hidden state and current cell input which you eventually use to predict the future stock market prices. Additionally, the hidden state can decide to only retrive the short or long-term or both types of memory stored in the cell state to make the next prediction.
# * Input gate ($i_t$) - Decides how much information from current input flows to the cell state
# * Forget gate ($f_t$) - Decides how much information from the current input and the previous cell state flows into the current cell state
# * Output gate ($o_t$) - Decides how much information from the current cell state flows into the hidden state, so that if needed LSTM can only pick the long-term memories or short-term memories and long-term memories
#
# An LSTM cell looks like below.
#
# <img src="lstm.png" alt="Drawing" style="width: 400px;"/>
#
# And the equations for calculating each of these entities are as follows.
#
# * $i_t = \sigma(W_{ix}x_t + W_{ih}h_{t-1}+b_i)$
# * $\tilde{c}_t = \sigma(W_{cx}x_t + W_{ch}h_{t-1} + b_c)$
# * $f_t = \sigma(W_{fx}x_t + W_{fh}h_{t-1}+b_f)$
# * $c_t = f_t c_{t-1} + i_t \tilde{c}_t$
# * $o_t = \sigma(W_{ox}x_t + W_{oh}h_{t-1}+b_o)$
# * $h_t = o_t tanh(c_t)$
#
# For a better (more technical) understanding about LSTMs you can refer http://colah.github.io/posts/2015-08-Understanding-LSTMs/
#
# Tensorflow provides a nice sub API (called RNN API) for implementing time series models. You will be using that for our implementations.
#
# ### Data Generator for LSTM
#
# You are first going to implement a data generator to train our LSTM. This data generator will have a method called **unroll_batches(...)** which will output a set of *num_unrollings* batches of input data obtained sequentially, where a batch of data is of size *[batch_size, 1]*. Then each batch of input data will have a corresponding output batch of data.
#
# For example if *num_unrollings=3* and *batch_size=4* a set of unrolled batches it might look like,
# * input data: $[x_0,x_{10},x_{20},x_{30}], [x_1,x_{11},x_{21},x_{31}], [x_2,x_{12},x_{22},x_{32}]$
# * output data: $[x_1,x_{11},x_{21},x_{31}], [x_2,x_{12},x_{22},x_{32}], [x_3,x_{13},x_{23},x_{33}]$
#
# #### Data Augmentation
# Also to make our model robust you will not make the output for $x_t$ always $x_{t+1}$. Rather you will randomly sample an output from the set $x_{t+1},x_{t+2},\ldots,x_{t+N}$ where $N$ is a small window size. Here you are making the following assumption.
#
# * $x_{t+1},x_{t+2},\ldots,x_{t+N}$ will not be very far from each other
#
# I personally think is a reasonable assumption for stock movement prediction.
#
# Below you illustrate how a batch of data is created visually.
#
# <img src="batch.png" alt="Drawing" style="width: 600px;"/>
# +
class DataGeneratorSeq(object):
    """Sequential batch generator for LSTM training.

    Maintains one read cursor per batch element; each cursor walks its own
    contiguous segment of `prices`, so every batch mixes `batch_size`
    different regions of the series.
    """

    def __init__(self,prices,batch_size,num_unroll):
        self._prices = prices
        # Last usable start index: labels may look up to a few steps ahead.
        self._prices_length = len(self._prices) - num_unroll
        self._batch_size = batch_size
        self._num_unroll = num_unroll
        # Length of the segment assigned to each batch element.
        self._segments = self._prices_length //self._batch_size
        # One cursor per batch element, spaced one segment apart.
        self._cursor = [offset * self._segments for offset in range(self._batch_size)]

    def next_batch(self):
        """Return one (inputs, labels) pair, each of shape (batch_size,).

        Data augmentation: each label is sampled uniformly from the 5 steps
        at/after its cursor rather than always the immediate successor.
        """
        batch_data = np.zeros((self._batch_size),dtype=np.float32)
        batch_labels = np.zeros((self._batch_size),dtype=np.float32)
        for b in range(self._batch_size):
            if self._cursor[b]+1>=self._prices_length:
                # Cursor ran off the usable range: restart at a random point
                # anywhere up to the end of this element's segment.
                #self._cursor[b] = b * self._segments
                self._cursor[b] = np.random.randint(0,(b+1)*self._segments)
            batch_data[b] = self._prices[self._cursor[b]]
            batch_labels[b]= self._prices[self._cursor[b]+np.random.randint(0,5)]
            self._cursor[b] = (self._cursor[b]+1)%self._prices_length
        return batch_data,batch_labels

    def unroll_batches(self):
        """Return `num_unroll` consecutive (inputs, labels) batch pairs."""
        # (The original also initialized unused locals init_data/init_label;
        # they were dead code and have been removed.)
        unroll_data,unroll_labels = [],[]
        for ui in range(self._num_unroll):
            data, labels = self.next_batch()
            unroll_data.append(data)
            unroll_labels.append(labels)
        return unroll_data, unroll_labels

    def reset_indices(self):
        """Re-randomize every cursor within its own segment."""
        for b in range(self._batch_size):
            self._cursor[b] = np.random.randint(0,min((b+1)*self._segments,self._prices_length-1))
# Smoke-test the generator: unroll a few batches and print them.
dg = DataGeneratorSeq(train_data, 5, 5)
u_data, u_labels = dg.unroll_batches()

for ui, (dat, lbl) in enumerate(zip(u_data, u_labels)):
    print('\n\nUnrolled index %d' % ui)
    print('\tInputs: ', dat)
    print('\n\tOutput:', lbl)
# -
# ### Defining Hyperparameters
#
# Here you define several hyperparameters. `D` is the dimensionality of the input. It's straightforward as you take the previous stock price as the input and predict the next, and should be 1. Then you have `num_unrollings`, this is a hyperparameter related to the backpropagation through time (BPTT) that is used to optimize the LSTM model. This denotes how many continuous time steps you consider for a single optimization step. You can think of this as, instead of optimizing the LSTM by looking at a single time step, you optimize the network looking at `num_unrollings` time steps. The larger, the better. Then you have the `batch_size`. Batch size is how many data samples you consider in a single time step. Next you define `num_nodes` which represents the number of hidden neurons in each LSTM cell. You can see that there are three layers of LSTMs in this example.
# +
D = 1 # Dimensionality of the data. Since our data is 1-D this would be 1
num_unrollings = 50 # Number of time steps the LSTM is unrolled for (the BPTT window length)
batch_size = 500 # Number of samples in a batch
num_nodes = [200,200,150] # Number of hidden nodes in each layer of the deep LSTM stack we're using
n_layers = len(num_nodes) # number of layers
dropout = 0.2 # dropout amount applied to LSTM outputs and states during training
tf.reset_default_graph() # This is important in case you run this multiple times
# -
# ### Defining Inputs and Outputs
#
# Next you define placeholders for training inputs and labels. This is very straightforward as you have a list of input placeholders, where each placeholder contains a single batch of data. And the list has `num_unrollings` placeholders, that will be used at once for a single optimization step.
# +
# Input data.
# One placeholder per unrolled time step; each holds a [batch_size, D] batch
# and has a matching [batch_size, 1] label placeholder.
train_inputs, train_outputs = [],[]
# You unroll the input over time defining placeholders for each time step
for ui in range(num_unrollings):
    train_inputs.append(tf.placeholder(tf.float32, shape=[batch_size,D],name='train_inputs_%d'%ui))
    train_outputs.append(tf.placeholder(tf.float32, shape=[batch_size,1], name = 'train_outputs_%d'%ui))
# -
# ### Defining Parameters of the LSTM and Regression layer
#
# You will have a three layers of LSTMs and a linear regression layer (denoted by `w` and `b`), that takes the output of the last LSTM cell and output the prediction for the next time step. You can use the `MultiRNNCell` in TensorFlow to encapsualate the three `LSTMCell` objects you created. Additionally you can have the dropout implemented LSTM cells, as they improve performance and reduce overfitting.
# +
# Three LSTM layers (sizes from num_nodes), Xavier-initialized.
lstm_cells = [
    tf.contrib.rnn.LSTMCell(num_units=num_nodes[li],
                            state_is_tuple=True,
                            initializer= tf.contrib.layers.xavier_initializer()
                           )
 for li in range(n_layers)]
# Wrap each cell with dropout on outputs and state (inputs left untouched).
drop_lstm_cells = [tf.contrib.rnn.DropoutWrapper(
    lstm, input_keep_prob=1.0,output_keep_prob=1.0-dropout, state_keep_prob=1.0-dropout
) for lstm in lstm_cells]
drop_multi_cell = tf.contrib.rnn.MultiRNNCell(drop_lstm_cells)  # used for training
multi_cell = tf.contrib.rnn.MultiRNNCell(lstm_cells)  # dropout-free stack used for prediction
# Linear regression head mapping the last LSTM layer's output to one price.
w = tf.get_variable('w',shape=[num_nodes[-1], 1], initializer=tf.contrib.layers.xavier_initializer())
b = tf.get_variable('b',initializer=tf.random_uniform([1],-0.1,0.1))
# -
# ### Calculating LSTM output and Feeding it to the regression layer to get final prediction
#
# In this section, you first create TensorFlow variables (`c` and `h`) that will hold the cell state and the hidden state of the LSTM. Then you transform the list of `train_inputs` to have a shape of `[num_unrollings, batch_size, D]`, this is needed for calculating the outputs with the `tf.nn.dynamic_rnn` function. You then calculate the LSTM outputs with the `tf.nn.dynamic_rnn` function, split the output back to a list of `num_unrollings` tensors, and feed it through the regression layer so that the loss between the predictions and true stock prices can be computed.
# +
# Create cell state and hidden state variables to maintain the state of the LSTM
# (non-trainable; they carry state across successive unrolled windows).
c, h = [],[]
initial_state = []
for li in range(n_layers):
  c.append(tf.Variable(tf.zeros([batch_size, num_nodes[li]]), trainable=False))
  h.append(tf.Variable(tf.zeros([batch_size, num_nodes[li]]), trainable=False))
  initial_state.append(tf.contrib.rnn.LSTMStateTuple(c[li], h[li]))
# Do several tensor transformations, because the function dynamic_rnn requires the output to be of
# a specific format. Read more at: https://www.tensorflow.org/api_docs/python/tf/nn/dynamic_rnn
# Stack the per-step placeholders into one [num_unrollings, batch_size, D] tensor.
all_inputs = tf.concat([tf.expand_dims(t,0) for t in train_inputs],axis=0)
# all_outputs is [seq_length, batch_size, num_nodes]
all_lstm_outputs, state = tf.nn.dynamic_rnn(
    drop_multi_cell, all_inputs, initial_state=tuple(initial_state),
    time_major = True, dtype=tf.float32)
# Flatten time and batch so a single matmul applies the regression head everywhere.
all_lstm_outputs = tf.reshape(all_lstm_outputs, [batch_size*num_unrollings,num_nodes[-1]])
all_outputs = tf.nn.xw_plus_b(all_lstm_outputs,w,b)
# Split back into num_unrollings tensors of shape [batch_size, 1], one per time step.
split_outputs = tf.split(all_outputs,num_unrollings,axis=0)
# -
# ### Loss Calculation and Optimizer
#
# Here you calculate the loss. However note that there is a unique characteristic when calculating the loss. For each batch of predictions and true outputs, you calculate the mean squared error. And you sum (not average) all these mean squared losses together. Finally you define the optimizer you're going to use to optimize the LSTM. Here you can use Adam, which is a very recent and well-performing optimizer.
# +
# When calculating the loss you need to be careful about the exact form, because you calculate
# loss of all the unrolled steps at the same time
# Therefore, take the mean error of each batch and get the sum of that over all the unrolled steps
print('Defining training Loss')
loss = 0.0
# Saving the final LSTM state back into c/h is forced to happen before the loss
# is computed, so state carries over between successive unrolled windows.
with tf.control_dependencies([tf.assign(c[li], state[li][0]) for li in range(n_layers)]+
                             [tf.assign(h[li], state[li][1]) for li in range(n_layers)]):
  for ui in range(num_unrollings):
    loss += tf.reduce_mean(0.5*(split_outputs[ui]-train_outputs[ui])**2)
print('Learning rate decay operations')
global_step = tf.Variable(0, trainable=False)
inc_gstep = tf.assign(global_step,global_step + 1)  # stepping this halves the learning rate (decay_rate=0.5)
tf_learning_rate = tf.placeholder(shape=None,dtype=tf.float32)
tf_min_learning_rate = tf.placeholder(shape=None,dtype=tf.float32)
# Exponentially decay the learning rate with global_step, but never go below the minimum.
learning_rate = tf.maximum(
    tf.train.exponential_decay(tf_learning_rate, global_step, decay_steps=1, decay_rate=0.5, staircase=True),
    tf_min_learning_rate)
# Optimizer.
print('TF Optimization operations')
optimizer = tf.train.AdamOptimizer(learning_rate)
gradients, v = zip(*optimizer.compute_gradients(loss))
gradients, _ = tf.clip_by_global_norm(gradients, 5.0)  # clip to tame exploding gradients
optimizer = optimizer.apply_gradients(
    zip(gradients, v))
print('\tAll done')
# -
# ### Prediction Related Calculations
#
# Here you define the prediction related TensorFlow operations. First define a placeholder for feeding in the input (`sample_inputs`), then similar to the training stage, you define state variables for prediction (`sample_c` and `sample_h`). Finally you calculate the prediction with the `tf.nn.dynamic_rnn` function and then send the output through the regression layer (`w` and `b`). You also should define the `reset_sample_state` operation, that resets the cell state and the hidden state of the LSTM. You should execute this operation at the start, every time you make a sequence of predictions.
#
# +
print('Defining prediction related TF functions')
# Single-sample input placeholder: prediction feeds one price at a time.
sample_inputs = tf.placeholder(tf.float32, shape=[1,D])
# Maintaining LSTM state for prediction stage
sample_c, sample_h, initial_sample_state = [],[],[]
for li in range(n_layers):
  sample_c.append(tf.Variable(tf.zeros([1, num_nodes[li]]), trainable=False))
  sample_h.append(tf.Variable(tf.zeros([1, num_nodes[li]]), trainable=False))
  initial_sample_state.append(tf.contrib.rnn.LSTMStateTuple(sample_c[li],sample_h[li]))
# Zero the prediction-time state; run this before each new prediction sequence.
reset_sample_states = tf.group(*[tf.assign(sample_c[li],tf.zeros([1, num_nodes[li]])) for li in range(n_layers)],
                               *[tf.assign(sample_h[li],tf.zeros([1, num_nodes[li]])) for li in range(n_layers)])
# Note: the dropout-free multi_cell is used at prediction time.
sample_outputs, sample_state = tf.nn.dynamic_rnn(multi_cell, tf.expand_dims(sample_inputs,0),
                                   initial_state=tuple(initial_sample_state),
                                   time_major = True,
                                   dtype=tf.float32)
# Persist the updated state before emitting the prediction.
with tf.control_dependencies([tf.assign(sample_c[li],sample_state[li][0]) for li in range(n_layers)]+
                              [tf.assign(sample_h[li],sample_state[li][1]) for li in range(n_layers)]):
  sample_prediction = tf.nn.xw_plus_b(tf.reshape(sample_outputs,[1,-1]), w, b)
print('\tAll done')
# -
# ### Running the LSTM
#
# Here you will train and predict stock price movements for several epochs and see whether the predictions get better or worse over time. You follow the following procedure.
# * Define a test set of starting points (`test_points_seq`) on the time series to evaluate the LSTM at
# * For each epoch
# * For full sequence length of training data
# * Unroll a set of `num_unrollings` batches
# * Train the LSTM with the unrolled batches
# * Calculate the average training loss
# * For each starting point in the test set
# * Update the LSTM state by iterating through the previous `num_unrollings` data points found before the test point
# * Make predictions for `n_predict_once` steps continuously, using the previous prediction as the current input
# * Calculate the MSE loss between the `n_predict_once` points predicted and the true stock prices at those time stamps
# +
epochs = 30
valid_summary = 1 # Interval you make test predictions
n_predict_once = 50 # Number of steps you continuously predict for
train_seq_length = train_data.size # Full length of the training data
train_mse_ot = [] # Accumulate Train losses
test_mse_ot = [] # Accumulate Test loss
predictions_over_time = [] # Accumulate predictions
session = tf.InteractiveSession()
tf.global_variables_initializer().run()
# Used for decaying learning rate
loss_nondecrease_count = 0
loss_nondecrease_threshold = 2 # If the test error hasn't decreased in this many steps, decrease learning rate
print('Initialized')
average_loss = 0
# Define data generator
data_gen = DataGeneratorSeq(train_data,batch_size,num_unrollings)
x_axis_seq = []
# Points you start our test predictions from
test_points_seq = np.arange(11000,12000,50).tolist()
for ep in range(epochs):
    # ========================= Training =====================================
    for step in range(train_seq_length//batch_size):
        u_data, u_labels = data_gen.unroll_batches()
        feed_dict = {}
        for ui,(dat,lbl) in enumerate(zip(u_data,u_labels)):
            feed_dict[train_inputs[ui]] = dat.reshape(-1,1)
            feed_dict[train_outputs[ui]] = lbl.reshape(-1,1)
        feed_dict.update({tf_learning_rate: 0.0001, tf_min_learning_rate:0.000001})
        _, l = session.run([optimizer, loss], feed_dict=feed_dict)
        average_loss += l
    # ============================ Validation ==============================
    if (ep+1) % valid_summary == 0:
        average_loss = average_loss/(valid_summary*(train_seq_length//batch_size))
        # The average loss
        if (ep+1)%valid_summary==0:
            print('Average loss at step %d: %f' % (ep+1, average_loss))
        train_mse_ot.append(average_loss)
        average_loss = 0 # reset loss
        predictions_seq = []
        mse_test_loss_seq = []
        # ===================== Updating State and Making Predictions ========================
        for w_i in test_points_seq:
            mse_test_loss = 0.0
            our_predictions = []
            if (ep+1)-valid_summary==0:
                # Only calculate x_axis values in the first validation epoch
                x_axis=[]
            # Feed in the recent past behavior of stock prices
            # to make predictions from that point onwards.
            # This warms up the LSTM state; the predictions themselves are discarded.
            for tr_i in range(w_i-num_unrollings+1,w_i-1):
                current_price = all_mid_data[tr_i]
                feed_dict[sample_inputs] = np.array(current_price).reshape(1,1)
                _ = session.run(sample_prediction,feed_dict=feed_dict)
            feed_dict = {}
            current_price = all_mid_data[w_i-1]
            feed_dict[sample_inputs] = np.array(current_price).reshape(1,1)
            # Make predictions for this many steps
            # Each prediction uses the previous prediction as its current input
            for pred_i in range(n_predict_once):
                pred = session.run(sample_prediction,feed_dict=feed_dict)
                our_predictions.append(np.asscalar(pred))
                feed_dict[sample_inputs] = np.asarray(pred).reshape(-1,1)
                if (ep+1)-valid_summary==0:
                    # Only calculate x_axis values in the first validation epoch
                    x_axis.append(w_i+pred_i)
                mse_test_loss += 0.5*(pred-all_mid_data[w_i+pred_i])**2
            session.run(reset_sample_states)
            predictions_seq.append(np.array(our_predictions))
            mse_test_loss /= n_predict_once
            mse_test_loss_seq.append(mse_test_loss)
            if (ep+1)-valid_summary==0:
                x_axis_seq.append(x_axis)
        current_test_mse = np.mean(mse_test_loss_seq)
        # Learning rate decay logic
        # NOTE(review): decaying the LR on *test* loss leaks test information into
        # training, as the tutorial's final remarks acknowledge; a separate
        # validation set would be cleaner.
        if len(test_mse_ot)>0 and current_test_mse > min(test_mse_ot):
            loss_nondecrease_count += 1
        else:
            loss_nondecrease_count = 0
        if loss_nondecrease_count > loss_nondecrease_threshold :
            session.run(inc_gstep)
            loss_nondecrease_count = 0
            print('\tDecreasing learning rate by 0.5')
        test_mse_ot.append(current_test_mse)
        print('\tTest MSE: %.5f'%np.mean(mse_test_loss_seq))
        predictions_over_time.append(predictions_seq)
        print('\tFinished Predictions')
# -
# ## Visualizing the LSTM Predictions
#
# You can see how the MSE loss is going down with the amount of training. This is good sign that the model is learning something useful. To quantify your findings, you can compare the LSTM's MSE loss to the MSE loss you obtained when doing the standard averaging (0.004). You can see that the LSTM is doing better than the standard averaging. And you know that standard averaging (though not perfect) followed the true stock prices movements reasonably.
# +
best_prediction_epoch = 28 # replace this with the epoch that you got the best results when running the plotting code
plt.figure(figsize = (18,18))
# Top panel: evolution of predictions over training (every 3rd epoch),
# older epochs drawn with lower alpha so newer ones stand out.
plt.subplot(2,1,1)
plt.plot(range(df.shape[0]),all_mid_data,color='b')
# Plotting how the predictions change over time
# Plot older predictions with low alpha and newer predictions with high alpha
start_alpha = 0.25
alpha = np.arange(start_alpha,1.1,(1.0-start_alpha)/len(predictions_over_time[::3]))
for p_i,p in enumerate(predictions_over_time[::3]):
    for xval,yval in zip(x_axis_seq,p):
        plt.plot(xval,yval,color='r',alpha=alpha[p_i])
plt.title('Evolution of Test Predictions Over Time',fontsize=18)
plt.xlabel('Date',fontsize=18)
plt.ylabel('Mid Price',fontsize=18)
plt.xlim(11000,12500)
# Bottom panel: predictions from the single best epoch only.
plt.subplot(2,1,2)
# Predicting the best test prediction you got
plt.plot(range(df.shape[0]),all_mid_data,color='b')
for xval,yval in zip(x_axis_seq,predictions_over_time[best_prediction_epoch]):
    plt.plot(xval,yval,color='r')
plt.title('Best Test Predictions Over Time',fontsize=18)
plt.xlabel('Date',fontsize=18)
plt.ylabel('Mid Price',fontsize=18)
plt.xlim(11000,12500)
plt.show()
# -
# Though not perfect, LSTM seems to be able to predict stock price behavior correctly most of the time. Note that you are making predictions roughly in the range of 0 and 1.0 (that is, not the true stock prices). This is okay, because you're predicting the stock price movement, not the prices themselves.
# ## Final Remarks
#
# I'm hoping the readers will find this tutorial useful. I should mention that this was a rewarding experience for me. In this tutorial, I learnt how difficult it can be to devise a model that is able to correctly predict stock price movements. You started with a motivation for why you need to model stock prices. This was followed by an explanation and code for downloading data. Then you looked at two averaging techniques that allow us to make predictions one step into the future. You next saw that these methods are futile when you need to predict more than one step into the future. Thereafter you discussed how you can use LSTMs to make predictions many steps into the future. Finally you visualized the results and saw that our model (though not perfect) is quite good at correctly predicting stock price movements.
#
# Here, I'm stating serveral takeaways of this tutorial.
#
# 1. Stock price/movement prediction is an extremely difficult task. Personally I don't think any of the stock prediction models out there should be taken for granted or relied on blindly. However, models might be able to predict stock price movement correctly most of the time, but not always.
#
# 2. Do not be fooled by articles out there that shows predictions curves that perfectly overlaps the true stock prices. This can be replicated with a simple averaging technique and in practice it's useless. A more sensible thing to do is predicting the stock price movements.
#
# 3. The LSTM model hyperparameters are extremely sensitive to the results you obtain. So a very good thing to do would be run some hyperparameter optimization technique (for example, Grid search / Random search) on the hyperparameters. Below I list some of the most critical hyperparameters
# * The learning rate of the optimizer
# * Number of layers and the number of hidden units in each layer
# * The optimizer. I found Adam to perform the best
# * Type of the model. You can try GRU/ Standard LSTM/ LSTM with Peepholes and evaluation performance difference
#
# 4. In this tutorial you did something faulty (due to the small size of data)! That is you used the test loss to decay the learning rate. This indirectly leaks information about test set into the training procedure. A better way of handling this is to have a separate validation set (apart from the test set) and decay learning rate with respect to performance of the validation set.
#
# ## Author Details
# * Author: <NAME>
# * Email: <EMAIL>
# * Website: http://www.thushv.com/
# * LinkedIn: https://www.linkedin.com/in/thushanganegedara/
#
# Datacamp Tutorial: https://www.datacamp.com/community/tutorials/lstm-python-stock-market
#
# If you find this tutorial useful, **don't forget to upvote the post at Datacamp** :)
#
# ## References
#
# I referred the following code to get an understanding about how to use LSTMs for stock predictions. But details can be vastly different from the implementation found in the reference.
#
# [1] https://github.com/jaungiers/LSTM-Neural-Network-for-Time-Series-Prediction
| lstm_stock_market_prediction.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# language: python
# name: python3
# ---
# +
# %matplotlib inline
import sys
import os
from matplotlib import rc
import matplotlib.pyplot as plt
import numpy as np
sys.path.append(os.path.join(sys.path[0], '..'))
import kernels
from regressor import GaussianProcessRegressor
# -
np.random.seed(0)  # fixed seed so the sampled GP draws are reproducible
plt.rcParams.update({'figure.figsize': (20, 10), 'font.size': 16})
rc('font', **{'family': 'serif'})
rc('text', usetex=True)  # requires a working LaTeX installation for rendering
# # Gaussian Process Regression
# <hr/>
# Very simple use case of the tutorial implementation of GP regression provided in `regressor.py`.
#
# Since writing this I have updated the plotting methods of the GaussianProcessRegressor, so no need to do plots like this manually.
# Ground truth
@np.vectorize
def f(x):
    """Ground-truth target function: f(x) = x * sin(x), applied elementwise."""
    return x * np.sin(x)
# generate some training data
grid = np.linspace(-10, 10, 1000).reshape(-1, 1).astype(np.float64)
train_x = np.linspace(-5, 5, 10).reshape(-1, 1).astype(np.float64)
train_y = f(train_x)
# y-values observed with noise of this amplitude
noise_level = 0.01
plt.plot(train_x, train_y,
marker='+',
linestyle='none',
markersize=15,
label='Training Data')
plt.plot(grid, f(grid), label='Ground Truth')
plt.legend(loc='upper left', ncol=2, fontsize=20)
plt.title('Training Data', fontsize=30);
# Draw samples from the GP prior (before seeing any data) using an RBF kernel.
gp = GaussianProcessRegressor(kernel=kernels.RadialBasisFunction(length_scale=1.))
prior_samples, prior_stds = gp.sample_prior(grid, size=5, return_std=True)
plt.plot(grid, prior_samples, label='Sample from Prior');
ax = plt.gca()
# The prior mean is zero, so the 95% band (1.96 std devs) is centred on 0.
ax.fill_between(x=grid.squeeze(),
                y1=0. - 1.96 * prior_stds,
                y2=0. + 1.96 * prior_stds,
                color='gray',
                alpha=0.25,
                label=r'95\% Confidence')
plt.grid(alpha=0.1)
plt.title('Draws from the Prior Process', fontsize=30)
plt.legend(bbox_to_anchor=(1, 1), loc='upper left');
# Condition the GP on the noisy observations, then sample from the posterior.
gp.fit(train_x, train_y, noise_level=noise_level)
prediction, stds = gp.predict(grid, return_std=True)
post_samples = gp.sample_posterior(grid, size=5)
plt.plot(train_x, train_y,
         marker='x',
         linestyle='none',
         color='k',
         markersize=15,
         zorder=100,
         label='Training Data')
plt.plot(grid, prediction, color='k', label='Prediction')
plt.plot(grid, post_samples, label='Sample from posterior');
# 95% confidence band around the posterior mean (1.96 standard deviations).
plt.fill_between(x=grid.squeeze(),
                 y1=prediction.squeeze() - 1.96 * stds,
                 y2=prediction.squeeze() + 1.96 * stds,
                 color='gray',
                 alpha=0.25,
                 label=r'95\% Confidence')
plt.grid(alpha=0.1)
plt.title('Draws from the Posterior Process', fontsize=30)
plt.legend(bbox_to_anchor=(1, 1), loc='upper left');
| examples/GaussianProcessRegression.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# language: python
# name: python3
# ---
# ## DCGAN Overview
#
# <img src="https://camo.githubusercontent.com/45e147fc9dfcf6a8e5df2c9b985078258b9974e3/68747470733a2f2f63646e2d696d616765732d312e6d656469756d2e636f6d2f6d61782f313030302f312a33394e6e6e695f6e685044614c7539416e544c6f57772e706e67" alt="dcgan" style="width: 1000px;"/>
#
# References:
# - [Unsupervised representation learning with deep convolutional generative adversarial networks](https://arxiv.org/pdf/1511.06434). <NAME>, <NAME>, <NAME>, 2016.
# - [Understanding the difficulty of training deep feedforward neural networks](http://proceedings.mlr.press/v9/glorot10a.html). <NAME>, <NAME>. Aistats 9, 249-256
# - [Batch Normalization: Accelerating Deep Network Training by Reducing Internal Covariate Shift](https://arxiv.org/abs/1502.03167). <NAME>, <NAME>. 2015.
#
# ## MNIST Dataset Overview
#
# This example is using MNIST handwritten digits. The dataset contains 60,000 examples for training and 10,000 examples for testing. The digits have been size-normalized and centered in a fixed-size image (28x28 pixels) with values from 0 to 255.
#
# In this example, each image will be converted to float32 and normalized from [0, 255] to [0, 1].
#
# 
#
# More info: http://yann.lecun.com/exdb/mnist/
# +
from __future__ import absolute_import, division, print_function
import tensorflow as tf
from tensorflow.keras import Model, layers
import numpy as np
# +
# MNIST Dataset parameters.
num_features = 784 # data features (img shape: 28*28). NOTE(review): appears unused in this notebook; kept for reference.
# Training parameters.
lr_generator = 0.0002
lr_discriminator = 0.0002
training_steps = 20000
batch_size = 128
display_step = 500  # log losses every this many steps
# Network parameters.
noise_dim = 100 # Noise data points (dimensionality of the generator's input vector).
# -
# Prepare MNIST data.
from tensorflow.keras.datasets import mnist
(x_train, y_train), (x_test, y_test) = mnist.load_data()
# Convert to float32.
x_train, x_test = np.array(x_train, np.float32), np.array(x_test, np.float32)
# Normalize images value from [0, 255] to [0, 1].
x_train, x_test = x_train / 255., x_test / 255.
# Use tf.data API to shuffle and batch data.
# repeat() makes the dataset infinite; prefetch(1) overlaps data prep with training.
train_data = tf.data.Dataset.from_tensor_slices((x_train, y_train))
train_data = train_data.repeat().shuffle(10000).batch(batch_size).prefetch(1)
# +
# Create TF Model.
class Generator(Model):
    """DCGAN generator: maps a noise vector to a 28x28x1 image in [-1, 1]."""

    def __init__(self):
        super(Generator, self).__init__()
        # Dense projection of the noise, then two transposed convolutions
        # upsample 7x7 -> 14x14 -> 28x28.
        self.fc1 = layers.Dense(7 * 7 * 128)
        self.bn1 = layers.BatchNormalization()
        self.conv2tr1 = layers.Conv2DTranspose(64, 5, strides=2, padding='SAME')
        self.bn2 = layers.BatchNormalization()
        self.conv2tr2 = layers.Conv2DTranspose(1, 5, strides=2, padding='SAME')

    def call(self, x, is_training=False):
        """Forward pass; `is_training` toggles batch-norm train/inference mode."""
        h = self.fc1(x)
        h = tf.nn.leaky_relu(self.bn1(h, training=is_training))
        # (batch, 7*7*128) -> (batch, 7, 7, 128)
        h = tf.reshape(h, shape=[-1, 7, 7, 128])
        # Deconvolution, image shape: (batch, 14, 14, 64)
        h = tf.nn.leaky_relu(self.bn2(self.conv2tr1(h), training=is_training))
        # Deconvolution, image shape: (batch, 28, 28, 1), squashed to [-1, 1]
        return tf.nn.tanh(self.conv2tr2(h))
# Generator Network
# Input: Noise, Output: Image
# Note that batch normalization has different behavior at training and inference time,
# we then use a placeholder to indicates the layer if we are training or not.
class Discriminator(Model):
    """DCGAN discriminator: classifies 28x28 images as real (class 1) or fake (class 0)."""

    def __init__(self):
        super(Discriminator, self).__init__()
        # Two strided convolutions downsample 28x28 -> 14x14 -> 7x7,
        # followed by a dense head producing two logits (fake / real).
        self.conv1 = layers.Conv2D(64, 5, strides=2, padding='SAME')
        self.bn1 = layers.BatchNormalization()
        self.conv2 = layers.Conv2D(128, 5, strides=2, padding='SAME')
        self.bn2 = layers.BatchNormalization()
        self.flatten = layers.Flatten()
        self.fc1 = layers.Dense(1024)
        self.bn3 = layers.BatchNormalization()
        self.fc2 = layers.Dense(2)

    def call(self, x, is_training=False):
        """Return class logits of shape (batch, 2)."""
        h = tf.reshape(x, [-1, 28, 28, 1])
        h = tf.nn.leaky_relu(self.bn1(self.conv1(h), training=is_training))
        h = tf.nn.leaky_relu(self.bn2(self.conv2(h), training=is_training))
        h = self.flatten(h)
        h = tf.nn.leaky_relu(self.bn3(self.fc1(h), training=is_training))
        return self.fc2(h)
# Build neural network model.
# Instantiate the two networks once and reuse them throughout training.
generator = Generator()
discriminator = Discriminator()
# +
# Losses.
def generator_loss(reconstructed_image):
    """Cross-entropy pushing the discriminator to label generated images as real (class 1)."""
    real_labels = tf.ones([batch_size], dtype=tf.int32)
    return tf.reduce_mean(tf.nn.sparse_softmax_cross_entropy_with_logits(
        logits=reconstructed_image, labels=real_labels))
def discriminator_loss(disc_fake, disc_real):
    """Cross-entropy pulling real logits toward class 1 and fake logits toward class 0."""
    ones = tf.ones([batch_size], dtype=tf.int32)
    zeros = tf.zeros([batch_size], dtype=tf.int32)
    loss_real = tf.reduce_mean(tf.nn.sparse_softmax_cross_entropy_with_logits(
        logits=disc_real, labels=ones))
    loss_fake = tf.reduce_mean(tf.nn.sparse_softmax_cross_entropy_with_logits(
        logits=disc_fake, labels=zeros))
    return loss_real + loss_fake
# Optimizers.
# Separate Adam instances so generator and discriminator keep independent moment estimates.
optimizer_gen = tf.optimizers.Adam(learning_rate=lr_generator)#, beta_1=0.5, beta_2=0.999)
optimizer_disc = tf.optimizers.Adam(learning_rate=lr_discriminator)#, beta_1=0.5, beta_2=0.999)
# -
# Optimization process. Inputs: real image and noise.
def run_optimization(real_images):
    """One adversarial step: update the discriminator, then the generator.

    Returns the (generator loss, discriminator loss) for this step.
    """
    # The discriminator expects inputs in [-1, 1].
    real_images = real_images * 2. - 1.

    # --- Discriminator update ---
    noise = np.random.normal(-1., 1., size=[batch_size, noise_dim]).astype(np.float32)
    with tf.GradientTape() as tape:
        fake_images = generator(noise, is_training=True)
        disc_fake = discriminator(fake_images, is_training=True)
        disc_real = discriminator(real_images, is_training=True)
        disc_loss = discriminator_loss(disc_fake, disc_real)
    disc_grads = tape.gradient(disc_loss, discriminator.trainable_variables)
    optimizer_disc.apply_gradients(zip(disc_grads, discriminator.trainable_variables))

    # --- Generator update (with fresh noise) ---
    noise = np.random.normal(-1., 1., size=[batch_size, noise_dim]).astype(np.float32)
    with tf.GradientTape() as tape:
        fake_images = generator(noise, is_training=True)
        gen_loss = generator_loss(discriminator(fake_images, is_training=True))
    gen_grads = tape.gradient(gen_loss, generator.trainable_variables)
    optimizer_gen.apply_gradients(zip(gen_grads, generator.trainable_variables))

    return gen_loss, disc_loss
# Run training for the given number of steps.
for step, (batch_x, _) in enumerate(train_data.take(training_steps + 1)):
    if step == 0:
        # Log the losses once before any weight update, as a baseline.
        noise = np.random.normal(-1., 1., size=[batch_size, noise_dim]).astype(np.float32)
        # Rescale real images to [-1, 1] to match the generator's tanh output,
        # exactly as run_optimization does.
        real_images = batch_x * 2. - 1.
        gen_loss = generator_loss(discriminator(generator(noise)))
        # discriminator_loss takes (disc_fake, disc_real) — fakes first.
        disc_loss = discriminator_loss(discriminator(generator(noise)), discriminator(real_images))
        print("initial: gen_loss: %f, disc_loss: %f" % (gen_loss, disc_loss))
        continue
    # Run the optimization.
    gen_loss, disc_loss = run_optimization(batch_x)
    if step % display_step == 0:
        print("step: %i, gen_loss: %f, disc_loss: %f" % (step, gen_loss, disc_loss))
# Visualize predictions.
import matplotlib.pyplot as plt
# +
# Testing
# Generate images from noise, using the generator network.
n = 6
canvas = np.empty((28 * n, 28 * n))  # n x n grid of 28x28 generated digits
for i in range(n):
    # Noise input.
    z = np.random.normal(-1., 1., size=[n, noise_dim]).astype(np.float32)
    # Generate image from noise.
    g = generator(z).numpy()
    # Rescale from the tanh range [-1, 1] back to the original [0, 1]
    g = (g + 1.) / 2
    # Reverse colours for better display
    g = -1 * (g - 1)
    for j in range(n):
        # Draw the generated digits
        canvas[i * 28:(i + 1) * 28, j * 28:(j + 1) * 28] = g[j].reshape([28, 28])
plt.figure(figsize=(n, n))
plt.imshow(canvas, origin="upper", cmap="gray")
plt.show()
| Tutorials/TensorFlow_V2/notebooks/3_NeuralNetworks/dcgan.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# language: python
# name: python3
# ---
# 
#
# <a href="https://hub.callysto.ca/jupyter/hub/user-redirect/git-pull?repo=https%3A%2F%2Fgithub.com%2Fcallysto%2Fcurriculum-notebooks&branch=master&subPath=Science/SpecificAndLatentHeat/specific-and-latent-heat.ipynb&depth=1" target="_parent"><img src="https://raw.githubusercontent.com/callysto/curriculum-notebooks/master/open-in-callysto-button.svg?sanitize=true" width="123" height="24" alt="Open in Callysto"/></a>
# (Click **Cell** > **Run All** before proceeding.)
# + tags=["hide-input"]
# %matplotlib inline
#----------
#Import modules and packages
import ipywidgets as widgets
import random
import math
import matplotlib.pyplot as plt
from ipywidgets import Output, IntSlider, VBox, HBox, Layout
from IPython.display import clear_output, display, HTML, Javascript, SVG
#----------
#import ipywidgets as widgets
#import random
#This function produces a multiple choice form with four options
def multiple_choice(option_1, option_2, option_3, option_4):
option_list = [option_1, option_2, option_3, option_4]
answer = option_list[0]
letters = ["(A) ", "(B) ", "(C) ", "(D) "]
#Boldface letters at the beginning of each option
start_bold = "\033[1m"; end_bold = "\033[0;0m"
#Randomly shuffle the options
random.shuffle(option_list)
#Print the letters (A) to (D) in sequence with randomly chosen options
for i in range(4):
option_text = option_list.pop()
print(start_bold + letters[i] + end_bold + option_text)
#Store the correct answer
if option_text == answer:
letter_answer = letters[i]
button1 = widgets.Button(description="(A)"); button2 = widgets.Button(description="(B)")
button3 = widgets.Button(description="(C)"); button4 = widgets.Button(description="(D)")
button1.style.button_color = 'Whitesmoke'; button2.style.button_color = 'Whitesmoke'
button3.style.button_color = 'Whitesmoke'; button4.style.button_color = 'Whitesmoke'
container = widgets.HBox(children=[button1,button2,button3,button4])
display(container)
print(" ", end='\r')
def on_button1_clicked(b):
if "(A) " == letter_answer:
print("Correct! ", end='\r')
button1.style.button_color = 'Moccasin'; button2.style.button_color = 'Whitesmoke'
button3.style.button_color = 'Whitesmoke'; button4.style.button_color = 'Whitesmoke'
else:
print("Try again.", end='\r')
button1.style.button_color = 'Lightgray'; button2.style.button_color = 'Whitesmoke'
button3.style.button_color = 'Whitesmoke'; button4.style.button_color = 'Whitesmoke'
def on_button2_clicked(b):
if "(B) " == letter_answer:
print("Correct! ", end='\r')
button1.style.button_color = 'Whitesmoke'; button2.style.button_color = 'Moccasin'
button3.style.button_color = 'Whitesmoke'; button4.style.button_color = 'Whitesmoke'
else:
print("Try again.", end='\r')
button1.style.button_color = 'Whitesmoke'; button2.style.button_color = 'Lightgray'
button3.style.button_color = 'Whitesmoke'; button4.style.button_color = 'Whitesmoke'
def on_button3_clicked(b):
if "(C) " == letter_answer:
print("Correct! ", end='\r')
button1.style.button_color = 'Whitesmoke'; button2.style.button_color = 'Whitesmoke'
button3.style.button_color = 'Moccasin'; button4.style.button_color = 'Whitesmoke'
else:
print("Try again.", end='\r')
button1.style.button_color = 'Whitesmoke'; button2.style.button_color = 'Whitesmoke'
button3.style.button_color = 'Lightgray'; button4.style.button_color = 'Whitesmoke'
def on_button4_clicked(b):
    """Handle a click on choice (D): report the result and recolour the buttons."""
    if letter_answer == "(D) ":
        print("Correct! ", end='\r')
        picked = 'Moccasin'
    else:
        print("Try again.", end='\r')
        picked = 'Lightgray'
    button4.style.button_color = picked
    for other in (button1, button2, button3):
        other.style.button_color = 'Whitesmoke'
#Wire every handler to its button
button1.on_click(on_button1_clicked); button2.on_click(on_button2_clicked)
button3.on_click(on_button3_clicked); button4.on_click(on_button4_clicked)
# -
# # Specific and Latent Heat
# ## Introduction
#
# **Heat** is defined as the *transfer of energy* from one object to another due to a difference in their relative temperatures. As heat flows from one object into another, the temperature of either one or both objects changes.
#
# <img src="Images/heat.svg" width="55%"/>
#
# ### Specific Heat Capacity
#
# The amount of heat required to change the temperature of a given material is given by the following equation:
#
# $$Q = m C \Delta T$$
#
# where $Q$ represents heat in joules (J), $m$ represents mass kilograms (kg), and $\Delta T$ represents the change in temperature in Celsius (°C) or kelvin (K). The parameter $C$ is an experimentally determined value characteristic of a particular material. This parameter is called the **specific heat** or **specific heat capacity** (J/kg$\cdot$°C). The specific heat capacity of a material is determined by measuring the amount of heat required to raise the temperature of 1 kg of the material by 1°C. For ordinary temperatures and pressures, the value of $C$ is considered constant. Values for the specific heat capacity of common materials are shown in the table below:
#
# Material | Specific Heat Capacity (J/kg$\cdot$°C)
# --- | ---
# Aluminum | 903
# Brass | 376
# Carbon | 710
# Copper | 385
# Glass | 664
# Ice | 2060
# Iron | 450
# Lead | 130
# Methanol | 2450
# Silver | 235
# Stainless Steel | 460
# Steam | 2020
# Tin | 217
# Water | 4180
# Zinc | 388
#
# Use the slider below to observe the relationship between the specific heat capacity and the amount of heat required to raise the temperature of a 5 kg mass by 50 °C.
# + tags=["hide-input"]
#import ipywidgets as widgets
#from ipywidgets import Output, VBox, HBox
mass_1 = 5                 # fixed mass (kg) for the demonstration
delta_temperature = 50     # fixed temperature change (°C)
specific_heat_capacity = widgets.IntSlider(description="C (J/kg⋅°C)", min=100, max=1000)
# ANSI escape codes used to boldface terminal output
start_bold = "\033[1m"
end_bold = "\033[0;0m"
def f(specific_heat_capacity):
    """Print Q = mCΔT for the slider's current specific heat capacity."""
    heat_J = int(mass_1 * specific_heat_capacity * delta_temperature)
    heat_kJ = int(heat_J / 1000)
    print(start_bold + "Heat = (mass) X (specific heat capacity) X (change in temperature)" + end_bold)
    print("Heat = ({} X {} X {}) J = {} J or {} kJ".format(mass_1, specific_heat_capacity, delta_temperature, heat_J, heat_kJ))
out1 = widgets.interactive_output(f, {'specific_heat_capacity': specific_heat_capacity})
HBox([VBox([specific_heat_capacity]), out1])
# -
# **Question:** *As the specific heat increases, the amount of heat required to cause the temperature change:*
# + tags=["hide-input"]
#import ipywidgets as widgets
#Assign each multiple choice to these four variables
#Option_1 contains the answer
# The first argument to multiple_choice is always the correct answer.
choices = (
    "Increases",
    "Decreases",
    "Remains constant",
    "Equals zero",
)
multiple_choice(*choices)
# -
# ### Example
# How many kilojoules (kJ) of heat are needed to raise the temperature of a 3.0 kg piece of aluminum from 10°C to 50°C? Round the answer to 2 significant figures.
# + tags=["hide-input"]
#import ipywidgets as widgets
#from ipywidgets import Output, VBox
#from IPython.display import clear_output, display, HTML
out2 = Output()  # render area for the step-one walkthrough text
button_step1 = widgets.Button(description="Step One", layout=Layout(width='20%', height='100%'), button_style='primary')
count1 = 1  # click-parity counter: an even click count shows the text, odd hides it
text1_1 = widgets.HTMLMath(value="The first step is to identify all known and unknown variables required to solve the problem. In this case, three variables are known ($m$, $C$, $\Delta T$), and one variable is unknown ($Q$):")
text1_2 = widgets.HTMLMath(value="$m$ = 3.0 kg")
text1_3 = widgets.HTMLMath(value="$\Delta T$ = 50°C $-$ 10°C = 40°C")
text1_4 = widgets.HTMLMath(value="$C$ = 903 J/kg$\cdot$°C (The specific heat capacity for aluminum may be found in the table above.)")
text1_5 = widgets.HTMLMath(value="$Q$ = ?")
def on_button_step1_clicked(b):
    """Toggle the step-one solution text on alternating button clicks."""
    global count1
    count1 += 1
    with out2:
        clear_output()
        if count1 % 2 == 0:  # every second click reveals the text
            display(text1_1, text1_2, text1_3, text1_4, text1_5)
display(VBox([button_step1, out2]))
button_step1.on_click(on_button_step1_clicked)
# + tags=["hide-input"]
#import ipywidgets as widgets
#from ipywidgets import Output, VBox
#from IPython.display import clear_output, display, HTML
out3 = Output()  # render area for the step-two walkthrough text
button_step2 = widgets.Button(description="Step Two", layout=Layout(width='20%', height='100%'), button_style='primary')
count2 = 1  # click-parity counter: an even click count shows the text, odd hides it
text2_1 = widgets.HTMLMath(value="Substitute each known variable into the formula to solve for the unknown variable:")
text2_2 = widgets.HTMLMath(value="$Q = mC\Delta T$")
text2_3 = widgets.HTMLMath(value="$Q$ = (3.0 kg) (903 J/kg$\cdot$°C) (40°C) = 108,360 J")
text2_4 = widgets.HTMLMath(value="$Q$ = 108,360 J")
def on_button_step2_clicked(b):
    """Toggle the step-two solution text on alternating button clicks."""
    global count2
    count2 += 1
    with out3:
        clear_output()
        if count2 % 2 == 0:  # every second click reveals the text
            display(text2_1, text2_2, text2_3, text2_4)
display(VBox([button_step2, out3]))
button_step2.on_click(on_button_step2_clicked)
# + tags=["hide-input"]
#import ipywidgets as widgets
#from ipywidgets import Output, VBox
#from IPython.display import clear_output, display, HTML
out4 = Output()  # render area for the step-three walkthrough text
button_step3 = widgets.Button(description="Step Three", layout=Layout(width='20%', height='100%'), button_style='primary')
count3 = 1  # click-parity counter: an even click count shows the text, odd hides it
text3_1 = widgets.HTMLMath(value="Round the answer to the correct number of significant figures and convert to the correct units (if needed):")
text3_2 = widgets.HTMLMath(value="$Q$ = 108,360 J = 110,000 J or 110 kJ")
text3_3 = widgets.HTMLMath(value="The amount of heat required to increase the temperature of a 3.0 kg piece of aluminum from 10°C to 50°C is 110,000 J or 110 kJ.")
def on_button_step3_clicked(b):
    """Toggle the step-three solution text on alternating button clicks."""
    global count3
    count3 += 1
    with out4:
        clear_output()
        if count3 % 2 == 0:  # every second click reveals the text
            display(text3_1, text3_2, text3_3)
display(VBox([button_step3, out4]))
button_step3.on_click(on_button_step3_clicked)
# -
# ### Practice
#
# The heat transfer equation shown above may be rearranged to solve for each variable in the equation. These rearrangements are shown below:
#
# $$Q = mC\Delta T \qquad m = \dfrac{Q}{C \Delta T} \qquad C = \dfrac{Q}{m \Delta T} \qquad \Delta T = \dfrac{Q}{mC}$$
#
# Try the four different practice problems below. Each question will require the use of one or more formula above. Use the *Generate New Question* button to generate additional practice problems.
# + tags=["hide-input"]
#from IPython.display import Javascript, display
#from ipywidgets import widgets
def generate_new_question(ev):
    """Re-execute this cell so a fresh randomized question is generated."""
    display(Javascript('IPython.notebook.execute_cell()'))
button_generate_question = widgets.Button(description="Generate New Question", layout=Layout(width='20%', height='100%'), button_style='success')
button_generate_question.on_click(generate_new_question)
display(button_generate_question)
#Round a positive integer to the specified number of significant figures
def round_sf(number, significant):
    return round(number, significant - len(str(number)))
#Randomize variables
mass = round(random.uniform(25.0, 50.0), 1)              # sample mass in grams
temperature_initial = round(random.uniform(15.0, 25.0), 1)
temperature_final = round(random.uniform(55.0, 65.0), 1)
#Dictionary of materials and their specific heat capacities (J/kg·°C)
materials = {"aluminum": 903, "brass": 376, "carbon": 710, "copper": 385, "glass": 664, "iron": 450, "silver": 235, "stainless steel": 460, "tin": 217, "zinc": 388}
material = random.choice(list(materials.keys()))
#Print question
question = "How much heat is required to raise the temperature of a {} g sample of {} from {}°C to {}°C?".format(mass, material, temperature_initial, temperature_final)
print(question)
#Answer: Q = mCΔT, with mass converted from grams to kilograms
answer = (mass/1000) * materials[material] * (temperature_final - temperature_initial)
answer_rounded = round_sf(int(answer), 3)  # value actually shown to the student
#Define range of values for random multiple choices
mini = 100
maxa = 2300
#Create three distractors that stay unique after rounding.
#BUG FIX: the old check compared unrounded values, so after rounding to
#3 significant figures a distractor could equal the displayed answer.
choice_list = random.sample(range(mini, maxa), 3)
while any(round_sf(c, 3) == answer_rounded for c in choice_list):
    choice_list = random.sample(range(mini, maxa), 3)
#Option_1 contains the answer
option_1 = str(answer_rounded) + " J"
option_2 = str(round_sf(choice_list[0], 3)) + " J"
option_3 = str(round_sf(choice_list[1], 3)) + " J"
option_4 = str(round_sf(choice_list[2], 3)) + " J"
multiple_choice(option_1, option_2, option_3, option_4)
# + tags=["hide-input"]
#import math
#from IPython.display import Javascript, display
#from ipywidgets import widgets
def generate_new_question(ev):
    """Re-execute this cell so a fresh randomized question is generated."""
    display(Javascript('IPython.notebook.execute_cell()'))
button_generate_question = widgets.Button(description="Generate New Question", layout=Layout(width='20%', height='100%'), button_style='success')
button_generate_question.on_click(generate_new_question)
display(button_generate_question)
#Randomize variables
heat = random.randint(10, 250)                            # heat lost, in kJ
temperature_initial = round(random.uniform(10.0, 35.0), 1)
temperature_final = round(random.uniform(45.0, 100.0), 1)
#Dictionary of materials and their specific heat capacities (J/kg·°C)
materials = {"aluminum": 903, "brass": 376, "carbon": 710, "copper": 385, "glass": 664, "iron": 450, "silver": 235, "stainless steel": 460, "tin": 217, "zinc": 388}
material = random.choice(list(materials.keys()))
#Print question
question = "Suppose some {} lost {} kJ of heat as it cooled from {}°C to {}°C. Find the mass. Note: you will need to make the sign of Q negative because heat is flowing out of the material as it cools.".format(material, heat, temperature_final, temperature_initial)
print(question)
#Answer: m = Q / (CΔT); Q is negative because heat leaves the material
answer = (-heat*1000) / (materials[material] * (temperature_initial - temperature_final))
answer_str = '{:.2f}'.format(round(answer, 2))  # value actually shown to the student
#Define range of values for random multiple choices (displayed as choice/100 kg)
mini = 100
maxa = 2000
#Create three distractors that differ from the displayed answer.
#BUG FIX: the old check compared the raw 100-2000 integers against
#int(answer), although the options shown are choice/100 kg — the guard
#could never reject a distractor equal to the answer.
choice_list = random.sample(range(mini, maxa), 3)
while any('{:.2f}'.format(c / 100) == answer_str for c in choice_list):
    choice_list = random.sample(range(mini, maxa), 3)
#Option_1 contains the answer
option_1 = answer_str + " kg"
option_2 = str(round(choice_list[0], 2)/100) + " kg"
option_3 = str(round(choice_list[1], 2)/100) + " kg"
option_4 = str(round(choice_list[2], 2)/100) + " kg"
multiple_choice(option_1, option_2, option_3, option_4)
# + tags=["hide-input"]
#from IPython.display import Javascript, display
#from ipywidgets import widgets
def generate_new_question(ev):
    """Re-execute this cell so a fresh randomized question is generated."""
    display(Javascript('IPython.notebook.execute_cell()'))
button_generate_question = widgets.Button(description="Generate New Question", layout=Layout(width='20%', height='100%'), button_style='success')
button_generate_question.on_click(generate_new_question)
display(button_generate_question)
#Round a positive integer to the specified number of significant figures
def round_sf(number, significant):
    return round(number, significant - len(str(number)))
#Randomize variables
heat = round(random.uniform(23.00, 26.00), 1)   # heat supplied, in kJ
mass = round(random.uniform(1.00, 3.00), 2)     # sample mass, in kg
temperature_initial = round(random.uniform(24.0, 25.0), 1)
temperature_final = round(random.uniform(35.0, 36.0), 1)
#Print question
question = "A newly made synthetic material weighing {} kg requires {} kJ to go from {}°C to {}°C (without changing state). What is the specific heat capacity of this new material?".format(mass, heat, temperature_initial, temperature_final)
print(question)
#Answer: C = Q / (mΔT), with heat converted from kJ to J
answer = (heat*1000) / (mass * (temperature_final - temperature_initial))
answer_rounded = round_sf(int(answer), 3)  # value actually shown to the student
#Define range of values for random multiple choices
mini = 990
maxa = 2510
#Create three distractors that stay unique after rounding.
#BUG FIX: the old check compared unrounded values, so after rounding to
#3 significant figures a distractor could equal the displayed answer.
choice_list = random.sample(range(mini, maxa), 3)
while any(round_sf(c, 3) == answer_rounded for c in choice_list):
    choice_list = random.sample(range(mini, maxa), 3)
#Option_1 contains the answer
option_1 = str(answer_rounded) + " J/(kg°C)"
option_2 = str(round_sf(choice_list[0], 3)) + " J/(kg°C)"
option_3 = str(round_sf(choice_list[1], 3)) + " J/(kg°C)"
option_4 = str(round_sf(choice_list[2], 3)) + " J/(kg°C)"
multiple_choice(option_1, option_2, option_3, option_4)
# + tags=["hide-input"]
#import math
#from IPython.display import Javascript, display
#from ipywidgets import widgets
def generate_new_question(ev):
    """Re-execute this cell so a fresh randomized question is generated."""
    display(Javascript('IPython.notebook.execute_cell()'))
button_generate_question = widgets.Button(description="Generate New Question", layout=Layout(width='20%', height='100%'), button_style='success')
button_generate_question.on_click(generate_new_question)
display(button_generate_question)
#Dictionary of materials and their specific heat capacities (J/kg·°C)
materials = {"aluminum": 903, "brass": 376, "carbon": 710, "copper": 385, "glass": 664, "iron": 450, "silver": 235, "stainless steel": 460, "tin": 217, "zinc": 388}
material = random.choice(list(materials.keys()))
#Randomize Variables
heat = random.randint(100, 150)               # heat supplied, in kJ
mass = round(random.uniform(1.0, 5.0), 1)     # sample mass, in kg
temperature_initial = round(random.uniform(10.0, 30.0), 1)
temperature_final = round(random.uniform(40.0, 60.0), 1)
#Determine question type (ΔT, final T, or initial T)
question_type = random.randint(1, 3)
if question_type == 1:
    #Type 1: Finding change in temperature
    question = "If {} kg of {} receives {} kJ of heat, determine its change in temperature to one decimal place.".format(mass, material, heat)
    print(question)
    answer = (heat*1000) / (materials[material] * mass)
elif question_type == 2:
    #Type 2: Finding final temperature
    question = "If {} kg of {} receives {} kJ of heat, and if the {}'s initial temperature is {}°C, determine its final temperature to one decimal place. Hint: ΔT = final temperature - initial temperature.".format(mass, material, heat, material, temperature_initial)
    print(question)
    answer = ((heat*1000) / (materials[material] * mass)) + temperature_initial
else:
    #Type 3: Finding initial temperature
    question = "If {} kg of {} receives {} kJ of heat, and if the {}'s final temperature is {}°C, determine its initial temperature to one decimal place. Hint: ΔT = final temperature - initial temperature.".format(mass, material, heat, material, temperature_final)
    print(question)
    answer = temperature_final - ((heat*1000) / (materials[material] * mass))
#Distractors are drawn near the answer on a x100 scale, then shown as choice/100
mini = int(answer*100 - 1000)
maxa = int(answer*100 + 1000)
answer_display = round(answer, 1)  # value actually shown to the student
#Create three distractors that differ from the displayed answer.
#BUG FIX: the old check compared int(answer) against the x100-scaled raw
#choices, which could never match — a distractor could duplicate the answer.
choice_list = random.sample(range(mini, maxa), 3)
while any(round(c/100, 1) == answer_display for c in choice_list):
    choice_list = random.sample(range(mini, maxa), 3)
#Option_1 contains the answer
option_1 = str(answer_display) + " °C"
option_2 = str(round(choice_list[0]/100, 1)) + " °C"
option_3 = str(round(choice_list[1]/100, 1)) + " °C"
option_4 = str(round(choice_list[2]/100, 1)) + " °C"
multiple_choice(option_1, option_2, option_3, option_4)
# -
# ## Change of Phase
#
# In the previous examples and exercises, the material remained in a constant state while heat was added or taken away. However, the addition or subtraction of heat is often accompanied by a **phase change**. The three most common phases are solid, liquid, and gas:
#
# <img src="Images/phase_change.svg" width="75%"/>
#
# **Problem:** *Determine the amount of heat required to raise the temperature of a 100 g block of ice from -20°C to steam at 200°C.*
#
# **Attempt:** There are two phase changes in this problem: (1) the melting of ice into water, and (2) the boiling of water into steam. To determine $Q$, let's utilize the heat formula:
#
# $$Q=mC\Delta T$$
#
# To solve this problem, we can split it up into steps that are simple to calculate. For example, we can start by calculating the heat required to warm ice from -20°C to 0°C. Then, we can calculate the heat required to warm water from 0°C to 100°C. Finally, we can calculate the heat required to warm steam from 100°C to 200°C:
#
# $Q_{ice}$ = (0.100 kg) (2060 J/kg$\cdot$°C) (0°C - (-20°C)) = 4120 J
#
# $Q_{water}$ = (0.100 kg) (4180 J/kg$\cdot$°C) (100°C - 0°C) = 41800 J
#
# $Q_{steam}$ = (0.100 kg) (2020 J/kg$\cdot$°C) (200°C - 100°C) = 20200 J
#
# Then, by adding up the heat calculated in each step, the original problem can be solved:
#
# $Q$ = (4120 + 41800 + 20200) J = 66120 J, or 66.1 kJ.
# ### Experiment
#
# Let's conduct an experiment to check the above calculation. We will start with a 100 g sample of ice at -20°C, and then add a constant amount of heat until the entire sample is converted to steam at 200°C. Every minute, we will take the temperature of the sample.
#
# The data from this experiment is shown in the interactive graphs below. The temperature of the material versus time is shown on left. The heat added to the material versus time is shown on the right.
# + tags=["hide-input"]
#import ipywidgets as widgets
#import matplotlib.pyplot as plt
#from ipywidgets import HBox, Output, VBox
#from IPython.display import clear_output
out5 = Output()  # plot render area
play = widgets.Play(interval=500, value=0, min=0, max=25, step=1, description="Press play", disabled=False)
time_slider = widgets.IntSlider(description='Time (min)', value=0, min=0, max=25, continuous_update = False)
widgets.jslink((play, 'value'), (time_slider, 'value'))  # keep play button and slider in sync
#Make lists of x and y values
x_values = list(range(26))
#Measured temperature (°C) at each minute; the plateaus at 0°C and 100°C are the phase changes
y_values = [-20, -10, 0, 0, 10, 40, 80, 100, 100, 100, 100, 100, 100, 100, 100, 100, 100, 100, 100, 100, 100, 120, 140, 160, 180, 200]
heat_y = []
increment = 0
for i in range(26):
    heat_y.append(increment)
    increment += 13.021  # constant heating rate: ~13 kJ added per minute
#Plot graphs
def plot_graphs(change):
    """Redraw the temperature and heat scatter plots up to the selected minute."""
    x = change['new']  # current slider value (minutes elapsed)
    with out5:
        clear_output(wait=True)
        temp_x_values = []
        temp_y_values = []
        graph2y = []
        for i in range(x+1):
            temp_x_values.append(x_values[i])
            temp_y_values.append(y_values[i])
            graph2y.append(heat_y[i])
        plt.figure(figsize=(15,5))
        plt.style.use('seaborn')
        plt.rcParams["axes.edgecolor"] = "black"
        plt.rcParams["axes.linewidth"] = 0.5
        plt.subplot(1,2,1)  # left: temperature vs time
        plt.ylim(-30, 210)
        plt.xlim(-0.5,26)
        plt.scatter(temp_x_values, temp_y_values)
        plt.ylabel('Temperature (°C)')
        plt.xlabel('Time (min)')
        plt.subplot(1,2,2)  # right: cumulative heat vs time
        plt.ylim(-25, 350)
        plt.xlim(-2,26)
        plt.scatter(temp_x_values, graph2y, color='red')
        plt.ylabel('Heat (kJ)')
        plt.xlabel('Time (min)')
        plt.show()
#Get slider value
time_slider.observe(plot_graphs, 'value')
plot_graphs({'new': time_slider.value})  # draw the initial frame
#Display widget
display(HBox([play, time_slider]))
display(out5)
# -
# **Question**: *Examine the graph on the left. It shows the temperature of the material at each minute. At what temperature(s) does the temperature remain constant for some time?*
# + tags=["hide-input"]
#import ipywidgets as widgets
#Assign each multiple choice to these four variables
#Option_1 contains the answer
# The first argument to multiple_choice is always the correct answer.
choices = (
    "0°C and 100°C. We have horizontal lines at those temperatures.",
    "-20°C, 0°C, 100°C, and 200°C.",
    "100°C.",
    "The temperature is never constant.",
)
multiple_choice(*choices)
# -
# **Question:** *Examine the graph on the right. It shows how much heat was required to turn a block of ice at -20°C into steam at 200°C. Does this agree with the value we arrived at from our above calculation (66.1 kJ)?*
# + tags=["hide-input"]
#import ipywidgets as widgets
#Assign each multiple choice to these four variables
#Option_1 contains the answer
# The first argument to multiple_choice is always the correct answer.
choices = (
    "Based on the graph, the amount of heat required is around 325 kJ. It does not agree with our calculation.",
    "Based on the graph, the amount of heat required is close enough to our calculation; hence, it does agree.",
    "Both values match perfectly.",
    "The values are close and it is impossible to say if they match perfectly or not.",
)
multiple_choice(*choices)
# -
# **Question**: *Examine the graph on the right. Observe that the slope of the line is constant. What does this imply?*
# + tags=["hide-input"]
#import ipywidgets as widgets
#Assign each multiple choice to these four variables
#Option_1 contains the answer
# The first argument to multiple_choice is always the correct answer.
choices = (
    "The amount of heat added to the system is constant for the entire 25 min period.",
    "The amount of heat added to the system is not constant, the rate increases throughout the 25 min period.",
    "No heat is added at the start, but around 325 kJ of heat is added at the very end.",
    "As time increases, the amount of heat required decreases.",
)
multiple_choice(*choices)
# -
# ### Experimental Results
#
# Our experimental data indicates that our calculation of 66.1 kJ is incorrect and that it in fact takes around 325 kJ to heat ice from -20°C to steam at 200°C. *So what did we miss?*
#
# **Answer:** The *phase changes*.
#
# The graph on the right shows us that the rate heat was added to the system over the 25 minute period was constant, yet the temperature remained constant at two points for some time (0°C and 100°C). How is this possible? That is, *how can we add heat to a material while its temperature remains constant?*
#
# **Answer:** Every material has two common "critical temperature points". These are the points at which the *state* of the material *changes*. For water, these points are at 0°C and 100°C. If heat is coming into a material *during a phase change*, then this energy is used to overcome the intermolecular forces between the molecules of the material.
#
# Let's consider when ice melts into water at 0°C. Immediately after the molecular bonds in the ice are broken, the molecules are moving (vibrating) at the same average speed as before, and so their average kinetic energy remains the same. *Temperature* is precisely a measure of the average kinetic energy of the particles in a material. Hence, during a phase change, the temperature remains constant.
# ### Latent Heat of Fusion and Vaporization
# The **latent heat of fusion ($H_f$)** is the quantity of heat needed to melt 1 kg of a solid to a liquid without a change in temperature.
#
# <img src="Images/latent_heat_fusion.svg" width="65%"/>
#
# The **latent heat of vaporization ($H_v$)** is the quantity of heat needed to vaporise 1 kg of a liquid to a gas without a change in temperature.
#
# <img src="Images/latent_heat_vaporization.svg" width="65%"/>
#
# The latent heats of fusion and vaporization are empirical characteristics of a particular material. As such, they must be experimentally determined. Values for the latent heats of fusion and vaporization of common materials are shown in the table below:
#
# Materials | Heat of Fusion (J/kg) |Heat of Vaporization (J/kg)
# --- | --- | ---
# Copper | $2.05 \times 10^5$ | $5.07 \times 10^6$
# Gold | $6.03 \times 10^4$ | $1.64 \times 10^6$
# Iron | $2.66 \times 10^5$ | $6.29 \times 10^6$
# Lead | $2.04 \times 10^4$ | $8.64 \times 10^5$
# Mercury | $1.15 \times 10^4$ | $2.72 \times 10^5$
# Methanol | $1.09 \times 10^5$ | $8.78 \times 10^5$
# Silver | $1.04 \times 10^4$ | $2.36 \times 10^6$
# Water (ice) | $3.34 \times 10^5$ | $2.26 \times 10^6$
#
# The following formulae are used to calculate the amount of heat needed to change a material from a solid to a liquid (fusion), or from a liquid to a gas (vaporization):
#
# $$Q_f = mH_f \qquad Q_v = mH_v$$
#
# ### Example (revisited)
#
# Recall our previous problem:
#
# **Problem:** *Determine the amount of heat required to raise the temperature of a 100 g block of ice from -20°C to steam at 200°C.*
#
# **Solution:** Previously, we split the problem into three steps. It turns out that those steps correctly calculated the heat required to warm ice from -20°C to 0°C, water from 0°C to 100°C, and steam from 100°C to 200°C. What was absent was the latent heat required to complete the phase changes at 0°C and 100°C. Therefore, we need to **add two more steps**, which incorporate the above formulae.
#
# For completion, the previous steps are restated and the entire calculation is shown in **five steps** below (plus a final step to sum up the heats calculated in the previous steps):
# + tags=["hide-input"]
#import ipywidgets as widgets
#from ipywidgets import Output, VBox, HBox
#from IPython.display import clear_output, SVG, HTML, display
out6 = Output()  # render area for the five-step walkthrough
frame_1 = 1  # current walkthrough frame (0-6); advances on each click and wraps to 0
#Toggle images
def show_steps_1():
    """Display the phase-diagram SVG plus all step text revealed so far, then advance frame_1."""
    global frame_1
    I11 = widgets.HTMLMath(value="Step 1: Calculate the heat required to change ice from -20°C to 0°C. Since the temperature changes, we use $Q = mCΔT$.")
    Q11 = widgets.HTMLMath(value="$Q_{1}$ = (0.1 kg) (2060 J/kg°C) (0°C - (-20°C)) = 4120 J")
    I12 = widgets.HTMLMath(value="Step 2: Calculate the heat required to change ice at 0°C to water at 0°C. Since the temperature does not change as we are at the melting point of water, we use $Q = mH_{f}$.")
    Q12 = widgets.HTMLMath(value="$Q_{2}$ = (0.1 kg) (334000 J/kg) = 33400 J")
    I13 = widgets.HTMLMath(value="Step 3: Calculate the heat required to change water from 0°C to 100°C. Since the temperature changes, we use $Q = mCΔT$.")
    Q13 = widgets.HTMLMath(value="$Q_{3}$ = (0.1 kg) (4180 J/kg°C) (100°C - 0°C) = 41800 J")
    I14 = widgets.HTMLMath(value="Step 4: Calculate the heat required to change water at 100°C to steam at 100°C. Since the temperature does not change at we are at the boiling point of water, we use $Q = mH_{v}$.")  # NOTE(review): "at we are" should read "as we are" — user-visible string left unchanged here
    Q14 = widgets.HTMLMath(value="$Q_{4}$ = (0.1 kg) (2260000 J/kg) = 226000 J")
    I15 = widgets.HTMLMath(value="Step 5: Calculate the heat required to change steam from 100°C to 200°C. Since the temperature changes, we use $Q = mCΔT$.")
    Q15 = widgets.HTMLMath(value="$Q_{5}$ = (0.1 kg) (2020 J/kg°C) (200°C - 100°C) = 20200 J")
    I16 = widgets.HTMLMath(value="Summary: Calculate total heat by adding up the values calculated in the previous steps. $Q$ = $Q_1$ + $Q_2$ + $Q_3$ + $Q_4$ + $Q_5$")
    Q16 = widgets.HTMLMath(value="$Q$ = (4120 + 33400 + 41800 + 226000 + 20200) J = 325520 J or 326 kJ")
    #Each frame shows the matching diagram plus the cumulative step text
    if frame_1 == 0:
        display(SVG("Images/phase_diagram_1_0.svg"))
        frame_1 = 1
    elif frame_1 == 1:
        display(SVG("Images/phase_diagram_1_1.svg"))
        display(I11, Q11)
        frame_1 = 2
    elif frame_1 == 2:
        display(SVG("Images/phase_diagram_1_2.svg"))
        display(I11, Q11, I12, Q12)
        frame_1 = 3
    elif frame_1 == 3:
        display(SVG("Images/phase_diagram_1_3.svg"))
        display(I11, Q11, I12, Q12, I13, Q13)
        frame_1 = 4
    elif frame_1 == 4:
        display(SVG("Images/phase_diagram_1_4.svg"))
        display(I11, Q11, I12, Q12, I13, Q13, I14, Q14)
        frame_1 = 5
    elif frame_1 == 5:
        display(SVG("Images/phase_diagram_1_5.svg"))
        display(I11, Q11, I12, Q12, I13, Q13, I14, Q14, I15, Q15)
        frame_1 = 6
    elif frame_1 == 6:
        display(SVG("Images/phase_diagram_1_6.svg"))
        display(I11, Q11, I12, Q12, I13, Q13, I14, Q14, I15, Q15, I16, Q16)
        frame_1 = 0  # wrap around to the blank starting frame
button_phase_diagram_1 = widgets.Button(description="Show Next Step", button_style = 'primary')
display(button_phase_diagram_1)
def on_submit_button_phase_diagram_1_clicked(b):
    """Clear out6 and render the next walkthrough frame."""
    with out6:
        clear_output(wait=True)
        show_steps_1()
with out6:
    display(SVG("Images/phase_diagram_1_0.svg"))  # initial frame shown before any clicks
button_phase_diagram_1.on_click(on_submit_button_phase_diagram_1_clicked)
display(out6)
# -
# **Note:** the *state* of a material can include more than one *phase*. For example, at 0°C, the state of water includes both solid (ice) and liquid (water) phases. At 100°C, the state of water includes both liquid (water) and gas (steam) phases.
#
# It is common to cool down a material (as opposed to heating it up). In this scenario, heat must be taken away. By convention, a negative $Q$ is used to represent heat being taken away from a material (cooling), while a positive $Q$ is used to represent heat being added to a material (warming). Be aware of the sign of $Q$ as it indicates the direction the heat is flowing. For $Q=mH_f$ and $Q=mH_v$, you must be aware of whether heat is being added to or taken away from the material. If heat is being taken away, then a negative sign must be placed in front of $H_f$ and $H_v$.
#
# It is not necessary for each problem to be five steps. A problem could have 1-5 steps depending on the situation. Let's do another example together. An interactive graph is provided to help determine the number of steps required.
# ### Example
#
# How much heat must be removed to change 10.0 g of steam at 120.0°C to water at 50°C? Round to two significant figures.
# + tags=["hide-input"]
#import ipywidgets as widgets
#import matplotlib.pyplot as plt
#from ipywidgets import HBox, Output, VBox
#from IPython.display import clear_output
out7 = Output()  # plot render area
play2 = widgets.Play(interval=500, value=0, min=0, max=25, step=1, description="Press play", disabled=False)
time_slider2 = widgets.IntSlider(description='Time', value=0, min=0, max=20, continuous_update = False)
widgets.jslink((play2, 'value'), (time_slider2, 'value'))  # keep play button and slider in sync
#Make lists of x and y values
x_values2 = list(range(21))
#Temperature (°C) per time step: cooling steam, condensation plateau at 100°C, then cooling water
y_values2 = [120, 110, 100, 100, 100, 100, 100, 100, 100, 100, 100, 95, 90, 85, 80, 75, 70, 65, 60, 55, 50]
heat_y2 = []
increment2 = 0
#NOTE(review): 26 heat values are built but only indices 0-20 are ever read (slider max is 20)
for i in range(26):
    heat_y2.append(increment2)
    increment2 += 13021
#Plot graph
def time_temp(change):
    """Redraw the temperature-vs-time scatter up to the selected time step."""
    x = change['new']  # current slider value
    with out7:
        clear_output(wait=True)
        temp_x_values2 = []
        temp_y_values2 = []
        graph2y2 = []  # accumulated heat values (computed but not plotted in this cell)
        for i in range(x+1):
            temp_x_values2.append(x_values2[i])
            temp_y_values2.append(y_values2[i])
            graph2y2.append(heat_y2[i])
        plt.figure(figsize=(7,5))
        plt.style.use('seaborn')
        plt.rcParams["axes.edgecolor"] = "black"
        plt.rcParams["axes.linewidth"] = 0.5
        plt.ylim(0, 150)
        plt.xlim(-0.5,26)
        plt.xticks([])  # time axis is qualitative; hide the tick labels
        plt.scatter(temp_x_values2, temp_y_values2)
        plt.ylabel('Temperature (°C)')
        plt.xlabel('Time')
        plt.figtext(0.5, 0.01, "This graph consists of three line-segments. This indicates that we require three steps.", wrap=True, horizontalalignment='center', fontsize=12)
        plt.show()
#Get slider value
time_temp({'new': time_slider2.value})  # draw the initial frame
time_slider2.observe(time_temp, 'value')
#Display widget
display(HBox([play2, time_slider2]))
display(out7)
# + tags=["hide-input"]
#import ipywidgets as widgets
#from IPython.display import clear_output, SVG
out8 = widgets.Output()  # render area for the three-step walkthrough
frame_2 = 1  # current walkthrough frame (0-4); advances on each click and wraps to 0
#Toggle images
def show_steps_2():
    """Display the phase-diagram SVG plus all step text revealed so far, then advance frame_2."""
    global frame_2
    I21 = widgets.HTMLMath(value="Step 1: Calculate the heat loss required to change steam from 120°C to 100°C. Since there is no phase change taking place, we use $Q = mCΔT$.")
    Q21 = widgets.HTMLMath(value="$Q_{1}$ = (0.01 kg) (2020 J/kg°C) (100°C - 120°C) = -404 J")
    I22 = widgets.HTMLMath(value="Step 2: Calculate the heat loss required to change steam at 100°C to water at 100°C. Since a phase change is taking place (condensation), we use $Q = -mH_{v}$.")
    Q22 = widgets.HTMLMath(value="$Q_{2}$ = - (0.01 kg) (2260000 J/kg) = -22600 J")
    I23 = widgets.HTMLMath(value="Step 3: Calculate the heat loss required to change water from 100°C to 50°C. Since there is no phase change taking place, we use $Q = mCΔT$.")
    Q23 = widgets.HTMLMath(value="$Q_{3}$ = (0.01 kg) (4180 J/kg°C) (50°C - 100°C) = -2090 J")
    I24 = widgets.HTMLMath(value="Summary: Calculate the total heat loss by adding up the values calculated in the previous steps. $Q$ = $Q_1$ + $Q_2$ + $Q_3$")
    Q24 = widgets.HTMLMath(value="$Q$ = (-404 + -22600 + -2090) J = -25000 J or -25 kJ")
    #Each frame shows the matching diagram plus the cumulative step text
    if frame_2 == 0:
        display(SVG("Images/phase_diagram_2_0.svg"))
        frame_2 = 1
    elif frame_2 == 1:
        display(SVG("Images/phase_diagram_2_1.svg"))
        display(I21, Q21)
        frame_2 = 2
    elif frame_2 == 2:
        display(SVG("Images/phase_diagram_2_2.svg"))
        display(I21, Q21, I22, Q22)
        frame_2 = 3
    elif frame_2 == 3:
        display(SVG("Images/phase_diagram_2_3.svg"))
        display(I21, Q21, I22, Q22, I23, Q23)
        frame_2 = 4
    elif frame_2 == 4:
        display(SVG("Images/phase_diagram_2_4.svg"))
        display(I21, Q21, I22, Q22, I23, Q23, I24, Q24)
        frame_2 = 0  # wrap around to the blank starting frame
button_phase_diagram_2 = widgets.Button(description="Show Next Step", button_style = 'primary')
display(button_phase_diagram_2)
def on_submit_button_phase_diagram_2_clicked(b):
    """Clear out8 and render the next walkthrough frame."""
    with out8:
        clear_output(wait=True)
        show_steps_2()
with out8:
    display(SVG("Images/phase_diagram_2_0.svg"))  # initial frame shown before any clicks
button_phase_diagram_2.on_click(on_submit_button_phase_diagram_2_clicked)
display(out8)
# -
# ### Practice
#
# There are many variations that are possible with specific heat and latent heat questions. Use the *Generate New Question* button to generate additional practice problems. These practice problems will vary from one to five steps.
# **One Step Problem**
# + tags=["hide-input"]
#import math
#import random
#from IPython.display import Javascript, display
#from ipywidgets import widgets, Layout
def generate_new_question(ev):
    # Re-execute this cell so all random values (and the question) regenerate.
    display(Javascript('IPython.notebook.execute_cell()'))
button_generate_question = widgets.Button(description="Generate New Question", layout=Layout(width='20%', height='100%'), button_style='success')
button_generate_question.on_click(generate_new_question)
display(button_generate_question)
#Dictionary of materials and their specific heat capacities (J/kg per degree C)
#NOTE(review): unused in this cell; kept for consistency with sibling cells
materials = {"aluminum": 903, "brass": 376, "carbon": 710, "copper": 385, "glass": 664, "iron": 450, "lead": 130, "silver": 235, "stainless Steal": 460, "tin": 217, "water": 4180, "zinc": 388}
material = random.choice(list(materials.keys()))
#Randomize variables: mass in grams, temperatures in degrees Celsius
mass = round(random.uniform(100.0, 1000.0), 1)
temperature_initial, temperature_final = 0,0
#Pick one of five single-step scenarios
variety1 = random.randint(1,5)
if variety1 == 1:
    #Heat/cool ice (c = 2060 J/kg°C), no phase change
    #Makes certain that initial and final temps are different
    while temperature_initial == temperature_final:
        temperature_initial = round(random.uniform(-50.0, 0.0), 1)
        temperature_final = round(random.uniform(-50.0, 0.0), 1)
    question = "How much heat is needed for a {} g block of ice at {}°C to change temperature to {}°C?".format(mass, temperature_initial, temperature_final)
    print(question)
    answer = (mass/1000) * 2060 * (temperature_final - temperature_initial)
elif variety1 == 2:
    #Heat/cool water (c = 4180 J/kg°C), no phase change
    while temperature_initial == temperature_final:
        temperature_initial = round(random.uniform(0.0, 100.0), 1)
        temperature_final = round(random.uniform(0.0, 100.0), 1)
    question = "How much heat is needed for {} g of water at {}°C to change temperature to {}°C?".format(mass, temperature_initial, temperature_final)
    print(question)
    answer = (mass/1000) * 4180 * (temperature_final - temperature_initial)
elif variety1 == 3:
    #Heat/cool steam (c = 2020 J/kg°C), no phase change
    while temperature_initial == temperature_final:
        temperature_initial = round(random.uniform(100.0, 150.0), 1)
        temperature_final = round(random.uniform(100.0, 150.0), 1)
    question = "How much heat is needed for {} g of steam at {}°C to change temperature to {}°C?".format(mass, temperature_initial, temperature_final)
    print(question)
    answer = (mass/1000) * 2020 * (temperature_final - temperature_initial)
elif variety1 == 4:
    #Melting/freezing at 0°C (latent heat of fusion = 334000 J/kg)
    temperature_initial = 0
    temperature_final = 0
    direction_variety = random.randint(1,2)
    if direction_variety == 1:
        question = "How much heat is needed to change {} g of ice at {}°C to water at {}°C?".format(mass, temperature_initial, temperature_final)
        print(question)
        answer = (mass/1000) * 334000
    elif direction_variety == 2:
        question = "How much heat is needed to change {} g of water at {}°C to ice at {}°C?".format(mass, temperature_initial, temperature_final)
        print(question)
        answer = (mass/1000) * -334000
elif variety1 == 5:
    #Boiling/condensing at 100°C (latent heat of vaporization = 2260000 J/kg)
    temperature_initial = 100
    temperature_final = 100
    direction_variety = random.randint(1,2)
    if direction_variety == 1:
        question = "How much heat is needed to change {} g of water at {}°C to steam at {}°C?".format(mass, temperature_initial, temperature_final)
        print(question)
        answer = (mass/1000) * 2260000
    elif direction_variety == 2:
        question = "How much heat is needed to change {} g of steam at {}°C to water at {}°C?".format(mass, temperature_initial, temperature_final)
        print(question)
        answer = (mass/1000) * -2260000
#Define range of values (in kJ) for random multiple-choice distractors
mini = -1000
maxa = 1000
#The options are displayed in kJ, so the uniqueness check must also be in kJ.
#(Bug fix: the raw Joule value was previously compared against the kJ-scale
#distractors, so a distractor could equal the displayed correct answer.)
answer_kj = int(round(answer/1000))
choice_list = random.sample(range(mini,maxa),3)
while answer_kj in choice_list or answer_kj == -choice_list[2]:
    choice_list = random.sample(range(mini,maxa),3)
#Assign each multiple choice to these four variables
#Option_1 contains the answer; option_4 uses a negated distractor
option_1 = str(answer_kj) + " kJ"
option_2 = str(round(choice_list[0],1)) + " kJ"
option_3 = str(round(choice_list[1],1)) + " kJ"
option_4 = str(round(-1*choice_list[2],1)) + " kJ"
multiple_choice(option_1, option_2, option_3, option_4)
# -
# **Two Step Problem**
# + tags=["hide-input"]
#import math
#import random
#from IPython.display import Javascript, display
#from ipywidgets import widgets, Layout
def generate_new_question(ev):
    # Re-execute this cell so all random values (and the question) regenerate.
    display(Javascript('IPython.notebook.execute_cell()'))
button_generate_question = widgets.Button(description="Generate New Question", layout=Layout(width='20%', height='100%'), button_style='success')
button_generate_question.on_click(generate_new_question)
display(button_generate_question)
#Dictionary of materials and their specific heat capacities (J/kg per degree C)
#NOTE(review): unused in this cell; kept for consistency with sibling cells
materials = {"aluminum": 903, "brass": 376, "carbon": 710, "copper": 385, "glass": 664, "iron": 450, "lead": 130, "silver": 235, "stainless Steal": 460, "tin": 217, "water": 4180, "zinc": 388}
material = random.choice(list(materials.keys()))
#Randomize variables: mass in grams, temperatures in degrees Celsius
mass = round(random.uniform(100.0, 1000.0), 1)
temperature_initial, temperature_final = 0,0
#Pick one of four two-step scenarios (temperature change + one phase change)
variety2 = random.randint(1,4)
if variety2 == 1:
    #Ice temperature change + melt/freeze at 0°C
    direction_variety = random.randint(1,2)
    if direction_variety == 1:
        temperature_initial = round(random.uniform(-50.0, -1.0), 1)
        temperature_final = 0
        question = "How much heat is needed to change {} g of ice at {}°C to water at {}°C?".format(mass, temperature_initial, temperature_final)
        print(question)
        answer = ((mass/1000)*2060*(temperature_final - temperature_initial)) + ((mass/1000) * 334000)
    elif direction_variety == 2:
        temperature_initial = 0
        temperature_final = round(random.uniform(-50.0, -1.0), 1)
        question = "How much heat is needed to change {} g of water at {}°C to ice at {}°C?".format(mass, temperature_initial, temperature_final)
        print(question)
        answer = ((mass/1000)*2060*(temperature_final - temperature_initial)) + ((mass/1000) * -334000)
elif variety2 == 2:
    #Melt/freeze at 0°C + water temperature change
    direction_variety = random.randint(1,2)
    if direction_variety == 1:
        temperature_initial = 0
        temperature_final = round(random.uniform(1.0, 99.0), 1)
        question = "How much heat is needed to change {} g of ice at {}°C to water at {}°C?".format(mass, temperature_initial, temperature_final)
        print(question)
        answer = ((mass/1000)*4180*(temperature_final - temperature_initial)) + ((mass/1000) * 334000)
    elif direction_variety == 2:
        temperature_initial = round(random.uniform(1.0, 99.0), 1)
        temperature_final = 0
        question = "How much heat is needed to change {} g of water at {}°C to ice at {}°C?".format(mass, temperature_initial, temperature_final)
        print(question)
        answer = ((mass/1000)*4180*(temperature_final - temperature_initial)) + ((mass/1000) * -334000)
elif variety2 == 3:
    #Water temperature change + boil/condense at 100°C
    direction_variety = random.randint(1,2)
    if direction_variety == 1:
        temperature_initial = round(random.uniform(1.0, 99.0), 1)
        temperature_final = 100
        question = "How much heat is needed to change {} g of water at {}°C to steam at {}°C?".format(mass, temperature_initial, temperature_final)
        print(question)
        answer = ((mass/1000)*4180*(temperature_final - temperature_initial)) + ((mass/1000) * 2260000)
    elif direction_variety == 2:
        temperature_initial = 100
        temperature_final = round(random.uniform(1.0, 99.0), 1)
        question = "How much heat is needed to change {} g of steam at {}°C to water at {}°C?".format(mass, temperature_initial, temperature_final)
        print(question)
        answer = ((mass/1000)*4180*(temperature_final - temperature_initial)) + ((mass/1000) * -2260000)
elif variety2 == 4:
    #Boil/condense at 100°C + steam temperature change
    direction_variety = random.randint(1,2)
    if direction_variety == 1:
        temperature_initial = 100
        temperature_final = round(random.uniform(101.0, 150.0), 1)
        question = "How much heat is needed to change {} g of water at {}°C to steam at {}°C?".format(mass, temperature_initial, temperature_final)
        print(question)
        answer = ((mass/1000)*2020*(temperature_final - temperature_initial)) + ((mass/1000) * 2260000)
    elif direction_variety == 2:
        temperature_initial = round(random.uniform(101.0, 150.0), 1)
        temperature_final = 100
        question = "How much heat is needed to change {} g of steam at {}°C to water at {}°C?".format(mass, temperature_initial, temperature_final)
        print(question)
        answer = ((mass/1000)*2020*(temperature_final - temperature_initial)) + ((mass/1000) * -2260000)
#Define range of values (in kJ) for random multiple-choice distractors
mini = -1000
maxa = 1000
#The options are displayed in kJ, so the uniqueness check must also be in kJ.
#(Bug fix: the raw Joule value was previously compared against the kJ-scale
#distractors, so a distractor could equal the displayed correct answer.)
answer_kj = int(round(answer/1000))
choice_list = random.sample(range(mini,maxa),3)
while answer_kj in choice_list or answer_kj == -choice_list[2]:
    choice_list = random.sample(range(mini,maxa),3)
#Assign each multiple choice to these four variables
#Option_1 contains the answer; option_4 uses a negated distractor
option_1 = str(answer_kj) + " kJ"
option_2 = str(round(choice_list[0],1)) + " kJ"
option_3 = str(round(choice_list[1],1)) + " kJ"
option_4 = str(round(-1*choice_list[2],1)) + " kJ"
multiple_choice(option_1, option_2, option_3, option_4)
# -
# **Three Step Problem**
# + tags=["hide-input"]
#import math
#import random
#from IPython.display import Javascript, display
#from ipywidgets import widgets, Layout
def generate_new_question(ev):
    # Re-execute this cell so all random values (and the question) regenerate.
    display(Javascript('IPython.notebook.execute_cell()'))
button_generate_question = widgets.Button(description="Generate New Question", layout=Layout(width='20%', height='100%'), button_style='success')
button_generate_question.on_click(generate_new_question)
display(button_generate_question)
#Dictionary of materials and their specific heat capacities (J/kg per degree C)
#NOTE(review): unused in this cell; kept for consistency with sibling cells
materials = {"aluminum": 903, "brass": 376, "carbon": 710, "copper": 385, "glass": 664, "iron": 450, "lead": 130, "silver": 235, "stainless Steal": 460, "tin": 217, "water": 4180, "zinc": 388}
material = random.choice(list(materials.keys()))
#Randomize variables: mass in grams, temperatures in degrees Celsius
mass = round(random.uniform(100.0, 1000.0), 1)
temperature_initial, temperature_final = 0,0
#Pick one of two three-step scenarios (two temperature changes + one phase change)
variety3 = random.randint(1,2)
if variety3 == 1:
    #Ice <-> water crossing 0°C
    direction_variety = random.randint(1,2)
    if direction_variety == 1:
        temperature_initial = round(random.uniform(-50.0, -1.0), 1)
        temperature_final = round(random.uniform(1.0, 99.0), 1)
        question = "How much heat is needed to change {} g of ice at {}°C to water at {}°C?".format(mass, temperature_initial, temperature_final)
        print(question)
        answer = ((mass/1000)*2060*(0 - temperature_initial)) + ((mass/1000)*4180*(temperature_final - 0)) + ((mass/1000) * 334000)
    elif direction_variety == 2:
        temperature_initial = round(random.uniform(1.0, 99.0), 1)
        temperature_final = round(random.uniform(-50.0, -1.0), 1)
        question = "How much heat is needed to change {} g of water at {}°C to ice at {}°C?".format(mass, temperature_initial, temperature_final)
        print(question)
        answer = ((mass/1000)*2060*(temperature_final-0)) + ((mass/1000)*4180*(0 - temperature_initial)) + ((mass/1000) * -334000)
elif variety3 == 2:
    #Water <-> steam crossing 100°C
    direction_variety = random.randint(1,2)
    if direction_variety == 1:
        temperature_initial = round(random.uniform(1.0, 99.0), 1)
        temperature_final = round(random.uniform(101.0, 150.0), 1)
        question = "How much heat is needed to change {} g of water at {}°C to steam at {}°C?".format(mass, temperature_initial, temperature_final)
        print(question)
        answer = ((mass/1000)*4180*(100 - temperature_initial)) + ((mass/1000)*2020*(temperature_final - 100)) + ((mass/1000) * 2260000)
    elif direction_variety == 2:
        temperature_initial = round(random.uniform(101.0, 150.0), 1)
        temperature_final = round(random.uniform(1.0, 99.0), 1)
        question = "How much heat is needed to change {} g of steam at {}°C to water at {}°C?".format(mass, temperature_initial, temperature_final)
        print(question)
        answer = ((mass/1000)*4180*(temperature_final-100)) + ((mass/1000)*2020*(100 - temperature_initial)) + ((mass/1000) * -2260000)
#Define range of values (in kJ) for random multiple-choice distractors
mini = -1000
maxa = 1000
#The options are displayed in kJ, so the uniqueness check must also be in kJ.
#(Bug fix: the raw Joule value was previously compared against the kJ-scale
#distractors, so a distractor could equal the displayed correct answer.)
answer_kj = int(round(answer/1000))
choice_list = random.sample(range(mini,maxa),3)
while answer_kj in choice_list or answer_kj == -choice_list[2]:
    choice_list = random.sample(range(mini,maxa),3)
#Assign each multiple choice to these four variables
#Option_1 contains the answer; option_4 uses a negated distractor
option_1 = str(answer_kj) + " kJ"
option_2 = str(round(choice_list[0],1)) + " kJ"
option_3 = str(round(choice_list[1],1)) + " kJ"
option_4 = str(round(-1*choice_list[2],1)) + " kJ"
multiple_choice(option_1, option_2, option_3, option_4)
# -
# **Four Step Problem**
# + tags=["hide-input"]
#import math
#import random
#from IPython.display import Javascript, display
#from ipywidgets import widgets, Layout
def generate_new_question(ev):
    # Re-execute this cell so all random values (and the question) regenerate.
    display(Javascript('IPython.notebook.execute_cell()'))
button_generate_question = widgets.Button(description="Generate New Question", layout=Layout(width='20%', height='100%'), button_style='success')
button_generate_question.on_click(generate_new_question)
display(button_generate_question)
#Dictionary of materials and their specific heat capacities (J/kg per degree C)
#NOTE(review): unused in this cell; kept for consistency with sibling cells
materials = {"aluminum": 903, "brass": 376, "carbon": 710, "copper": 385, "glass": 664, "iron": 450, "lead": 130, "silver": 235, "stainless Steal": 460, "tin": 217, "water": 4180, "zinc": 388}
material = random.choice(list(materials.keys()))
#Randomize variables: mass in grams, temperatures in degrees Celsius
mass = round(random.uniform(100.0, 1000.0), 1)
temperature_initial, temperature_final = 0,0
#Pick one of two four-step scenarios (both phase changes + two temperature changes)
variety4 = random.randint(1,2)
if variety4 == 1:
    #Ice below 0°C <-> steam at exactly 100°C
    direction_variety = random.randint(1,2)
    if direction_variety == 1:
        temperature_initial = round(random.uniform(-50.0, -1.0), 1)
        temperature_final = 100
        question = "How much heat is needed to change {} g of ice at {}°C to steam at {}°C?".format(mass, temperature_initial, temperature_final)
        print(question)
        answer = ((mass/1000)*2060*(0 - temperature_initial)) + ((mass/1000)*4180*(temperature_final - 0)) + ((mass/1000) * 334000) + ((mass/1000) * 2260000)
    elif direction_variety == 2:
        temperature_initial = 100
        temperature_final = round(random.uniform(-50.0, -1.0), 1)
        question = "How much heat is needed to change {} g of steam at {}°C to ice at {}°C?".format(mass, temperature_initial, temperature_final)
        print(question)
        answer = ((mass/1000)*2060*(temperature_final-0)) + ((mass/1000)*4180*(0 - temperature_initial)) + ((mass/1000) * -334000) + ((mass/1000) * -2260000)
elif variety4 == 2:
    #Ice at exactly 0°C <-> steam above 100°C
    direction_variety = random.randint(1,2)
    if direction_variety == 1:
        temperature_initial = 0
        temperature_final = round(random.uniform(100.0, 150.0), 1)
        question = "How much heat is needed to change {} g of ice at {}°C to steam at {}°C?".format(mass, temperature_initial, temperature_final)
        print(question)
        answer = ((mass/1000)*2020*(temperature_final - 100)) + ((mass/1000)*4180*(100 - temperature_initial)) + ((mass/1000) * 334000) + ((mass/1000) * 2260000)
    elif direction_variety == 2:
        temperature_initial = round(random.uniform(100.0, 150.0), 1)
        temperature_final = 0
        question = "How much heat is needed to change {} g of steam at {}°C to ice at {}°C?".format(mass, temperature_initial, temperature_final)
        print(question)
        answer = ((mass/1000)*2020*(100-temperature_initial)) + ((mass/1000)*4180*(temperature_final-100)) + ((mass/1000) * -334000) + ((mass/1000) * -2260000)
#Define range of values (in kJ) for random multiple-choice distractors
mini = -1000
maxa = 1000
#The options are displayed in kJ, so the uniqueness check must also be in kJ.
#(Bug fix: the raw Joule value was previously compared against the kJ-scale
#distractors, so a distractor could equal the displayed correct answer.)
answer_kj = int(round(answer/1000))
choice_list = random.sample(range(mini,maxa),3)
while answer_kj in choice_list or answer_kj == -choice_list[2]:
    choice_list = random.sample(range(mini,maxa),3)
#Assign each multiple choice to these four variables
#Option_1 contains the answer; option_4 uses a negated distractor
option_1 = str(answer_kj) + " kJ"
option_2 = str(round(choice_list[0],1)) + " kJ"
option_3 = str(round(choice_list[1],1)) + " kJ"
option_4 = str(round(-1*choice_list[2],1)) + " kJ"
multiple_choice(option_1, option_2, option_3, option_4)
# -
# **Five Step Problem**
# + tags=["hide-input"]
#import math
#import random
#from IPython.display import Javascript, display
#from ipywidgets import widgets, Layout
def generate_new_question(ev):
    # Re-execute this cell so all random values (and the question) regenerate.
    display(Javascript('IPython.notebook.execute_cell()'))
button_generate_question = widgets.Button(description="Generate New Question", layout=Layout(width='20%', height='100%'), button_style='success')
button_generate_question.on_click(generate_new_question)
display(button_generate_question)
#Dictionary of materials and their specific heat capacities (J/kg per degree C)
#NOTE(review): unused in this cell; kept for consistency with sibling cells
materials = {"aluminum": 903, "brass": 376, "carbon": 710, "copper": 385, "glass": 664, "iron": 450, "lead": 130, "silver": 235, "stainless Steal": 460, "tin": 217, "water": 4180, "zinc": 388}
chosen_material = random.choice(list(materials.keys()))
#Randomize variables: mass in grams, temperatures in degrees Celsius
mass = round(random.uniform(100.0, 1000.0), 1)
temperature_initial, temperature_final = 0,0
#Five steps: ice below 0°C <-> steam above 100°C (both phase changes + three temperature changes)
direction_variety = random.randint(1,2)
if direction_variety == 1:
    temperature_initial = round(random.uniform(-50.0, -1.0), 1)
    temperature_final = round(random.uniform(101.0, 150.0), 1)
    question = "How much heat is needed to change {} g of ice at {}°C to steam at {}°C?".format(mass, temperature_initial, temperature_final)
    print(question)
    answer = ((mass/1000)*2060*(0 - temperature_initial)) + ((mass/1000)*4180*(100 - 0)) + ((mass/1000)*2020*(temperature_final - 100)) + ((mass/1000) * 334000) + ((mass/1000) * 2260000)
elif direction_variety == 2:
    temperature_initial = round(random.uniform(101.0, 150.0), 1)
    temperature_final = round(random.uniform(-50.0, -1.0), 1)
    question = "How much heat is needed to change {} g of steam at {}°C to ice at {}°C?".format(mass, temperature_initial, temperature_final)
    print(question)
    answer = ((mass/1000)*2020*(100 - temperature_initial)) + ((mass/1000)*4180*(0 - 100)) + ((mass/1000)*2060*(temperature_final - 0)) + ((mass/1000) * -334000) + ((mass/1000) * -2260000)
#Define range of values (in kJ) for random multiple-choice distractors
mini = -1000
maxa = 1000
#The options are displayed in kJ, so the uniqueness check must also be in kJ.
#(Bug fix: the raw Joule value was previously compared against the kJ-scale
#distractors, so a distractor could equal the displayed correct answer.)
answer_kj = int(round(answer/1000))
choice_list = random.sample(range(mini,maxa),3)
while answer_kj in choice_list or answer_kj == -choice_list[2]:
    choice_list = random.sample(range(mini,maxa),3)
#Assign each multiple choice to these four variables
#Option_1 contains the answer; option_4 uses a negated distractor
option_1 = str(answer_kj) + " kJ"
option_2 = str(round(choice_list[0],1)) + " kJ"
option_3 = str(round(choice_list[1],1)) + " kJ"
option_4 = str(round(-1*choice_list[2],1)) + " kJ"
multiple_choice(option_1, option_2, option_3, option_4)
# -
# **Mixed Step Problems**
#
# In the dropdown menus below, select how many steps are required and select the correct amount of heat required for each question.
#
# **Hint:** Have some scrap-paper nearby for the calculations and be sure to sketch a diagram of each scenario to determine how many steps are required.
# + tags=["hide-input"]
#import math
#import random
#from IPython.display import Javascript, display
#from ipywidgets import widgets, Layout
def generate_new_question(ev):
display(Javascript('IPython.notebook.execute_cell()'))
button_generate_question = widgets.Button(description="Generate New Question", layout=Layout(width='20%', height='100%'), button_style='success')
button_generate_question.on_click(generate_new_question)
display(button_generate_question)
#Randomize variables
mass = round(random.uniform(100.0, 1000.0), 1)
temperature_initial, temperature_final = 0,0
#Determine question type
question_type = random.randint(1,5)
if question_type == 1:
#Type 1: One Step
steps = "One Step"
type1_variety = random.randint(1,5)
if type1_variety == 1:
#Makes certain that initial and final temps are different
while temperature_initial == temperature_final:
temperature_initial = round(random.uniform(-50.0, 0.0), 1)
temperature_final = round(random.uniform(-50.0, 0.0), 1)
question = "How much heat is needed for a {} g block of ice at {}°C to change temperature to {}°C?".format(mass, temperature_initial, temperature_final)
print(question)
answer = (mass/1000) * 2060 * (temperature_final - temperature_initial)
elif type1_variety == 2:
while temperature_initial == temperature_final:
temperature_initial = round(random.uniform(0.0, 100.0), 1)
temperature_final = round(random.uniform(0.0, 100.0), 1)
question = "How much heat is needed for {} g of water at {}°C to change temperature to {}°C?".format(mass, temperature_initial, temperature_final)
print(question)
answer = (mass/1000) * 4180 * (temperature_final - temperature_initial)
elif type1_variety == 3:
while temperature_initial == temperature_final:
temperature_initial = round(random.uniform(100.0, 150.0), 1)
temperature_final = round(random.uniform(100.0, 150.0), 1)
question = "How much heat is needed for {} g of steam at {}°C to change temperature to {}°C?".format(mass, temperature_initial, temperature_final)
print(question)
answer = (mass/1000) * 2020 * (temperature_final - temperature_initial)
elif type1_variety == 4:
temperature_initial = 0
temperature_final = 0
direction_variety = random.randint(1,2)
if direction_variety == 1:
question = "How much heat is needed to change {} g of ice at {}°C to water at {}°C?".format(mass, temperature_initial, temperature_final)
print(question)
answer = (mass/1000) * 334000
elif direction_variety == 2:
question = "How much heat is needed to change {} g of water at {}°C to ice at {}°C?".format(mass, temperature_initial, temperature_final)
print(question)
answer = (mass/1000) * -334000
elif type1_variety == 5:
temperature_initial = 100
temperature_final = 100
direction_variety = random.randint(1,2)
if direction_variety == 1:
question = "How much heat is needed to change {} g of water at {}°C to steam at {}°C?".format(mass, temperature_initial, temperature_final)
print(question)
answer = (mass/1000) * 2260000
elif direction_variety == 2:
question = "How much heat is needed to change {} g of steam at {}°C to water at {}°C?".format(mass, temperature_initial, temperature_final)
print(question)
answer = (mass/1000) * -2260000
elif question_type == 2:
#Type 2: Two Steps
steps = "Two Steps"
type2_variety = random.randint(1,4)
if type2_variety == 1:
direction_variety = random.randint(1,2)
if direction_variety == 1:
temperature_initial = round(random.uniform(-50.0, -1.0), 1)
temperature_final = 0
question = "How much heat is needed to change {} g of ice at {}°C to water at {}°C?".format(mass, temperature_initial, temperature_final)
print(question)
answer = ((mass/1000)*2060*(temperature_final - temperature_initial)) + ((mass/1000) * 334000)
elif direction_variety == 2:
temperature_initial = 0
temperature_final = round(random.uniform(-50.0, -1.0), 1)
question = "How much heat is needed to change {} g of water at {}°C to ice at {}°C?".format(mass, temperature_initial, temperature_final)
print(question)
answer = ((mass/1000)*2060*(temperature_final - temperature_initial)) + ((mass/1000) * -334000)
elif type2_variety == 2:
direction_variety = random.randint(1,2)
if direction_variety == 1:
temperature_initial = 0
temperature_final = round(random.uniform(1.0, 99.0), 1)
question = "How much heat is needed to change {} g of ice at {}°C to water at {}°C?".format(mass, temperature_initial, temperature_final)
print(question)
answer = ((mass/1000)*4180*(temperature_final - temperature_initial)) + ((mass/1000) * 334000)
elif direction_variety == 2:
temperature_initial = round(random.uniform(1.0, 99.0), 1)
temperature_final = 0
question = "How much heat is needed to change {} g of water at {}°C to ice at {}°C?".format(mass, temperature_initial, temperature_final)
print(question)
answer = ((mass/1000)*4180*(temperature_final - temperature_initial)) + ((mass/1000) * -334000)
elif type2_variety == 3:
direction_variety = random.randint(1,2)
if direction_variety == 1:
temperature_initial = round(random.uniform(1.0, 99.0), 1)
temperature_final = 100
question = "How much heat is needed to change {} g of water at {}°C to steam at {}°C?".format(mass, temperature_initial, temperature_final)
print(question)
answer = ((mass/1000)*4180*(temperature_final - temperature_initial)) + ((mass/1000) * 2260000)
elif direction_variety == 2:
temperature_initial = 100
temperature_final = round(random.uniform(1.0, 99.0), 1)
question = "How much heat is needed to change {} g of steam at {}°C to water at {}°C?".format(mass, temperature_initial, temperature_final)
print(question)
answer = ((mass/1000)*4180*(temperature_final - temperature_initial)) + ((mass/1000) * -2260000)
elif type2_variety == 4:
direction_variety = random.randint(1,2)
if direction_variety == 1:
temperature_initial = 100
temperature_final = round(random.uniform(101.0, 150.0), 1)
question = "How much heat is needed to change {} g of water at {}°C to steam at {}°C?".format(mass, temperature_initial, temperature_final)
print(question)
answer = ((mass/1000)*2020*(temperature_final - temperature_initial)) + ((mass/1000) * 2260000)
elif direction_variety == 2:
temperature_initial = round(random.uniform(101.0, 150.0), 1)
temperature_final = 100
question = "How much heat is needed to change {} g of steam at {}°C to water at {}°C?".format(mass, temperature_initial, temperature_final)
print(question)
answer = ((mass/1000)*2020*(temperature_final - temperature_initial)) + ((mass/1000) * -2260000)
elif question_type == 3:
#Type 3: Three Steps
steps = "Three Steps"
type3_variety = random.randint(1,2)
if type3_variety == 1:
direction_variety = random.randint(1,2)
if direction_variety == 1:
temperature_initial = round(random.uniform(-50.0, -1.0), 1)
temperature_final = round(random.uniform(1.0, 99.0), 1)
question = "How much heat is needed to change {} g of ice at {}°C to water at {}°C?".format(mass, temperature_initial, temperature_final)
print(question)
answer = ((mass/1000)*2060*(0 - temperature_initial)) + ((mass/1000)*4180*(temperature_final - 0)) + ((mass/1000) * 334000)
elif direction_variety == 2:
temperature_initial = round(random.uniform(1.0, 99.0), 1)
temperature_final = round(random.uniform(-50.0, -1.0), 1)
question = "How much heat is needed to change {} g of water at {}°C to ice at {}°C?".format(mass, temperature_initial, temperature_final)
print(question)
answer = ((mass/1000)*2060*(temperature_final-0)) + ((mass/1000)*4180*(0 - temperature_initial)) + ((mass/1000) * -334000)
elif type3_variety == 2:
direction_variety = random.randint(1,2)
if direction_variety == 1:
temperature_initial = round(random.uniform(1.0, 99.0), 1)
temperature_final = round(random.uniform(101.0, 150.0), 1)
question = "How much heat is needed to change {} g of water at {}°C to steam at {}°C?".format(mass, temperature_initial, temperature_final)
print(question)
answer = ((mass/1000)*4180*(100 - temperature_initial)) + ((mass/1000)*2020*(temperature_final - 100)) + ((mass/1000) * 2260000)
elif direction_variety == 2:
temperature_initial = round(random.uniform(101.0, 150.0), 1)
temperature_final = round(random.uniform(1.0, 99.0), 1)
question = "How much heat is needed to change {} g of steam at {}°C to water at {}°C?".format(mass, temperature_initial, temperature_final)
print(question)
answer = ((mass/1000)*4180*(temperature_final-100)) + ((mass/1000)*2020*(100 - temperature_initial)) + ((mass/1000) * -2260000)
elif question_type == 4:
#Type 4: Four Steps
steps = "Four Steps"
type4_variety = random.randint(1,2)
if type4_variety == 1:
direction_variety = random.randint(1,2)
if direction_variety == 1:
temperature_initial = round(random.uniform(-50.0, -1.0), 1)
temperature_final = 100
question = "How much heat is needed to change {} g of ice at {}°C to steam at {}°C?".format(mass, temperature_initial, temperature_final)
print(question)
answer = ((mass/1000)*2060*(0 - temperature_initial)) + ((mass/1000)*4180*(temperature_final - 0)) + ((mass/1000) * 334000) + ((mass/1000) * 2260000)
elif direction_variety == 2:
temperature_initial = 100
temperature_final = round(random.uniform(-50.0, -1.0), 1)
question = "How much heat is needed to change {} g of steam at {}°C to ice at {}°C?".format(mass, temperature_initial, temperature_final)
print(question)
answer = ((mass/1000)*2060*(temperature_final-0)) + ((mass/1000)*4180*(0 - temperature_initial)) + ((mass/1000) * -334000) + ((mass/1000) * -2260000)
elif type4_variety == 2:
direction_variety = random.randint(1,2)
if direction_variety == 1:
temperature_initial = 0
temperature_final = round(random.uniform(100.0, 150.0), 1)
question = "How much heat is needed to change {} g of ice at {}°C to steam at {}°C?".format(mass, temperature_initial, temperature_final)
print(question)
answer = ((mass/1000)*2020*(temperature_final - 100)) + ((mass/1000)*4180*(100 - temperature_initial)) + ((mass/1000) * 334000) + ((mass/1000) * 2260000)
elif direction_variety == 2:
temperature_initial = round(random.uniform(100.0, 150.0), 1)
temperature_final = 0
question = "How much heat is needed to change {} g of steam at {}°C to ice at {}°C?".format(mass, temperature_initial, temperature_final)
print(question)
answer = ((mass/1000)*2020*(100-temperature_initial)) + ((mass/1000)*4180*(temperature_final-100)) + ((mass/1000) * -334000) + ((mass/1000) * -2260000)
elif question_type == 5:
#Type 5: Five Steps
steps = "Five Steps"
direction_variety = random.randint(1,2)
if direction_variety == 1:
temperature_initial = round(random.uniform(-50.0, -1.0), 1)
temperature_final = round(random.uniform(101.0, 150.0), 1)
question = "How much heat is needed to change {} g of ice at {}°C to steam at {}°C?".format(mass, temperature_initial, temperature_final)
print(question)
answer = ((mass/1000)*2060*(0 - temperature_initial)) + ((mass/1000)*4180*(100 - 0)) + ((mass/1000)*2020*(temperature_final - 100)) + ((mass/1000) * 334000) + ((mass/1000) * 2260000)
elif direction_variety == 2:
temperature_initial = round(random.uniform(101.0, 150.0), 1)
temperature_final = round(random.uniform(-50.0, -1.0), 1)
question = "How much heat is needed to change {} g of steam at {}°C to ice at {}°C?".format(mass, temperature_initial, temperature_final)
print(question)
answer = ((mass/1000)*2020*(100 - temperature_initial)) + ((mass/1000)*4180*(0 - 100)) + ((mass/1000)*2060*(temperature_final - 0)) + ((mass/1000) * -334000) + ((mass/1000) * -2260000)
#Define range of values for random multiple choices
mini = -1000
maxa = 1000
#Create three choices that are unique (and not equal to the answer)
choice_list = random.sample(range(mini,maxa),3)
while choice_list.count(int(answer)) >= 1:
choice_list = random.sample(range(mini,maxa),3)
#Assign each multiple choice to these four variables
#Option_1 contains the answer
option_1 = str(int(round(answer/1000))) + " kJ"
option_2 = str(round(choice_list[0],1)) + " kJ"
option_3 = str(round(choice_list[1],1)) + " kJ"
option_4 = str(round(-1*choice_list[2],1)) + " kJ"
option_list = [option_1, option_2, option_3, option_4]
correct_answer = option_list[0]
#Randomly shuffle the options
random.shuffle(option_list)
#Create dropdown menus
dropdown1_1 = widgets.Dropdown(options={' ':0,'One Step': 1, 'Two Steps': 2, 'Three Steps': 3, 'Four Steps': 4, 'Five Steps': 5}, value=0, description='Steps',)
dropdown1_2 = widgets.Dropdown(options={' ':0,option_list[0]: 1, option_list[1]: 2, option_list[2]: 3, option_list[3]: 4}, value=0, description='Answer',)
#Display menus as 1x2 table
container1_1 = widgets.HBox(children=[dropdown1_1, dropdown1_2])
display(container1_1), print(" ", end='\r')
#Evaluate input
def check_answer_dropdown(b):
    """Grade the two dropdown selections whenever either one changes."""
    chosen_steps = dropdown1_1.label
    chosen_answer = dropdown1_2.label
    # Both selections correct -> praise; both attempted -> retry prompt;
    # otherwise clear the status line.
    if chosen_steps == steps and chosen_answer == correct_answer:
        print("Correct! ", end='\r')
    elif chosen_steps != ' ' and chosen_answer != ' ':
        print("Try again.", end='\r')
    else:
        print(" ", end='\r')
# Re-grade on every change of either dropdown.
dropdown1_1.observe(check_answer_dropdown, names='value')
dropdown1_2.observe(check_answer_dropdown, names='value')
# -
# ## Conclusions
#
# * The **specific heat capacity** of a material is an empirically determined value characteristic of a particular material. It is defined as the amount of heat needed to raise the temperature of 1 kg of the material by 1°C.
# * We use the formula $Q=mc\Delta T$ to calculate the amount of heat required to change the temperature of a material in which there is no change of phase.
# * The **latent heat of fusion** ($H_f$) is defined as the amount of heat needed to melt 1 kg of a solid without a change in temperature.
# * The **latent heat of vaporization** ($H_v$) is defined as the amount of heat needed to vaporise 1 kg of a liquid without a change in temperature.
# * We use the formula $Q=mH_f$ to calculate the heat required to change a material from a solid to a liquid, or from a liquid to a solid.
# * We use the formula $Q=mH_v$ to calculate the heat required to change a material from a liquid to a gas, or from a gas to a liquid.
# * If heat is being taken away, then a negative sign must be placed in front of $H_f$ and $H_v$.
# * We use a combination of the above formulae to compute the heat required to change a material from an initial temperature to a final temperature when one (or more) phase changes occur across a range of temperatures.
#
# Images in this notebook represent original artwork.
# + tags=["hide-input"] language="html"
#
# <script>
# function code_toggle() {
# if (code_shown){
# $('div.input').hide('500');
# $('#toggleButton').val('Show Code')
# } else {
# $('div.input').show('500');
# $('#toggleButton').val('Hide Code')
# }
# code_shown = !code_shown
# }
#
# $( document ).ready(function(){
# code_shown=false;
# $('div.input').hide()
# });
# </script>
# <form action="javascript:code_toggle()"><input type="submit" id="toggleButton" value="Show Code"></form>
# -
# [](https://github.com/callysto/curriculum-notebooks/blob/master/LICENSE.md)
| _build/jupyter_execute/curriculum-notebooks/Science/SpecificAndLatentHeat/specific-and-latent-heat.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# language: python
# name: python3
# ---
# <img src="images/strathsdr_banner.png" align="left">
# # RFSoC QPSK Transceiver
#
# ----
#
# <div class="alert alert-box alert-info">
# Please use Jupyter Labs http://board_ip_address/lab for this notebook.
# </div>
#
# This design is a full QPSK transceiver, which transmits and receives randomly-generated pulse-shaped symbols with full carrier and timing synchronisation. PYNQ is used to visualise the data at both the DAC and ADC side of the RFSoC data converters, as well as visualising various DSP stages throughout the transmit and receive signal path.
#
#
# ## Contents
# * [Introduction](#introduction)
# * [Hardware Setup](#hardware-setup)
# * [Software Setup](#software-setup)
# * [RFSoC QPSK Transceiver](#RFSoC-QPSK-Transceiver)
# * [Inspecting the transmit path](#Inspecting-the-transmit-path)
# * [Inspecting the receive path](#Inspecting-the-receive-path)
# * [Reconfigure the RF Data Path](#Reconfigure-the-RF-Data-Path)
# * [Conclusion](#conclusion)
#
# ## References
# * [Xilinx, Inc, "USP RF Data Converter: LogiCORE IP Product Guide", PG269, v2.3, June 2020](https://www.xilinx.com/support/documentation/ip_documentation/usp_rf_data_converter/v2_3/pg269-rf-data-converter.pdf)
#
# ## Revision History
# * **v1.0** | 02/11/2020 | RFSoC QPSK demonstrator
# * **v1.1** | 23/02/2021 | Reformatted notebook
# ----
# ## Introduction <a class="anchor" id="introduction"></a>
# The RFSoC2x2 development board can be configured as a simple QPSK transceiver. The RFSoC QPSK demonstrator uses the RFSoC's RF Data Converters (RF DCs) to transmit and receive QPSK modulated waveforms. There are setup steps for hardware and software that you must follow.
#
# ### Hardware Setup <a class="anchor" id="hardware-setup"></a>
# Your RFSoC2x2 development board can be configured to host one QPSK transceiver channel. To setup your board for this demonstration, you can connect a channel in loopback as shown in [Figure 1](#fig-1).
#
# The default loopback configuration is connected as follows:
# * Channel 0: DAC2 to ADC2
#
# Use the image below for further guidance.
#
# <a class="anchor" id="fig-1"></a>
# <figure>
# <img src='images/rfsoc2x2_setup.jpg' height='50%' width='50%'/>
# <figcaption><b>Figure 1: RFSoC2x2 development board setup in loopback mode.</b></figcaption>
# </figure>
#
# **Do not** attach an antenna to any SMA interfaces labelled DAC.
#
# <div class="alert alert-box alert-danger">
# <b>Caution:</b>
# In this demonstration, we generate tones using the RFSoC development board. Your device should be setup in loopback mode. You should understand that the RFSoC platform can also transmit RF signals wirelessly. Remember that unlicensed wireless transmission of RF signals may be illegal in your geographical location. Radio signals may also interfere with nearby devices, such as pacemakers and emergency radio equipment. Note that it is also illegal to intercept and decode particular RF signals. If you are unsure, please seek professional support.
# </div>
#
# ### Software Setup <a class="anchor" id="software-setup"></a>
# Start by including the `xrfdc` drivers so we can configure the RF data converters, `ipywidgets` to make interactive controls, `numpy` for numerical analysis, and `rfsoc_qpsk` for the QPSK design.
# +
import xrfdc
import ipywidgets as ipw
import numpy as np
from rfsoc_qpsk.qpsk_overlay import QpskOverlay
# -
# We can now initialise the overlay by downloading the bitstream and executing the drivers.
ol = QpskOverlay()  # downloads the bitstream and initialises the drivers
# For a quick reference of all the things you can do with the QPSK overlay, ask the Python interpreter!
# Pop open a new console (right click here and select "_New Console for Notebook_") and type `ol.plot_group?` to query a method of our new overlay. Tab completion works for discovery too.
#
# ----
# ## RFSoC QPSK Transceiver <a class="anchor" id="RFSoC-QPSK-Transceiver"></a>
# We will now explore three interesting components of the QPSK demonstrator. Initially, the transmit path will be inspected and then the same inspection will also be carried out on the receive path. Finally, we will explore the control capabilities of our design and determine how these properties affect the transmit and receive signals.
#
# ### Inspecting the transmit path <a class="anchor" id="Inspecting-the-transmit-path"></a>
#
# There are 3 main steps in the QPSK transmit IP signal path:
#
# 1. Random symbol generation
# 2. Pulse shaping
# 3. Interpolation
#
# This design "taps off" this path after the first two stages so we can inspect the signals in Jupyter Lab.
# The RF data converter can be reconfigured from Python too - we'll look at that [later](#Reconfigure-the-RF-Data-Path).
#
# 
#
# First we plot our raw QPSK symbols in the time domain.
# Stream the raw QPSK symbols (binary time-domain view) into a live plot.
ol.plot_group(
    'tx_symbol',             # Plot group's ID
    ['time-binary'],         # List of plot types chosen from:
                             # ['time','time-binary','frequency','constellation']
    ol.qpsk_tx.get_symbols,  # Function to grab a buffer of samples
    500                      # Sample frequency (Hz)
)
# We can stream new samples into this plot using the play/stop buttons. By default the samples are stored in a rolling buffer, so we can keep this running for a while without worrying too much about total memory usage. As you continue to work through this notebook though, you should stop any previous plot streams to keep your browser happy.
#
# For the pulse shaped signal, let's have a look at the frequency domain too. This FFT is accelerated in the PL so we pass in an extra argument, `get_freq_data`, telling the plotting library how to grab the accelerated FFT data.
# Plot the pulse-shaped signal in both time and frequency domains; the FFT is
# accelerated in the PL, so get_freq_data supplies the hardware FFT samples.
ol.plot_group('tx_shaped', ['time', 'frequency'], ol.qpsk_tx.get_shaped_time, 4000,
              get_freq_data=ol.qpsk_tx.get_shaped_fft)
# ### Inspecting the receive path <a class="anchor" id="Inspecting-the-receive-path"></a>
#
# The receive side is nearly the inverse of the transmit path (there's just some extra work for properly synchronising).
#
# Again, there are taps off from a few places in the signal path:
#
# 1. After decimation
# 2. After coarse synchronisation
# 3. After root-raised-cosine filtering
# 4. and the data output
#
# 
#
# Because there are a few different intermediate stages, let's reuse the same cells to plot any of them on-demand.
#
# First we describe how to generate plots for each of the intermediate taps.
# +
# Domains to display for every receive-side tap.
rx_domains = ['time', 'frequency', 'constellation']

# PEP 8 (E731): named functions instead of lambda assignments — same names,
# same behaviour, but with docstrings and useful tracebacks.

def plot_rx_decimated():
    """Plot the receive signal tapped after decimation."""
    return ol.plot_group(
        'rx_decimated', rx_domains, ol.qpsk_rx.get_decimated, 4000
    )

def plot_rx_coarse_sync():
    """Plot the receive signal tapped after coarse synchronisation."""
    return ol.plot_group(
        'rx_coarse_sync', rx_domains, ol.qpsk_rx.get_coarse_synced, 4000
    )

def plot_rx_rrced():
    """Plot the receive signal tapped after root-raised-cosine filtering."""
    return ol.plot_group(
        'rx_rrced', rx_domains, ol.qpsk_rx.get_rrced, 16000
    )
# -
# Now we can just execute the function whichever tap you want. For example, let's look at the tap after decimation below.
# Inspect the tap after decimation (swap in plot_rx_coarse_sync / plot_rx_rrced as desired).
plot_rx_decimated()
# And for the final plot, let's look at the synchronised output data. To recover the bits we need to take our sampled, synchronised signal (seen in the constellation plot below) and decide which quadrant each symbol has fallen into.
# +
def classify_bits(frame):
    """Quantise complex symbols onto the four QPSK constellation points.

    Each I and Q component maps to 1 when strictly positive and 0
    otherwise, so every symbol lands on one of 0, 1, 1j, or 1+1j.

    Parameters
    ----------
    frame : array-like of complex
        Sampled, synchronised symbols.

    Returns
    -------
    numpy.ndarray of complex
        The quantised symbols, one per input sample.
    """
    # Vectorised comparison replaces the per-symbol Python loop (map +
    # np.fromiter); booleans promote to 0/1 in the complex arithmetic.
    frame = np.asarray(frame)
    return (np.real(frame) > 0) + 1j * (np.imag(frame) > 0)
# Stream the recovered data: constellation view plus a quantised binary time view.
ol.plot_group(
    'rx_data',
    ['constellation', 'time-binary'],
    lambda : classify_bits(ol.qpsk_rx.get_data()),  # quantised bits for the time view
    500,
    get_const_data=ol.qpsk_rx.get_data              # raw symbols for the constellation
)
# -
# Now is a good time to note that Jupyter Lab can manage multiple windows. Next we'll be playing with the RF settings, so you may want to make a new window for the constellation plot and leave it streaming. Make a new window for the plot by right clicking the plot and selecting "_Create New View for Output_". Feel free to snap this new window to the side by clicking the window's title ("Output View") and dragging it to the side of the web page. Now we can play with RF settings further down the notebook while still getting instant feedback about our received signal — pretty neat!
# ### Reconfigure the RF Data Path <a class="anchor" id="Reconfigure-the-RF-Data-Path"></a>
#
# #### Transmit Power
#
# The QPSK bitstream includes a digital attenuator on the transmit path. We can configure this via a memory-mapped register.
#
# Let's use this as an example of interactive reconfiguration because the effects are quite clear in the constellation diagram. Try reducing the output power by setting a gain between 0 (off) and 1 (full scale).
ol.qpsk_tx.set_gain(0.6)  # attenuate the transmitter to 60% of full scale
# The constellation plot should shrink in a little towards the origin. Let's return to full power now.
ol.qpsk_tx.set_gain(1)  # restore full-scale output
# We can use some `ipywidgets` to make a more natural interface to control the gain too. Let's expose this as a slider with a callback to the `set_gain` function.
# +
# Selection slider exposing a few discrete transmit-gain settings.
pow_slider = ipw.SelectionSlider(
    options=[0.1, 0.3, 0.6, 1],
    value=1,
    description='',
)
accordion = ipw.Accordion(children=[pow_slider])
accordion.set_title(0, 'Transmitter power')
display(accordion)
def unwrap_slider_val(callback):
    """Adapt *callback* so it can consume an ipywidgets change event.

    The returned function extracts the new value from the change
    dictionary and forwards it to *callback*.
    """
    def _handler(change):
        return callback(change['new'])
    return _handler
pow_slider.observe(unwrap_slider_val(ol.qpsk_tx.set_gain), names='value')  # push slider changes straight to the TX gain
# -
# #### Transmit and Receive Mixer Settings
#
# So far the RF Data Converter settings have been controlled by `QpskOverlay` but we can also reconfigure these on the fly in python with the `xrfdc` driver.
#
# First of all, consider the DAC block used for the transmit side.
#
# 
#
# There's a lot of scope for reconfiguration here — see the [IP product guide](https://www.xilinx.com/support/documentation/ip_documentation/usp_rf_data_converter/v2_1/pg269-rf-data-converter.pdf) or type `ol.dac_block?` for more details.
#
# As an example, let's play with the mixer settings. Try changing the DAC's mixer frequency from the default 1000 MHz to 900 MHz.
# +
def update_nco(rf_block, nco_freq):
    """Retune the mixer NCO of an RF data converter block.

    Reads the block's current mixer settings, overwrites the 'Freq'
    entry with *nco_freq* (MHz), writes the settings back, and issues a
    mixer update event so the new frequency takes effect.
    """
    settings = rf_block.MixerSettings
    settings['Freq'] = nco_freq
    rf_block.MixerSettings = settings
    rf_block.UpdateEvent(xrfdc.EVENT_MIXER)
update_nco(ol.dac_block, 900)  # move the DAC mixer from the default 1000 MHz to 900 MHz
# -
# The received signal should disappear until we configure the receiver's ADC to match the new carrier frequency. Set the new carrier frequency for the ADC side mixer below.
update_nco(ol.adc_block, 900)  # retune the ADC mixer to match the new carrier
# Again, we can use `ipywidgets` to make an interactive interface for these settings. Below we set up an RX slider and a TX slider that are linked together so we can scrub along the spectrum keeping both sides in near lock-step. If you've got any analog RF filters to hand, try them out with different mixer settings!
# +
def new_nco_slider(title):
    """Build a FloatSlider for choosing an NCO mixer frequency in MHz."""
    slider = ipw.FloatSlider(
        value=1000,              # default carrier frequency (MHz)
        min=620,
        max=1220,
        step=20,
        description=title,
        disabled=False,
        continuous_update=False,  # only fire once the user releases the handle
        orientation='horizontal',
        readout=True,
        readout_format='.1f',
    )
    return slider
tx_nco_slider = new_nco_slider('TX (MHz)')
rx_nco_slider = new_nco_slider('RX (MHz)')
# Group both sliders under a single accordion section.
accordion = ipw.Accordion(children=[ipw.VBox([tx_nco_slider, rx_nco_slider])])
accordion.set_title(0, 'Carrier frequency')
display(accordion)
# Keep RX and TX slider values locked together.
ipw.link((rx_nco_slider, 'value'), (tx_nco_slider, 'value'))
tx_nco_slider.observe(
    unwrap_slider_val(lambda v: update_nco(ol.dac_block, v)),
    names='value'
)
rx_nco_slider.observe(
    unwrap_slider_val(lambda v: update_nco(ol.adc_block, v)),
    names='value'
)
# -
# ## Conclusion <a class="anchor" id="conclusion"></a>
#
# We've now led you through how we can interact with the RF data converters from PYNQ, using a QPSK transmit/receive loopback system as an example. More exhaustively, we've shown:
#
# * Use of the programmable logic in the context of a real RF application
# * Performing on-board introspection of an RF design:
# * Leveraging existing plotting libraries from the Python ecosystem
# * Interacting with a QPSK hardware design
# * Configuring the signal path, using transmit power as an example
# * Configuring the RF data converter, using TX/RX mixer frequencies as an example
| boards/RFSoC2x2/rfsoc_qpsk/notebooks/rfsoc_qpsk_demonstrator.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# language: python
# name: python3
# ---
# ## A pure python neural network model of reflex conditioning in animal.
# **Note:** this notebook requires recent (bleeding-edge) versions of Lasagne and Theano.
# If you can open this notebook, install both packages from their development branches;
# see each package's documentation below for installation instructions.
# This model uses the following packages:
# * [Lasagne](http://lasagne.readthedocs.io/en/stable/index.html) :: A neural network framework built on Theano
# * [Numpy](http://www.numpy.org/) :: Scientific computing packaging
# * [Pandas](https://pandas.pydata.org/pandas-docs/stable/index.html) :: Data manipulation, munging, formatting library
# * [Theano](http://www.deeplearning.net/software/theano/) :: Deep learning framework
# * [Matplotlib.pyplot](https://matplotlib.org/api/pyplot_api.html) :: Visualization
#
# Begin by importing the necessary packages
import lasagne
import theano
import matplotlib.pyplot as plt
import numpy as np
import pandas as pd
import theano.tensor as T
# *Note: If a DEPRECATED warning appears, set a USER environment variable named THEANO_FLAGS to an empty string.*
# Define constants for the model
# ######################## Define Constants #################################
N_CS = 5          # elements in the conditioned-stimulus (CS) part of each input
N_CONTEXT = 10    # elements in the context part of each input
N_SAMPLES = 25    # input vectors per dataset
N_BATCHES = 250   # training iterations per simulation
N_SIMS = 20       # independent simulations to average over
output_file = 'output.xlsx'  # destination for the averaged results
# The below function builds the dataset or input vector. The dataset is a 2-dimensional binary array consisting of an initial 5 element CS, or *conditioned stimulus* portion and a 10 element context portion. The length of each piece, and thus the dataset, is controlled using constants defined above. The `cs[rand_num][0] = 1.0` portion sets the value of the 1st element of a random vector within the array to 1, this correlates to *stimulus present*.
def build_dataset(n_cs=N_CS, n_context=N_CONTEXT, n_samples=N_SAMPLES):
    """Create one binary input array of shape (n_samples, n_cs + n_context).

    Every row shares the same randomly drawn context vector; exactly one
    randomly chosen row has its first CS element set to 1.0, marking the
    'stimulus present' sample.
    """
    # CS portion: all zeros except a single randomly placed stimulus flag.
    cs_rows = [[0] * n_cs for _ in range(n_samples)]
    stimulus_row = np.random.randint(0, high=n_samples)
    cs_rows[stimulus_row][0] = 1.0
    # Context portion: one random binary vector shared by every sample.
    context = [float(np.random.randint(0, high=2)) for _ in range(n_context)]
    # Concatenate CS and context for each sample.
    return np.asarray([row + context for row in cs_rows])
# Next, we define a function to build targets for a loss function within the model. The previously built dataset is used as a reference point when building the targets to mitigate any errors while running the loss function. The `cs_index = targets.index(1.)` saves the index of the *stimulus present* vector within the larger dataset array for use in later parts of the application.
def build_targets(input_var):
    """Derive training targets from *input_var*.

    A row whose first element equals 1.0 (stimulus present) gets target
    1.0; every other row gets 0.0. Also returns the index of the first
    stimulus-present row, used downstream to locate its activations.
    """
    targets = [1.0 if np.any(row[0] == 1.0) else 0.0 for row in input_var]
    # Index of the stimulus-present sample (raises ValueError if absent).
    cs_index = targets.index(1.0)
    return np.asarray(targets), cs_index
# On to building the network...
# I chose to use the lasagne library because of its restraint in abstracting away all of the lower-level theano functionality. Within the context of the experiment, it was not only important to see the activations and weights at the layer level, but also to be able to grab and manipulate them. Other popular neural net libraries do not provide this access out of the box.
# Because the cortical network has 2 different training points, i.e. the *lower layer* weights with the hippocampal hidden layer activations, and the *upper layer* weights with the targets data outlined above; each piece of the cortical network had to built separately. This initial function defines the architecture of the lower cortical network.
# ################## Build Lower Cortical Network #############################
def build_cort_low_net(input_var=None):
    """Lower cortical network: 15-unit input -> 40-unit rectified dense layer.

    Weights are initialised uniformly in [-3, 3]; the batch dimension is
    left unspecified.
    """
    input_layer = lasagne.layers.InputLayer(
        shape=(None, 15), input_var=input_var)
    output_layer = lasagne.layers.DenseLayer(
        input_layer,
        num_units=40,
        W=lasagne.init.Uniform(range=3.0),
        nonlinearity=lasagne.nonlinearities.rectify)
    return output_layer
# This network consists of an **Input Layer** accepting a dataset of shape `(None, 15)` where the *None* parameter indicates the size of that dimension is not fixed. This is followed by a fully connected **Dense Layer** with 40 nodes and a *rectify* activation function. The weights matrix values are initialized as a random number between *-3 and 3* using [init.Uniform](https://lasagne.readthedocs.io/en/latest/modules/init.html#lasagne.init.Uniform) method.
# The upper cortical network is defined with similar layers to the lower cortical network with the following differences.
# 1. The **Input Layer** accepts input with shape `(None, 40`) matching the output of the lower cortical network
# 2. The **Dense Layer** consists of a single node, matching the desired network output
# 3. A *sigmoid* activation function is used.
# ################## Build Upper Cortical Network #############################
def build_cort_up_net(input_var=None):
    """Upper cortical network: 40-unit input -> single sigmoid output unit.

    The 40-element input matches the lower cortical network's output;
    weights are initialised uniformly in [-3, 3].
    """
    input_layer = lasagne.layers.InputLayer(
        shape=(None, 40), input_var=input_var)
    output_layer = lasagne.layers.DenseLayer(
        input_layer,
        num_units=1,
        W=lasagne.init.Uniform(range=3.0),
        nonlinearity=lasagne.nonlinearities.sigmoid)
    return output_layer
# The hippocampal network acts as a separate, independent network. It receives the same input vector/dataset as the cortical network, but unlike the cortical model, the hippocampal model acts as an autoencoder, intending to recreate the input as its output.
# #################### Build Hippocampal Network ############################
def build_hipp_net(input_var=None):
    """Hippocampal autoencoder: 15 -> 8 (rectify) -> 15 (sigmoid).

    Returns both the hidden and output layers so callers can read
    activations from either one.
    """
    input_layer = lasagne.layers.InputLayer(
        shape=(None, 15), input_var=input_var)
    hidden_layer = lasagne.layers.DenseLayer(
        input_layer,
        num_units=8,
        W=lasagne.init.Uniform(range=3.0),
        nonlinearity=lasagne.nonlinearities.rectify)
    output_layer = lasagne.layers.DenseLayer(
        hidden_layer,
        num_units=15,
        nonlinearity=lasagne.nonlinearities.sigmoid)
    return hidden_layer, output_layer
# Notable differences from the cortical models:
# 1. Since this network is self contained, there exists an input, hidden and output layer.
# 2. The # of nodes in the hidden layer is less than the # of elements in the input. This is intentional and serves to *focus* the model on patterns within the connected weights.
# 3. This network returns activations from both the hidden and output layer.
# Now that the model architectures are defined, we can define some helper functions that will run with the larger *run_net* function to produce output/feeder data along the way.
def iter_cort_net(num_batches, forward_func, update_func, data):
    """Run *num_batches* forward/update passes over *data*.

    ``forward_func`` is called for its side effects only; the value
    returned by ``update_func`` (which applies the weight updates) is
    collected for every batch.
    """
    raw_out_list = []
    for _ in range(num_batches):
        forward_func(data)
        raw_out_list.append(update_func(data))
    return raw_out_list
def iter_hipp_net(num_batches, forward_func, update_func, data):
    """Like iter_cort_net, but ``update_func`` yields (hidden, output) pairs.

    Returns two parallel lists — hidden-layer values and output-layer
    values — with one entry per batch.
    """
    hidden_values, output_values = [], []
    for _ in range(num_batches):
        forward_func(data)
        hid, out = update_func(data)
        hidden_values.append(hid)
        output_values.append(out)
    return hidden_values, output_values
# The *iter_cort_net* & *iter_hipp_net* functions handle the loop defined by the **N_BATCHES** constant. Each pass through the loop represents a single feedforward and backward propagate action for a single dataset. Since the variables being passed are *Theano shared variables*, the values are remembered through the loops. In an ideal world these 2 functions would be combined into a single *iter_nets* function, a possible task in a refactoring round.
def find_us_absent_present(index, out_list):
    """Split activations into US-present and US-absent series.

    For each batch entry, the value at *index* is the US-present sample;
    the sample immediately after it stands in for the US-absent case,
    falling back to the sample immediately before it when *index* is the
    last position.
    """
    present, absent = [], []
    for activations in out_list:
        present.append(float(activations[index]))
        try:
            absent.append(float(activations[index + 1]))
        except IndexError:
            absent.append(float(activations[index - 1]))
    return present, absent
# The *find_us_absent_present* function takes the output list from either *iter_\*_net* function and pulls out the output activations from the vector where the US was present (based on the cs_index variable from the *build_targets* function) and one representational vector where the US was absent. The representational vector is the vector immediately following the US-present vector, unless the US-present vector is the last one, in which case the vector immediately preceding it is selected. This function is only used for the output of the upper cortical network.
def _abs_present_absent(index, activation_lists):
    """Return (|present|, |absent|) activation vectors per batch entry.

    The US-present vector sits at *index*; the US-absent stand-in is the
    following vector, or the preceding one when *index* is last.
    """
    present, absent = [], []
    for item in activation_lists:
        present.append([abs(x) for x in item[index]])
        try:
            absent.append([abs(x) for x in item[index + 1]])
        except IndexError:
            absent.append([abs(x) for x in item[index - 1]])
    return present, absent


def get_hid_abs_value(index, cort_list, hipp_list):
    """Absolute-valued US-present/absent activations for both networks.

    The same extraction was previously duplicated for each network; it is
    now factored into ``_abs_present_absent``. Returns four lists in the
    original order: cortical present, cortical absent, hippocampal
    present, hippocampal absent.
    """
    cort_present, cort_absent = _abs_present_absent(index, cort_list)
    hipp_present, hipp_absent = _abs_present_absent(index, hipp_list)
    return cort_present, cort_absent, hipp_present, hipp_absent
# Since the hidden layer of the hippocampal network and the output of the lower cortical network could possibly have negative values, it is important to obtain absolute values for each activation list prior to additional processing. The *get_hid_abs_value* function pulls the US-present & a representative US-absent vector, identical to above, but also converts each value to its absolute value in the process.
# [Hamming distance](https://en.wikipedia.org/wiki/Hamming_distance) is a measure of difference between identical strings. For our purposes, this measures the difference between the activation values for a vector where the us was present vs a vector where the us was absent. In practicality, it should steadily increase as the model *learns* when to predict the us being present, representing the difference between the activation values of both vector increasing.
def get_hamm_dist(cort_abs_list, cort_pres_list, hipp_abs_list, hipp_pres_list):
    """Distance between US-absent and US-present activation vectors.

    For each batch entry, computes the sum of absolute element-wise
    differences (an L1 distance; called 'Hamming' here in the loose
    sense used by the surrounding notebook) for the cortical and
    hippocampal networks.

    Returns
    -------
    tuple[list, list]
        Per-batch distances for the cortical and hippocampal networks.
    """
    # zip replaces the index-based range(len(...)) loops; the paired
    # lists are produced together and always have equal length.
    c_dist_list = [
        np.sum(np.absolute(np.subtract(np.asarray(absent), np.asarray(present))))
        for absent, present in zip(cort_abs_list, cort_pres_list)
    ]
    h_dist_list = [
        np.sum(np.absolute(np.subtract(np.asarray(absent), np.asarray(present))))
        for absent, present in zip(hipp_abs_list, hipp_pres_list)
    ]
    return c_dist_list, h_dist_list
def create_dataframe(cort_us_abs, cort_us_pres, c_dist, h_dist):
    """Assemble the per-batch output series into a single DataFrame.

    Columns: X (US absent), XA (US present), and the cortical /
    hippocampal distances C-Dist / H-Dist.
    """
    series = {
        'X': cort_us_abs,
        'XA': cort_us_pres,
        'C-Dist': c_dist,
        'H-Dist': h_dist,
    }
    return pd.DataFrame(series, columns=list(series))
# In order to leverage export-to-Excel and plot functions, the final output data is dumped into a pandas DataFrame. If you're using Python 3.6+, you have the added luxury of ordered dicts out of the box.
def create_output(df_list, filename):
    """Average the per-simulation DataFrames, save to Excel, and plot.

    Aligns the frames by index, takes the element-wise mean rounded to
    two decimals, writes the result to *filename* (sheet 'Sheet1'),
    plots the X/XA learning curves, and returns the averaged frame.
    """
    df_concat = pd.concat(df_list)
    # Mean across simulations for each batch index.
    df_concat_by_index = df_concat.groupby(df_concat.index)
    df_final = df_concat_by_index.mean().round(decimals=2)
    # Context manager replaces the explicit ExcelWriter.save() call,
    # which was deprecated and then removed in pandas 2.0.
    with pd.ExcelWriter(filename) as xl_writer:
        df_final.to_excel(xl_writer, sheet_name='Sheet1')
    df_final[['X', 'XA']].plot()
    return df_final
# To finish up the helper functions:
# * *find_criterion* identifies threshold levels within the data
# * *run_sims* runs the larger simulations loop in which a single sub set of batches are ran
# * *convert_hipp_hidd_layer* converts the shape of the hippocampal hidden layer activations array to match the shape of the lower cortical layer output. This is for training purposes
def find_criterion(df, column, threshold):
    """Return the first index at which ``df[column]`` reaches *threshold*.

    Returns the string 'Criterion not reached' when no row qualifies.
    """
    matching_indices = df.loc[df[str(column)] >= threshold].index.values
    if len(matching_indices) == 0:
        return 'Criterion not reached'
    return matching_indices[0]
def run_sims(num_sims, **kwargs):
    """Run *num_sims* independent simulations and collect their DataFrames.

    Each simulation calls ``run_nets`` with the supplied model, targets,
    input and index, tagging it with its simulation number.
    """
    return [
        run_nets(model=kwargs['model'],
                 targets=kwargs['targets'],
                 input_var=kwargs['input_var'],
                 index=kwargs['index'],
                 count=int(sim))
        for sim in range(num_sims)
    ]
def convert_hipp_hidd_layer(hipp_hidd_list):
    """Tile the final batch of hippocampal hidden activations.

    Each hidden vector from the last training batch is concatenated with
    itself five times — presumably so the 8-unit hidden vector matches
    the 40-unit lower cortical output (8 x 5 = 40); TODO confirm — so it
    can serve as that network's training target.
    """
    # [-1] replaces the verbose hipp_hidd_list[len(hipp_hidd_list) - 1];
    # the leftover debug print has been removed.
    return [list(row) * 5 for row in hipp_hidd_list[-1]]
# On to the big boy, the actual *run_nets* function
def run_nets(model='i', **kwargs):
    """Build, train, and evaluate the cortico-hippocampal network once.
    model: 'i' intact, 'p' physostigmine, 's' scopolamine; any other code
    (normally 'l') runs the lesion model with the lower cortical layer frozen,
    matching the original if/elif/else branch layout.  Codes outside i/l/s/p
    raise KeyError at the progress print below.
    kwargs must supply 'targets', 'input_var', 'index' and 'count'.
    Returns the DataFrame produced by create_dataframe (X, XA, C-Dist, H-Dist).
    """
    model_dict = {
        'i': 'intact',
        'l': 'lesion',
        's': 'scopolamine',
        'p': 'physostigmine',
    }
    # define theano symbolic inputs for the three sub-networks
    X_data_cort_low = T.matrix('X_data_cort_low')
    X_data_cort_up = T.matrix('X_data_cort_up')
    X_data_hipp = T.matrix('X_data_hipp')
    # create nn models
    print('Building networks based on {} model type, {} simulation...'.format(model_dict[str(model)], kwargs['count']))
    cort_low_out_layer = build_cort_low_net(input_var=X_data_cort_low)
    cort_low_out_formula = lasagne.layers.get_output(cort_low_out_layer)
    cort_up_out_layer = build_cort_up_net(input_var=X_data_cort_up)
    cort_up_out_formula = lasagne.layers.get_output(cort_up_out_layer)
    hipp_hid_layer, hipp_out_layer = build_hipp_net(input_var=X_data_hipp)
    hipp_hid_formula, hipp_out_formula = lasagne.layers.get_output([hipp_hid_layer, hipp_out_layer])
    # The four model types share one training pipeline; the former branches
    # duplicated ~25 lines each and differed only in the hippocampal learning
    # rate (the drug manipulations) and, for the lesion model, in freezing the
    # lower cortical layer's parameters.
    hipp_learning_rate = {'i': 0.05, 'p': 1.0, 's': 0.001}.get(model)
    if hipp_learning_rate is None:
        # lesion model ('l' or any unrecognised code, matching the old else):
        # exclude the lower cortical weights and biases from training
        cort_low_out_layer.params[cort_low_out_layer.W].remove('trainable')
        cort_low_out_layer.params[cort_low_out_layer.b].remove('trainable')
        hipp_learning_rate = 0.05
    # hippocampal autoencoder: reconstruct the input, trained with momentum
    hipp_loss = lasagne.objectives.squared_error(hipp_out_formula, kwargs['input_var']).mean()
    hipp_params = lasagne.layers.get_all_params(hipp_out_layer, trainable=True)
    hipp_updates = lasagne.updates.momentum(hipp_loss, hipp_params, learning_rate=hipp_learning_rate, momentum=0.5)
    feed_forward_hipp = theano.function([X_data_hipp], [hipp_hid_formula, hipp_out_formula], allow_input_downcast=True)
    back_update_hipp = theano.function([X_data_hipp], [hipp_hid_formula, hipp_out_formula], updates=hipp_updates, allow_input_downcast=True)
    hipp_hidd_list, hipp_out_list = iter_hipp_net(N_BATCHES, feed_forward_hipp, back_update_hipp, kwargs['input_var'])
    # the lower cortical net learns to mimic the hippocampal hidden representation
    cort_low_targets = convert_hipp_hidd_layer(hipp_hidd_list)
    cort_up_loss = lasagne.objectives.binary_crossentropy(cort_up_out_formula, kwargs['targets'])
    cort_up_loss = lasagne.objectives.aggregate(cort_up_loss, mode='mean')
    cort_low_loss = lasagne.objectives.squared_error(cort_low_out_formula, cort_low_targets).mean()
    cort_up_params = lasagne.layers.get_all_params(cort_up_out_layer, trainable=True)
    cort_up_grads = theano.grad(cort_up_loss, wrt=cort_up_params)
    cort_up_updates = lasagne.updates.sgd(cort_up_loss, cort_up_params, learning_rate=0.5)
    cort_low_params = lasagne.layers.get_all_params(cort_low_out_layer, trainable=True)
    cort_low_grads = theano.grad(cort_low_loss, wrt=cort_low_params)
    cort_low_updates = lasagne.updates.sgd(cort_low_loss, cort_low_params, learning_rate=0.1)
    feed_forward_cort_low = theano.function([X_data_cort_low], cort_low_out_formula, allow_input_downcast=True)
    feed_forward_cort_up = theano.function([X_data_cort_up], cort_up_out_formula, allow_input_downcast=True)
    back_update_cort_low = theano.function([X_data_cort_low], cort_low_out_formula, updates=cort_low_updates, allow_input_downcast=True)
    back_update_cort_up = theano.function([X_data_cort_up], cort_up_out_formula, updates=cort_up_updates, allow_input_downcast=True)
    cort_low_out_list = iter_cort_net(N_BATCHES, feed_forward_cort_low, back_update_cort_low, kwargs['input_var'])
    # the upper cortical net consumes the final lower cortical output batch
    cort_up_out_list = iter_cort_net(N_BATCHES, feed_forward_cort_up, back_update_cort_up, cort_low_out_list[len(cort_low_out_list)-1])
    # split activations into US-present / US-absent trials and compute distances
    cort_us_present_up_out_list, cort_us_absent_up_out_list = find_us_absent_present(kwargs['index'], cort_up_out_list)
    cort_us_present_low_out_list, cort_us_absent_low_out_list, hipp_us_present_hid_list, hipp_us_absent_hid_list = get_hid_abs_value(kwargs['index'], cort_low_out_list, hipp_hidd_list)
    c_dist, h_dist = get_hamm_dist(cort_us_absent_low_out_list, cort_us_present_low_out_list, hipp_us_absent_hid_list, hipp_us_present_hid_list)
    net_output = create_dataframe(cort_us_absent_up_out_list, cort_us_present_up_out_list, c_dist, h_dist)
    return net_output
# This is the meat of the application and houses the actual training and prediction side of the model. Lets deconstruct the function:
# And finally to run some functions
# First, grab user input for which model to run
user_response = input('Select model type: intact(i), lesion(l), phystogimine(p), scopolomine(s): ')
# Create & view the dataset:
input_var = build_dataset()
print(input_var)
# Build & view the targets, note the location of the *1* in the targets matches that of the dataset
targets, cs_index = build_targets(input_var)
print(targets)
# And finally...running the network! Depending on your hardware, this can take up to 10 minutes; I warned you...
df_list = run_sims(N_SIMS,
model=user_response,
targets=targets,
input_var=input_var,
index=cs_index)
# Create the output
df_final = create_output(df_list, output_file)
# Show the plot inline
# %matplotlib inline
| CH_model.as.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# language: python
# name: python3
# ---
# # Importing the libraries
# +
import numpy as np
import matplotlib.pyplot as plt
import pandas as pd
import seaborn as sns
from sklearn.metrics import roc_curve, auc
from sklearn.metrics import roc_auc_score,recall_score, precision_score, f1_score
from sklearn.metrics import accuracy_score, confusion_matrix, classification_report, average_precision_score
# -
# # Load and Explore Data
# NOTE(review): assumes weatherAUS.csv is in the current working directory.
dataset=pd.read_csv('weatherAUS.csv')
dataset.head()
dataset.describe()
# find categorical variables; dtype 'O' (object) marks string-valued columns
categorical = [var for var in dataset.columns if dataset[var].dtype=='O']
print('There are {} categorical variables : \n'.format(len(categorical)), categorical)
# view the categorical variables
dataset[categorical].head()
# +
# check and print categorical variables containing missing values
nullCategorical = [var for var in categorical if dataset[var].isnull().sum()!=0]
print(dataset[nullCategorical].isnull().sum())
# -
# Number of labels: cardinality
#
# The number of labels within a categorical variable is known as cardinality. A high number of labels within a variable is known as high cardinality. High cardinality may pose some serious problems in the machine learning model. So, I will check for high cardinality.
# check for cardinality in categorical variables (unique() counts NaN as a label)
for var in categorical:
    print(var, ' contains ', len(dataset[var].unique()), ' labels')
# +
# Feature Extraction
# -
dataset['Date'].dtypes
# parse the dates, currently coded as strings, into datetime format
dataset['Date'] = pd.to_datetime(dataset['Date'])
dataset['Date'].dtypes
# split the timestamp into three numeric features the model can use
# extract year from date
dataset['Year'] = dataset['Date'].dt.year
# extract month from date
dataset['Month'] = dataset['Date'].dt.month
# extract day from date
dataset['Day'] = dataset['Date'].dt.day
dataset.info()
# drop the original Date variable (now redundant with Year/Month/Day)
dataset.drop('Date', axis=1, inplace = True)
dataset.head()
# ## Explore Categorical Variables
# NOTE: the get_dummies() calls in this section are exploratory only -- their
# results are not assigned; the actual encoding happens later via pd.get_dummies(x).
# Explore Location variable
dataset.Location.unique()
# check frequency distribution of values in Location variable
dataset.Location.value_counts()
# let's do One Hot Encoding of Location variable
# get k-1 dummy variables after One Hot Encoding (drop_first avoids collinearity)
pd.get_dummies(dataset.Location, drop_first=True).head()
# Explore WindGustDir variable
dataset.WindGustDir.unique()
# check frequency distribution of values in WindGustDir variable
dataset.WindGustDir.value_counts()
# let's do One Hot Encoding of WindGustDir variable
# get k-1 dummy variables after One Hot Encoding
# also add an additional dummy variable to indicate there was missing data
pd.get_dummies(dataset.WindGustDir, drop_first=True, dummy_na=True).head()
# sum the number of 1s per boolean variable over the rows of the dataset --> it will tell us how many observations we have for each category
pd.get_dummies(dataset.WindGustDir, drop_first=True, dummy_na=True).sum(axis=0)
# Explore WindDir9am variable
dataset.WindDir9am.unique()
dataset.WindDir9am.value_counts()
pd.get_dummies(dataset.WindDir9am, drop_first=True, dummy_na=True).head()
# +
# sum the number of 1s per boolean variable over the rows of the dataset -- it will tell us how many observations we have for each category
pd.get_dummies(dataset.WindDir9am, drop_first=True, dummy_na=True).sum(axis=0)
# -
# Explore WindDir3pm variable
dataset['WindDir3pm'].unique()
dataset['WindDir3pm'].value_counts()
pd.get_dummies(dataset.WindDir3pm, drop_first=True, dummy_na=True).head()
pd.get_dummies(dataset.WindDir3pm, drop_first=True, dummy_na=True).sum(axis=0)
# Explore RainToday variable
dataset['RainToday'].unique()
dataset.RainToday.value_counts()
pd.get_dummies(dataset.RainToday, drop_first=True, dummy_na=True).head()
pd.get_dummies(dataset.RainToday, drop_first=True, dummy_na=True).sum(axis=0)
# ## Explore Numerical Variables
# find numerical variables (anything that is not an object/string column)
numerical = [var for var in dataset.columns if dataset[var].dtype!='O']
print('There are {} numerical variables : \n'.format(len(numerical)), numerical)
# view the numerical variables
dataset[numerical].head()
# check missing values in numerical variables
dataset[numerical].isnull().sum()
# view summary statistics in numerical variables to check for outliers
# BUG FIX: the precision argument belonged inside round(); the original
# print(round(...), 2) rounded to 0 decimals and printed a stray 2.
print(round(dataset[numerical].describe(), 2))
# +
# plot box plots to check outliers for the four heavy-tailed variables
plt.figure(figsize=(10,15))
plt.subplot(2, 2, 1)
fig = sns.boxplot(y=dataset['Rainfall'])
fig.set_ylabel('Rainfall')
plt.subplot(2, 2, 2)
fig = sns.boxplot(y=dataset["Evaporation"])
fig.set_ylabel('Evaporation')
plt.subplot(2, 2, 3)
fig = sns.boxplot(y=dataset['WindSpeed9am'])
fig.set_ylabel('WindSpeed9am')
plt.subplot(2, 2, 4)
fig = sns.boxplot(y=dataset['WindSpeed3pm'])
fig.set_ylabel('WindSpeed3pm')
# +
# plot histograms to check each variable's distribution
plt.figure(figsize=(10,15))
plt.subplot(2, 2, 1)
fig = dataset.Rainfall.hist(bins=10)
fig.set_xlabel('Rainfall')
# NOTE(review): a histogram's y-axis is a frequency count, not 'RainTomorrow';
# the y-labels in this cell look copy-pasted -- confirm intent.
fig.set_ylabel('RainTomorrow')
plt.subplot(2, 2, 2)
fig = dataset.Evaporation.hist(bins=10)
fig.set_xlabel('Evaporation')
fig.set_ylabel('RainTomorrow')
plt.subplot(2, 2, 3)
fig = dataset.WindSpeed9am.hist(bins=10)
fig.set_xlabel('WindSpeed9am')
fig.set_ylabel('RainTomorrow')
plt.subplot(2, 2, 4)
fig = dataset.WindSpeed3pm.hist(bins=10)
fig.set_xlabel('WindSpeed3pm')
fig.set_ylabel('RainTomorrow')
# +
# find outliers for Rainfall variable
# Fences use 3 x IQR (the extreme-outlier rule) rather than the usual 1.5 x IQR.
IQR = dataset.Rainfall.quantile(0.75) - dataset.Rainfall.quantile(0.25)
Rainfall_Lower_fence = dataset.Rainfall.quantile(0.25) - (IQR * 3)
Rainfall_Upper_fence = dataset.Rainfall.quantile(0.75) + (IQR * 3)
print('Outliers are values < {lowerboundary} or > {upperboundary}'.format(lowerboundary=Rainfall_Lower_fence, upperboundary=Rainfall_Upper_fence))
# -
print('Number of outliers are {}'. format(dataset[(dataset.Rainfall> Rainfall_Upper_fence) | (dataset.Rainfall< Rainfall_Lower_fence)]['Rainfall'].count()))
# +
# find outliers for Evaporation variable
IQR = dataset.Evaporation.quantile(0.75) - dataset.Evaporation.quantile(0.25)
Evaporation_Lower_fence = dataset.Evaporation.quantile(0.25) - (IQR * 3)
Evaporation_Upper_fence = dataset.Evaporation.quantile(0.75) + (IQR * 3)
print('Outliers are values < {lowerboundary} or > {upperboundary}'.format(lowerboundary=Evaporation_Lower_fence, upperboundary=Evaporation_Upper_fence))
# -
print('Number of outliers are {}'. format(dataset[(dataset.Evaporation> Evaporation_Upper_fence) | (dataset.Evaporation< Evaporation_Lower_fence)]['Evaporation'].count()))
# +
# find outliers for WindSpeed9am variable
IQR = dataset.WindSpeed9am.quantile(0.75) - dataset.WindSpeed9am.quantile(0.25)
WindSpeed9am_Lower_fence = dataset.WindSpeed9am.quantile(0.25) - (IQR * 3)
WindSpeed9am_Upper_fence = dataset.WindSpeed9am.quantile(0.75) + (IQR * 3)
print('Outliers are values < {lowerboundary} or > {upperboundary}'.format(lowerboundary=WindSpeed9am_Lower_fence, upperboundary=WindSpeed9am_Upper_fence))
# -
print('Number of outliers are {}'. format(dataset[(dataset.WindSpeed9am> WindSpeed9am_Upper_fence) | (dataset.WindSpeed9am< WindSpeed9am_Lower_fence)]['WindSpeed9am'].count()))
# +
# find outliers for WindSpeed3pm variable (3 x IQR extreme-outlier fences)
IQR = dataset.WindSpeed3pm.quantile(0.75) - dataset.WindSpeed3pm.quantile(0.25)
WindSpeed3pm_Lower_fence = dataset.WindSpeed3pm.quantile(0.25) - (IQR * 3)
WindSpeed3pm_Upper_fence = dataset.WindSpeed3pm.quantile(0.75) + (IQR * 3)
print('Outliers are values < {lowerboundary} or > {upperboundary}'.format(lowerboundary=WindSpeed3pm_Lower_fence, upperboundary=WindSpeed3pm_Upper_fence))
# -
# BUG FIX: the original test used '> Lower_fence | < Upper_fence', which matches
# nearly every row; outliers lie ABOVE the upper or BELOW the lower fence.
print('Number of outliers are {}'. format(dataset[(dataset.WindSpeed3pm> WindSpeed3pm_Upper_fence) | (dataset.WindSpeed3pm< WindSpeed3pm_Lower_fence)]['WindSpeed3pm'].count()))
# +
def max_value(dataset, variable, top):
    """Return *variable*'s column with values above *top* capped at *top*."""
    return np.where(dataset[variable]>top, top, dataset[variable])
dataset['Rainfall'] = max_value(dataset, 'Rainfall', Rainfall_Upper_fence)
dataset['Evaporation'] = max_value(dataset, 'Evaporation', Evaporation_Upper_fence)
dataset['WindSpeed9am'] = max_value(dataset, 'WindSpeed9am', WindSpeed9am_Upper_fence)
# NOTE(review): WindSpeed3pm is capped at the literal 57 rather than its
# computed upper fence -- confirm this constant is intentional.
dataset['WindSpeed3pm'] = max_value(dataset, 'WindSpeed3pm', 57)
# -
# verify the caps removed the upper outliers
print('Number of outliers are {}'. format(dataset[(dataset.Rainfall> Rainfall_Upper_fence) | (dataset.Rainfall< Rainfall_Lower_fence)]['Rainfall'].count()))
print('Number of outliers are {}'. format(dataset[(dataset.Evaporation> Evaporation_Upper_fence) | (dataset.Evaporation< Evaporation_Lower_fence)]['Evaporation'].count()))
print('Number of outliers are {}'. format(dataset[(dataset.WindSpeed9am> WindSpeed9am_Upper_fence) | (dataset.WindSpeed9am< WindSpeed9am_Lower_fence)]['WindSpeed9am'].count()))
# BUG FIX: the WindSpeed3pm comparison directions were inverted ('> Lower' and
# '< Upper', matching nearly all rows); outliers are > upper or < lower fence.
print('Number of outliers are {}'. format(dataset[(dataset.WindSpeed3pm> WindSpeed3pm_Upper_fence) | (dataset.WindSpeed3pm< WindSpeed3pm_Lower_fence)]['WindSpeed3pm'].count()))
# +
# Replace NaN with default values
# -
nullValues = [var for var in dataset.columns if dataset[var].isnull().sum()!=0]
print(dataset[nullValues].isnull().sum())
categorical = [var for var in nullValues if dataset[var].dtype=='O']
from sklearn.impute import SimpleImputer
# strategy='constant' with no fill_value -- presumably this fills string columns
# with SimpleImputer's default constant ('missing_value'); verify against the
# sklearn docs for the installed version.
# NOTE(review): the imputers are fitted on the full dataset before the
# train/test split below -- a potential (mild) leakage source; confirm intent.
categoricalImputer = SimpleImputer(missing_values=np.nan,strategy='constant')
categoricalImputer.fit(dataset[categorical])
dataset[categorical]=categoricalImputer.transform(dataset[categorical])
print(dataset.head())
numerical = [var for var in dataset.columns if dataset[var].dtype!='O']
# (redundant re-import kept as-is; SimpleImputer is already in scope)
from sklearn.impute import SimpleImputer
# numeric NaNs are replaced with each column's mean
numericalImputer = SimpleImputer(missing_values=np.nan,strategy='mean')
numericalImputer.fit(dataset[numerical])
dataset[numerical]=numericalImputer.transform(dataset[numerical])
print(dataset.head())
# # Split data for model
x = dataset.drop(['RainTomorrow'], axis=1) # all columns except RainTomorrow (features)
y = dataset['RainTomorrow'] # the RainTomorrow column: dependent variable for all rows
print(x.head())
print(y[:10])
# # Encoding categorical data
# +
# encoding independent variables: one-hot encode every remaining object column
# -
x = pd.get_dummies(x)
print(x.head())
# +
## Encoding dependent variable
# -
# use LabelEncoder to replace the Yes/No target with 0 and 1
from sklearn.preprocessing import LabelEncoder
y= LabelEncoder().fit_transform(y)
print(y[:10])
# # Splitting the dataset into training and test set
from sklearn.model_selection import train_test_split
x_train,x_test,y_train,y_test = train_test_split(x,y,test_size=0.3,random_state = 0) # func returns train and test data. It takes dataset and then split size test_size =0.3 means 30% data is for test and rest for training; random_state pins the shuffle for reproducibility
print(x_train.head())
print(x_test.head())
print(y_train[:10])
print(y_test[:10])
# # Feature scaling
# MinMaxScaler is fitted on the training split only and then applied to the
# test split, avoiding test-set leakage.
from sklearn.preprocessing import MinMaxScaler
scaler = MinMaxScaler()
x_train= scaler.fit_transform(x_train)
x_test = scaler.transform(x_test)
print(x_train[:10,:])
print(x_test[:10,:])
# # Build Model
from sklearn.linear_model import LogisticRegression
classifier = LogisticRegression(solver='liblinear', random_state=0)
classifier.fit(x_train,y_train)
#predicting the test set results
y_pred = classifier.predict(x_test)
# # Evaluate Model
cm = confusion_matrix(y_test,y_pred)
print(cm)
cr = classification_report(y_test,y_pred)
print(cr)
# NOTE: the bare metric calls below rely on notebook cell output to display;
# they produce nothing when run as a plain script.
accuracy_score(y_test,y_pred)
average_precision= average_precision_score(y_test,y_pred)
print(average_precision)
recall_score(y_test,y_pred)
precision_score(y_test,y_pred)
f1_score(y_test,y_pred)
from sklearn.metrics import precision_recall_curve
# plot_precision_recall_curve was deprecated in scikit-learn 0.24 and removed
# in 1.2; PrecisionRecallDisplay.from_estimator is the supported replacement
# and returns an equivalent display object exposing the same .ax_ attribute.
from sklearn.metrics import PrecisionRecallDisplay
disp = PrecisionRecallDisplay.from_estimator(classifier, x_test, y_test)
disp.ax_.set_title('2-class Precision-Recall curve: '
                   'AP={0:0.2f}'.format(average_precision))
| Logistic Regression Classifier.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# language: python
# name: python3
# ---
# + [markdown] id="view-in-github" colab_type="text"
# <a href="https://colab.research.google.com/github/gamesMum/Leukemia-Diagnostics/blob/master/Leukemia_Diagnosis_.ipynb" target="_parent"><img src="https://colab.research.google.com/assets/colab-badge.svg" alt="Open In Colab"/></a>
# + [markdown] id="28X5lrEJcNp9" colab_type="text"
# # Leukemia Diagnostic Model
#
# **Classification of Acute Leukemia using Pretrained Deep Convolutional Neural Networks**
# Based on the implementation in the paper:
#
# [**Human-level recognition of blast cells in acute myeloid
# leukemia with convolutional neural networks**](https://www.biorxiv.org/content/10.1101/564039v1.full.pdf)
#
# **The Dataset used in this implementation:**
#
#
# - AML dataset
# -The number of classes are 10:
# Abbreviations of morphological classes used in folder structure and annotation file
# BAS Basophil
# EBO Erythroblast (and Monoblast)
# EOS Eosinophil
# KSC Smudge cell
# LYT Lymphocyte (typical and atypical)
# MON Monocyte
# MYO Myeloblast (Metamyelocyte and Myelocyte)
# NGB Neutrophil (band)
# NGS Neutrophil (segmented)
# PMO Promyelocyte (bilobled and not)
#
# UNC Image that could not be assigned a class during re-annotation
# nan no re-annotation
#
# - link to the dataset https://www.kaggle.com/lsaa2014/single-cell-morphological-dataset-of-leukocytes
#
# + [markdown] id="eOblMJaAcNp_" colab_type="text"
# # **Materials and Methods**
#
# Please read the paper for more and detailed information.
#
# - peripheral blood smears were selected from 100 patients diagnosed with different subtypes
# of AML at the Laboratory of Leukemia Diagnostics at Munich University Hospital between 2014 and 2017, and smears from 100 patients found to exhibit no morphological
# features of hematological malignancies in the same time frame.
#
# - The resulting digitised data consisted of multiresolution pyramidal images of a size of approximately 1 GB per scanned area of interest.
# A trained examiner experienced in routine cytomorphological diagnostics at Munich University Hospital differentiated physiological and pathological leukocyte types contained
# in the microscopic scans into the classification scheme (see fig 2B),
# which is derived from standard morphological categories and was refined to take into account subcategories relevant for the morphological classification of AML, such as bilobed Promyelocytes, which are typical of the FAB subtype M3v.
# - Annotation was carried out on a
# single-cell basis, and approximately 100 cells were differentiated in each smear
# - Subimage patches of size 400 x 400 pixels (corresponding to approximately 29µm x 29µm)
# around the annotated cells were extracted without further cropping or filtering, including
# background components such as erythrocytes, platelets or cell fragments.
# - When examining the screened blood smears, the cytologist followed the routine clinical procedure.
# Overall, 18,365 single-cell images were annotated and cut out of the scan regions.
#
# - Annotations of single-cell images provide the ground truth for training and evaluation
# of our network.
#
# - Morphological classes containing fewer than 10 images were merged with
# neighbouring classes of the taxonomy.
#
# - A subset of 1,905 single-cell images from all morphological categories were presented to a second, independent examiner, and annotated
# for a second time in order to estimate inter-rater variability
#
# **For Implementation:**
# - The network was adopted to input image dimensions of 400 x 400 x 3
# - No further cropping or filtering.
# - Retained the cardinality hyper-parameter at C = 32.
# - The final dense layer adapted to our 10-category classification scheme.
# - Annotations of single-cell images provide the ground truth for training and evaluation of the network
# - There are 10 classes for training and evaluation.
# - For our image classification task, we used an AlexNet
# - The network was trained for at least 20 epochs, which took a computing time of approximately 4 days on a Nvidia GeForce GTX TITAN X GPU.
# - The test group contains 20%, and the validation group 20% of the images
# - Random rotational transformations of 0−359 degrees, as well as random horizontal and vertical flips to the single-cell images in the dataset.
# - In the end the data set was augmented in such a way that each class contained approximately 10,000 images for training. (This is from the origional paper that had 15 classes, but this number increased in this implementation since some of the classes were combined)
# - You'll see that we implemented a technique to choose and cycle between different learning rate values. This implementation is based on the paper by Leslie N. Smith here: https://arxiv.org/abs/1506.01186 and with the help of this great article:
# https://towardsdatascience.com/adaptive-and-cyclical-learning-rates-using-pytorch-2bf904d18dee. More details found as you go along
#
#
#
#
#
#
#
# + _cell_guid="79c7e3d0-c299-4dcb-8224-4455121ee9b0" _uuid="d629ff2d2480ee46fbb7e2d37f6b5fab8052498a" id="7vft72MzcNqA" colab_type="code" colab={}
#importing the necessary libraries
import numpy as np
import torch
from torch import nn
from torchvision import transforms, datasets, models
import math
from torch.utils.data.sampler import SubsetRandomSampler
import torchvision as tv
# + id="zpb7PaokcNqG" colab_type="code" colab={} outputId="165433f6-c533-472c-cfde-04de784bccff"
# check whether a CUDA device can be used for training
train_on_gpu = torch.cuda.is_available()
message = ("CUDA is available. Training on GPU!"
           if train_on_gpu
           else "CUDA is not available. Training on CPU.")
print(message)
# + id="Xo664n7-cNqL" colab_type="code" colab={}
#time to prepare the data
batch_size = 32
test_size = 0.20   # fraction of images held out for testing
valid_size = 0.20  # fraction of images held out for validation
mean = [0.485, 0.456, 0.406]  # ImageNet channel means (pretrained-model convention)
std = [0.229, 0.224, 0.225]   # ImageNet channel standard deviations
#define the transforms: resize to the paper's 400x400 input, random
#rotation/flips for augmentation, then tensor conversion and normalisation
train_transform = transforms.Compose([
    transforms.Resize((400,400)),
    transforms.RandomRotation(359),
    transforms.RandomHorizontalFlip(0.2),
    transforms.RandomVerticalFlip(0.2),
    transforms.ToTensor(),
    transforms.Normalize(mean=mean, std=std)])
# NOTE(review): the dataset path is Kaggle-specific; adjust when running elsewhere.
train_data = datasets.ImageFolder("/kaggle/input/single-cell-morphological-dataset-of-leukocytes/blood_smear_images_for_aml_diagnosis_MOD/AML-Cytomorphology_LMU_MOD",
                                  transform = train_transform)
#obtain shuffled indices that will be carved into train/valid/test subsets
num_train = len(train_data)
indicies = list(range(num_train))
np.random.shuffle(indicies)
test_split = int(np.floor(test_size * num_train))
valid_split = int(np.floor(valid_size * num_train))
# 60/20/20 split: validation takes the first valid_split shuffled indices, test
# the next test_split, and training everything after test_split+valid_split
train_idx, valid_idx, test_idx = indicies[test_split+valid_split:], indicies[:valid_split], indicies[valid_split:test_split+valid_split]
#define samplers for obtaining the training, testing and validation sets
train_sampler = SubsetRandomSampler(train_idx)
test_sampler = SubsetRandomSampler(test_idx)
valid_sampler = SubsetRandomSampler(valid_idx)
train_loader = torch.utils.data.DataLoader(train_data, batch_size = batch_size,
                                           sampler = train_sampler)
test_loader = torch.utils.data.DataLoader(train_data, batch_size = batch_size,
                                          sampler = test_sampler)
valid_loader = torch.utils.data.DataLoader(train_data,batch_size = batch_size,
                                           sampler = valid_sampler)
# PROMYELOCYTE (PMB Promyelocyte (bilobled))
# PMO Promyelocyte), MYELOCYTE (MYB Myelocyte, MYO Myeloblast)ARE FOUND ON LEUKEMIA PATIENTS
# NOTE(review): these abbreviations must match the ImageFolder subdirectory
# names in sorted order, since ImageFolder assigns labels alphabetically -- confirm.
classes = ['BAS', 'EBO', 'EOS', 'KSC','LYT','MON', 'MYO', 'NGB', 'NGS', 'PMO']
# + id="_FQRNUeDcNqQ" colab_type="code" colab={} outputId="b1511aa1-f4ee-4420-a620-7c6395e05db5"
# %matplotlib inline
# %config InlineBackend.figure_format = 'retina'
import matplotlib.pyplot as plt
import torchvision.transforms.functional as F
def imshow(img):
    """Roughly undo normalisation and display a CHW image array."""
    # NOTE(review): img/2 + 0.5 inverts a mean/std of 0.5, which only
    # approximately undoes the ImageNet normalisation applied above -- confirm.
    img = img /2+0.5 #unormalize the images
    plt.imshow(np.transpose(img, (1, 2, 0))) #convert it back from tensor to image
#get one batch of training images
dataiter = iter(train_loader) #now contains the first batch
# BUG FIX: DataLoader iterators no longer expose a .next() method in current
# PyTorch; the built-in next() is the supported way to advance an iterator.
images, labels = next(dataiter) #images/labels = the first batch of images/labels
images = images.numpy() #convert the images to display them
#plot the images in the batch along with the corresponding labels
#(shows 20 of the 32 images in the batch)
fig = plt.figure(figsize=(25,6))
for idx in np.arange(20):
    ax = fig.add_subplot(1, 20, idx+1, xticks=[], yticks=[]) #(rows, cols, index, .., ..)
    imshow(images[idx])
    ax.set_title(classes[labels[idx]])
# + id="I9szVG2DcNqV" colab_type="code" colab={"referenced_widgets": ["ad7f7194b3db405f9d659abe7216bcf1"]} outputId="bbb3078d-5d82-45bd-a776-2b3396420a6c"
# Load VGG16 pretrained on ImageNet (NOTE: comment previously said AlexNet,
# which did not match the code).
model = models.vgg16(pretrained=True)
model
# + id="ENxVVp9ycNqZ" colab_type="code" colab={} outputId="9cb7bb32-aa2c-4a75-f900-612fcf5bdcdb"
# Freeze the convolutional feature extractor so only the new head is trained
# (the comment said "classifier" but the code freezes model.features).
for param in model.features.parameters():
    param.requires_grad = False
from collections import OrderedDict
# Replacement head: VGG16's classifier[6] is Linear(4096, 1000); swap it for
# a small MLP mapping the 4096 features onto our 10 leukocyte classes.
# No LogSoftmax here because CrossEntropyLoss applies log-softmax itself.
classifier = nn.Sequential(OrderedDict([
                          ('fc1', nn.Linear(4096, 1024)),
                          ('relu', nn.ReLU()),
                          ('fc2', nn.Linear(1024, 10))]))
#('output', nn.LogSoftmax(dim=1)
model.classifier[6] = classifier
model
# + [markdown] id="ln5Rw5L7cNqd" colab_type="text"
# <h2>Choose how to train</h2>
# <div class="alert alert-block alert-info" style="margin-top: 20px">
# <ul>
# <li>
# <a href="#normal">Normal Training</a>
# </li>
# <li>
# <a href="#cyclical">With Cyclical Learning Rate</a>
# </li>
# <li>
# <a href="#inference">Test our model</a>
# </li>
# </ul>
# </div>
#
# <hr>
# + [markdown] id="YLzyb2o0cNqf" colab_type="text"
# <a id="normal"></a>
# ## Normal Training
# + id="qzaJpfKrcNqg" colab_type="code" colab={} outputId="b69a7232-85c7-46be-ed52-d8b3bf2ea2fe"
import torch.optim as optim
#Loss function and optmixation function
criterion = nn.CrossEntropyLoss()
# the optimizer accepts only the trainable parameters
optimizer = torch.optim.SGD(model.classifier.parameters(), lr=0.001, momentum=0.9)
if train_on_gpu:
model.cuda()
model
# + id="TvKLl0QWcNql" colab_type="code" colab={} outputId="29421976-ac03-4d7f-df37-1b9a1806ea47"
# Standard train/validate loop: train for n_epochs, track the sample-weighted
# average losses, and checkpoint whenever the validation loss improves.
n_epochs = 20
valid_loss_min = np.Inf  # best (lowest) validation loss seen so far
for epoch in range(1, n_epochs+1):
    # keep track of training and validation loss
    train_loss = 0.0
    valid_loss = 0.0
    ###################
    # train the model #
    ###################
    model.train()
    for images, labels in train_loader:
        # move tensors to GPU if CUDA is available
        if train_on_gpu:
            images, labels = images.cuda(), labels.cuda()
        # clear the gradients of all optimized variables
        optimizer.zero_grad()
        # forward pass: compute predicted outputs by passing inputs to the model
        output = model(images)
        # calculate the batch loss (compare the model output to the actual labels)
        loss = criterion(output, labels)
        # backward pass: compute gradient of the loss w.r.t. model parameters
        loss.backward()
        # perform a single optimization step (parameter update)
        optimizer.step()
        # weight the batch loss by the batch size so the epoch average is exact
        train_loss += loss.item()*images.size(0)
    ######################
    # validate the model #
    ######################
    model.eval()
    # no gradients are needed for validation; saves memory and time
    with torch.no_grad():
        for images, labels in valid_loader:
            if train_on_gpu:
                images, labels = images.cuda(), labels.cuda()
            output = model(images)
            loss = criterion(output, labels)
            valid_loss += loss.item()*images.size(0)
    # calculate average losses
    train_loss = train_loss/len(train_loader.sampler)
    valid_loss = valid_loss/len(valid_loader.sampler)
    # save model if validation loss has decreased
    if valid_loss <= valid_loss_min:
        print('Epoch: {} \tTraining Loss: {:.6f} \tValidation Loss: {:.6f}'.format(
            epoch, train_loss, valid_loss))
        # BUG FIX: print the old minimum before overwriting it; previously
        # valid_loss_min was updated first, so the message always showed the
        # same value twice ("x --> x"). The CLR loop below already does this
        # in the correct order.
        print('Validation loss decreased ({:.6f} --> {:.6f}). Saving model ...'.format(
            valid_loss_min,
            valid_loss))
        torch.save(model.state_dict(), 'model_AML_classifier.pt')
        valid_loss_min = valid_loss
    if epoch % 10 == 0:
        # print training/validation statistics
        print('Epoch: {} \tTraining Loss: {:.6f} \tValidation Loss: {:.6f}'.format(
            epoch, train_loss, valid_loss))
# + id="gAXQmJvHcNqq" colab_type="code" colab={} outputId="5bf5e9da-6be3-45df-c6ee-0cd35a9cb197"
# Restore the best checkpoint (lowest validation loss) saved during training.
model.load_state_dict(torch.load('model_AML_classifier.pt'))
# + id="jXJ0uo9rcNqt" colab_type="code" colab={} outputId="e83eb580-b4c0-4aa2-87f0-d13c4083d969"
# Evaluate the model on the held-out test split: average loss plus
# per-class and overall accuracy.
test_loss = 0.0
class_correct = list(0. for i in range(10))
class_total = list(0. for i in range(10))
# set the model to evaluation mode (disables dropout etc.)
model.eval()
# no gradients needed for inference; saves memory and time
with torch.no_grad():
    for data, target in test_loader:
        # move the tensors to the GPU if available
        if train_on_gpu:
            data, target = data.cuda(), target.cuda()
        # forward pass: compute prediction scores for this batch
        output = model(data)
        # calculate the batch loss
        loss = criterion(output, target)
        # update the test loss (weighted by batch size)
        test_loss += loss.item()*data.size(0)
        # predicted class = arg-max over the class scores
        _, pred = torch.max(output, 1)
        # compare the prediction to the true label
        correct_tensor = pred.eq(target.data.view_as(pred))
        # BUG FIX: flatten with reshape(-1) instead of np.squeeze, which
        # collapses a size-1 batch to a 0-d array and breaks indexing.
        # (.cpu() is a no-op when already on the CPU.)
        correct = correct_tensor.cpu().numpy().reshape(-1)
        # BUG FIX: iterate over the actual batch length instead of assuming
        # batch_size and relying on an IndexError to stop on the last,
        # possibly smaller, batch.
        for i in range(len(target)):
            label = target.data[i]  # true class index for sample i
            class_correct[label] += correct[i].item()
            class_total[label] += 1
# average test loss
test_loss = test_loss/len(test_loader.dataset)
print('Test Loss: {:.6f}\n'.format(test_loss))
for i in range(10):
    if class_total[i] > 0:
        print('Test Accuracy of %5s: %2d%% (%2d/%2d)' % (
            classes[i], 100 * class_correct[i] / class_total[i],
            np.sum(class_correct[i]), np.sum(class_total[i])))
    else:
        print('Test Accuracy of %5s: N/A (no training examples)' % (classes[i]))
print('\nTest Accuracy (Overall): %2d%% (%2d/%2d)' % (
    100. * np.sum(class_correct) / np.sum(class_total),
    np.sum(class_correct), np.sum(class_total)))
# + id="ujiP5MwlcNqx" colab_type="code" colab={} outputId="3c3cc39d-9aaf-45db-d4ab-72dd74833936"
# Visualize one batch: title shows "predicted (true)", green when correct.
# Move model inputs to cuda
if train_on_gpu:
    images = images.cuda()
# get sample outputs
output = model(images)
# convert class scores to predicted class indices
_, preds_tensor = torch.max(output, 1)
preds = np.squeeze(preds_tensor.numpy()) if not train_on_gpu else np.squeeze(preds_tensor.cpu().numpy())
# plot the images in the batch, along with predicted and true labels
fig = plt.figure(figsize=(25, 4))
for idx in np.arange(10):
    # BUG FIX: subplot grid counts must be ints; `10/2` is a float under
    # Python 3 and raises a TypeError in current matplotlib.
    ax = fig.add_subplot(2, 5, idx+1, xticks=[], yticks=[])
    imshow(images.cpu()[idx])
    ax.set_title("{} ({})".format(classes[preds[idx]], classes[labels[idx]]),
                 color=("green" if preds[idx]==labels[idx].item() else "red"))
# + [markdown] id="McpAZy5AcNq3" colab_type="text"
# <a id="cyclical"></a>
# ## Implementing the cyclical learning rate technique:
#
# - First we need to find the max_lr and the base_lr.
# - implement the following experience to do so
# Refernce:
# - https://towardsdatascience.com/adaptive-and-cyclical-learning-rates-using-pytorch-2bf904d18dee
# - https://arxiv.org/pdf/1506.01186.pdf
#
# + id="iNWZGfYvcNq3" colab_type="code" colab={}
# LR range-test parameters: sweep the learning rate exponentially from
# start_lr up to end_lr over lr_find_epochs epochs.
lr_find_epochs = 2
start_lr = 1e-7
end_lr = 0.1
# + id="2nt35z0BcNq6" colab_type="code" colab={}
# Set up the optimizer and loss function for the experiment
optimizer = torch.optim.SGD(model.classifier.parameters(), lr=start_lr)
criterion = nn.CrossEntropyLoss()
# Exponential schedule derivation (corrected: the ratio, not the difference):
#   lr(t) = start_lr * e^(b*t)
#   end_lr = start_lr * e^(b*T)   =>   end_lr / start_lr = e^(b*T)
#   b = ln(end_lr / start_lr) / T,  T = lr_find_epochs * len(train_loader)
# + id="i3504XwEcNq9" colab_type="code" colab={}
# LR function lambda: LambdaLR multiplies the optimizer's base lr (start_lr)
# by this factor, so lr grows exponentially from start_lr to end_lr over
# the whole sweep (x counts optimizer steps).
from torch.optim.lr_scheduler import LambdaLR
lr_lambda = lambda x: math.exp(x * math.log(end_lr / start_lr) / (lr_find_epochs * len( train_loader)))
scheduler = LambdaLR(optimizer, lr_lambda)
# + id="gCZEslTScNrB" colab_type="code" colab={} outputId="8a64bdc3-7414-4813-c390-ed2e31cd0112"
# move model to GPU
if train_on_gpu:
    model.cuda()
model
# + [markdown] id="UZFAJLv_cNrG" colab_type="text"
# - In the following we run two epochs through the network. At each step we are capturing the LR and optimizing the gradient.
# + id="exTAe0sBcNrH" colab_type="code" colab={} outputId="8389a240-188f-4f28-f853-230874918fa1"
# Run the LR range-test experiment: train while the scheduler increases the
# learning rate exponentially, and record (lr, smoothed loss) at every step.
lr_find_loss = []
lr_find_lr = []
step_count = 0  # renamed from `iter`, which shadowed the builtin
smoothing = 0.05  # EWMA weight for the newest loss sample
for i in range(lr_find_epochs):
    print("epoch {}".format(i))
    model.train()
    for inputs, labels in train_loader:
        # move tensors to GPU if CUDA is available
        if train_on_gpu:
            inputs, labels = inputs.cuda(), labels.cuda()
        optimizer.zero_grad()
        # Get outputs to calc loss
        outputs = model(inputs)
        loss = criterion(outputs, labels)
        # Backward pass
        loss.backward()
        optimizer.step()
        # Update LR (per batch, not per epoch)
        scheduler.step()
        lr_step = optimizer.state_dict()["param_groups"][0]["lr"]
        lr_find_lr.append(lr_step)
        # BUG FIX: store plain floats via .item() instead of the raw loss
        # tensors; appending tensors kept every autograd graph alive and
        # leaked memory over the sweep.
        loss_value = loss.item()
        # smooth the loss (exponentially weighted moving average)
        if step_count == 0:
            lr_find_loss.append(loss_value)
        else:
            loss_value = smoothing * loss_value + (1 - smoothing) * lr_find_loss[-1]
            lr_find_loss.append(loss_value)
        step_count += 1
# + id="qtPY4ZazcNrM" colab_type="code" colab={} outputId="8c9ba4f7-5ad5-409a-ec24-a8746a147ab7"
plt.ylabel("loss")
plt.xlabel("learning rate")
plt.xscale("log")
plt.plot(lr_find_lr, lr_find_loss)
plt.show()
# + [markdown] id="9yyGZVsScNrQ" colab_type="text"
#
# - From the figure above, for the upper bound (max) we won't pick the lr at the lowest point but rather one about a factor of ten to the left.
# In this case
# the lowest point is about 3e-2, so we'll take 3e-3.
# - Now for the lower bound (min): according to the paper and other resources a good lower bound is the upper bound divided by 6. So 3e-3/6 = 5e-4
#
# - This approach could also help us find the range of acceptable lr for our model even if we decided to go with a fixed lr.
# + id="KSRLhsApcNrR" colab_type="code" colab={}
# As concluded above: upper bound for the cyclical lr schedule, taken about
# a factor of ten below the lr at the loss minimum of the range test.
lr_max = 3e-3
# + [markdown] id="EiYhv54lcNrU" colab_type="text"
# ### Step 2: The CLR Schedule
# Which varies the learning rate between the upper and lower bound.
# We are going with the triangular CLR schedule.
# 
# - Programatically we just need to create a custom function
# + id="LN-zyRMpcNrV" colab_type="code" colab={}
def cyclical_lr(stepsize, min_lr=5e-4, max_lr=3e-3):
    """Build a triangular cyclical learning-rate schedule.

    Parameters
    ----------
    stepsize: int
        Number of iterations in half a cycle (valley to peak).
    min_lr, max_lr: float
        Lower and upper learning-rate bounds of the triangle wave.

    Returns
    -------
    callable
        Maps an iteration count to the learning rate for that iteration.
    """
    amplitude = max_lr - min_lr

    def scale(cycle):
        # Constant scaling: the plain (un-damped) triangular CLR policy.
        return 1.

    def position(iteration):
        # Relative height within the current triangle: 0 at the valleys,
        # 1 at the peak, linear in between.
        cycle = math.floor(1 + iteration / (2 * stepsize))
        offset = abs(iteration / stepsize - 2 * cycle + 1)
        return max(0, (1 - offset)) * scale(cycle)

    return lambda iteration: min_lr + amplitude * position(iteration)
# + [markdown] id="P3cs8y1NcNrY" colab_type="text"
# ### Step 3: Wrap it up
# - This can be wrapped up inside LamdaLR object in Pytorch
# + id="T-2FUGRucNrZ" colab_type="code" colab={} outputId="e97b4175-51b6-4358-fd26-ea557fed741b"
#Parameters
factor = 6
end_lr = lr_max
iter = 0
total_logs = []
#Loss function and optmixation function
criterion = nn.CrossEntropyLoss()
optimizer = torch.optim.SGD(model.classifier.parameters(), lr=1.)
step_size = 4*len(train_loader)
clr = cyclical_lr(step_size, min_lr=end_lr/factor, max_lr=end_lr)
scheduler = torch.optim.lr_scheduler.LambdaLR(optimizer, [clr])
if train_on_gpu:
model.cuda()
model
# + [markdown] id="-VAhatvZcNrc" colab_type="text"
# # Step 4: Time to train our model
# + id="oS7XaEpbcNrc" colab_type="code" colab={} outputId="da1fa2ba-0bf0-4de5-bf1c-a611a8516978"
# Train/validate loop with the cyclical learning rate: identical to the
# normal loop above except that the CLR scheduler is stepped after every
# batch and the current lr is logged.
n_epochs = 20
valid_loss_min = np.Inf # track the best (lowest) validation loss so far
for epoch in range(1, n_epochs+1):
    # keep track of training and validation loss
    train_loss = 0.0
    valid_loss = 0.0
    ###################
    # train the model #
    ###################
    model.train()
    for images, labels in train_loader:
        # move tensors to GPU if CUDA is available
        if train_on_gpu:
            images, labels = images.cuda(), labels.cuda()
        # clear the gradients of all optimized variables
        optimizer.zero_grad()
        # forward pass: compute predicted outputs by passing inputs to the model
        output = model(images)
        # calculate the batch loss (compare the model output to the actual labels)
        loss = criterion(output, labels)
        # backward pass: compute gradient of the loss with respect to model parameters
        loss.backward()
        # perform a single optimization step (parameter update)
        optimizer.step()
        scheduler.step() # > Where the magic happens: per-batch CLR update
        lr_sched_test = scheduler.get_last_lr()  # current lr, for logging
        # update training loss (weighted by batch size)
        train_loss += loss.item()*images.size(0)
    ######################
    # validate the model #
    ######################
    model.eval()
    for images, labels in valid_loader:
        # move tensors to GPU if CUDA is available
        if train_on_gpu:
            images, labels = images.cuda(), labels.cuda()
        # forward pass: compute predicted outputs by passing inputs to the model
        output = model(images)
        # calculate the batch loss
        loss = criterion(output, labels)
        # update average validation loss
        valid_loss += loss.item()*images.size(0)
    # calculate average losses
    train_loss = train_loss/len(train_loader.sampler)
    valid_loss = valid_loss/len(valid_loader.sampler)
    # save model if validation loss has decreased
    if valid_loss <= valid_loss_min:
        # print the decrement in the validation loss (old minimum --> new)
        print('Epoch: {} \tTraining Loss: {:.6f} \tValidation Loss: {:.6f} \tLearning rate: {}'.format(
            epoch, train_loss, valid_loss, lr_sched_test))
        print('Validation loss decreased ({:.6f} --> {:.6f}). Saving model ...'.format(
            valid_loss_min,
            valid_loss))
        torch.save(model.state_dict(), 'model_AML_classifier.pt')
        # update the minimum only after printing, so "old --> new" is correct
        valid_loss_min = valid_loss
    if epoch % 10 == 0:
        # print training/validation statistics
        print('Epoch: {} \tTraining Loss: {:.6f} \tValidation Loss: {:.6f} \tLearning rate: {}'.format(
            epoch, train_loss, valid_loss, lr_sched_test))
# + id="KWnrbOoecNrf" colab_type="code" colab={} outputId="e8c562ad-cdab-488f-e359-ac00445630e9"
# Restore the best CLR checkpoint (lowest validation loss).
model.load_state_dict(torch.load('model_AML_classifier.pt'))
# + [markdown] id="d8OEfaTwcNri" colab_type="text"
# <a id="inference"></a>
# ## Test our Model
# + id="zDwkDiAwcNri" colab_type="code" colab={} outputId="b1eb77cf-8667-4714-d8c3-96763dbeae03"
# Evaluate the CLR-trained model on the held-out test split: average loss
# plus per-class and overall accuracy.
test_loss = 0.0
class_correct = list(0. for i in range(10))
class_total = list(0. for i in range(10))
# set the model to evaluation mode (disables dropout etc.)
model.eval()
# no gradients needed for inference; saves memory and time
with torch.no_grad():
    for data, target in test_loader:
        # move the tensors to the GPU if available
        if train_on_gpu:
            data, target = data.cuda(), target.cuda()
        # forward pass: compute prediction scores for this batch
        output = model(data)
        # calculate the batch loss
        loss = criterion(output, target)
        # update the test loss (weighted by batch size)
        test_loss += loss.item()*data.size(0)
        # predicted class = arg-max over the class scores
        _, pred = torch.max(output, 1)
        # compare the prediction to the true label
        correct_tensor = pred.eq(target.data.view_as(pred))
        # BUG FIX: flatten with reshape(-1) instead of np.squeeze, which
        # collapses a size-1 batch to a 0-d array and breaks indexing.
        correct = correct_tensor.cpu().numpy().reshape(-1)
        # BUG FIX: iterate over the actual batch length instead of assuming
        # batch_size and relying on an IndexError on the last batch.
        for i in range(len(target)):
            label = target.data[i]  # true class index for sample i
            class_correct[label] += correct[i].item()
            class_total[label] += 1
# average test loss
test_loss = test_loss/len(test_loader.dataset)
print('Test Loss: {:.6f}\n'.format(test_loss))
for i in range(10):
    if class_total[i] > 0:
        print('Test Accuracy of %5s: %2d%% (%2d/%2d)' % (
            classes[i], 100 * class_correct[i] / class_total[i],
            np.sum(class_correct[i]), np.sum(class_total[i])))
    else:
        print('Test Accuracy of %5s: N/A (no training examples)' % (classes[i]))
print('\nTest Accuracy (Overall): %2d%% (%2d/%2d)' % (
    100. * np.sum(class_correct) / np.sum(class_total),
    np.sum(class_correct), np.sum(class_total)))
# + id="Uombggs6cNrp" colab_type="code" colab={} outputId="bc6defdd-8805-49ac-da66-10e2fc1932ce"
# Visualize one batch: title shows "predicted (true)", green when correct.
# Move model inputs to cuda
if train_on_gpu:
    images = images.cuda()
# get sample outputs
output = model(images)
# convert class scores to predicted class indices
_, preds_tensor = torch.max(output, 1)
preds = np.squeeze(preds_tensor.numpy()) if not train_on_gpu else np.squeeze(preds_tensor.cpu().numpy())
# plot the images in the batch, along with predicted and true labels
fig = plt.figure(figsize=(25, 4))
for idx in np.arange(10):
    # BUG FIX: subplot grid counts must be ints; `10/2` is a float under
    # Python 3 and raises a TypeError in current matplotlib.
    ax = fig.add_subplot(2, 5, idx+1, xticks=[], yticks=[])
    imshow(images.cpu()[idx])
    ax.set_title("{} ({})".format(classes[preds[idx]], classes[labels[idx]]),
                 color=("green" if preds[idx]==labels[idx].item() else "red"))
# + [markdown] id="UOHuR9iwcNrs" colab_type="text"
# ### Notes:
#
# - make sure that the train, test, and validation sets are randomly picked so they contain images from all classes.
# - a smaller learning rate gives more stable training
# - make sure the data is well distributed (more images must be obtained)
# - The deeper the CNN the less FC layers is needed. because it needs less work to extract details and patterns since most of the job has been don by the CNN.
# - remove only the last fc layer from the original model and add our own classifier and train only this part.
#
#
#
#
# + id="pSyxARWycNrt" colab_type="code" colab={}
# ==== end of Leukemia_Diagnosis_.ipynb ====
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 2
# language: python
# name: python2
# ---
# +
from __future__ import division, print_function, absolute_import
import GPy
import numpy as np
import math
import matplotlib.pyplot as plt
from matplotlib import cm
from matplotlib import patches
# %matplotlib inline
from safe_learning import *
# -
# ## Heads-up
#
# The following code models an inverted pendulum, and uses a GP model to determine the safe region of attraction (ROA). The following is intended to illustrate the algorithm, not to be high-performance code. As such, the code will run very slowly, with the main bottleneck being the repeated GP predictions of the dynamics. There are several obvious points that could make the code run faster
# * A less conservative Lipschitz constant will allow coarser discretizations and therefore faster computations.
# * Only evaluating states close to the boundary of the safe set, since those are the only states that are able to expand the ROA over time.
# * Only partially update the GP predictions of the model where needed, rather than everywhere (lots of predictions are at states that are either unsafe and too far away from the current level set, or are already safe and evaluations there have no hope of expanding the ROA
# ## Dynamics model
#
# We define the dynamics of an inverted pendulum
# $$\ddot{\theta}(t) = \frac{mgl \sin(\theta(t)) + u(t)}{m l^2},$$
# where $m$ is the mass, $g$ the gravitational constant, $l$ the length of the pendulum, $u$ the control input (torque), and $\theta$ the angle.
#
# The prior model that we use considers no friction, as well as a mass that is $0.5\,$kg lighter.
# +
# System dimensions: 2 states (angle, angular velocity), 1 input (torque).
n = 2
m = 1
# 'Wrong' model parameters used by the prior model (no friction, lighter mass)
mass = 0.1
friction = 0.
length = 0.5
gravity = 9.81
inertia = mass * length ** 2
# True model parameters (only used to simulate measurements of the real plant)
true_mass = 0.15
true_friction = 0.05
true_length = length
true_inertia = true_mass * true_length ** 2
# Input saturation: the torque needed to hold the true pendulum at x_max
x_max = np.deg2rad(30)
u_max = gravity * true_mass * true_length * np.sin(x_max)
# LQR cost matrices
# BUG FIX: `np.float` was deprecated in NumPy 1.20 and removed in 1.24; it
# was an alias for the builtin float, which is the documented replacement.
Q = np.array([[1, 0], [0, 1]], dtype=float)
R = np.array([[0.1]], dtype=float)
# -
# ## Normalization
#
# In order for the LQR to return meaningful results, as well as for the GP model to have simpler kernel parameters, we normalize the system dynamics (all dimensions have similar magnitudes).
#
# $\theta$ is normalized withing the maximum controllable angle, $\dot{\theta}$ is normalized with the eigenfrequency of the dynamics, and $u$ is normlized with the maximum allowed control input.
# +
# Normalize the cost functions for the LQR computation
# x_normalized = inv(Tx) * x
# Tx scales the angle by the maximum controllable angle and the angular
# velocity by the eigenfrequency sqrt(g/l); Tu scales by the input saturation.
Tx = np.diag([x_max, np.sqrt(gravity / length)])
Tu = np.array([[u_max]])
Tx_inv = np.diag(np.diag(Tx)**(-1))
Tu_inv = np.diag(np.diag(Tu)**(-1))
def normalize_x(x):
    """Normalize x (2D array, one state per row, per the x.dot(...) layout)."""
    x = np.asarray(x)
    return x.dot(Tx_inv)
def denormalize_x(x):
    """Denormalize x (2D array, one state per row)."""
    x = np.asarray(x)
    return x.dot(Tx)
def normalize_u(u):
    """Normalize u (2D array, one input per row)."""
    u = np.asarray(u)
    return u.dot(Tu_inv)
def denormalize_u(u):
    """Denormalize u (2D array, one input per row)."""
    u = np.asarray(u)
    return u.dot(Tu)
# -
# ## Dynamics functions
#
# Here we define the physical dynamics, as well as the prior dynamics, which are a linearization of the true, nonlinear model with wrong parameters.
# +
# Nonlinear dynamics
def ode(x, u):
    """True ode of the dynamics (uses the true plant parameters).

    Parameters
    ----------
    x: np.array
        2D array with one normalized state per row
    u: np.array
        2D array with one normalized input per row

    Returns
    -------
    x_dot: np.array
        The normalized derivative of the dynamics
    """
    # Denormalize
    x = denormalize_x(np.atleast_2d(x))
    u = denormalize_u(np.asarray(u))
    # Physical dynamics: [theta_dot,
    #                     (g/l) sin(theta) + u/J - (b/J) theta_dot]
    x_dot = np.hstack([x[:, [1]],
                       (gravity / true_length * np.sin(x[:, [0]]) +
                        u / true_inertia
                        - true_friction / true_inertia * x[:, [1]])])
    # Normalize
    return normalize_x(x_dot)
# Linearized dynamics of the (wrong-parameter) prior model around upright
A = np.array([[0, 1],
              [gravity / length, -friction / inertia]])
B = np.array([[0],
              [1 / inertia]])
# Normalize linear dynamics
An = Tx_inv.dot(A.dot(Tx))
Bn = Tx_inv.dot(B.dot(Tu))
# Obtain LQR controller gain and cost-to-go matrix
Kn, Pn = lqr(An, Bn, Q, R)
u_max_norm = normalize_u(u_max)
def control_law(x):
    """LQR controller with bounded (normalized) inputs.

    Parameters
    ----------
    x: np.array
        2D array with one normalized state per row

    Returns
    -------
    u: np.array
        2D array with one normalized input per row
    """
    x = np.asarray(x)
    u = -x.dot(Kn.T)
    # saturate in place at the normalized torque limit
    np.clip(u, -u_max_norm, u_max_norm, out=u)
    return u
def true_dynamics(x):
    """Return the true closed-loop, normalized dynamics.

    Parameters
    ----------
    x: np.array
        2D array with one normalized state per row

    Returns
    -------
    x_dot: np.array
        2D array with one normalized derivative state per row
    """
    x = np.asarray(x)
    u = control_law(x)
    return ode(x, u)
def prior_dynamics(x):
    """Return the linearized, closed-loop, prior, normalized dynamics.

    Parameters
    ----------
    x: np.array
        2D array with one normalized state per row

    Returns
    -------
    x_dot: np.array
        2D array with one normalized derivative state per row
    """
    x = np.asarray(x)
    u = control_law(x)
    return x.dot(An.T) + u.dot(Bn.T)
# -
# ## Discretization
#
# We discretize the state into a grid world. Since we will use the conservative, theoretical Lipschitz constant of $\dot{V}(x)$ from Lemma 5, we have to discretize very finely. In practice, one may be tempted to pick larger values.
# +
# Discretization constant
tau = 0.002
# x_min, x_max, accuracy
grid_param = [(-0.5, 0.5, tau),
(-0.5, 0.5, tau)]
# Used to plot the safe set later
extent = np.array([grid_param[0][0], grid_param[0][1],
grid_param[1][0], grid_param[1][1]])
# Define a grid with combinations of states
grid = [np.arange(*x) for x in grid_param]
num_samples = [len(x) for x in grid]
grid = combinations(grid)
# Initial safe set
grid_true = denormalize_x(grid)
S0 = np.logical_and(np.abs(grid_true[:, 0]) < np.deg2rad(5),
np.abs(grid_true[:, 1]) < np.deg2rad(10))
if not np.any(S0):
print('No initial safe points!')
print('Grid size: {0} combinations in {1}x{2} discretized with tau={3}'
.format(len(grid), extent[:2], extent[2:], tau))
# -
# ## Gaussian process model of the error
#
# We define the state vector $\mathbf{x} = [\mathbf{x}_1, \mathbf{x}_2] = [\theta, \dot{\theta}]$, so that the dynamics can be written as
# $$
# \dot{\mathbf{x}} =
# \left[
# \begin{matrix}
# \mathbf{x}_2 \\
# \frac{mgl \sin(\mathbf{x}_1) + \tau}{m l^2}
# \end{matrix} \right]
# $$
#
# The first part of this equation says that the angle is equal to the integrated angular velocity. This is a intuitively true, irrespective of model errors. As such, we only learn the model error of the second part of the dynamics. That is
# $$\dot{\mathbf{x}} =
# \left[
# \begin{matrix}
# \mathbf{x}_2 \\
# \frac{mgl \sin(\mathbf{x}_1) + \tau}{m l^2} + g_\pi(\mathbf{x})
# \end{matrix} \right]
# $$
#
# As a kernel we choose $k(x,x') = k_{\mathrm{linear}}(x, x') * k_{\mathrm{Matern}}(x, x')$, the product of a linear and a Matern kernel. This encodes nonlinear functions with linearly increasing amplitude. For more details what this kernel encodes, see the one-dimensional example.
# +
# Mean function for the GP with the prior dynamics
mf = GPy.core.Mapping(2, 1)
mf.f = lambda x: prior_dynamics(x)[:, [1]]
mf.update_gradients = lambda a,b: None
# Matern kernel multiplied with linear kernel
kernel = (GPy.kern.Matern32(input_dim=2, lengthscale=.2, variance=5, name='radial') *
GPy.kern.Linear(input_dim=2, name='linear', variances=1))
# Measurement model
likelihood = GPy.likelihoods.Gaussian(variance=0.05**2)
# GP with initial measurement at (0, 0), 0
gp = GPy.core.GP(np.array([[0, 0]]), np.array([[0]]),
kernel, likelihood, mean_function=mf)
def predict_model(gp, x):
"""Predict the model using the gp dynamics
Given that the model error only affects the second derivative,
the first state has zero variance and is equal to the prior model.
Parameters
----------
gp: GPy.core.GP
The GP model of the dynamics (including prior)
x: np.array
2D array. Each column has one state at which
to predict the dynamics
Returns
-------
mean: np.array
The mean dynamics at x
var: np.array
Variance of the dynamics at x
"""
gp_mean, gp_var = gp._raw_predict(x)
# Augment with deterministic model for first state
gp_mean = np.hstack([prior_dynamics(x)[:, [0]], gp_mean])
gp_var = np.hstack([np.zeros_like(gp_var), gp_var])
return gp_mean, gp_var
# -
# ## Lipschitz constant
#
# The Lipschitz constant is defined via the high-probability Lipschitz constant of the GP model, as well as the linear dynamics. Importantly, here we use the local Lipschitz constants. Since the kernel we have chosen implies increasing Lipschitz constants with distance from the origin, the worst-case Lipschitz constant would be too conservative.
# +
# Lyapunov function:
V, dV = quadratic_lyapunov_function(grid, Pn)
V_max = np.max(V)
accuracy = V_max / 1e10
# Lipschitz constants of Lyapunov function
B_dV = L_V = np.max(np.abs(dV), axis=1)
L_dV = np.max(Pn)
# Kernel parameters
kernel_lengthscale = np.min(gp.kern.radial.lengthscale).squeeze()
kernel_var = gp.kern.radial.variance.values.squeeze()
linear_var = gp.kern.linear.Kdiag(grid).squeeze()
# Dynamics Lipschitz constants
L_g = 2 * np.sqrt(kernel_var * linear_var) / kernel_lengthscale
L_f = np.max(np.abs(An - Bn.dot(Kn)))
# Function bounds
B_g = 2 * np.sqrt(kernel_var * linear_var)
B_f = prior_dynamics(grid)[:, 1]
L = (B_g + B_f) * L_dV + B_dV * (L_g + L_f)
# -
# ## True safe levelset
#
# To get an intuition about the task at hand, we compute the maximum, safe level set (ROA) according to the true and prior dynamics. The learning algorithm only has access to the prior dynamics model, not the true model!
#
# The plot shows the maximum level set (orange), and the region where $\dot{V}$ is sufficiently small (red). It can be seen that the prior model estimates a safe region that is too large, since it considers a lighter mass. Also, the third plot shows that we cannot recover the maximum level set with the learning method, since it considers $\dot{V}(x) < -L\tau$, rather than $\dot{V}(x) < 0$. For finer discretizations the two sets will get closer and closer to each other.
# +
V_dot_true = compute_v_dot_upper_bound(dV, true_dynamics(grid), None)
V_dot_prior = compute_v_dot_upper_bound(dV, prior_dynamics(grid), None)
fig, axes = plt.subplots(1, 3, figsize=(10, 20))
S_true = get_safe_set(V_dot_true, 0, S0=None)
axes[0].imshow(np.reshape(S_true, num_samples).T, extent=extent, origin='lower')
c_true = find_max_levelset(S_true, V, accuracy)
axes[0].imshow(np.reshape(V <= c_true, num_samples).T, extent=extent, origin='lower', alpha=0.3, cmap='viridis')
axes[0].set_title('True safe set (V_dot < 0)')
S_prior = get_safe_set(V_dot_prior, 0, S0=S0)
c_prior = find_max_levelset(S_prior, V, accuracy)
axes[1].imshow(np.reshape(S_prior, num_samples).T, extent=extent, origin='lower')
axes[1].set_title('Prior safe set (V_dot < 0)')
axes[1].imshow(np.reshape(V < c_prior, num_samples).T, extent=extent, origin='lower', alpha=0.3, cmap='viridis')
S_true_L = get_safe_set(V_dot_true, -L*tau, S0=S0)
c_true_L = find_max_levelset(S_true_L, V, accuracy)
axes[2].imshow(np.reshape(S_true_L, num_samples).T, extent=extent, origin='lower')
axes[2].set_title('True safe set (V_dot < -L*tau)')
axes[2].imshow(np.reshape(V < c_true_L, num_samples).T, extent=extent, origin='lower', alpha=0.3, cmap='viridis')
plt.show()
print('Number of true safe points: {0}/{3}\n'
'Number of prior safe points: {1}/{3}\n'
'Number of finite safe points: {2}/{3}\n'.format(np.count_nonzero(V < c_true),
np.count_nonzero(V < c_prior),
np.count_nonzero(V < c_true_L),
grid.shape[0]))
# -
# ## Online learning
#
# Now let us see how the learning algorithm performs. We compute the maximum level set based on the GP estimate of the dynamics, and sample the most uncertain state within for 100 iterations.
# +
V, dV = quadratic_lyapunov_function(grid, Pn)
def update_gp():
dynamics_mean, dynamics_var = predict_model(gp, grid)
V_dot = compute_v_dot_upper_bound(dV, dynamics_mean, dynamics_var, beta=2.)
S = get_safe_set(V_dot, -L*tau, S0=S0)
c = find_max_levelset(S, V, accuracy)
S[:] = V <= c
max_id = np.argmax(dynamics_var[S, 1])
max_state = grid[S][[max_id], :].copy()
gp.set_XY(np.vstack([gp.X, max_state]),
np.vstack([gp.Y, true_dynamics(max_state)[:, [1]]]))
return S
# -
# ##### Warning: This is non-optimized, academic code. Executing the following cell may take roughly a minute on a decent laptop.
# +
# Try to import a nice progress bar
try:
    from tqdm import tqdm
except ImportError:
    # BUG FIX: catch only ImportError instead of a bare `except:`, which
    # would also swallow KeyboardInterrupt and SystemExit.
    tqdm = lambda x: x
# Update the GP model 100 times
for i in tqdm(range(100)):
    S = update_gp()
# BUG FIX: multiply by 100 so the value printed next to '%' is actually a
# percentage (the ratio itself is a fraction in [0, 1]).
print('Number of estimated safe points: {0}% relative to true dynamics with V_dot < 0'
      .format(100 * np.count_nonzero(S) / np.count_nonzero(V < c_true)))
# -
# # Plot results
#
# We plot the resulting estimate. By restricting ourselves to the levelset $\dot{V} \leq -L \tau$, we cannot reach the true safe set. However, if we pick a less conservative Lipschitz constant and discretize at a finer rate, the two will approach each other.
# +
def denorm_ellipse(P, level):
    """Return the ellipse bounds for the given level set, rescaled to
    physical units via the normalization matrix Tx."""
    x0, x1_u, x1_l = ellipse_bounds(P, level)
    return Tx[0,0] * x0, Tx[1,1] * x1_u, Tx[1,1] * x1_l

# Largest Lyapunov level set contained in the final estimated safe set.
c_est = find_max_levelset(S, V, accuracy)

# Prior, true and estimated safe sets drawn as filled ellipses.
colors = ['b', 'm', 'r']
plt.fill_between(*denorm_ellipse(Pn, c_prior), color=colors[0], alpha=0.5)
plt.fill_between(*denorm_ellipse(Pn, c_true), color=colors[1], alpha=0.5)
plt.fill_between(*denorm_ellipse(Pn, c_est), color=colors[2], alpha=0.5)

patch0 = patches.Patch(color=colors[0], alpha=0.5, label='Prior safe set')
patch1 = patches.Patch(color=colors[1], alpha=0.5, label='True safe set')
patch2 = patches.Patch(color=colors[2], alpha=0.5, label='Estimated safe set')

legs = [patch0, patch1, patch2]
labels = [x.get_label() for x in legs]
leg = plt.legend(legs, labels, loc=3, borderaxespad=0)

# Plot the sampled states; gp.X[1:, :] skips the initial training point.
data = denormalize_x(gp.X[1:, :])
plt.plot(data[:, 0], data[:, 1], 'x')

plt.xlabel(r'Angle $\theta$')
plt.ylabel(r'Angular velocity $\dot{\theta}$')
plt.show()
# -
| inverted_pendulum.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: conda_python3
# language: python
# name: conda_python3
# ---
# # SageMaker で BYOA を行うための Docker イメージを作成する
#
# #### ノートブックに含まれる内容
#
# - SageMaker で BYOA(Bring Your Own Algorithm) を行うための,Docker コンテナイメージの基本的な作成のしかた
#
# #### ノートブックで使われている手法の詳細
#
# - Docker
# ## 前準備
#
# このノートブックでは Amazon Elastic Container Registry (ECR) を使用するため,あらかじめ SageMaker を実行している IAM Role に対して,以下の IAM Policy をアタッチしてください
#
# ```
# AmazonEC2ContainerRegistryFullAccess
# ```
# ## 概要
#
# 先ほどの例では,単純にノートブック上でDecision Tree アルゴリズムについて,サンプルデータをもとにモデルを学習しました.そして,学習したモデルを使って推論も行いました.ですが,ここでデータサイズが大きすぎてモデルの学習に時間がかかる場合や,できたモデルを API として公開し,大量のリクエストをさばくことができるようにしたい場合には,どうしたら良いでしょうか?
#
# SageMaker を使うことで,このような問題を解決することができます.SageMaker は Docker コンテナを活用することにより,モデル学習や API による推論をスケーラブルな形で実行します.そのため,SageMaker を実際に使用する前に,学習および推論を行うための Docker イメージをまず最初に作成します.それから,作成した Docker イメージを使って,実際に SageMaker API 経由で学習,および推論を実行します.
#
# このノートブックでは,scikit-learn での学習・推論を行うための Docker イメージについて説明します.以下,まず Part 1 で Docker イメージのアーキテクチャについて説明,Part 2 で実際に Docker イメージの作成を行います.
# ## SageMaker 用の Docker イメージの構成
#
# ### SageMaker での Docker の利用の仕方
# SageMaker の Docker イメージは,学習のときと推論のときで,同じイメージを用いることができます.SageMaker の中では,学習と推論のそれぞれでコンテナを立ち上げる際に,以下のコマンドが実行されます
#
# * 学習: `docker run $IMAGE_ID train`
#
# * 推論: `docker run $IMAGE_ID serve`
#
# このため,Docker イメージは `train` および `serve` というコマンドを持つ必要があります.この例では,Docker イメージ作成時に使用するスクリプト群をまとめて `container` ディレクトリに以下のように配置しました.`container/decision_trees` 内に `train` と `serve` のスクリプトが配置されているのが確認できるかと思います.これらのスクリプトは Python で書かれていますが,実際にはどの言語で書いても問題はありません.`serve` スクリプトではあくまで推論用の HTTP サーバを起動しているだけで,実際のエンドポイントの処理は `predictor.py` に記述されています
#
# .
# └── container
# ├── Dockerfile
# ├── build_and_push.sh
# └── decision_trees
# ├── nginx.conf
# ├── predictor.py
# ├── serve
# ├── train
# └── wsgi.py
#
#
# * __`Dockerfile`__ には,Docker イメージをどのようにビルドするかが記述されています
# * __`build_and_push.sh`__ は Dockerfile を使ってコンテナイメージをビルドし,ECR にプッシュするためのスクリプトです
# * __`decision_trees`__ コンテナ内に含まれるファイルを配置したディレクトリです
#
# ディレクトリの中身の確認
# !ls -lR container
# train スクリプトの中身の確認
# !cat container/decision_trees/train
# ### 学習時のコンテナの実行
#
# SageMaker が学習ジョブを走らせる際,`train` スクリプトが通常の Python プログラムのように実行されます.その際に SageMaker の仕様として,コンテナ内の `/opt/ml` ディレクトリ内に,さまざまなファイルを配置して使用する形をとります.
#
# /opt/ml
# ├── input
# │ ├── config
# │ │ ├── hyperparameters.json
# │ │ └── resourceConfig.json
# │ └── data
# │ └── <channel_name>
# │ └── <input data>
# ├── model
# │ └── <model files>
# └── output
# └── failure
#
# #### インプット
#
# * `/opt/ml/input/config` には,どのように学習処理を実行するかの情報が置かれます.`hyperparameters.json` はハイパーパラメタの名前とその値を JSON フォーマットで格納したファイルです.値は常に `string` 型として読みだされるため,その後適切な型に変換する必要があります.`resourceConfig.json` は分散学習を行う際のネットワークレイアウトを記述した JSON フォーマットのファイルです.scikit-learn では分散学習をサポートしていないため,この例では使用しません.
# * `/opt/ml/input/data/<channel_name>/` はデータ入力方式が FILE モードのときに使われるディレクトリです.チャンネルはジョブ実行時に叩く `CreateTrainingJob` に引き渡すパラメタとして指定することができます.入力データはチャネルごとに,こちらもパラメタで指定された S3 ディレクトリからロードされたものが配置されます.
# * `/opt/ml/input/data/<channel_name>_<epoch_number>` はデータ入力方式が PIPE モードのときに使われるディレクトリです.エポックは 0 から始まり順に増えていきます.ディレクトリ名はチャンネルとエポックで指定されます
#
# #### アウトプット
#
# * `/opt/ml/model/` は,アルゴリズムにより生成された結果のモデルが保存されるディレクトリです.モデルのフォーマットは自由に指定することができます.単一ファイルでもよいですし,階層構造を持ったディレクトリの形でも構いません.SageMaker はこのディレクトリ内のすべてのデータを圧縮済みの tar アーカイブにまとめます.このアーカイブファイルは,`DescribeTrainingJob` API のレスポンスに含まれる S3 ロケーションに置かれます
# * `/opt/ml/output` にはジョブが失敗した際に,その原因が記述された `failure` ファイルが配置されます.このファイルの中身は,`DescribeTrainingJob` API のレスポンスに含まれる `FailureReason` の内容と同じです.ジョブが成功した際には,ここには何も書き出されません
#
# ### 推論時のコンテナの実行
#
# 推論時には,コンテナが API サーバとしてホストされた形で実行されます.そのため,HTTP 経由で推論のリクエストを受け付けることができます.SageMaker で API サーバをホストする際には,以下の 2 つのエンドポイントが必要です
#
# * `/ping` はインフラからの `GET` リクエストを受けるためのエンドポイントです.リクエストを受けたら,レスポンスコード 200 を返します
#
# * `/invocations` はクライアントからの `POST` 推論リクエストを受けるためのエンドポイントです.リクエストとレスポンスのフォーマットは自由に指定することができます.クライアントで `ContentType` と `Accept` ヘッダをつけた場合には,そのままエンドポイント側に引き渡されます
#
# 推論用のコンテナでは,SageMaker はモデルファイルを学習時と同じディレクトリに配置して使用します
#
# /opt/ml
# └── model
# └── <model files>
# predictor.py スクリプトの中身の確認
# !cat container/decision_trees/predictor.py
# この例では以下の図のような,私たちの推奨する構成である Python ベースの頑健かつスケーラブルなスタックを用います.
#
# 
#
# 上記を実現するために,`container/decision_trees` ディレクトリ内には,`nginx.conf` や `wsgi.py` が配置されています.もちろん,これ以外のツールを追加したり,または全く別の構成を取ることも可能です.
#
# container/decision_trees
# ├── nginx.conf
# ├── predictor.py
# ├── serve
# ├── train
# └── wsgi.py
#
# 上記ファイルの中身は以下のとおりです.
#
# * __`nginx.conf`__ は nginx の設定ファイルです
# * __`predictor.py`__ は Flask による Web サーバが記述されたプログラムファイルです
# * __`serve`__ は推論用コンテナとして立ち上げたときに実行されるプログラムです.中では,単に gunicorn サーバを起動して,`prediction.py` で実装された複数の Flask アプリケーションを実行します
# * __`train`__ は学習用コンテナとして立ち上げたときに実行されるプログラムです.学習アルゴリズムに応じて自由に記述を変えることができます
# * __`wsgi.py`__ は Flask アプリケーションを叩くための小さなラッパーです.
#
# 基本的には,上記の `train` (学習アルゴリズムの記述)と `predictor.py` (推論アルゴリズムの記述)を変更することで,実施したい機械学習の学習および推論処理を実現できます.それ以外の 3 ファイルは基本的には変更する必要はありません
#
# ## Docker イメージの作成
#
# ### Dockerfile
#
# ここまで説明してきた仕組みを実現するために,Dockerfile でコンテナイメージの構成を定義します.
# !cat container/Dockerfile
# ### コンテナイメージをビルドして登録
#
# 以下のシェルで,`docker build` コマンドを使ってコンテナイメージをビルドし,ECR (Elastic Container Registry) にプッシュします.このスクリプトは,`container/build_and_push.sh` にシェルスクリプトとしてまとまっており,`build_and_push.sh decision_trees_sample` の形で実行することで,`decision_trees_sample` イメージを ECR にプッシュすることができます.
#
# ECR リポジトリは,SageMaker のノートブックインスタンスがあるのと同一リージョンのものが使われます.もしリポジトリがない場合には,自動的に作られます.
#
# 以下のスクリプトを実行する前に,**<span style="color: red;">5 行目の `account_number=XX` の `XX` を指定された適切な数字に変更</span>**してください
# + language="sh"
#
# # アルゴリズムの名前
# # アカウントナンバーを修正
# account_number=XX
# algorithm_name=decision-trees-sample-$account_number
#
# cd container
#
# chmod +x decision_trees/train
# chmod +x decision_trees/serve
#
# account=$(aws sts get-caller-identity --query Account --output text)
#
# # 現在の設定を確認して,リージョンをセット (もし定義されていない場合には,us-west-2 に設定)
# region=$(aws configure get region)
# region=${region:-us-west-2}
#
# fullname="${account}.dkr.ecr.${region}.amazonaws.com/${algorithm_name}:latest"
#
# # もしリポジトリが ECR に存在しない場合には作成
#
# aws ecr describe-repositories --repository-names "${algorithm_name}" > /dev/null 2>&1
#
# if [ $? -ne 0 ]
# then
# aws ecr create-repository --repository-name "${algorithm_name}" > /dev/null
# fi
#
# # ECR からログインコマンドを取得し,直接実行
# $(aws ecr get-login --region ${region} --no-include-email)
#
# # Docker イメージを指定した名前でローカルで作成し,ECR にプッシュ
# # Sagemaker ノートブックインスタンスの場合,Docker デーモンを再起動する必要がある(既知の問題で対応予定)
#
# if [ -d "/home/ec2-user/SageMaker" ]; then
# sudo service docker restart
# fi
#
# docker build -t ${algorithm_name} .
# docker tag ${algorithm_name} ${fullname}
#
# docker push ${fullname}
# -
# ### ECR のリポジトリを確認
#
# 以下の URL を開いて,作成した `decision-trees-sample-XX` のリポジトリが存在することを確認
#
# https://console.aws.amazon.com/ecs/home?region=us-east-1#/repositories
| bring_your_own_container/scikit_bring_your_own/02_setup_docker_container_image.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .r
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: R
# language: R
# name: ir
# ---
# # Seurat integration of 5x SPLEEN samples
#
# # OD1, OD2, OD4 + HS1 & HS2
# Date: 2021-11-02
library(Seurat)
library(future)
# +
plan(strategy = "multicore", workers = 6)
options(future.globals.maxSize = +Inf)
# -
# +
data.path = 'matrices/'
data.rds = 'rds/'
data.meta = 'notebooks/output/'
# -
Sys.time()
combo.data <- read.table(file = gzfile( paste0(data.path,
"20211102_COMBO_SPLEEN_filtered_gene_x_cells_XFILTERED_matrix.txt.gz") ) )
Sys.time()
head(combo.data)
# Replace first (all with gsub actually) character (X) with nothing
colnames(combo.data) <- gsub('X', '', colnames(combo.data))
head(combo.data)
dim(combo.data)
# **Metadata**
metadata <- read.table(paste0(data.meta, '20211102_COMBO_SPLEEN_filtered_metadata.txt'),
sep='\t', header=T, row.names = 1)
head(metadata)
tail(metadata)
rownames(metadata) <- gsub('-', '.', rownames(metadata))
colnames(metadata)
metadata <- metadata[ ,c('donor','library','leiden.1.2','annot') ]
dim(metadata)
# Common cells between data and metadata
common <- intersect( colnames(combo.data), rownames(metadata) )
length(common)
Sys.time()
data <- CreateSeuratObject(counts = combo.data[,common], meta.data = metadata[common,])
Sys.time()
# **Save filtered input counts seurat obj**
Sys.time()
saveRDS(file = paste0(data.rds, '20211102_COMBO_SPLEEN_filtered_counts_Seurat3_obj.rds'), data)
Sys.time()
data
# Cross-check donor labels between the metadata table and the Seurat object.
unique(metadata$donor)
# NOTE(review): the source contained a "<EMAIL>" redaction artifact here;
# restored to the S4 meta.data slot accessor on the Seurat object — confirm
# against the original notebook.
unique(data@meta.data$donor)
# Split the merged object into one Seurat object per donor.
data.list <- SplitObject(object = data, split.by = "donor")

# Per-donor normalization and selection of 3000 variable features (VST),
# as input for anchor-based integration below.
for (i in 1:length(x = data.list)) {
    data.list[[i]] <- NormalizeData(object = data.list[[i]], verbose = FALSE)
    data.list[[i]] <- FindVariableFeatures(object = data.list[[i]],
        selection.method = "vst", nfeatures = 3000, verbose = FALSE)
}
reference.list <- data.list[ c("DOD1", "DOD2", "DOD4", "SPL_1", "SPL_0") ]
Sys.time()
data.anchors <- FindIntegrationAnchors(object.list = reference.list, dims = 1:15)
Sys.time()
saveRDS(file = paste0(data.rds, '20211102_COMBO_SPLEEN_Seurat3_VST_classic_anchors.rds'), data.anchors)
Sys.time()
data.integrated <- IntegrateData(anchorset = data.anchors, dims = 1:15)
Sys.time()
saveRDS(file = paste0(data.rds, '20211102_COMBO_SPLEEN_Seurat3_VST_classic_integrated.rds'), data.integrated)
# Tag all cells of the integrated object with a common original identity.
# NOTE(review): the source contained a "<EMAIL>" redaction artifact here;
# restored to the meta.data accessor on the integrated object — confirm
# against the original notebook.
data.integrated@meta.data$orig.ident <- 'WTV'
library(SeuratDisk)
transfile = paste0(data.path, "../h5ad/", "20211102_COMBO_SPLEEN_Seurat_VST_classic_integration_processed_minimal.h5Seurat")
# # Two-step conversion to h5ad file
# +
Sys.time()
SaveH5Seurat(data.integrated, filename = transfile)
Convert(transfile, dest = "h5ad")
Sys.time()
# -
# ### Then I went back to nb ``09d_PY_...``
| 01_data_transformations/09d2_R_Seurat_integration_of_5x_SPLEEN_samples.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# language: python
# name: python3
# ---
import numpy as np
from pathlib import Path
import re
import pandas as pd
import networkx as nx
from cloudvolume import CloudVolume, Skeleton
from io import StringIO
import os
from brainlit.utils.util import (
check_type,
check_size,
)
from brainlit.utils.Neuron_trace import NeuronTrace
# # NeuronTrace Class
#
# The NeuronTrace class takes neuron data either stored in .swc format or located in an s3 bucket, and loads it as a dataframe.
#
# If the neuron is stored in a .swc file, the swc filepath must be provided, and if the neuron is stored in an s3 bucket, the url, segment ID, and mip (resolution to use for scaling) must be provided.
# +
# Example data: a local .swc reconstruction and an s3 precomputed segment.
#swc_path = "../../../../../tree_2.swc"
swc_path = "../../../../../../Manual-GT/8-01_test_1-5/8-01_test_1/tree_2.swc"
s3_path = "s3://open-neurodata/brainlit/brain1_segments"
seg_id = 11  # segment ID within the s3 volume
mip = 2  # resolution level used for scaling

# Build traces from both sources; both expose the same DataFrame-backed API.
swc_trace = NeuronTrace(swc_path)
s3_trace = NeuronTrace(s3_path,seg_id,mip)
# -
# ## Methods of NeuronTrace Class
# ### 1. get_df_arguments
#
# This method returns the arguments of the dataframe - for .swc files these will include offset, color, cc, branch, and for s3 files there should be none
#
swc_trace.get_df_arguments()
s3_trace.get_df_arguments()
# ### 2. get_df
#
# This method returns the dataframe object for the input neuron
swc_trace.get_df()
s3_trace.get_df()
# ### 3. get_skel
#
# This method returns the Skeleton object of the dataframe, if the input file is a swc
swc_trace.get_skel(benchmarking=True)
s3_trace.get_skel()
swc_trace.get_skel(benchmarking=True,origin=np.asarray([7,7,7]))
s3_trace.get_skel(origin=np.asarray([7,7,7]))
# ### 4. get_df_voxel
#
# This method provides the dataframe converted from spatial units to voxel, if spacing and an origin is provided. If origin is not specified, it defaults to (0,0,0)
#swc trace - origin not provided
swc_trace.get_df_voxel(spacing=np.asarray([2,2,2]))
#swc trace - origin provided
swc_trace.get_df_voxel(spacing=np.asarray([2,2,2]),origin=np.asarray([500,500,500]))
#s3 trace - origin not provided
s3_trace.get_df_voxel(spacing=np.asarray([2,2,2]))
#s3 trace - origin provided
s3_trace.get_df_voxel(spacing=np.asarray([2,2,2]),origin=np.asarray([500,500,500]))
# ### 5. get_graph
#
# This method provides the dataframe in graph format.
# If spacing and origin is specified, the units will be converted from spatial to voxel units prior to making the graph.
# If only spacing is specified, origin will be set to (0,0,0)
#
#swc input
swc_trace.get_graph()
#swc input, only spacing specified, origin defaults to (0,0,0)
swc_trace.get_graph(spacing=np.asarray([2,2,2]))
#swc input, spacing and origin specified
swc_trace.get_graph(spacing=np.asarray([2,2,2]),origin=np.asarray([500,500,500]))
#s3 input
s3_trace.get_graph()
#s3 input, only spacing specified, origin defaults to (0,0,0)
s3_trace.get_graph(spacing=np.asarray([2,2,2]))
#s3 input, spacing and origin specified
s3_trace.get_graph(spacing=np.asarray([2,2,2]),origin=np.asarray([500,500,500]))
# ### 6. get_paths
#
# This method provides the dataframe as a list of paths. If spacing and origin is specified, the units will be converted from spatial to voxel units prior to making the paths. If only spacing is specified, origin will be set to (0,0,0)
#swc input - print 10
swc_trace.get_paths()[0][1:10]
#swc input, only spacing specified, origin defaults to (0,0,0)
swc_trace.get_paths(spacing=np.asarray([2,2,2]))[0][1:10]
#swc input, spacing and origin specified
swc_trace.get_paths(spacing=np.asarray([2,2,2]),origin=np.asarray([500,500,500]))[0][1:10]
#s3 input
s3_trace.get_paths()[0][1:10]
#s3 input, only spacing specified, origin defaults to (0,0,0)
s3_trace.get_paths(spacing=np.asarray([2,2,2]))[0][1:10]
#s3 input, spacing and origin specified
s3_trace.get_paths(spacing=np.asarray([2,2,2]),origin=np.asarray([500,500,500]))[0][1:10]
# ### 7. generate_df_subset
#
# This method reads a new subset dataframe, taking in a list of voxels. An option was added to provide a subset of the neuron rather than the entire neuron (by providing subneuron_start and subneuron_end)
# +
#swc input, no subneuron_start and subneuron_end

# One voxel entry of 10 per row of the trace; a list-repeat literal replaces
# the manual append loop of the original.
my_list = [10] * len(swc_trace.get_df())
vox_in_img_list = [my_list, my_list, my_list]

swc_trace.generate_df_subset(vox_in_img_list)
# +
#swc input, subneuron_start and subneuron_end specified
subneuron_start = 5
subneuron_end = 8

# One voxel entry per node in the requested sub-neuron span; the redundant
# list(...) wrapper of the original is dropped.
my_list = [10] * (subneuron_end - subneuron_start)
vox_in_img_list_2 = [my_list, my_list, my_list]

swc_trace.generate_df_subset(vox_in_img_list_2, subneuron_start, subneuron_end)
# +
#s3 input, no subneuron_start and subneuron_end

# One voxel entry of 10 per row of the trace (list-repeat instead of a loop).
my_list = [10] * len(s3_trace.get_df())
vox_in_img_list_3 = [my_list, my_list, my_list]

s3_trace.generate_df_subset(vox_in_img_list_3)
# +
#s3 input, subneuron_start and subneuron_end specified
subneuron_start = 5
subneuron_end = 8

# One voxel entry per node in the requested sub-neuron span.
my_list = [10] * (subneuron_end - subneuron_start)
vox_in_img_list_4 = [my_list, my_list, my_list]

s3_trace.generate_df_subset(vox_in_img_list_4, subneuron_start, subneuron_end)
# -
# ### 8. get_bfs_subgraph
#
# This method creates a spanning subgraph from a seed node and parent graph created from the dataframes using BFS. The seed node ID and max depth for BFS should be specified. A dataframe storing indices can be specified. If spacing and origin is specified, the units will be converted from spatial to voxel units. If only spacing is specified, origin will be set to (0,0,0).
#
#swc input, specify node_id and depth
swc_trace.get_bfs_subgraph(node_id=11,depth=2)
#swc input, provide a dataframe
swc_trace.get_bfs_subgraph(node_id=11,depth=2,df=s3_trace.get_df())
#swc input, add spacing
swc_trace.get_bfs_subgraph(node_id=11,depth=2,df=s3_trace.get_df(),spacing=np.asarray([2,2,2]))
#swc input, add spacing and origin
swc_trace.get_bfs_subgraph(node_id=11,depth=2,df=s3_trace.get_df(),spacing=np.asarray([2,2,2]),origin=np.asarray([50,50,50]))
#s3 input, specify node_id and depth
s3_trace.get_bfs_subgraph(node_id=11,depth=2)
#s3 input, provide a dataframe
s3_trace.get_bfs_subgraph(node_id=11,depth=2,df=swc_trace.get_df())
#s3 input, add spacing
s3_trace.get_bfs_subgraph(node_id=11,depth=2,df=s3_trace.get_df(),spacing=np.asarray([2,2,2]))
#s3 input, add spacing and origin
s3_trace.get_bfs_subgraph(node_id=11,depth=2,df=s3_trace.get_df(),spacing=np.asarray([2,2,2]),origin=np.asarray([50,50,50]))
# ### 9. get_sub_neuron
#
# This method returns a sub-neuron in graph format with node coordinates bounded by a bounding box with start and end. If spacing and origin is specified, the units will be converted from spatial to voxel units. If only spacing is specified, origin will be set to (0,0,0).
# +
bounding_box=[[1,2,4],[1,2,3]]
#swc input, no spacing and origin
swc_trace.get_sub_neuron(bounding_box)
# -
#swc input, spacing specified
swc_trace.get_sub_neuron(bounding_box,spacing=np.asarray([2,2,2]))
#swc input, spacing and origin specified
swc_trace.get_sub_neuron(bounding_box,spacing=np.asarray([2,2,2]),origin=np.asarray([500,500,500]))
#s3 input, no spacing and origin
s3_trace.get_sub_neuron(bounding_box)
#s3 input, spacing specified
s3_trace.get_sub_neuron(bounding_box,spacing=np.asarray([2,2,2]))
#s3 input, spacing and origin specified
s3_trace.get_sub_neuron(bounding_box,spacing=np.asarray([2,2,2]),origin=np.asarray([500,500,500]))
# ### 10. ssd
#
# Computes significant spatial distance metric between two traces (for APP1)
# +
# Compare two disjoint stretches of the same path with the significant
# spatial distance metric (static method, no instance required).
pts1 = swc_trace.get_paths()[0][1:10]
pts2 = swc_trace.get_paths()[0][11:20]

NeuronTrace.ssd(pts1,pts2)
# -
| docs/NeuronTrace_tutorial.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# language: python
# name: python3
# ---
# ##ThinkDSP
#
# This notebook contains code examples from Chapter 5: Autocorrelation
#
# Copyright 2015 <NAME>
#
# License: [Creative Commons Attribution 4.0 International](http://creativecommons.org/licenses/by/4.0/)
# +
from __future__ import print_function, division
import thinkdsp
import thinkplot
import thinkstats2
import numpy as np
import warnings
warnings.filterwarnings('ignore')
from ipywidgets import interact, interactive, fixed
import ipywidgets as widgets
PI2 = np.pi * 2
# %matplotlib inline
# -
# To investigate serial correlation of signals, let's start with a sine wave at 440 Hz.
def make_sine(offset):
    """Create a half-second 440 Hz sine wave with the given phase offset."""
    return thinkdsp.SinSignal(freq=440, offset=offset).make_wave(
        duration=0.5, framerate=10000)
# I'll make two waves with different phase offsets.
# +
wave1 = make_sine(offset=0)
wave2 = make_sine(offset=1)
thinkplot.preplot(2)
wave1.segment(duration=0.01).plot()
wave2.segment(duration=0.01).plot()
thinkplot.config(xlabel='Time (s)', ylim=[-1.05, 1.05])
# -
# The two waves appears correlated: when one is high, the other is usually high, too.
#
# We can use `np.corrcoef` to compute the correlation matrix.
print(np.corrcoef(wave1.ys, wave2.ys))
# The diagonal elements are the correlations of the waves with themselves, which is why they are 1.
# The off-diagonal elements are the correlations between the two waves. In this case, 0.54 indicates that there is a moderate correlation between these waves.
#
# The correlation matrix is more interesting when there are more than two waves. With only two waves, there is really only one number in the matrix we care about.
#
# `thinkdsp.Wave` provides `corr`, which computes the correlation between waves:
wave1.corr(wave2)
# To investigate the relationship between phase offset and correlation, I'll make an interactive function that computes correlation for each offset:
def compute_corr(offset):
    """Plot a reference sinusoid against a phase-shifted copy and print
    their correlation coefficient."""
    reference = make_sine(offset=0)
    shifted = make_sine(offset=-offset)

    thinkplot.preplot(2)
    reference.segment(duration=0.01).plot()
    shifted.segment(duration=0.01).plot()

    print('corr =', reference.corr(shifted))
    thinkplot.config(xlabel='Time (s)', ylim=[-1.05, 1.05])
# The following interaction plots waves with different phase offsets and prints their correlations:
slider = widgets.FloatSlider(min=0, max=PI2, value=1)
interact(compute_corr, offset=slider);
# Finally, we can plot correlation as a function of offset:
# +
offsets = np.linspace(0, PI2, 101)
corrs = []
for offset in offsets:
wave2 = make_sine(offset)
corr = np.corrcoef(wave1.ys, wave2.ys)[0, 1]
corrs.append(corr)
thinkplot.plot(offsets, corrs)
thinkplot.config(xlabel='Offset (radians)',
ylabel='Correlation',
axis=[0, PI2, -1.05, 1.05])
# -
# That curve is a cosine.
#
# Next we'll compute serial correlations for different kinds of noise.
def serial_corr(wave, lag=1):
    """Return the correlation of a wave with itself shifted by `lag` samples."""
    n = len(wave)
    leading, trailing = wave.ys[lag:], wave.ys[:n - lag]
    return np.corrcoef(leading, trailing, ddof=0)[0, 1]
# We expect uncorrelated noise to be... well... uncorrelated.
signal = thinkdsp.UncorrelatedGaussianNoise()
wave = signal.make_wave(duration=0.5, framerate=11025)
serial_corr(wave)
# As expected, the serial correlation is small.
#
# In Brownian noise, each value is the sum of the previous value and a random "step", so we expect a strong serial correlation:
signal = thinkdsp.BrownianNoise()
wave = signal.make_wave(duration=0.5, framerate=11025)
serial_corr(wave)
# In fact, the correlation is near 1.
#
# Since pink noise is between white and Brownian, we expect an intermediate correlation.
signal = thinkdsp.PinkNoise(beta=1)
wave = signal.make_wave(duration=0.5, framerate=11025)
serial_corr(wave)
# And we get one.
#
# Now we can plot serial correlation as a function of the pink noise parameter $\beta$.
# +
np.random.seed(19)
betas = np.linspace(0, 2, 21)
corrs = []
for beta in betas:
signal = thinkdsp.PinkNoise(beta=beta)
wave = signal.make_wave(duration=1.0, framerate=11025)
corr = serial_corr(wave)
corrs.append(corr)
thinkplot.preplot(1)
thinkplot.plot(betas, corrs)
thinkplot.config(xlabel=r'Pink noise parameter, $\beta$',
ylabel='Serial correlation',
ylim=[0, 1.05])
# -
# The autocorrelation function calls `serial_corr` with different values of `lag`.
def autocorr(wave):
    """Compute the autocorrelation function.

    wave: Wave

    returns: (lags, corrs) where corrs[k] is the serial correlation at lag k
    """
    half = len(wave.ys) // 2
    return range(half), [serial_corr(wave, k) for k in range(half)]
# Now we can plot autocorrelation for pink noise with various values of $\beta$.
def plot_pink_autocorr(beta, label):
    """Plot the autocorrelation of one second of pink noise with parameter beta."""
    noise_wave = thinkdsp.PinkNoise(beta=beta).make_wave(duration=1.0,
                                                         framerate=10000)
    lags, corrs = autocorr(noise_wave)
    thinkplot.plot(lags, corrs, label=label)
# +
np.random.seed(19)
thinkplot.preplot(3)
for beta in [1.7, 1.0, 0.3]:
label = r'$\beta$ = %.1f' % beta
plot_pink_autocorr(beta, label)
thinkplot.config(xlabel='Lag',
ylabel='Correlation',
xlim=[-1, 1000],
ylim=[-0.05, 1.05],
legend=True)
# -
# For low values of $\beta$, the autocorrelation function drops off quickly. As $\beta$ increases, pink noise shows more long range dependency.
# Now let's investigate using autocorrelation for pitch tracking. I'll load a recording of someone singing a chirp:
wave = thinkdsp.read_wave('28042__bcjordan__voicedownbew.wav')
wave.normalize()
wave.make_audio()
# The spectrum tells us what frequencies are present, but for chirps, the frequency components are blurred over a range:
spectrum = wave.make_spectrum()
spectrum.plot()
thinkplot.config(xlabel='Frequency (Hz)', ylabel='Amplitude')
# The spectrogram gives a better picture of how the components vary over time:
spectro = wave.make_spectrogram(seg_length=1024)
spectro.plot(high=4200)
thinkplot.config(xlabel='Time (s)',
ylabel='Frequency (Hz)',
xlim=[wave.start, wave.end])
# We can see the fundamental frequency clearly, starting near 500 Hz and dropping. Some of the harmonics are also visible.
#
# To track the fundamental frequency, we can take a short window:
duration = 0.01
segment = wave.segment(start=0.2, duration=duration)
segment.plot()
thinkplot.config(xlabel='Time (s)', ylim=[-1, 1])
spectrum = segment.make_spectrum()
spectrum.plot(high=1000)
thinkplot.config(xlabel='Frequency (Hz)', ylabel='Amplitude')
# The spectrum shows a clear peak near 400 Hz, but we can't get an very accurate estimate of frequency, partly because the peak is blurry, and partly because even if it were a perfect spike, the frequency resolution is not very good.
len(segment), segment.framerate, spectrum.freq_res
# Each element of the spectrum spans a range of 100 Hz, so we can't get an accurate estimate of the fundamental frequency.
#
# For signals that are at least approximately periodic, we can do better by estimating the length of the period.
#
# The following function plots the segment, and a shifted version of the segment, and computes the correlation between them:
# +
def plot_shifted(wave, offset=0.001, start=0.2, duration=0.01):
    """Plot a segment of `wave` and a time-shifted copy, annotated with
    their correlation.

    wave: Wave to plot
    offset: shift in seconds applied to the second segment
    start: start time of the first segment, in seconds
    duration: segment length in seconds. The original hard-coded 0.01 in the
        segments but read a module-level global `duration` in the final
        config call, so it only worked when that global happened to be 0.01;
        it is now an explicit parameter with the same default.
    """
    thinkplot.preplot(2)
    segment1 = wave.segment(start=start, duration=duration)
    segment1.plot(linewidth=2, alpha=0.8)

    # start earlier and then shift times to line up
    segment2 = wave.segment(start=start-offset, duration=duration)
    segment2.shift(offset)
    segment2.plot(linewidth=2, alpha=0.4)

    corr = segment1.corr(segment2)
    text = r'$\rho =$ %.2g' % corr
    thinkplot.text(segment1.start+0.0005, -0.8, text)
    thinkplot.config(xlabel='Time (s)', xlim=[start, start+duration], ylim=[-1, 1])
plot_shifted(wave, 0.0001)
# -
# With a small shift the segments are still moderately correlated. As the shift increases, the correlation falls for a while, then rises again, peaking when the shift equals the period of the signal.
#
# You can use the following interaction to search for the shift that maximizes correlation:
end = 0.004
slider1 = widgets.FloatSlider(min=0, max=end, step=end/40, value=0)
slider2 = widgets.FloatSlider(min=0.1, max=0.5, step=0.05, value=0.2)
interact(plot_shifted, wave=fixed(wave), offset=slider1, start=slider2)
None
# The `autocorr` function automates this process by computing the correlation for each possible lag, up to half the length of the wave.
# The following figure shows this autocorrelation as a function of lag:
wave = thinkdsp.read_wave('28042__bcjordan__voicedownbew.wav')
wave.normalize()
duration = 0.01
segment = wave.segment(start=0.2, duration=duration)
lags, corrs = autocorr(segment)
thinkplot.plot(lags, corrs)
thinkplot.config(xlabel='Lag (index)', ylabel='Correlation', ylim=[-1, 1])
# The first peak (other than 0) is near lag=100.
#
# We can use `argmax` to find the index of that peak:
low, high = 90, 110
lag = np.array(corrs[low:high]).argmax() + low
lag
# We can convert from an index to a time in seconds:
period = lag / segment.framerate
period
# Given the period in seconds, we can compute frequency:
frequency = 1 / period
frequency
# This should be a better estimate of the fundamental frequency. We can approximate the resolution of this estimate by computing how much we would be off by if the index were off by 1:
segment.framerate / 102, segment.framerate / 100
# The range is less than 10 Hz.
#
# The function I wrote to compute autocorrelations is slow; `np.correlate` is much faster.
N = len(segment)
corrs2 = np.correlate(segment.ys, segment.ys, mode='same')
lags = np.arange(-N//2, N//2)
thinkplot.plot(lags, corrs2)
thinkplot.config(xlabel='Lag', ylabel='Correlation', xlim=[-N//2, N//2])
# `np.correlate` computes correlations for positive and negative lags, so lag=0 is in the middle. For our purposes, we only care about positive lags.
#
# Also, `np.correlate` doesn't correct for the fact that the number of overlapping elements changes as the lag increases.
#
# The following code selects the second half of the results and corrects for the length of the overlap:
# +
N = len(corrs2)
# Number of overlapping samples at each non-negative lag: N at lag 0,
# shrinking by one per lag step.
lengths = range(N, N//2, -1)

# Keep only the non-negative lags (second half of np.correlate output),
# divide out the shrinking overlap, then normalize so lag 0 equals 1.
half = corrs2[N//2:].copy()
half /= lengths
half /= half[0]
thinkplot.plot(half)
thinkplot.config(xlabel='Lag', ylabel='Correlation', ylim=[-1.05, 1.05])
# -
# Now the result is similar to what we computed before.
#
# If we plot the results computed by NumPy and my implementation, they are visually similar. They are not quite identical because my version and theirs are normalized differently.
thinkplot.preplot(2)
thinkplot.plot(half)
thinkplot.plot(corrs)
thinkplot.config(xlabel='Lag', ylabel='Correlation', ylim=[-1.05, 1.05])
# The difference between the NumPy implementation and mine is less than 0.02 over most of the range.
diff = corrs - half[:-1]
thinkplot.plot(diff)
thinkplot.config(xlabel='Lag', ylabel='Difference in correlation')
| ThinkDSP-master/code/chap05.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# language: python
# name: python3
# ---
# ## Welcome to on road lane detection program
# Programm has image processing pipeline that support both RGB images and BGR video input.
#
# RGB image processing consists of next steps:
# - image file reading. File expected to exist on disk
# - RGB to BGR conversion
# - common image processing pipeline
# - processing result conversion (BGR to RGB)
# - result image output
#
# BGR video processing consists of next steps:
# - video file capturing. File expected to exist on disk
# - video frame processing loop:
# * common image processing pipeline
# * result frame output
# - resources release
#
# Image processing pipeline does:
# - BGR to HSV conversion
# - white and yellow colors filter, creates b/w image. Makes other colored objects black
# - detection area filter. Creates new b/w image output of color filter result
# - edge detection
# - lines detection with help of Hough transform method (cv2.HoughLines tool)
# - lanes detection with help of custom algorithm that does line groups detection and median line
# calculation for each found group
# - lane lines Polar coortinate to Cartesian coordinate conversion, drawing lane lines
# - result image composition from initial frame and detected lane lines images
# - result image returned to program for output/further operations
#
# Colors detection on HSV image allows efficientely get rid of noise coused by shadows and road surface color artifacts. Further b/w image processing might save processor time.
#
# As the next improvement I would extract configuration parametes into separate entity. And would use same instance of it for on-flight configuration/adjustment. It can become an interface for another system :)
#
# Imports, compose full file path fuction
# +
import matplotlib.pyplot as plt
import matplotlib.image as mpimg
import numpy as np
import cv2
import pandas
import os
def getPathFor(file_path):
    """Return the absolute path of `file_path` under the working directory.

    The original read `current_directory` set by the notebook magic `%pwd`,
    which jupytext left behind as a comment, so the name was undefined and
    the function raised NameError; os.getcwd() is the portable equivalent.
    """
    path = os.path.join(os.getcwd(), file_path)
    print("About to open file: {}\n".format(path))
    return path
# -
# Next class is responsible for filtering out lanes detection area:
class DetectionAreaFilter:
    """Isolates lane markings: keeps white/yellow pixels, then restricts the
    result to a trapezoidal region of interest in the lower half of the frame."""

    def __init__(self):
        # HSV ranges for the two lane-marking colors.
        self._lower_yellow = np.array([20, 0, 170], dtype=np.uint8)
        self._upper_yellow = np.array([55, 255, 255], dtype=np.uint8)
        self._lower_white = np.array([0, 0, 220], dtype=np.uint8)
        self._upper_white = np.array([255, 25, 255], dtype=np.uint8)
        self._ignore_mask_color = 255

    def getColorMask(self, hsv_image):
        """Return a b/w mask: white and yellow pixels become 255, others 0."""
        yellow_pixels = cv2.inRange(hsv_image, self._lower_yellow, self._upper_yellow)
        white_pixels = cv2.inRange(hsv_image, self._lower_white, self._upper_white)
        return cv2.add(white_pixels, yellow_pixels)

    def applyDetectionArea(self, bw_image, width_adjustment=60, height_adjustment=65):
        """Black out everything outside a trapezoid spanning the road area."""
        height = bw_image.shape[0]
        width = bw_image.shape[1]
        mid_y = height // 2
        mid_x = width // 2

        trapezoid = np.array([[
            (0, height),                                            # bottom left
            (mid_x - width_adjustment, mid_y + height_adjustment),  # top left
            (mid_x + width_adjustment, mid_y + height_adjustment),  # top right
            (width, height),                                        # bottom right
        ]], dtype=np.int32)

        mask = np.zeros_like(bw_image)
        cv2.fillPoly(mask, trapezoid, self._ignore_mask_color)
        return cv2.bitwise_and(bw_image, mask)
# Here is the result of getColorMask function, that turns all white and yellow objects into white ones and makes other colored objects black:
#
# 
# And the result of applyDetectionArea function, that creates new b/w image with applyed trapezium shaped mask out of given b/w image:
#
# 
# Then goes Canny edge detection:
def getEdges(image, low_threshold=50, high_threshold=150):
    """Run Canny edge detection on *image* and return the resulting edge map."""
    return cv2.Canny(image, low_threshold, high_threshold)
# The result of Canny edge detection:
#
# 
# Lines detection with help of the Hough transform method
def getLaneLines(edges):
    """Detect lines in an edge map with the Hough transform.

    Returns an Nx2 numpy array of (rho, theta) polar coordinates,
    or an empty array when no line was found.
    """
    one_degree = np.pi / 180
    detected = cv2.HoughLines(edges, 1, 1 * one_degree, 40)
    if detected is None:
        return np.array([])
    coordinates = [(rho, theta) for entry in detected for rho, theta in entry]
    return np.array(coordinates)
# Result of getLaneLines transformed into lines
#
# 
# Then goes line group detection, which accepts the getLaneLines result as its input parameter.
#
# CoordinateSorter class does line groups detection and median line calculation for each found group.
# Influence it's input parameters on 'sort' function behavior could be described with help of Gherkin language:
#
# CoordinateSorter(max_distance_delta, max_angle_delta, threshold)
#
# Scenario: 'max_distance_delta' and 'max_angle_delta' parameters allow to control line group detection
# Given: 5 lines have been given to sort
# And: it is possible to create a chain 'chain_1' of lines line1, line2, line3
# Where: distance between links is less (or equal) then (max_distance_delta, max_angle_delta)
# And: it is possible to create a chain 'chain_2' of lines line4, line5
# Where: distance between links is less (or equal) then (max_distance_delta, max_angle_delta)
# And: distance between chain_1 and chain_2 edges is more than (max_distance_delta, max_angle_delta)
# Then: chain_1 and chain_2 considered as two separate lines
#
# Scenario: 'threshold' parameter allows to filter out noise lines
# Given: threshold = 4, set of lines
# When: sorter found 3 groups of lines
# And: the first set of lines contains 10 lines, second - 5 lines
# But: the third set of lines contains 3 lines
# Then: the third considered as noise and will not be presented in sorting result
#
# The resulting line is calculated as the median of all lines in a group
class CoordinateSorter:
    """Groups near-identical Hough lines and reduces each group to its median line.

    Two lines (rho, theta) are neighbors when |rho1-rho2| <= max_distance_delta
    AND |theta1-theta2| <= max_angle_delta. Neighbor sets are merged into
    disjoint groups; groups with fewer than *threshold* members are dropped as
    noise, and each surviving group is represented by its median (rho, theta).
    """

    def __init__(self, max_distance_delta, max_angle_delta, threshold):
        # BUG FIX: the original validated max_angle_delta twice and never
        # checked max_distance_delta at all.
        if max_distance_delta < 0:
            raise ValueError("[max_distance_delta] must be positive number")
        if max_angle_delta < 0:
            raise ValueError("[max_angle_delta] must be positive number")
        if threshold < 1 or type(threshold) != int:
            raise ValueError("[threshold] expected to be integer greater then or equal to 1")
        self._max_point_distance = (max_distance_delta, max_angle_delta)
        self._min_points_amount = threshold

    def _sortPointsByDistance(self, points_dict):
        """For every point, collect the set of point indexes within the distance/angle deltas."""
        set_list = list()
        for key, value in points_dict.items():
            indexes_set = {key}
            set_list.append(indexes_set)
            for inner_key, inner_value in points_dict.items():
                point_distance = abs(np.subtract(value, inner_value))
                if point_distance[0] <= self._max_point_distance[0] \
                        and point_distance[1] <= self._max_point_distance[1]:
                    indexes_set.add(inner_key)
        return set_list

    def _splitOnGroups(self, set_list_source):
        """Merge overlapping neighbor sets into disjoint groups; drop groups below threshold."""
        sorted_source = list(set_list_source)
        sorted_source.sort(key=len, reverse=True)
        extremums = list()

        def find_extremums(ordered_list_of_set_items):
            # Recursively grow the largest remaining set by union with every
            # set it intersects; non-intersecting sets seed the next round.
            if len(ordered_list_of_set_items) == 0:
                return
            first_extremum = ordered_list_of_set_items[0]
            items_for_further_sorting = list()
            for dot_set in ordered_list_of_set_items:
                if dot_set.issubset(first_extremum):
                    continue
                if len(first_extremum.intersection(dot_set)):
                    first_extremum = first_extremum.union(dot_set)
                else:
                    items_for_further_sorting.append(dot_set)
            extremums.append(first_extremum)
            find_extremums(items_for_further_sorting)

        find_extremums(sorted_source)
        return filter(lambda x: len(x) >= self._min_points_amount, extremums)

    @staticmethod
    def _getMedian(source_dict, key_set):
        """Return (median distance, median angle) of the points referenced by *key_set*."""
        point_array = [source_dict[item] for item in key_set]
        data_frame = pandas.DataFrame(data=point_array, columns=["distance", "angle"])
        return data_frame["distance"].median(), data_frame["angle"].median()

    def sort(self, points_array):
        """Reduce an array of (rho, theta) lines to one median line per detected group.

        Returns a (possibly empty) list of (median_rho, median_theta) tuples.
        """
        if len(points_array) < self._min_points_amount:
            return []
        # rho is truncated to int so the distance comparison works on whole pixels.
        points_dictionary = {index: (int(coordinates[0]), coordinates[1])
                             for index, coordinates in enumerate(points_array)}
        point_set_list = self._sortPointsByDistance(points_dictionary)
        point_groups = self._splitOnGroups(point_set_list)
        return [self._getMedian(points_dictionary, point_group) for point_group in point_groups]
# Result of lines sorting function in Cartesian coordinate system:
#
# 
# Drawing of lines with needed length:
# +
def convert(rho, theta, y_min, y_max):
    """Convert a polar-coordinate line into two Cartesian endpoints at y_max and y_min."""
    def point_at(y):
        x = (rho - y * np.sin(theta)) / np.cos(theta)
        return int(x), int(y)
    return point_at(y_max), point_at(y_min)


def drawLines(polar_coordinates_array, image, color, line_weight=10):
    """Draw every (rho, theta) line onto *image*, spanning the lower third of the frame."""
    y_max = image.shape[0]
    y_min = int(y_max * 2 / 3)
    for rho, theta in polar_coordinates_array:
        start, end = convert(rho, theta, y_min, y_max)
        cv2.line(image, start, end, color, line_weight)
# -
# The result:
#
# 
# The pipeline itself:
class ImageProcessor:
    """Lane-detection pipeline: takes a BGR frame, returns it with lane lines drawn on top."""

    def __init__(self, detection_area_filter, coordinate_sorter):
        self._bgr_line_color = (0, 0, 255)  # lanes are drawn in red (BGR channel order)
        self._detection_area_filter = detection_area_filter
        self._coordinate_sorter = coordinate_sorter

    def processFrame(self, bgr_frame):
        """Run the full pipeline on one BGR frame and return the composed result image."""
        hsv = cv2.cvtColor(bgr_frame, cv2.COLOR_BGR2HSV)
        color_mask = self._detection_area_filter.getColorMask(hsv)
        detection_area = self._detection_area_filter.applyDetectionArea(color_mask)
        edges = getEdges(detection_area)
        polar_lines = getLaneLines(edges)
        lane_lines = self._coordinate_sorter.sort(polar_lines)
        overlay = np.zeros(bgr_frame.shape, dtype=np.uint8)
        drawLines(lane_lines, overlay, self._bgr_line_color)
        return cv2.addWeighted(overlay, 0.9, bgr_frame, 1, 0)

    def _convert_bw_2_color(self, bw_image):
        """Stack a single-channel image into three identical channels."""
        return np.dstack((bw_image, bw_image, bw_image))
# And the result of processFrame:
#
# 
# RGB image processing entry point
def showImage(file_path):
    """Load an RGB image file, run the lane pipeline on it and display the result."""
    def swap_red_blue(image):
        # Reversing the channel axis converts RGB<->BGR in either direction.
        return image[..., [2, 1, 0]]

    rgb_image = mpimg.imread(getPathFor(file_path))
    processed = img_processor.processFrame(swap_red_blue(rgb_image))
    plt.imshow(swap_red_blue(processed))
    plt.show()
# Video processing entry point
def playVideo(file_path):
    """Play a video file, running the lane pipeline on every frame until EOF or ESC."""
    video_path = getPathFor(file_path)
    video = cv2.VideoCapture(video_path)
    print("About to start video playback...")
    while video.isOpened():
        # VideoCapture.read() returns (success, frame); success is False at the
        # end of the stream. This replaces the original isinstance() workaround
        # with the documented end-of-stream signal.
        success, bgr_frame = video.read()
        if not success:
            break
        frame = img_processor.processFrame(bgr_frame)
        cv2.imshow("output", frame)
        key = cv2.waitKey(1) & 0xFF
        # stop video on ESC key pressed
        if key == 27:
            break
    print("Video has been closed successfully.")
    video.release()
    cv2.destroyAllWindows()
# Constants with image/video paths for testing, pipeline initialization
# +
# Test asset locations, relative to the working directory.
image1 = "input/test_images/solidWhiteCurve.jpg"
image2 = "input/test_images/solidWhiteRight.jpg"
image3 = "input/test_images/solidYellowCurve.jpg"
image4 = "input/test_images/solidYellowCurve2.jpg"
image5 = "input/test_images/solidYellowLeft.jpg"
image6 = "input/test_images/whiteCarLaneSwitch.jpg"
video1 = "input/test_videos/challenge.mp4"
video2 = "input/test_videos/solidYellowLeft.mp4"
video3 = "input/test_videos/solidWhiteRight.mp4"
# Pipeline wiring: the area filter and the line sorter feed the image processor.
detection_area_filter = DetectionAreaFilter()
max_distance_delta = 40 # max distance between lines (rho1 - rho2) in polar coordinate system
max_angle_delta = np.radians(4) # max angle between lines (theta1 - theta2) in polar coordinate system
threshold = 3 # min amount of lines in set filter
coordinate_sorter = CoordinateSorter(max_distance_delta, max_angle_delta, threshold)
img_processor = ImageProcessor(detection_area_filter, coordinate_sorter)
# Process one still image; swap the comments below to process a video instead.
showImage(image4)
#playVideo(video1)
| OnroadLanesDetector.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# name: python3
# ---
# + [markdown] id="0RSp8g9aN8If"
# # Data loading
# + [markdown] id="8ku76Lg8NcXz"
# ## 필요 라이브러리 선언
# + id="bJpKCf3YN0Ap" executionInfo={"status": "ok", "timestamp": 1628234288362, "user_tz": -540, "elapsed": 2954, "user": {"displayName": "\uae40\uc131\uad6d", "photoUrl": "", "userId": "14454204284131910272"}}
from sklearn.datasets import make_regression
import numpy as np
import matplotlib.pyplot as plt
import tensorflow as tf
# + [markdown] id="GKxXX6chNiNG"
# ## Dummy Data 만들기
# + [markdown] id="Ht9LczPYOKli"
# ### make_regression 함수를 통한 생성
# + id="xw07K87_Ngyn" executionInfo={"status": "ok", "timestamp": 1628234288367, "user_tz": -540, "elapsed": 18, "user": {"displayName": "\uae40\uc131\uad6d", "photoUrl": "", "userId": "14454204284131910272"}}
# 100 samples of a single-feature linear relation (bias 10) with Gaussian noise, fixed seed.
X, y = make_regression(n_samples=100, n_features=1, bias=10.0, noise=10.0, random_state=2)
# + [markdown] id="k2qq0tHTORRp"
# ### a=2, B=1 인 더미데이터 생성 예시
# + id="L6BO8OkyOLoF" executionInfo={"status": "ok", "timestamp": 1628234288368, "user_tz": -540, "elapsed": 17, "user": {"displayName": "\uae40\uc131\uad6d", "photoUrl": "", "userId": "14454204284131910272"}}
# x_org = [1,2 , 3 , 4 ,5]
# y_org = [3,5 , 7 ,9.1 ,11.1 ]
# X = np.array(x_org)
# y = np.array(y_org)
# X=X.reshape( [5,-1])
# y=y.reshape( [5,-1])
# print(X.shape)
# print(y.shape)
# + [markdown] id="CG7ZxgStQFzo"
# y 값에 차원 추가
# + colab={"base_uri": "https://localhost:8080/"} id="LosPbxPc9L2F" executionInfo={"status": "ok", "timestamp": 1628234288369, "user_tz": -540, "elapsed": 17, "user": {"displayName": "\uae40\uc131\uad6d", "photoUrl": "", "userId": "14454204284131910272"}} outputId="f0709e6d-6ec3-4bcd-a446-de47891ed688"
y = np.expand_dims(y, axis=1)
y.shape
# + [markdown] id="j_sYa3bMQYZ6"
# ### Data 분석
# + colab={"base_uri": "https://localhost:8080/", "height": 301} id="U6YJlUJI8q3j" executionInfo={"status": "ok", "timestamp": 1628234288370, "user_tz": -540, "elapsed": 16, "user": {"displayName": "\uae40\uc131\uad6d", "photoUrl": "", "userId": "14454204284131910272"}} outputId="5bee3e01-9e57-41ec-b16c-4ee512bbbf02"
print(X.shape)
print(y.shape)
plt.scatter(X, y)
plt.show()
# + [markdown] id="9LUqAh5DOP8u"
# # Regression Example by Keras
# + [markdown] id="QCO5g-VgQjPq"
# ## 훈련데이터와 평가데이터를 나눠주기
# + id="Nc8p1O0eORob" executionInfo={"status": "ok", "timestamp": 1628234288371, "user_tz": -540, "elapsed": 13, "user": {"displayName": "\uae40\uc131\uad6d", "photoUrl": "", "userId": "14454204284131910272"}}
# 80/20 train/test split of the 100 generated samples.
train_x = X[:80]
test_x = X[80:]
train_y = y[:80]
test_y = y[80:]
# + [markdown] id="Lp38oWAsQyXD"
# ## Model 만들기
# + id="y0gu13YI-4Sc" executionInfo={"status": "ok", "timestamp": 1628234288372, "user_tz": -540, "elapsed": 13, "user": {"displayName": "\uae40\uc131\uad6d", "photoUrl": "", "userId": "14454204284131910272"}}
# A single Dense(1, linear) layer is exactly a linear regression y = w*x + b,
# fitted with plain SGD (lr=0.01) minimizing mean squared error.
model = tf.keras.Sequential()
model.add(tf.keras.layers.Dense(1, activation='linear'))
optimizer = tf.keras.optimizers.SGD(0.01)
model.compile(loss='mse', optimizer=optimizer, metrics=['mse'])
# + [markdown] id="RhsdzK5hQ1Ty"
# ## Model 훈련
# + colab={"base_uri": "https://localhost:8080/"} id="VkhfsosE-rWu" executionInfo={"status": "ok", "timestamp": 1628234291273, "user_tz": -540, "elapsed": 2913, "user": {"displayName": "\uae40\uc131\uad6d", "photoUrl": "", "userId": "14454204284131910272"}} outputId="011b9e73-b964-4b1f-b8fd-28802cc24850"
model.fit(train_x, train_y, batch_size=10, epochs=100, shuffle=True)
# + colab={"base_uri": "https://localhost:8080/"} id="oyrhRsSu95MG" executionInfo={"status": "ok", "timestamp": 1628234291274, "user_tz": -540, "elapsed": 31, "user": {"displayName": "\uae40\uc131\uad6d", "photoUrl": "", "userId": "14454204284131910272"}} outputId="500a9ed9-b36f-44eb-a4c4-73054bfecf42"
# NOTE(review): stray scratch expression left over from a previous session; it has no effect.
6.6546e+10
# + [markdown] id="jB2Ofeu6RkuS"
# ### Hidden Layer의 weights 값 보기
# + colab={"base_uri": "https://localhost:8080/"} id="3xRn63l911OE" executionInfo={"status": "ok", "timestamp": 1628234291274, "user_tz": -540, "elapsed": 22, "user": {"displayName": "\uae40\uc131\uad6d", "photoUrl": "", "userId": "14454204284131910272"}} outputId="af72b888-107b-42e9-ba8a-a7f2e200056e"
# dir(model)
# dir(model.layers[0])
model.layers[0].weights
# + [markdown] id="Q5XhlsTSRsce"
# # 모델 평가
# + [markdown] id="P8eFigEtStkk"
# ### test 데이터의 일부를 활용해 직접 확인해보기
# + [markdown] id="X-oHOTLXSyVR"
# y 값 확인
# + colab={"base_uri": "https://localhost:8080/"} id="6zwoXw4sOuDr" executionInfo={"status": "ok", "timestamp": 1628234291275, "user_tz": -540, "elapsed": 19, "user": {"displayName": "\uae40\uc131\uad6d", "photoUrl": "", "userId": "14454204284131910272"}} outputId="b1610206-f154-4c84-d778-a6fcd919ec1e"
test_y[0:4]
# + [markdown] id="a6vi_Xb0S26Z"
# x 값 확인
# + colab={"base_uri": "https://localhost:8080/"} id="psSchmyfR3Fk" executionInfo={"status": "ok", "timestamp": 1628234291275, "user_tz": -540, "elapsed": 17, "user": {"displayName": "\uae40\uc131\uad6d", "photoUrl": "", "userId": "14454204284131910272"}} outputId="1796f2e9-ac3b-47b6-ad18-c4919c6f936b"
test_x[0:4]
# + [markdown] id="dV3IMJvoS6Oz"
# 확인한 x 값을 돌려서 나온 결과물을 위의 y값 확인 한 것과 비교해본다.
# + colab={"base_uri": "https://localhost:8080/"} id="imTokU4LPiVR" executionInfo={"status": "ok", "timestamp": 1628234291276, "user_tz": -540, "elapsed": 16, "user": {"displayName": "\uae40\uc131\uad6d", "photoUrl": "", "userId": "14454204284131910272"}} outputId="19c92c45-7673-4d03-e2ab-777c3da90b8e"
model.predict(test_x[0:4])
# + [markdown] id="1-rrAeK2TJcs"
# ## 임의의 값으로 확인해보기
# + colab={"base_uri": "https://localhost:8080/"} id="7lpaYzcV7vC3" executionInfo={"status": "ok", "timestamp": 1628234291276, "user_tz": -540, "elapsed": 13, "user": {"displayName": "\uae40\uc131\uad6d", "photoUrl": "", "userId": "14454204284131910272"}} outputId="dc0dba7c-de03-4d25-bb96-9c63d978505e"
xx=np.array([[9]])
xx.shape
# + colab={"base_uri": "https://localhost:8080/"} id="dorgosEX7_LC" executionInfo={"status": "ok", "timestamp": 1628234291276, "user_tz": -540, "elapsed": 11, "user": {"displayName": "\uae40\uc131\uad6d", "photoUrl": "", "userId": "14454204284131910272"}} outputId="86b601e4-2608-47ee-e5c9-540297db909f"
model.predict( xx )
# + colab={"base_uri": "https://localhost:8080/"} id="sNK6mpwa7mVr" executionInfo={"status": "ok", "timestamp": 1628234291277, "user_tz": -540, "elapsed": 10, "user": {"displayName": "\uae40\uc131\uad6d", "photoUrl": "", "userId": "14454204284131910272"}} outputId="a287e963-27d2-444d-ca56-04eccd57c621"
model.predict( np.array([[0]]) )
# + [markdown] id="ZjVkaPhhTxzt"
# ## test 데이터를 활용해 평가하기
# + colab={"base_uri": "https://localhost:8080/"} id="KwXUGBk4PrUh" executionInfo={"status": "ok", "timestamp": 1628234291776, "user_tz": -540, "elapsed": 508, "user": {"displayName": "\uae40\uc131\uad6d", "photoUrl": "", "userId": "14454204284131910272"}} outputId="55403c08-92a2-45e9-ef0e-a557c82fb2bc"
model.evaluate(test_x, test_y)
# + colab={"base_uri": "https://localhost:8080/", "height": 283} id="Ac8f7kjuP_RZ" executionInfo={"status": "ok", "timestamp": 1628234291778, "user_tz": -540, "elapsed": 17, "user": {"displayName": "\uae40\uc131\uad6d", "photoUrl": "", "userId": "14454204284131910272"}} outputId="3adcd6f8-54a2-4ee4-9704-4bea55aaddef"
import seaborn as sns
sns.regplot(x=test_x,y=model.predict(test_x),fit_reg=True)
# + id="dzFBLExaShg6" executionInfo={"status": "ok", "timestamp": 1628234291780, "user_tz": -540, "elapsed": 12, "user": {"displayName": "\uae40\uc131\uad6d", "photoUrl": "", "userId": "14454204284131910272"}}
| 02.KerasIntro/00.Keras_simple_regression.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# name: python3
# ---
# + [markdown] id="view-in-github" colab_type="text"
# <a href="https://colab.research.google.com/github/yohanesnuwara/computational-geophysics/blob/master/seismic/dutch_f3_from_opendtect.ipynb" target="_parent"><img src="https://colab.research.google.com/assets/colab-badge.svg" alt="Open In Colab"/></a>
# + [markdown] id="SZmLH-2w7DzC" colab_type="text"
# ## Reference
#
# * To use `PyDrive` to upload **file** from Colab to Drive: [Ask Forum](https://raspberrypi.stackexchange.com/questions/14186/use-pydrive-to-upload-files-to-google-drive-folder/26644#26644)
#
# * To use `PyDrive` to upload **folder** from Colab to Drive: [Tutorial](https://colab.research.google.com/drive/1un9uWAZU0jf2viLnMduYz-tpR0UCNyn9)
# + [markdown] id="bpKTNjTcmpza" colab_type="text"
# Code to open Google Drive data from its ID (URL):<br>
# `wget --load-cookies /tmp/cookies.txt "https://docs.google.com/uc?export=download&confirm=$(wget --quiet --save-cookies /tmp/cookies.txt --keep-session-cookies --no-check-certificate 'https://docs.google.com/uc?export=download&id=FILEID' -O- | sed -rn 's/.*confirm=([0-9A-Za-z_]+).*/\1\n/p')&id=FILEID" -O FILENAME && rm -rf /tmp/cookies.txt`
# + id="CQOFN8UplnBC" colab_type="code" colab={"base_uri": "https://localhost:8080/", "height": 440} outputId="431e348a-6192-4dc8-e40e-0e1e4fd6d247"
# !wget --load-cookies /tmp/cookies.txt "https://docs.google.com/uc?export=download&confirm=$(wget --quiet --save-cookies /tmp/cookies.txt --keep-session-cookies --no-check-certificate 'https://docs.google.com/uc?export=download&id=0B2QlmHOqpoj1LW52dWlCOWpPSkE' -O- | sed -rn 's/.*confirm=([0-9A-Za-z_]+).*/\1\n/p')&id=0B2QlmHOqpoj1LW52dWlCOWpPSkE" -O Dutch_F3.zip && rm -rf /tmp/cookies.txt
# + id="I_U37bGanpzY" colab_type="code" colab={"base_uri": "https://localhost:8080/", "height": 1000} outputId="45172113-b5cc-4cb9-a7e1-1ef447e2bfc4"
# !unzip '/content/Dutch_F3.zip' -d '/content/dutch_f3'
# + id="fwkl4GPrpD-L" colab_type="code" colab={}
# !unzip /content/dutch_f3/F3_Demo_2016_training_v6/Rawdata/Well_data/All_wells_RawData.zip -d /content/dutch_f3/wells
# + [markdown] id="23lEIYdrW4LP" colab_type="text"
# ## Authorize Google Drive account
# + id="qVw5qpMhqZXb" colab_type="code" colab={"base_uri": "https://localhost:8080/", "height": 121} outputId="b0526cef-9f46-4201-abee-7b2682a0b2b5"
from google.colab import drive
drive.mount('/content/drive')
# + [markdown] id="aYsyAu5aW8pj" colab_type="text"
# ## Import PyDrive and Authenticate
# + id="0L6-HjIvryo9" colab_type="code" colab={}
from pydrive.auth import GoogleAuth
from pydrive.drive import GoogleDrive
from google.colab import auth
from oauth2client.client import GoogleCredentials
# Authenticate the Colab user and build an authorized PyDrive client from the
# application-default credentials.
auth.authenticate_user()
gauth = GoogleAuth()
gauth.credentials = GoogleCredentials.get_application_default()
drive = GoogleDrive(gauth)
# + [markdown] id="S6wbwpxLXNPp" colab_type="text"
# ## List FID of each folder and file in my Google Drive
# + id="CfBbyDuJqkQe" colab_type="code" colab={"base_uri": "https://localhost:8080/", "height": 1000} outputId="f0011e62-2fc0-497c-84f1-4e2ed737c62e"
# get the FID of a folder in Gdrive
file_list = drive.ListFile({'q': "'root' in parents and trashed=false"}).GetList()
for file1 in file_list:
print ('title: %s, id: %s' % (file1['title'], file1['id']))
# + [markdown] id="PomV7dSRXVcc" colab_type="text"
# # Export Folder from Colab to Drive
# + id="3lZSWcPfuzgL" colab_type="code" colab={}
# !pip install -U -q PyDrive
from google.colab import files
from pydrive.auth import GoogleAuth
from pydrive.drive import GoogleDrive
from google.colab import auth
from oauth2client.client import GoogleCredentials
import zipfile
import os
import sys
# + id="OMat07cBXdb7" colab_type="code" colab={}
zipname = 'horizons'
def zipfolder(foldername, target_dir):
    """Zip the contents of *target_dir* into '<foldername>.zip' in the current directory.

    Archive entries are stored relative to *target_dir*, i.e. the directory
    itself does not appear as a path prefix inside the archive.
    """
    # BUG FIX: the original never closed the ZipFile, so the archive could stay
    # unflushed until interpreter exit; the context manager guarantees the
    # central directory is written and the file is closed.
    rootlen = len(target_dir) + 1
    with zipfile.ZipFile(foldername + '.zip', 'w', zipfile.ZIP_DEFLATED) as zipobj:
        for base, dirs, files in os.walk(target_dir):
            for file in files:
                fn = os.path.join(base, file)
                zipobj.write(fn, fn[rootlen:])
zipfolder(zipname, '/content/dutch_f3/F3_Demo_2016_training_v6/Rawdata/Surface_data')
# + id="3ePtcRqUu16H" colab_type="code" colab={}
# Create & upload a file text file.
# NOTE(review): the Drive folder id below appears redacted ('<KEY>' placeholder);
# substitute a real folder id before running.
fid = '1Ri44QHEDrDm7Bijh0FHK<KEY>'
file1 = drive.CreateFile({"parents": [{"kind": "drive#fileLink", "id": fid}]})
file1.SetContentFile(zipname+".zip")
file1.Upload()
# + [markdown] id="9LrDQwNeYyuu" colab_type="text"
# ## Export File from Colab to Drive
# + id="tf7IS9B3Y30K" colab_type="code" colab={}
filename = 'faults.xyt'
f = drive.CreateFile({'title': filename, 'parents': [{'id': fid}]})
f.SetContentFile('/content/dutch_f3/F3_Demo_2016_training_v6/Rawdata/Faults/FaultA.xyt')
f.Upload()
| seismic/dutch_f3_from_opendtect.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# language: python
# name: python3
# ---
# # The Rational Speech Act framework
# Human language depends on the assumption of *cooperativity*, that speakers attempt to provide relevant information to the listener; listeners can use this assumption to reason *pragmatically* about the likely state of the world given the utterance chosen by the speaker.
#
# The Rational Speech Act framework formalizes these ideas using probabilistic decision making and reasoning.
#
# Note: This notebook must be run against Pyro 4392d54a220c328ee356600fb69f82166330d3d6 or later.
# +
#first some imports
import torch
torch.set_default_dtype(torch.float64) # double precision for numerical stability
import collections
import argparse
import matplotlib.pyplot as plt
import pyro
import pyro.distributions as dist
import pyro.poutine as poutine
from search_inference import factor, HashingMarginal, memoize, Search
# -
# Before we can defined RSA, we specify a helper function that wraps up inference. `Marginal` takes an un-normalized stochastic function, constructs the distribution over execution traces by using `Search`, and constructs the marginal distribution on return values (via `HashingMarginal`).
def Marginal(fn):
    """Decorator: turn an unnormalized model into its memoized marginal distribution.

    Runs exhaustive `Search` over the model's execution traces and collapses the
    result into a `HashingMarginal` over return values.
    """
    def marginal(*args):
        return HashingMarginal(Search(fn).run(*args))
    return memoize(marginal)
# The RSA model captures recursive social reasoning -- a listener thinks about a speaker who thinks about a listener....
#
# To start, the `literal_listener` simply imposes that the utterance is true. Mathematically:
# $$P_\text{Lit}(s|u) \propto {\mathcal L}(u,s)P(s)$$
#
# In code:
@Marginal
def literal_listener(utterance):
    """Literal listener P_Lit(s|u): the state prior conditioned on the utterance being true."""
    state = state_prior()
    # Hard conditioning: a very large negative log-factor effectively zeroes
    # out states where the utterance is literally false.
    factor("literal_meaning", 0. if meaning(utterance, state) else -999999.)
    return state
# Next the cooperative speaker chooses an utterance to convey a given state to the literal listener. Mathematically:
#
# $$P_S(u|s) \propto [P_\text{Lit}(s|u) P(u)]^\alpha$$
#
# In the code below, the `utterance_prior` captures the cost of producing an utterance, while the `pyro.sample` expression captures that the literal listener guesses the right state (`obs=state` indicates that the sampled value is observed to be the correct `state`).
#
# We use `poutine.scale` to raise the entire execution probability to the power of `alpha` -- this yields a softmax decision rule with optimality parameter `alpha`.
@Marginal
def speaker(state):
    """Speaker P_S(u|s): choose an utterance a literal listener would decode as *state*."""
    alpha = 1.
    # poutine.scale raises the whole execution probability to the power alpha,
    # yielding a softmax decision rule with optimality parameter alpha.
    with poutine.scale(scale=torch.tensor(alpha)):
        utterance = utterance_prior()
        # obs=state: the literal listener is observed to recover the intended state.
        pyro.sample("listener", literal_listener(utterance), obs=state)
        return utterance
# Finally, we can define the pragmatic_listener, who infers which state is likely, given that the speaker chose a given utterance. Mathematically:
#
# $$P_L(s|u) \propto P_S(u|s) P(s)$$
#
# In code:
@Marginal
def pragmatic_listener(utterance):
    """Pragmatic listener P_L(s|u): infer the state given that the speaker chose *utterance*."""
    state = state_prior()
    # obs=utterance: the speaker is observed to have produced this utterance.
    pyro.sample("speaker", speaker(state), obs=utterance)
    return state
# Now let's set up a simple world by filling in the priors. We imagine there are 4 objects each either blue or red, and the possible utterances are "none are blue", "some are blue", "all are blue".
#
# We take the prior probabilities for the number of blue objects and the utterance to be uniform.
# +
total_number = 4

def state_prior():
    """Uniform prior over the number of blue objects, 0..total_number inclusive."""
    # BUG FIX: the original wrote `torch.ones(total_number+1) / total_number+1`,
    # which by operator precedence divides first and then ADDS 1 to every entry.
    # Categorical renormalizes probs, so the result happened to be uniform
    # anyway, but the intended expression is 1/(total_number+1) per state.
    n = pyro.sample("state", dist.Categorical(probs=torch.ones(total_number + 1) / (total_number + 1)))
    return n

def utterance_prior():
    """Uniform prior over the three quantifier utterances."""
    ix = pyro.sample("utt", dist.Categorical(probs=torch.ones(3) / 3))
    return ["none", "some", "all"][ix]
# -
# Finally, the meaning function (notated $\mathcal L$ above):
# +
# Literal semantics of each utterance, as predicates over the number of blue objects.
meanings = {
    "none": lambda count: count == 0,
    "some": lambda count: count > 0,
    "all": lambda count: count == total_number,
}

def meaning(utterance, state):
    """Return whether *utterance* is literally true of *state* blue objects."""
    return meanings[utterance](state)
# -
# Now let's see if it works: how does the pragmatic listener interpret the "some" utterance?
# +
#silly plotting helper:
def plot_dist(d):
    """Bar-plot helper: show the probability of each value in the support of distribution *d*."""
    support = d.enumerate_support()
    data = [d.log_prob(s).exp().item() for s in d.enumerate_support()]
    names = support
    ax = plt.subplot(111)
    width = 0.3
    # BUG FIX: the original passed lazy `map` objects to ax.bar/set_xticks,
    # which fails under Python 3; materialize them as lists instead.
    positions = list(range(1, len(data) + 1))
    bins = [x - width / 2 for x in positions]
    ax.bar(bins, data, width=width)
    ax.set_xticks(positions)
    ax.set_xticklabels(names, rotation=45, rotation_mode="anchor", ha="right")
interp_dist = pragmatic_listener("some")
plot_dist(interp_dist)
# -
# Yay, we get a *scalar implicature*: "some" is interpreted as likely not including all 4. Try looking at the `literal_listener` too -- no implicature.
| tutorial/source/RSA-implicature.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# language: python
# name: python3
# ---
# + [markdown] slideshow={"slide_type": "slide"}
# # <NAME> / <NAME> BORGNE 1SI
# + [markdown] slideshow={"slide_type": "slide"}
# # Wind Charger
#
# ***
#
# ***
# + [markdown] slideshow={"slide_type": "slide"}
# ## 1. Analyse fonctionnelle externe
# + [markdown] slideshow={"slide_type": "subslide"}
# ### ♦ Bête à cornes
#
# <img src="image/bete a corne.png" alt="Bête a cornes" title="Bête a cornes">
# + [markdown] slideshow={"slide_type": "subslide"}
# ### ♦ Diagramme pieuvre
#
# <img src="image/diagramme pieuvre.png" alt="Diagramme pieuvre" title="Diagramme pieuvre">
#
# | | |
# |------|-------------------------------------------------------|
# | FP 1 | recharger un batterie de 5V grâce à l'énergie du vent |
# | FC 1 | être rétractable et facilement transportable |
# | FC 2 | résister au milieu extérieur (pluie, vent trop fort) |
# | FC 3 | avoir un joli design |
#
# ***
# + [markdown] slideshow={"slide_type": "slide"}
# ## 2. Cahier des charges fonctionnel
# + [markdown] slideshow={"slide_type": "subslide"}
# | Fonctions de service | Critères d'appréciation | Niveau d'exigence |
# |-------------------------------------------------------------|--------------------------|------------------------|
# | FS 1: recharger un batterie de 5V grâce à l'énergie du vent | tension stable | 5V |
# | | durée de charge | 1h30-2h |
# | FS 2: être rétractable et facilement transportable | taille | maximum 30cm |
# | FS 3: résister au milieu extérieur (pluie, vent trop fort) | durée dans le temps | ne pas tomber/rouiller |
# | FS 4: avoir un joli design | formes / couleurs | attirer le regard |
#
# ***
# + [markdown] slideshow={"slide_type": "slide"}
# ## 3. Analyse fonctionnelle interne
# + [markdown] slideshow={"slide_type": "subslide"}
# ### ♦ Chaîne d'énergie et d'information
#
# <img src="image/chaine.png" alt="Chaîne d'énergie et d'information" title="Chaîne d'énergie et d'information">
#
# ***
# + [markdown] slideshow={"slide_type": "slide"}
# ## 4. Modélisation et schéma cinématique
# + [markdown] slideshow={"slide_type": "subslide"}
# ### ♦ Schéma cinématique
#
# <img src="image/schema cinematique.JPG" alt="schéma cinématique" title="schéma cinématique">
# + [markdown] slideshow={"slide_type": "subslide"}
# ### ♦ Modélisation
#
# <img src="image/image eolienne1.png" alt="modélisation éolienne" title="modélisation éolienne">
# + [markdown] slideshow={"slide_type": "subslide"}
# <img src="image/image eolienne2.png" alt="modélisation éolienne" title="modélisation éolienne">
# + [markdown] slideshow={"slide_type": "subslide"}
# <img src="image/image eolienne3.png" alt="modélisation éolienne" title="modélisation éolienne">
#
# ***
#
# ***
# -
| wind charger nils emilien/Wind Charger.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# language: python
# name: python3
# ---
# + hideCode=false
# This is a cell to hide all Code cells that has the string "hide_this_cell" from displaying
# This code must be at first and last cell to work properly
from IPython.display import HTML, display
hide_this_cell = ''
HTML('''<script>
code_show=true;
function code_toggle() {
if (code_show) {
$('div.input').each(function(id) {
el = $(this).find('.cm-variable:first');
if (id == 0 || el.text() == 'hide_this_cell') {
$(this).hide();
}
});
$('div.output_prompt').css('opacity', 0);
} else {
$('div.input').each(function(id) {
$(this).show();
});
$('div.output_prompt').css('opacity', 1);
}
code_show = !code_show
}
$( document ).ready(code_toggle);
</script>
<form action="javascript:code_toggle()">
<input style="opacity:0.2" type="submit" value="Show/hide the code running under the hood">
</form>''')
# +
# This cell is used for importing essential libraries that is not used to understand Qiskit and is therefore hidden
hide_this_cell
from IPython.display import clear_output, Markdown
from ipywidgets import widgets
# -
# # Knapper
# + [markdown] hideCode=false hideOutput=false hidePrompt=false
# ## Første knap
# Den nedenstående celle med kode har 2 knapper i alt.<br>
# De fungere i en cyklus, hvor man kan "vise" og "skjule" det, man gemmer bag knappen.
# + hideCode=false hidePrompt=false hide_input=true
hide_this_cell
# Cyclic show/hide pair: each handler clears the cell output and displays the
# opposite button (plus, for "show", the otherwise hidden Markdown/LaTeX content).
buttonShow = widgets.Button(description='Show answer', tooltip='Click me to show the answer')
buttonHide = widgets.Button(description='Hide answer', tooltip='Click me to hide the answer')
def handleClickShow(b):
    # Reveal: replace the output with the "hide" button followed by the answer.
    clear_output()
    display(buttonHide)
    display(Markdown('*Her er markdown* - Markdown med LaTeX $\phi$'))
    display(Markdown('<center> Her har vi LaTeX centreret $\phi$ med tekst</center>'))
def handleClickHide(b):
    # Hide: replace the output with just the "show" button again.
    clear_output()
    display(buttonShow)
buttonShow.on_click(handleClickShow)
buttonHide.on_click(handleClickHide)
display(buttonShow)
# -
# ---
# ## <NAME>
# Den nedenstående celle med kode har én knap <br>
# Ønsker man, at knappen skal forsvinde i forlængelse med, at man viser det skulte content, vil denne knap gøre tricket.
# + tags=["tagz"]
hide_this_cell
# One-shot button: clicking removes the button itself and reveals the content.
tooltiptext = "The button will disappear when pressed and consequently reveals the answer"
buttonDisappear = widgets.Button(description='Reveal the answer', tooltip=tooltiptext, disabled=False)
def handleClickDisappear(b):
    # clear_output() also removes the button widget from the cell output,
    # so only the revealed content remains.
    clear_output()
    display(Markdown('*Her er markdown* - Markdown med LaTeX $\phi$'))
    display(Markdown('<center> Her har vi LaTeX centreret $\phi$ med tekst</center>'))
    print("Money In The Bank")
buttonDisappear.on_click(handleClickDisappear)
display(buttonDisappear)
# -
# ---
# ## Mulighed for flere input senere
# +
# This is the same cell as seen at the beginning to hide code snippets from displaying.
# This must be the last cell in the jupyter notebook!
# (Re)defining `hide_this_cell` guarantees the marker name exists for the cells above.
# The embedded script toggles visibility of the first input cell (id == 0) and of
# every input cell whose first CodeMirror variable is `hide_this_cell`.
# NOTE(review): `HTML` is assumed to be imported earlier in the notebook — it is not
# imported in this chunk; confirm.
hide_this_cell = ''
HTML('''<script>
code_show=true;
function code_toggle() {
if (code_show) {
$('div.input').each(function(id) {
el = $(this).find('.cm-variable:first');
if (id == 0 || el.text() == 'hide_this_cell') {
$(this).hide();
}
});
$('div.output_prompt').css('opacity', 0);
} else {
$('div.input').each(function(id) {
$(this).show();
});
$('div.output_prompt').css('opacity', 1);
}
code_show = !code_show
}
$( document ).ready(code_toggle);
</script>''');
# -
| Boilerplates/Buttons.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# language: python
# name: python3
# ---
# # Multi-armedBandits - Chapter 2
# ## A k-armed Bandit Problem
# Bandit problem is a simplified setting for reinforcement learning in which you are faced repeatedly with a choice among k different options (actions). After each choice you receive a numerical reward chosen from a stationary probability distribution that depends on the action you selected. Your objective is to maximize the expected total
# reward over some time period, for example, over 1000 action selections, or time steps.
#
# In our k-armed bandit problem, each of the k actions has an expected or mean reward given that
# that action is selected; let us call this the value of that action. We denote the action selected on time
# step t as At , and the corresponding reward as Rt . The value then of an arbitrary **action a**, denoted
# q∗ (a), is the **expected reward** given that a is selected:
# $$q_{*}(a) = E \{ \ R_t \ | \ A_t=a \} $$
# If you knew the value of each action, then it would be trivial to solve the k-armed bandit problem: you
# would always select the action with highest value. We assume that you do not know the action values
# with certainty, although you may have estimates. We denote the **estimated value** of action a at time
# step t as **$Q_t(a)$**. We would like $Q_t(a)$ to be close to $q_*(a)$.
#
# If you maintain estimates of the action values, then at any time step there is at least one action whose
# estimated value is greatest. We call these the **greedy actions**. When you select one of these actions,
# we say that you are **exploiting** your current knowledge of the values of the actions. If instead you
# select one of the nongreedy actions, then we say you are **exploring**, because this enables you to improve
# your estimate of the nongreedy action’s value.
#
# **Exploitation** $\to$ Maximize the expected reward on the **one step**.
#
# **Exploration** $\to$ Produce the greater total reward in the **long run**.
#
# Whether it is better to explore or exploit depends in a complex way on the precise values of the estimates, uncertainties, and the number of remaining steps.
# +
import numpy as np
import warnings
from enum import Enum

# Suppress RuntimeWarnings from numpy (e.g. log(0) and divide-by-zero produced by
# the UCB formula before every arm has been pulled).  The old `np.warnings`
# attribute was an accidental re-export of the stdlib `warnings` module and was
# removed in NumPy 1.25, so the stdlib module is called directly.
warnings.filterwarnings('ignore')
class EstimationMethod(Enum):
    """Enumerates the supported ways of updating an action's value estimate."""
    SAMPLE_AVERAGE_INCREMENTAL = 1  # incremental sample mean, step size 1/n
    CONSTANT_STEP_SIZE = 2          # exponential recency-weighted average, step size alpha
    GRADIENT_BANDIT = 3             # soft-max preferences updated by stochastic gradient ascent
class ActionSelectionMethod(Enum):
    """Enumerates the supported strategies for choosing the next action."""
    EPSILON_GREEDY = 1          # greedy with probability 1 - epsilon, uniform random otherwise
    UPPER_CONFIDENCE_BOUND = 2  # argmax of Q(a) + c * sqrt(ln t / N(a))
    GRADIENT_BANDIT = 3         # sample from the soft-max action probabilities
class Bandit:
    """A k-armed bandit testbed (Sutton & Barto, Chapter 2).

    You are repeatedly faced with a choice among `arms` actions; after each choice
    you receive a reward drawn from a normal distribution centred on the chosen
    action's true value q*(a).  With `random_walk` > 0 the true values drift,
    making the problem nonstationary.

    Attributes:
        arms (int) : number of arms of the problem (k)
        estimation_method (EstimationMethod) : how the action-value estimates are updated
        action_selection_method (ActionSelectionMethod) : how the next action is chosen
        c (float) : exploration coefficient of UCB (c > 0)
        epsilon (float) : probability of acting randomly in the epsilon-greedy strategy
        alpha (float) : step size for the gradient and constant-step-size updates
        random_walk (float) : std-dev of the normal increment added to every q*(a)
            each step; 0 disables the walk (stationary problem)
        start_equal (bool) : if True, all true values start equal at q*(a) = 0
        initial_value_estimation (float) : initial value of every action estimate
        start_time (int) : initial time step (use 1 for UCB so log(time) stays finite)
        q_star_mean (float) : mean of the normal distribution the q*(a) are drawn from
        gradient_baseline (bool) : whether the gradient update subtracts the
            average-reward baseline
        time (int) : current time step
        action_probability (np.array) : soft-max action probabilities (gradient method)
        average_reward (float) : running mean of all rewards received so far
        q_estimations (np.array) : current value estimate Q(a) (or preference H(a)
            for the gradient method) per action
        action_counts (np.array) : number of times each action has been chosen
        q_true_values (np.array) : the true action values q*(a)
    """

    def __init__(self, arms=10, estimation_method=EstimationMethod.SAMPLE_AVERAGE_INCREMENTAL,
                 epsilon=0.1, gradient_baseline=False, alpha=0.1, start_equal=False, random_walk=0,
                 action_selection_method=ActionSelectionMethod.EPSILON_GREEDY, c=2,
                 initial_value_estimation=0.0, start_time=0, q_star_mean=0):
        # parameters
        self.arms = arms
        self.estimation_method = estimation_method
        self.action_selection_method = action_selection_method
        self.c = c
        self.epsilon = epsilon
        self.alpha = alpha
        self.random_walk = random_walk
        self.start_equal = start_equal
        self.initial_value_estimation = initial_value_estimation
        self.start_time = start_time
        self.q_star_mean = q_star_mean
        self.gradient_baseline = gradient_baseline
        # environment: a fresh instance is exactly a freshly reset one, so reuse the
        # reset logic instead of duplicating it here
        self.start_new_environment()

    def start_new_environment(self):
        """Reset all mutable environment state for a new independent run."""
        self.time = self.start_time
        # uniform (unnormalised) placeholder; only the gradient method uses this, and
        # it overwrites it with the soft-max distribution on every selection
        self.action_probability = np.ones(self.arms)
        self.average_reward = 0
        self.q_estimations = np.full(self.arms, self.initial_value_estimation, dtype=float)
        self.action_counts = np.zeros(self.arms)
        self.q_true_values = np.zeros(self.arms)
        if not self.start_equal:
            # stationary testbed: q*(a) ~ N(q_star_mean, 1), fixed for the whole run
            self.q_true_values = np.random.normal(self.q_star_mean, 1, self.arms)

    def select_action(self):
        """Return the index of the next action according to the selection strategy."""
        if self.action_selection_method == ActionSelectionMethod.EPSILON_GREEDY:
            if np.random.random_sample() < self.epsilon:
                # explore: pick uniformly at random
                return np.random.choice(np.arange(self.arms))
            # exploit: argmax over the estimates, breaking ties randomly
            return np.random.choice(np.flatnonzero(self.q_estimations == self.q_estimations.max()))
        if self.action_selection_method == ActionSelectionMethod.UPPER_CONFIDENCE_BOUND:
            # Q(a) + c * sqrt(ln t / N(a)); untried arms yield inf so they are tried
            # first (the resulting numpy warnings are suppressed globally above)
            ucb_value = self.q_estimations + \
                self.c * np.sqrt(np.log(self.time) / self.action_counts)
            return np.argmax(ucb_value)
        if self.action_selection_method == ActionSelectionMethod.GRADIENT_BANDIT:
            # sample an action from the soft-max distribution over the preferences
            exp_h = np.exp(self.q_estimations)
            self.action_probability = exp_h / exp_h.sum()
            return np.random.choice(np.arange(self.arms), p=self.action_probability)
        raise ValueError('unknown action selection method: {}'.format(self.action_selection_method))

    def step(self, action):
        """Play `action` for one time step, update the estimates, and return the reward."""
        # reward ~ N(q*(action), 1)
        reward = np.random.normal(self.q_true_values[action], scale=1.0)
        self.time += 1
        self.average_reward = (self.average_reward * (self.time - 1) + reward) / self.time
        self.action_counts[action] += 1
        if self.random_walk != 0:
            # nonstationary testbed: every q*(a) takes an independent random-walk step
            mu, sigma = 0, self.random_walk  # mean and standard deviation
            self.q_true_values += np.random.normal(mu, sigma, self.arms)
        if self.estimation_method == EstimationMethod.SAMPLE_AVERAGE_INCREMENTAL:
            # incremental sample mean: Q += (R - Q) / N(a)
            self.q_estimations[action] += (reward - self.q_estimations[action]) / self.action_counts[action]
        elif self.estimation_method == EstimationMethod.CONSTANT_STEP_SIZE:
            # exponential recency-weighted average: Q += alpha * (R - Q)
            self.q_estimations[action] += self.alpha * (reward - self.q_estimations[action])
        elif self.estimation_method == EstimationMethod.GRADIENT_BANDIT:
            baseline = self.average_reward if self.gradient_baseline else 0
            # preference update: H += alpha * (R - baseline) * (1{a=action} - pi(a))
            one = np.zeros(self.arms)
            one[action] = 1
            self.q_estimations -= self.alpha * (reward - baseline) * (self.action_probability - one)
        return reward

    def simulate(self, runs=2000, time_steps=1000):
        """Average `runs` independent runs of `time_steps` steps each.

        Returns a pair of length-`time_steps` arrays:
        (fraction of runs in which the optimal action was taken at step t,
         mean reward received at step t).
        """
        best_action_counts = np.zeros((runs, time_steps))
        rewards = np.zeros((runs, time_steps))
        for r in range(runs):
            self.start_new_environment()
            for t in range(time_steps):
                action = self.select_action()
                reward = self.step(action)
                rewards[r, t] = reward
                if action == np.argmax(self.q_true_values):
                    # the chosen action was the (currently) optimal one
                    best_action_counts[r, t] = 1
        return best_action_counts.mean(axis=0), rewards.mean(axis=0)
# -
# ## Action-value Methods
# We begin by looking more closely at some simple methods for estimating the values of actions and for using the estimates to make action selection decisions. Recall that the true value of an action is the mean reward when that action is selected. One natural way to estimate this is by **averaging the rewards** actually received:
#
# $$ Q_t(a) = \frac{sum \ of \ rewards\ when \ a \ taken \ prior \ to \ t}{number \ of \ times \ a \ taken \ prior \ to \ t} = \frac{\sum_{i=1}^{t-1}R_i \ . \ 1_{A_i=a}}{\sum_{i=1}^{t-1} 1_{A_i=a}}$$
#
# *$1_{predicate}$ denotes the random variable that is 1 if predicate is true and 0 if it is not.*
#
# We call this the **sample-average** method for **estimating action values**. Of course this is just one way to estimate action values, and not necessarily the best one.
#
# The simplest action selection rule is to select **one of the actions** with the **highest estimated value**,
# that is, one of the greedy actions as defined in the previous section. If there is **more than one greedy
# action**, then a selection is made among them in some arbitrary way, perhaps **randomly**. We write this
# greedy action selection method as:
#
# $$A_t = argmax \ Q_t(a)$$
#
# A simple alternative is to behave **greedily most of the time**, but every once in a while, say with small
# **probability ε**, instead **select randomly from among all the actions** with equal probability, independently
# of the action-value estimates. We call methods using this near-greedy action selection rule **ε-greedy**
# methods.
#
# **Exercise 2.1 In $\epsilon$-greedy action selection, for the case of two actions and $\epsilon$ = 0.5, what is
# the probability that the greedy action is selected?**
#
# 0.5 (there is a $\epsilon$ probablity of choosing greedy) + 0.5 (there is a 1-$\epsilon$ probability of choosing randomly ) * 0.5 (choosing one of the action is 1/2) = 0.75
#
#
# ## The 10-armed Testbed
#
# +
import matplotlib.pyplot as plt
import numpy as np
#Figure 2.1
# %matplotlib notebook
# Draw one 10-armed testbed: true values q*(a) ~ N(0, 1), then visualise each
# arm's reward distribution N(q*(a), 1) using 500 samples per arm.
true_action_values = np.random.randn(10)
reward_samples = np.random.randn(500, 10) + true_action_values
plt.violinplot(dataset=reward_samples, showmeans=True)
plt.xlabel("Action")
plt.ylabel("Reward distribution")
# +
import matplotlib.pyplot as plt
#Figure 2.2
# Compare greedy (epsilon = 0) against two epsilon-greedy settings on the
# stationary 10-armed testbed, averaged over 2000 runs of 1000 steps each.
epsilons = [0, 0.1, 0.01]
epsilon_greedy_bandits = [Bandit(arms=10,
                                 estimation_method=EstimationMethod.SAMPLE_AVERAGE_INCREMENTAL,
                                 action_selection_method=ActionSelectionMethod.EPSILON_GREEDY,
                                 epsilon=eps)
                          for eps in epsilons]
epsilon_greedy_best_action, epsilon_greedy_rewards = [], []
for bandit in epsilon_greedy_bandits:
    # simulate() returns (fraction-optimal-action per step, mean reward per step)
    result = bandit.simulate(2000, 1000)
    epsilon_greedy_best_action.append(result[0])
    epsilon_greedy_rewards.append(result[1])
# %matplotlib inline
plt.figure()
# top panel: average reward over time
plt.subplot(2, 1, 1)
for eps, rewards in zip(epsilons, epsilon_greedy_rewards):
    plt.plot(rewards, label='epsilon = {}'.format(eps))
plt.xlabel('steps')
plt.ylabel('average reward')
plt.legend()
# bottom panel: percentage of steps on which the optimal action was taken
plt.subplot(2, 1, 2)
for eps, counts in zip(epsilons, epsilon_greedy_best_action):
    plt.plot(counts * 100, label='epsilon = {}'.format(eps))
plt.xlabel('steps')
plt.ylabel('% optimal action')
plt.legend()
# -
# **Exercise 2.2: Bandit example Consider a k-armed bandit problem with k = 4 actions,
# denoted 1, 2, 3, and 4. Consider applying to this problem a bandit algorithm using
# $\epsilon$-greedy action selection, sample-average action-value estimates, and initial estimates
# of Q1 (a) = 0, for all a. Suppose the initial sequence of actions and rewards is A1 = 1,
# R1 = −1, A2 = 2, R2 = 1, A3 = 2, R3 = 2, A4 = 2, R4 = 2, A5 = 3, R5 = 0. On some
# of these time steps the $\epsilon$ case may have occurred, causing an action to be selected at
# random. On which time steps did this definitely occur? On which time steps could this
# possibly have occurred?**
#
# after one time step : $A_1 = 1 , R_1 = -1 \to Q_2(1) = \frac{-1}{1} = -1 , Q_2(2) = 0 , Q_2(3) = 0 , Q_2(4) = 0$ (this choice of action could be both random and greedy)
#
# after two time step : $A_2 = 2 , R_2 = 1 \to Q_3(1) = -1 , Q_3(2) = \frac{1}{1} = 1 , Q_3(3) = 0, Q_3(4)=0$ (this choice of action could be both random and greedy)
#
# after three time step : $A_3 = 2 , R_3 = 2 \to Q_4(1) = -1 , Q_4(2) = \frac{1+2}{1+1} = 1.5 , Q_4(3) = 0 , Q_4(4) = 0$(this choice of action could be both random and greedy)
#
# after four time step : $A_4 = 2 , R_4 = 2 \to Q_5(1) = -1 , Q_5(2) = \frac{1+2+2}{1+1+1} = \frac{5}{3} , Q_5(3) = 0, Q_5(4) = 0$(this choice of action could be both random and greedy)
#
# after five time step : $A_5 = 3 , R_5 = 0 \to Q_6(1) = -1 , Q_6(2) = \frac{5}{3} , Q_6(3)=0,Q_6(4)=0$ (this one definitely was chosen randomly)
#
# **Exercise 2.3 In the comparison shown in Figure 2.2, which method will perform best in
# the long run in terms of cumulative reward and probability of selecting the best action?
# How much better will it be? Express your answer quantitatively.**
#
# Plotting both the cumulative average reward and the probability of selecting the best action, we see that in both cases strategies with exploration do better; however, a smaller epsilon like 0.01 improves more slowly but eventually performs better than epsilon=0.1. For 1000 steps the epsilon=0.1 strategy obtains a cumulative total reward of about 1300, while the greedy strategy obtains a total reward of about 1000 — an improvement of about 30%.
#
#
# ## Incremental Implementation
# The action-value methods we have discussed so far all estimate action values as sample
# averages of observed rewards. We now turn to the question of how these averages can be
# computed in a computationally **efficient** manner, in particular, with **constant memory**
# and constant per-time-step computation.
#
# $Q_n \ = \ \frac{R_1 \ + R_2 \ + \ ... \ + \ R_{n-1}}{n-1}$
#
#
# $Q_{n+1} \ = \ \frac{1}{n} \ \sum_{i=1}^{n} \ R_i$
#
#
# $ = \ \frac{1}{n} \ (R_n \ + \ \sum_{i=1}^{n-1} \ R_i )$
#
#
# $ = \ \frac{1}{n} \ ( \ R_n \ + \ (n-1) \ \frac{1}{n-1} \ \sum_{i=1}^{n-1} \ R_i \ )$
#
#
# $ = \ \frac{1}{n} \ ( \ R_n \ + \ (n-1) \ Q_n \ )$
#
#
# $ = \ \frac{1}{n} \ ( \ R_n \ + \ n Q_n \ - \ Q_n \ )$
#
#
# $ = \ Q_n \ + \ \frac{1}{n} \ [ \ R_n \ - \ Q_n \ ]$
#
# **NewEstimate <- OldEstimate + StepSize \[ Target - OldEstimate \]**
#
# ## Tracking a Nonstationary Problem
# We sometimes face problems which are not stationary (which means **rewards probability change** over time) so it is a good idea to give **more weight** to **recent rewards** than the past rewards. One of ways to do so is by using a **constant step-size** parameter.
#
#
# $ Q_{n+1} \ = \ Q_n \ + \ \alpha \ [ \ R_n \ - \ Q_n \ ]$
#
# $ = \ \alpha \ R_n \ + \ (1-\alpha) \ Q_n$
#
# $ = \ \alpha \ R_n \ + \ (1-\alpha) \ [ \ \alpha \ R_{n-1} \ + \ (1-\alpha) \ Q_{n-1} \ ]$
#
# $ = \alpha \ R_n \ + \ (1-\alpha) \ \alpha \ R_{n-1} \ + \ (1-\alpha)^{2} \ Q_{n-1}$
#
# $ = \alpha \ R_n \ + \ (1-\alpha) \ \alpha \ R_{n-1} \ + \ (1-\alpha)^2 \ \alpha \ R_{n-2} \ + \ ... \ + \ (1-\alpha)^{n-1} \ \alpha \ R_1 \ + (1-\alpha)^n \ Q_1$
#
# $ = \ (1-\alpha)^n \ Q_1 \ + \sum_{i=1}^{n} \ \alpha \ (1-\alpha)^{n-i} \ R_i$
#
# **exponential recency-weighted average** $\to$ *weight decays exponentially according to the exponent on* $1-\alpha$
#
#
# **for ensuring convergence with probability 1:**
#
# $$ \sum_{n=1}^{\infty} \ \alpha_{n}(a) \ = \ \infty \ \ and \ \ \sum_{n=1}^{\infty} \ \alpha_{n}^{2}(a) \ < \ \infty$$
#
# **first condition** $\to$ *steps are large enough to eventually overcome any initial conditions*
#
# **second condition** $\to$ *eventually the steps become small enough to assure convergence.*
#
# $\alpha_n(a) \ = \ \frac{1}{n}$ (sample average) $\to$ converges
#
# $\alpha_n(a) \ = \ \alpha$ (constant step-size) $\to$ never completely converge but continue to vary in response to the most recently received rewards and this is a good thing for nonstationary problems
#
# **Exercise 2.4 If the step-size parameters, $\alpha_n$ , are not constant, then the estimate $Q_n$ is
# a weighted average of previously received rewards with a weighting different from that
# given by (2.6). What is the weighting on each prior reward for the general case, analogous
# to (2.6), in terms of the sequence of step-size parameters?**
#
# $Q_{n+1} \ = \ Q_n \ + \ \alpha_n \ [ \ R_n \ - \ Q_n \ ]$
#
# $ = \ \alpha_n \ R_n \ + \ (1-\alpha_n) \ Q_n$
#
# $ = \ \alpha_n \ R_n \ + \ (1-\alpha_n) \ [ \ \alpha_{n-1} \ R_{n-1} \ + \ (1-\alpha_{n-1}) \ Q_{n-1} \ ]$
#
# $ = \ \alpha_n \ R_n \ + \ (1-\alpha_n) \ \alpha_{n-1} \ R_{n-1} \ + \ (1-\alpha_n) \ (1-\alpha_{n-1}) \ Q_{n-1}$
#
# $ = \ \alpha_n \ R_n \ + \ (1-\alpha_n) \ \alpha_{n-1} \ R_{n-1} \ + \ (1-\alpha_n) \ (1-\alpha_{n-1}) \ \alpha_{n-2} \ R_{n-2} \ + \ ... \ + \ (1-\alpha_n) \ (1-\alpha_{n-1}) \ ... \ (1-\alpha_1) \ Q_1$
#
# $ = \ \prod_{i=1}^{n} \ (1-\alpha_i) \ Q_1 + \ \sum_{i=1}^{n} \ \alpha_i \ \prod_{j=i+1}^{n} \ (1-\alpha_j) \ R_i$
#
# **Exercise 2.5 (programming) Design and conduct an experiment to demonstrate the
# difficulties that sample-average methods have for nonstationary problems. Use a modified
# version of the 10-armed testbed in which all the $q_*$(a) start out equal and then take
# independent random walks (say by adding a normally distributed increment with mean
# zero and standard deviation 0.01 to all the $q_*$(a) on each step). Prepare plots like
# Figure 2.2 for an action-value method using sample averages, incrementally computed,
# and another action-value method using a constant step-size parameter, $\alpha$ = 0.1. Use $\epsilon$ = 0.1 and longer runs, say of 10,000 steps.**
#
# +
import matplotlib.pyplot as plt
#Exercise 2.5
# Nonstationary testbed: all q*(a) start equal and take independent random walks
# (sigma = 0.01) on every step.  Compare the sample-average estimator against a
# constant step size alpha = 0.1, both selecting with epsilon-greedy (epsilon = 0.1).
sample_average_bandit = Bandit(arms=10,
                               estimation_method=EstimationMethod.SAMPLE_AVERAGE_INCREMENTAL,
                               action_selection_method=ActionSelectionMethod.EPSILON_GREEDY,
                               epsilon=0.1,start_equal=True, random_walk=0.01)
constant_step_size_bandit = Bandit(arms=10, estimation_method=EstimationMethod.CONSTANT_STEP_SIZE,
                                   alpha=0.1,random_walk=0.01,epsilon=0.1, start_equal=True,
                                   action_selection_method=ActionSelectionMethod.EPSILON_GREEDY)
epsilon_greedy_best_action, epsilon_greedy_rewards = [], []
sample_average_result = sample_average_bandit.simulate(2000, 10000)
sample_average_best_action = sample_average_result[0]
sample_average_rewards = sample_average_result[1]
constant_step_size_result = constant_step_size_bandit.simulate(2000, 10000)
constant_step_size_best_action = constant_step_size_result[0]
constant_step_size_rewards = constant_step_size_result[1]
# %matplotlib inline
plt.figure()
# top panel: average reward; bottom panel: % optimal action
plt.subplot(2, 1, 1)
plt.plot(sample_average_rewards, label='epsilon = {}'.format(0.1))
plt.plot(constant_step_size_rewards, label='epsilon = {}, alpha = {}'.format(0.1, 0.1))
plt.xlabel('steps')
plt.ylabel('average reward')
plt.legend()
plt.subplot(2, 1, 2)
plt.plot(sample_average_best_action * 100, label='epsilon = {}'.format(0.1))
plt.plot(constant_step_size_best_action * 100, label='epsilon = {}, alpha = {}'.format(0.1, 0.1))
plt.xlabel('steps')
plt.ylabel('% optimal action')
plt.legend()
# -
# ## Optimistic Initial Values
# Initial action values can be used as a simple way to encourage **exploration**. Suppose that instead of setting the initial values to zero, we set them all to 5; then because this is really **optimistic** ($q_*(a)$ is a normal distribution with mean 0 and variance 1), the reward will be less than the starting estimates, so the agent will switch to other actions and will explore more. However, it is **not well suited** to **nonstationary** problems because its drive for exploration is inherently temporary.
#
#
# +
import matplotlib.pyplot as plt
#Figure 2.3
# Optimistic initial estimates (Q1 = 5, pure greedy) versus realistic initial
# estimates (Q1 = 0, epsilon = 0.1); both use the constant-step-size update.
optimistic_initial_value_bandit =Bandit(arms=10,
                                        estimation_method=EstimationMethod.CONSTANT_STEP_SIZE,
                                        alpha=0.1,
                                        action_selection_method=ActionSelectionMethod.EPSILON_GREEDY,
                                        epsilon=0,initial_value_estimation=5.0)
realistic_initial_value_bandit =Bandit(arms=10,
                                       estimation_method=EstimationMethod.CONSTANT_STEP_SIZE,
                                       alpha=0.1,
                                       action_selection_method=ActionSelectionMethod.EPSILON_GREEDY,
                                       epsilon=0.1,initial_value_estimation=0)
# only the %-optimal-action curves (index [0]) are plotted for this figure
optimistic_result = optimistic_initial_value_bandit.simulate(2000, 1000)
optimistic_best_action = optimistic_result[0]
realistic_result = realistic_initial_value_bandit.simulate(2000, 1000)
realistic_best_action = realistic_result[0]
# %matplotlib inline
plt.plot(optimistic_best_action * 100, label='Q1 = {} , epsilon = {}'.format(5, 0))
plt.plot(realistic_best_action * 100, label='Q1 = {} , epsilon = {}'.format(0, 0.1))
plt.xlabel('steps')
plt.ylabel('% optimal action')
plt.legend()
# -
# **Exercise 2.6: Mysterious Spikes The results shown in Figure 2.3 should be quite reliable
# because they are averages over 2000 individual, randomly chosen 10-armed bandit tasks.
# Why, then, are there oscillations and spikes in the early part of the curve for the optimistic
# method? In other words, what might make this method perform particularly better or
# worse, on average, on particular early steps?**
#
# If the initial action selected when using the optimistic method are by chance ones of the better
# choices then the action value estimates Q(a) for these plays will be magnified resulting in an
# emphasis to continue playing this action. This results in large actions values being received
# on the initial draws and consequently very good initial play. In the same way, if the algorithm
# initially selects poor plays then initially the algorithm will perform poorly resulting in very
# poor initial play.
#
# **Exercise 2.7: Unbiased Constant-Step-Size Trick In most of this chapter we have used
# sample averages to estimate action values because sample averages do not produce the
# initial bias that constant step sizes do (see the analysis leading to (2.6)). However, sample
# averages are not a completely satisfactory solution because they may perform poorly
# on nonstationary problems. Is it possible to avoid the bias of constant step sizes while
# retaining their advantages on nonstationary problems? One way is to use a step size of**
#
#
# ## Upper-Confidence-Bound Action Selection
#
# +
import matplotlib.pyplot as plt
#Figure 2.4
# UCB (c = 2) versus epsilon-greedy (epsilon = 0.1) on the stationary testbed.
# start_time=1 keeps log(time) finite in the UCB formula.
ucb_bandit = Bandit(arms=10, estimation_method=EstimationMethod.SAMPLE_AVERAGE_INCREMENTAL,
                    action_selection_method=ActionSelectionMethod.UPPER_CONFIDENCE_BOUND,
                    c=2, start_time=1)
epsilon_greedy_bandit = Bandit(arms=10,
                               estimation_method=EstimationMethod.SAMPLE_AVERAGE_INCREMENTAL,
                               action_selection_method=ActionSelectionMethod.EPSILON_GREEDY,
                               epsilon=0.1,
                               start_time=1)
# only the average-reward curves (index [1]) are plotted for this figure
ucb_result = ucb_bandit.simulate(2000, 1000)
ucb_rewards = ucb_result[1]
epsilon_greedy_result = epsilon_greedy_bandit.simulate(2000, 1000)
epsilon_greedy_rewards = epsilon_greedy_result[1]
# %matplotlib inline
plt.plot(ucb_rewards, label='c = {}'.format(2))
plt.plot(epsilon_greedy_rewards, label='epsilon = {}'.format(0.1))
plt.xlabel('steps')
plt.ylabel('average reward')
plt.legend()
# -
# ## Gradient Bandit Algorithms
#
# +
import matplotlib.pyplot as plt
#Figure 2.5
# Gradient bandit on a testbed with q* ~ N(4, 1): two step sizes (0.1 and 0.4),
# each run with and without the average-reward baseline.
gradient_small_alpha_base_bandit=Bandit(arms=10,
                                        estimation_method=EstimationMethod.GRADIENT_BANDIT,
                                        alpha=0.1,gradient_baseline=True, q_star_mean=4,
                                        action_selection_method=ActionSelectionMethod.GRADIENT_BANDIT)
gradient_small_alpha_base_best_action=gradient_small_alpha_base_bandit.simulate(2000, 1000)[0]
gradient_big_alpha_base_bandit=Bandit(arms=10,
                                      estimation_method=EstimationMethod.GRADIENT_BANDIT,
                                      alpha=0.4,gradient_baseline=True, q_star_mean=4,
                                      action_selection_method=ActionSelectionMethod.GRADIENT_BANDIT)
gradient_big_alpha_base_best_action=gradient_big_alpha_base_bandit.simulate(2000, 1000)[0]
gradient_small_alpha_bandit=Bandit(arms=10, estimation_method=EstimationMethod.GRADIENT_BANDIT,
                                   alpha=0.1, gradient_baseline=False, q_star_mean=4,
                                   action_selection_method=ActionSelectionMethod.GRADIENT_BANDIT)
gradient_small_alpha_best_action=gradient_small_alpha_bandit.simulate(2000,1000)[0]
gradient_big_alpha_bandit = Bandit(arms=10, estimation_method=EstimationMethod.GRADIENT_BANDIT,
                                   alpha=0.4, gradient_baseline=False, q_star_mean=4,
                                   action_selection_method=ActionSelectionMethod.GRADIENT_BANDIT)
gradient_big_alpha_best_action = gradient_big_alpha_bandit.simulate(2000, 1000)[0]
# %matplotlib inline
plt.plot(gradient_small_alpha_base_best_action * 100,label='with baseline , alpha = {}'.format(0.1))
plt.plot(gradient_big_alpha_base_best_action * 100,label='with baseline , alpha = {}'.format(0.4))
plt.plot(gradient_small_alpha_best_action * 100, label='without baseline , alpha = {}'.format(0.1))
plt.plot(gradient_big_alpha_best_action * 100, label='without baseline , alpha = {}'.format(0.4))
plt.xlabel('steps')
plt.ylabel('% optimal action')
plt.legend()
# -
# ## Associative Search (Contextual Bandits)
#
# ## Summary
#
# +
import matplotlib.pyplot as plt
#Figure 2.6
# Parameter study: mean reward over the first 1000 steps as a function of each
# algorithm's own parameter, swept over powers of two (x-axis labels are 2**power).
epsilon_greedy_bandits = [Bandit(action_selection_method=ActionSelectionMethod.EPSILON_GREEDY,
                                 epsilon=2**power,
                                 estimation_method=EstimationMethod.SAMPLE_AVERAGE_INCREMENTAL)
                          for power in range(-7, -1)]
average_rewards_epsilon_greedy = []
for bandit in epsilon_greedy_bandits:
    # simulate()[1] is the per-step mean reward; its mean summarises the whole run
    reward = bandit.simulate(2000,1000)[1].mean()
    average_rewards_epsilon_greedy.append(reward)
gradient_bandit_bandits = [Bandit(action_selection_method=ActionSelectionMethod.GRADIENT_BANDIT,
                                  alpha=2**power,
                                  estimation_method=EstimationMethod.GRADIENT_BANDIT,
                                  gradient_baseline=True) for power in range(-5, 3)]
average_rewards_gradient = []
for bandit in gradient_bandit_bandits:
    reward = bandit.simulate(2000, 1000)[1].mean()
    average_rewards_gradient.append(reward)
ucb_bandits = [Bandit(estimation_method=EstimationMethod.SAMPLE_AVERAGE_INCREMENTAL,
                      action_selection_method=ActionSelectionMethod.UPPER_CONFIDENCE_BOUND,
                      c=2**power,start_time=1) for power in range(-4, 3)]
average_rewards_ucb = []
for bandit in ucb_bandits:
    reward = bandit.simulate(2000, 1000)[1].mean()
    average_rewards_ucb.append(reward)
optimistic_bandits = [Bandit(arms=10, estimation_method=EstimationMethod.CONSTANT_STEP_SIZE,
                             alpha=0.1,
                             action_selection_method=ActionSelectionMethod.EPSILON_GREEDY,
                             epsilon=0,
                             initial_value_estimation=2**power) for power in range(-2, 3)]
average_rewards_optimistic = []
for bandit in optimistic_bandits:
    reward = bandit.simulate(2000, 1000)[1].mean()
    average_rewards_optimistic.append(reward)
# %matplotlib inline
# tick positions -7..2 correspond to parameter values 2**power
plt.xticks(range(-7, 3), ('1/128', '1/64', '1/32', '1/16', '1/8', '1/4', '1/2', '1', '2', '4'))
plt.plot(range(-7, -1), average_rewards_epsilon_greedy, label='epsilon-greedy parameter:epsilon')
plt.plot(range(-5, 3), average_rewards_gradient, label='gradient bandit parameter:alpha')
plt.plot(range(-4, 3), average_rewards_ucb, label='UCB parameter:c')
# NOTE(review): this label string is missing a space before 'parameter' — cosmetic only
plt.plot(range(-2, 3), average_rewards_optimistic,
         label='greedy with optimistic initialization α = 0.1parameter:Q1')
plt.xlabel('Parameter')
plt.ylabel('Average reward')
plt.legend()
# -
# **Exercise 2.11 (programming) Make a figure analogous to Figure 2.6 for the nonstationary
# case outlined in Exercise 2.5. Include the constant-step-size $\epsilon$-greedy algorithm with
# $\alpha$ = 0.1. Use runs of 200,000 steps and, as a performance measure for each algorithm and
# parameter setting, use the average reward over the last 100,000 steps.**
#
# +
import matplotlib.pyplot as plt
#excercise 2.11
# Parameter study on the nonstationary testbed of Exercise 2.5 (equal starts,
# random-walk sigma = 0.01).  Performance measure: average reward over the last
# half of each run.  The book asks for 200,000 steps; runs/times are reduced
# here, presumably to keep the cell's runtime manageable.
runs=500
times=20000
epsilon_greedy_bandits = [
    Bandit(action_selection_method=ActionSelectionMethod.EPSILON_GREEDY, epsilon=2 ** power,
           estimation_method=EstimationMethod.SAMPLE_AVERAGE_INCREMENTAL,
           start_equal=True, random_walk=0.01)
    for power in range(-7, -1)]
average_rewards_epsilon_greedy = []
for bandit in epsilon_greedy_bandits:
    # [1] is the per-step mean reward; slice keeps only the last times/2 steps
    reward = bandit.simulate(runs, times)[1][int(times / 2):].mean()
    average_rewards_epsilon_greedy.append(reward)
gradient_bandit_bandits = [
    Bandit(action_selection_method=ActionSelectionMethod.GRADIENT_BANDIT, alpha=2 ** power,
           estimation_method=EstimationMethod.GRADIENT_BANDIT, gradient_baseline=True,
           start_equal=True,random_walk=0.01) for power in range(-5, 3)]
average_rewards_gradient = []
for bandit in gradient_bandit_bandits:
    reward = bandit.simulate(runs, times)[1][int(times / 2):].mean()
    average_rewards_gradient.append(reward)
ucb_bandits = [Bandit(estimation_method=EstimationMethod.SAMPLE_AVERAGE_INCREMENTAL,
                      action_selection_method=ActionSelectionMethod.UPPER_CONFIDENCE_BOUND,
                      c=2 ** power,
                      start_equal=True, random_walk=0.01, start_time=1) for power in range(-4, 3)]
average_rewards_ucb = []
for bandit in ucb_bandits:
    reward = bandit.simulate(runs, times)[1][int(times / 2):].mean()
    average_rewards_ucb.append(reward)
optimistic_bandits = [Bandit(arms=10, estimation_method=EstimationMethod.CONSTANT_STEP_SIZE,
                             alpha=0.1,
                             action_selection_method=ActionSelectionMethod.EPSILON_GREEDY,
                             epsilon=0,
                             start_equal=True, random_walk=0.01,
                             initial_value_estimation=2 ** power) for power in range(-2, 3)]
average_rewards_optimistic = []
for bandit in optimistic_bandits:
    reward = bandit.simulate(runs, times)[1][int(times / 2):].mean()
    average_rewards_optimistic.append(reward)
# constant-step-size epsilon-greedy (alpha = 0.1), swept over epsilon
constant_step_size_bandits = [Bandit(arms=10,
                                     estimation_method=EstimationMethod.CONSTANT_STEP_SIZE,
                                     alpha=0.1,
                                     random_walk=0.01,
                                     action_selection_method=ActionSelectionMethod.EPSILON_GREEDY,
                                     epsilon=2**power, start_equal=True) for power in range(-7, -1)]
average_rewards_step_size = []
for bandit in constant_step_size_bandits:
    reward = bandit.simulate(runs, times)[1][int(times / 2):].mean()
    average_rewards_step_size.append(reward)
# %matplotlib inline
plt.xticks(range(-7, 3), ('1/128', '1/64', '1/32', '1/16', '1/8', '1/4', '1/2', '1', '2', '4'))
plt.plot(range(-7, -1), average_rewards_epsilon_greedy, label='epsilon-greedy parameter:epsilon')
plt.plot(range(-7, -1), average_rewards_step_size, label='constant alpha=0.1 parameter:epsilon')
plt.plot(range(-5, 3), average_rewards_gradient, label='gradient bandit parameter:alpha')
plt.plot(range(-4, 3), average_rewards_ucb, label='UCB parameter:c')
plt.plot(range(-2, 3), average_rewards_optimistic,
         label='greedy with optimistic initialization α = 0.1 parameter:Q1')
plt.xlabel('Parameter')
plt.ylabel('Average reward over 10000 time step')
plt.legend()
| Chapter 2.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 2
# language: python
# name: python2
# ---
# +
# Core data-stack imports for the Titanic analysis.
import pandas as pd
import numpy as np
import seaborn as sns
import matplotlib.pyplot as plt
from matplotlib import rcParams
import re
# %matplotlib inline
# default figure size for every plot in this notebook
rcParams['figure.figsize'] = 10,8
# -
df_train = pd.read_csv("input/train.csv")
df_test = pd.read_csv("input/test.csv")
df_train.head(10)
print (df_train.info())
print (df_test.info())
df_train.describe()
df_train['Name'].head()
# Extract the honorific ("Mr", "Mrs", "Miss", ...) that follows the comma in
# each passenger name.  Raw strings keep the '\.' regex escape valid: in a
# plain literal '\.' is an invalid escape sequence (SyntaxWarning on modern
# Python); the pattern itself is unchanged.
df_train['Title'] = df_train.Name.apply(lambda x: re.search(r' ([A-Z][a-z]+)\.', x).group(1))
df_test['Title'] = df_test.Name.apply(lambda x: re.search(r' ([A-Z][a-z]+)\.', x).group(1))
# +
plt.figure(figsize=(12,5))
#Plotting the result
sns.countplot(x='Title', data=df_train, palette="hls")
plt.xlabel("Title", fontsize=16)
plt.ylabel("Count", fontsize=16)
plt.title("Title Count", fontsize=20)
plt.xticks(rotation=45)
plt.show()
# +
Title_Dictionary = {
"Capt": "Officer",
"Col": "Officer",
"Major": "Officer",
"Dr": "Officer",
"Rev": "Officer",
"Jonkheer": "Royalty",
"Don": "Royalty",
"Sir" : "Royalty",
"the Countess":"Royalty",
"Dona": "Royalty",
"Lady" : "Royalty",
"Mme": "Mrs",
"Ms": "Mrs",
"Mrs" : "Mrs",
"Mlle": "Miss",
"Miss" : "Miss",
"Mr" : "Mr",
"Master" : "Master"
}
# we map each title to correct category
df_train['Title'] = df_train.Title.map(Title_Dictionary)
df_test['Title'] = df_test.Title.map(Title_Dictionary)
# +
print("Chances to survive based on titles: ")
print(df_train.groupby("Title")["Survived"].mean())
plt.figure(figsize=(12,5))
#Plotting the results
sns.countplot(x='Title', data=df_train, palette="hls", hue="Survived")
plt.xlabel("Titles", fontsize=16)
plt.ylabel("Count", fontsize=16)
plt.title("Title Grouped Count", fontsize=20)
plt.xticks(rotation=45)
plt.show()
# +
#First I will look my distribuition without NaN's
#I will create a df to look distribuition
age_high_zero_died = df_train[(df_train["Age"] > 0) &
(df_train["Survived"] == 0)]
age_high_zero_surv = df_train[(df_train["Age"] > 0) &
(df_train["Survived"] == 1)]
plt.figure(figsize=(10,5))
sns.distplot(age_high_zero_surv["Age"], bins=24, color='g')
sns.distplot(age_high_zero_died["Age"], bins=24, color='r')
plt.title("Distribuition and density by Age",fontsize=20)
plt.xlabel("Age",fontsize=15)
plt.ylabel("Distribuition Died and Survived",fontsize=15)
plt.show()
# +
#Let's group the median age by sex, pclass and title, to have any idea and maybe input in Age NAN's
age_group = df_train.groupby(["Sex","Pclass","Title"])["Age"]
print(age_group.median())
# -
print(df_train["Age"].isnull().sum())
# +
df_train.loc[df_train.Age.isnull(), 'Age'] = df_train.groupby(['Sex','Pclass','Title']).Age.transform('median')
print(df_train["Age"].isnull().sum())
# -
plt.figure(figsize=(12,5))
sns.distplot(df_train["Age"], bins=24)
plt.title("Distribuition and density by Age")
plt.xlabel("Age")
plt.show()
# +
plt.figure(figsize=(12,5))
g = sns.FacetGrid(df_train, col='Survived', height=5)
g = g.map(sns.distplot, "Age")
plt.show()
# +
# Bin ages into labelled categories; pd.cut bins are (lo, hi] intervals.
interval = (0, 5, 12, 18, 25, 35, 60, 120)
cats = ['babies', 'Children', 'Teen', 'Student', 'Young', 'Adult', 'Senior']
df_train["Age_cat"] = pd.cut(df_train.Age, interval, labels=cats)
df_test["Age_cat"] = pd.cut(df_test.Age, interval, labels=cats)
# print() calls work under both Python 2 and 3, unlike the original
# Python-2-only `print df…` statements (the rest of the file uses print()).
print(df_train["Age_cat"].head())
print(df_test["Age_cat"].head())
# -
#Describe of categorical Age
print(pd.crosstab(df_train.Age_cat, df_train.Survived))
plt.figure(figsize=(12,5))
#Plotting the result
# x= keyword for consistency with the other countplot calls in this file
# (newer seaborn requires it).
sns.countplot(x="Age_cat", data=df_train, hue="Survived", palette="hls")
plt.xlabel("Categories names", fontsize=15)
# Was a second plt.xlabel call, which silently overwrote the x-axis label
# and left the y axis unlabelled.
plt.ylabel("Count", fontsize=15)
plt.title("Age Distribution ", fontsize=20)
plt.show()
# +
#Looking the Fare distribuition to survivors and not survivors
plt.figure(figsize=(12,5))
sns.distplot(df_train[df_train.Survived == 0]["Fare"],
bins=50, color='r')
sns.distplot(df_train[df_train.Survived == 1]["Fare"],
bins=50, color='g')
plt.title("Fare Distribuition by Survived", fontsize=20)
plt.xlabel("Fare", fontsize=15)
plt.ylabel("Density",fontsize=15)
plt.show()
# +
#Filling the NA's with -0.5
df_train.Fare = df_train.Fare.fillna(-0.5)
#intervals to categorize
quant = (-1, 0, 8, 15, 31, 600)
#Labels without input values
label_quants = ['NoInf', 'quart_1', 'quart_2', 'quart_3', 'quart_4']
#doing the cut in fare and puting in a new column
df_train["Fare_cat"] = pd.cut(df_train.Fare, quant, labels=label_quants)
#Description of transformation
print(pd.crosstab(df_train.Fare_cat, df_train.Survived))
plt.figure(figsize=(12,5))
#Plotting the new feature
sns.countplot(x="Fare_cat", hue="Survived", data=df_train, palette="hls")
plt.title("Count of survived x Fare expending",fontsize=20)
plt.xlabel("Fare Cat",fontsize=15)
plt.ylabel("Count",fontsize=15)
plt.show()
# +
# Replicate the same to df_test
df_test.Fare = df_test.Fare.fillna(-0.5)
quant = (-1, 0, 8, 15, 31, 1000)
label_quants = ['NoInf', 'quart_1', 'quart_2', 'quart_3', 'quart_4']
df_test["Fare_cat"] = pd.cut(df_test.Fare, quant, labels=label_quants)
# +
# The raw Fare, Ticket, Age, Cabin and Name columns have served their purpose
# (engineered into Fare_cat, Age_cat and Title) and are removed from both sets.
for _col in ["Fare", "Ticket", "Age", "Cabin", "Name"]:
    del df_train[_col]
for _col in ["Fare", "Ticket", "Age", "Cabin", "Name"]:
    del df_test[_col]
# -
df_train.head()
# +
# Let see how many people die or survived
print("Total of Survived or not: ")
print(df_train.groupby("Survived")["PassengerId"].count())
plt.figure(figsize=(12,5))
sns.countplot(x="Survived", data=df_train, palette="hls")
plt.title('Total Distribuition by survived or not')
plt.xlabel('Target Distribuition')
plt.ylabel('Count')
plt.show()
# +
print(pd.crosstab(df_train.Survived, df_train.Sex))
plt.figure(figsize=(12,5))
sns.countplot(x="Sex", data=df_train, hue="Survived",palette="hls")
plt.title('Sex Distribuition by survived or not', fontsize=20)
plt.xlabel('Sex Distribuition',fontsize=15)
plt.ylabel('Count', fontsize=15)
plt.show()
# +
# Distribuition by class
print(pd.crosstab(df_train.Pclass, df_train.Embarked))
plt.figure(figsize=(12,5))
sns.countplot(x="Embarked", data=df_train, hue="Pclass",palette="hls")
plt.title('Embarked x Pclass Count', fontsize=20)
plt.xlabel('Embarked with PClass',fontsize=15)
plt.ylabel('Count', fontsize=15)
plt.show()
# -
#lets input the NA's with the highest frequency
df_train["Embarked"] = df_train["Embarked"].fillna('S')
# +
# Exploring Survivors vs Embarked
print(pd.crosstab(df_train.Survived, df_train.Embarked))
plt.figure(figsize=(12,5))
sns.countplot(x="Embarked", data=df_train, hue="Survived",palette="hls")
plt.title('Class Distribuition by survived or not',fontsize=20)
plt.xlabel('Embarked',fontsize=15)
plt.ylabel('Count', fontsize=15)
plt.show()
# +
# Exploring Survivors vs Pclass
print(pd.crosstab(df_train.Survived, df_train.Pclass))
plt.figure(figsize=(12,5))
sns.countplot(x="Pclass", data=df_train, hue="Survived",palette="hls")
plt.xlabel('PClass',fontsize=15)
plt.ylabel('Count', fontsize=15)
plt.title('Class Distribuition by Survived or not', fontsize=20)
plt.show()
# +
g = sns.catplot(x="SibSp",y="Survived",data=df_train,kind="bar", height = 5, aspect= 1.6, palette = "hls")
g.set_ylabels("Probability(Survive)", fontsize=15)
g.set_xlabels("SibSp Number", fontsize=15)
plt.show()
# -
# Explore Parch feature vs Survived
g = sns.catplot(x="Parch", y="Survived", data=df_train, kind="bar", height = 6, palette = "hls")
g = g.set_ylabels("survival probability")
# +
#Create a new column and sum the Parch + SibSp + 1 that refers the people self
df_train["FSize"] = df_train["Parch"] + df_train["SibSp"] + 1
df_test["FSize"] = df_test["Parch"] + df_test["SibSp"] + 1
# -
print(pd.crosstab(df_train.FSize, df_train.Survived))
sns.catplot(x="FSize",y="Survived", data=df_train, kind="bar", height=6, aspect=1.6)
plt.show()
# +
del df_train["SibSp"]
del df_train["Parch"]
del df_test["SibSp"]
del df_test["Parch"]
# -
df_train.head()
# +
df_train = pd.get_dummies(df_train, columns=["Sex","Embarked","Age_cat","Fare_cat","Title"],\
prefix=["Sex","Emb","Age","Fare","Prefix"], drop_first=True)
df_test = pd.get_dummies(df_test, columns=["Sex","Embarked","Age_cat","Fare_cat","Title"],\
prefix=["Sex","Emb","Age","Fare","Prefix"], drop_first=True)
# -
#Finallt, lets look the correlation of df_train
plt.figure(figsize=(15,12))
plt.title('Correlation of Features for Train Set')
sns.heatmap(df_train.astype(float).corr(),vmax=1.0, annot=True)
plt.show()
df_train.shape
df_train.head()
# +
train = df_train.drop(["Survived","PassengerId"],axis=1)
train_ = df_train["Survived"]
test_ = df_test.drop(["PassengerId"],axis=1)
X_train = train.values
y_train = train_.values
X_test = test_.values
X_test = X_test.astype(np.float64, copy=False)
# -
X_train[5]
X_test[5]
# Feature Scaling
from sklearn.preprocessing import StandardScaler
sc = StandardScaler()
# Fit the scaler on the training data only, then reuse its means/variances
# for the test set: refitting on X_test (as fit_transform would) scales the
# two sets inconsistently and leaks test statistics into preprocessing.
X_train = sc.fit_transform(X_train)
X_test = sc.transform(X_test)
X_train[5]
from keras.models import Sequential
from keras.layers import Dense, Activation, Dropout
import keras
from keras.optimizers import SGD
# +
# Creating the model
model = Sequential()
# Inputing the first layer with input dimensions
model.add(Dense(15, activation='relu', input_dim=20, kernel_initializer='uniform'))
# Adding an Dropout layer to prevent from overfitting
model.add(Dropout(0.3))
#adding second hidden layer
model.add(Dense(15, kernel_initializer='uniform', activation='tanh'))
# Adding another Dropout layer
model.add(Dropout(0.3))
# #adding third hidden layer
# model.add(Dense(128, kernel_initializer='uniform', activation='relu'))
# # Adding another Dropout layer
# model.add(Dropout(0.50))
# adding the output layer that is binary [0,1]
model.add(Dense(1, kernel_initializer='uniform', activation='sigmoid'))
#With such a scalar sigmoid output on a binary classification problem, the loss
#function you should use is binary_crossentropy
#Visualizing the model
model.summary()
# +
#Creating an Stochastic Gradient Descent
sgd = SGD(lr = 0.01, momentum = 0.9)
# Compiling our model
model.compile(optimizer = sgd, loss = 'binary_crossentropy', metrics = ['accuracy'])
#optimizers list
#optimizers['SGD', 'RMSprop', 'Adagrad', 'Adadelta', 'Adam', 'Adamax', 'Nadam']
# Fitting the ANN to the Training set
model.fit(X_train, y_train, batch_size = 10, epochs = 60, verbose=1, validation_split = 0.2)
# -
scores = model.evaluate(X_train, y_train, batch_size=30)
print("%s: %.2f%%" % (model.metrics_names[1], scores[1]*100))
# +
# Fit the model
history = model.fit(X_train, y_train, validation_split=0.20,
epochs=60, batch_size=5, verbose=1)
# list all data in history
print(history.history.keys())
# -
# summarizing historical accuracy
plt.plot(history.history['acc'])
plt.plot(history.history['val_acc'])
plt.title('Model Accuracy')
plt.ylabel('Accuracy')
plt.xlabel('Epoch')
plt.legend(['train', 'test'], loc='upper left')
plt.show()
# summarize history for loss
plt.plot(history.history['loss'])
plt.plot(history.history['val_loss'])
plt.title('model loss')
plt.ylabel('loss')
plt.xlabel('epoch')
plt.legend(['train', 'test'], loc='upper left')
plt.show()
import pyspark
| Codes/Titanic/Titanic Prediction.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# language: python
# name: python3
# ---
# +
import os
import sys
from arcgis.gis import GIS
from arcgis.geometry import Point, distance
from dotenv import load_dotenv
from tqdm.notebook import tqdm
sys.path.append('../')
from water_reach_tools import Reach
from water_reach_tools.water_reach_tools import ReachLineFeatureLayer, ReachPointFeatureLayer
# %load_ext autoreload
# %autoreload 2
# -
load_dotenv('../.env')
gis = GIS(
url=os.getenv('ARCGIS_URL', 'https://arcgis.com'),
username=os.getenv('ARCGIS_USERNAME', None),
password=os.getenv('ARCGIS_PASSWORD', None)
)
gis
# +
line_lyr_id=os.getenv('REACH_LINE_ID')
centroid_lyr_id=os.getenv('REACH_CENTROID_ID')
line_lyr = ReachLineFeatureLayer.from_item_id(gis, line_lyr_id)
centroid_lyr = ReachPointFeatureLayer.from_item_id(gis, centroid_lyr_id)
# -
reach_id_lst = line_lyr.get_unique_values('reach_id', 'gauge_id IS NOT NULL')
len(reach_id_lst)
def update_reach_stage(rid):
    """Fetch reach *rid* from the AW source and push its attributes to both
    hosted feature layers (attributes only; geometries are left untouched)."""
    reach = Reach.get_from_aw(rid)
    for lyr in (line_lyr, centroid_lyr):
        lyr.update_attributes_only(reach)
# Smoke-test the updater on a single reach before fanning out.
update_reach_stage(reach_id_lst[0])
from dask.distributed import Client
import dask.bag
import dask.config
dask.config.set(scheduler='threads')
client = Client()
client
# Lazy bag of per-reach update tasks (one update_reach_stage call per id).
db = dask.bag.from_sequence(reach_id_lst).map(update_reach_stage)
db
# NOTE(review): dask bags are lazy -- neither `db` nor this second .map()
# is ever .compute()d, so no bulk update actually executes, and mapping the
# same function twice looks redundant.  Confirm whether `db.compute()` was
# intended here.
db.map(update_reach_stage)
fs = line_lyr.query_by_reach_id(reach_id_lst[50])
fs
df = fs.sdf
df
from arcgis.mapping import WebMap
WebMap()
| notebooks/reach_length-Copy1.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# language: python
# name: python3
# ---
# # Градиентный спуск и его модификации
# ## Задача #1: Базовый градиентный спуск
# Реализуйте градиентный спуск для функции
# $$
# f(x)=e^{ax}+e^{-bx}+c(x-d)^2
# $$
# при $a, b, c>0$.
import numpy as np
from typing import List, Union
def gradient_descent_scalar(
        x_0: float,
        a: float,
        b: float,
        c: float,
        d: float,
        iters: int,
        alpha: Union[float, List[float]]) -> List[float]:
    """
    Run gradient descent on f(x) = exp(a*x) + exp(-b*x) + c*(x - d)**2
    starting from x_0.

    Args:
        x_0: starting point of the descent
        a, b, c, d: function parameters (a, b, c > 0)
        iters: number of gradient-descent iterations
        alpha: either a scalar (constant step size) or a list of length
            iters giving the step size to use at each iteration
    Returns:
        the sequence of iterates [x_0, x_1, x_2, ..., x_iters]
    """
    # f'(x) = a*exp(a*x) - b*exp(-b*x) + 2*c*(x - d)
    def grad(x: float) -> float:
        return a * np.exp(a * x) - b * np.exp(-b * x) + 2 * c * (x - d)

    steps = [alpha] * iters if np.isscalar(alpha) else list(alpha)
    xs = [x_0]
    for step in steps:
        xs.append(xs[-1] - step * grad(xs[-1]))
    return xs
import matplotlib.pyplot as plt
a, b, c, d = 1, 2, 3, 2
estimates = gradient_descent_scalar(0, a, b, c, d, 50, 0.05)
f = lambda x: np.exp(a * x) + np.exp(-b * x) + c * (x - d) ** 2
fig = plt.figure(figsize=(7, 7))
ax = fig.add_axes([0, 0, 1, 1])
ax.plot([i for i in range(len(estimates))], [f(x) for x in estimates])
plt.close(fig)
fig
# ## Задача #2: Градиентный спуск и метод Чебышёва для решение симметричных линейных систем
# Реализуйте градиентный спуск и метод Чебышёва для минимизации
# $$
# f(x)=\frac{1}{2}x^TAx-b^tx
# $$
def gradient_descent_quadratic(x_0: np.ndarray,
                               A: np.ndarray,
                               b: np.ndarray,
                               m: float,
                               M: float,
                               iters: int) -> List[np.ndarray]:
    """
    Run gradient descent on the quadratic f(x) = 0.5*x^T A x - b^T x with
    starting point x_0.  All eigenvalues of A lie in [m, M], m > 0.

    Uses the constant step size 2 / (m + M), which is optimal for a
    quadratic with spectrum contained in [m, M].

    Args:
        x_0: ndarray of shape (n)
        A: ndarray of shape (n, n), symmetric with spectrum in [m, M]
        b: ndarray of shape (n)
        m, M: bounds on the spectrum of A
        iters: number of gradient-descent iterations
    Returns:
        the sequence of iterates [x_0, x_1, ..., x_iters]
    """
    step = 2.0 / (m + M)
    xs = [np.asarray(x_0, dtype=float)]
    for _ in range(iters):
        # grad f(x) = A x - b
        xs.append(xs[-1] - step * (A @ xs[-1] - b))
    return xs
def chebyshev_descent(x_0: np.ndarray,
                      A: np.ndarray,
                      b: np.ndarray,
                      m: float,
                      M: float,
                      iters: int) -> List[np.ndarray]:
    """
    Chebyshev (semi-)iterative method for the quadratic
    f(x) = 0.5*x^T A x - b^T x, i.e. for solving A x = b, with starting
    point x_0.  All eigenvalues of A lie in [m, M], m > 0.

    Standard Chebyshev iteration (Golub & Van Loan): step sizes follow the
    recurrence of Chebyshev-polynomial coefficients on the spectrum interval,
    which accelerates plain gradient descent from a (κ-1)/(κ+1) rate to
    roughly (sqrt(κ)-1)/(sqrt(κ)+1).

    Args:
        x_0: ndarray of shape (n)
        A: ndarray of shape (n, n), symmetric with spectrum in [m, M]
        b: ndarray of shape (n)
        m, M: bounds on the spectrum of A
        iters: number of iterations
    Returns:
        the sequence of iterates [x_0, x_1, ..., x_iters]
    """
    theta = (M + m) / 2.0   # center of the spectrum interval
    delta = (M - m) / 2.0   # half-width of the spectrum interval
    x = np.asarray(x_0, dtype=float)
    xs = [x]
    r = b - A @ x           # current residual
    p = np.zeros_like(x)    # search direction
    alpha = 0.0
    for k in range(iters):
        if k == 0:
            p = r.copy()
            alpha = 1.0 / theta
        else:
            # The first beta carries an extra factor 1/2 (textbook recurrence).
            if k == 1:
                beta = 0.5 * (delta * alpha) ** 2
            else:
                beta = (delta * alpha / 2.0) ** 2
            alpha = 1.0 / (theta - beta / alpha)
            p = r + beta * p
        x = x + alpha * p
        r = b - A @ x
        xs.append(x)
    return xs
def show_chebyshev_and_gradient():
    """Compare plain gradient descent with the Chebyshev method on a random
    5x5 symmetric system and return a log-scale plot of the residual norms
    ||A x_k - b|| per iteration."""
    A = np.random.rand(5, 5)
    A = A @ A.T  # symmetric PSD; NOTE(review): smallest eigenvalue may be near 0, while both solvers assume m > 0
    b = np.random.rand(5)
    fig, axs = plt.subplots(1, 1, figsize=(10, 7))
    eig, v = np.linalg.eigh(A)
    m, M = eig[0], eig[-1]  # exact spectrum bounds passed to both methods
    x = np.zeros_like(b)
    iters = 500
    estimates_basic = gradient_descent_quadratic(x, A, b, m, M, iters)
    estimates_chebyshev = chebyshev_descent(x, A, b, m, M, iters)
    axs.plot([i for i in range(len(estimates_basic))], [np.linalg.norm(A @ x - b) for x in estimates_basic], label='Basic')
    axs.plot([i for i in range(len(estimates_chebyshev))], [np.linalg.norm(A @ x - b) for x in estimates_chebyshev], label='Chebyshev')
    axs.legend()
    axs.set_ylabel(r'|\Ax_k-b|', fontsize=20)
    axs.set_xlabel(r'k', fontsize=20)
    axs.set_yscale('log')
    plt.close(fig)  # close so the notebook shows the figure only via the returned handle
    return fig
show_chebyshev_and_gradient()
# ## Задача #3: полиномиальная регрессия
# Дан набор точек $\{x_i, y_i\}_{i=1}^m$, нужно найти такой полином $P$ степени $k$, что величина
# $$
# \sum_{i=1}^m(P(x_i)-y_i)^2
# $$
# минимальна среди всех многочленов степени $k$.
def polynomial_regression(x: np.ndarray,
                          y: np.ndarray,
                          k: int) -> np.ndarray:
    """
    Fit the degree-k polynomial P minimizing sum_i (P(x_i) - y_i)^2.

    Args:
        x, y: ndarrays of shape (m)
        k: degree of the polynomial
    Returns:
        ndarray with the k+1 coefficients of the best-fit polynomial,
        ordered from the lowest degree to the highest (as consumed by the
        plotting cell below via enumerate)
    """
    # Least squares on the Vandermonde matrix V[i, j] = x_i ** j.
    V = np.vander(np.asarray(x, dtype=float), N=k + 1, increasing=True)
    coeffs, *_ = np.linalg.lstsq(V, np.asarray(y, dtype=float), rcond=None)
    return coeffs
# Sample a length-8 random walk as the data points.
y = [0]
num = 8
for i in range(num - 1):
    y.append(y[-1] + 2 * np.random.rand() - 1)  # uniform step in (-1, 1)
y = np.stack(y)
# Integer x grid centred on zero: -4 .. 3 for num = 8.
x = np.array(list(range(-(num // 2), num - (num // 2))))
# Degree num-1 through num points: the fit should interpolate the data exactly.
coeffs = polynomial_regression(x, y, num - 1)
r = np.arange(x[0], x[-1] + 0.1, 0.1)
fig = plt.figure(figsize=(7, 7))
ax = fig.add_axes([0, 0, 1, 1])
ax.scatter(x, y, color='black')
# Evaluate the fitted polynomial (coefficients ordered low -> high degree).
ax.plot(r, [sum([a * t ** i for i, a in enumerate(coeffs)]) for t in r])
plt.close(fig)
fig
| optimization_course/practice/03_gradient_descent.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# language: python
# name: python3
# ---
# # Lab4: Statistical Learning in Python
#
# Outline:
# 1. Data Pre-procesing: pandas
# 2. statsmodels and sklearn
# # Problem Statement
#
# Estimate the **probability of the selected player (PlayerID 201939) scoring a triple shot** in any given game as a function of other predictors such as period and position.
#
# # 1. Loading data
import numpy as np
# import module
import helper_basketball as h
import imp
imp.reload(h);
# +
params = {'PlayerID':'201939',
'PlayerPosition':'',
'Season':'2016-17',
'ContextMeasure':'FGA',
'DateFrom':'',
'DateTo':'',
'GameID':'',
'GameSegment':'',
'LastNGames':'0',
'LeagueID':'00',
'Location':'',
'Month':'0',
'OpponentTeamID':'0',
'Outcome':'',
'Period':'0',
'Position':'',
'RookieYear':'',
'SeasonSegment':'',
'SeasonType':'Regular Season',
'TeamID':'0',
'VsConference':'',
'VsDivision':''}
shotdata = h.get_nba_data('shotchartdetail', params)
shotdata.head()
# -
# # 2. Data Pre-processing
#
# Our task is first to obtain the total number of attempted and scored shots in any given game.
# See dtype of each column
shotdata.dtypes
# Unique values of column of interest
shotdata["EVENT_TYPE"].unique()
shotdata["SHOT_ZONE_AREA"].unique()
shotdata["SHOT_TYPE"].unique()
shotdata["SHOT_ZONE_RANGE"].unique()
shotdata["GAME_DATE"].unique()
shotdata["SHOT_ATTEMPTED_FLAG"].unique()
shotdata["SHOT_MADE_FLAG"].unique()
train_data = shotdata.query('SHOT_TYPE=="3PT Field Goal"') # Only 3 pointers made
train_data
# # 3. Logistic regression
#
# We assume that the total number of scored shots are the realized value of a Binomial experiment where:
#
#
# - no. of trials: the total number of triple shots attempted.
#
# - no. of successes: total number of triple shots scored.
#
# - $p_{i}$ is the probability of scoring a triple in any given game (which is our parameter of interest).
#
# ## 3.1 `statsmodels` package
import statsmodels.api as sm
import statsmodels.formula.api as smf
# +
# sm.GLM?
# -
# Fitting models using R-style formulas:
# See: http://www.statsmodels.org/dev/example_formulas.html
fitted_model1 = smf.glm(formula = 'SHOT_MADE_FLAG ~ LOC_X + LOC_Y + C(PERIOD) + C(SHOT_ZONE_AREA)',
data=train_data,
family=sm.families.Binomial()).fit()
# See results
print(fitted_model1.summary())
# ## 3.2 `scikit-learn` package
from patsy import dmatrices # For constructing design matrices from R-types of formulae
import matplotlib.pyplot as plt
from sklearn.linear_model import LogisticRegression
from sklearn import metrics
# ### 3.2.1 Prepare data for logistic regression
# create dataframes with an intercept column and dummy variables
y, X = dmatrices('SHOT_MADE_FLAG ~ LOC_X + LOC_Y + C(PERIOD) + C(SHOT_ZONE_AREA)',
train_data, return_type="dataframe")
# flatten y into a 1-D array
y = np.ravel(y)
y
# ### 3.2.2 Train and test data
# +
from sklearn.model_selection import train_test_split
# train_test_split?
# -
X_train, X_test, y_train, y_test = train_test_split(X, # Predictors
y, # response
test_size=0.3, # % of test data
random_state=123) # seed for random sampling
# ### 3.3.3 Model fitting
# +
# LogisticRegression?
# -
logreg = LogisticRegression()
logreg.fit(X_train, y_train)
# https://stackoverflow.com/questions/26319259/sci-kit-and-regression-summary
#
# > There exists no `R` type regression summary report in sklearn. The main reason is that sklearn is used for predictive modelling / machine learning and the evaluation criteria are based on performance on previously unseen data (such as predictive r^2 for regression).
# For a more classic statistical approach, take a look at statsmodels.
# ### 3.3.4 Predicting the test set results and calculating the accuracy
y_pred = logreg.predict(X_test)
print('Accuracy of logistic regression classifier on test set: {:.2f}'
.format(logreg.score(X_test, y_test)))
# ### 3.3.5 Cross Validation
#
# Cross validation attempts to avoid overfitting while still producing a prediction for each observation dataset. We are using 10-fold Cross-Validation to train our Logistic Regression model.
from sklearn import model_selection
from sklearn.model_selection import cross_val_score
# shuffle=True is required for random_state to take effect; modern
# scikit-learn raises ValueError when random_state is set without it.
kfold = model_selection.KFold(n_splits=10, shuffle=True, random_state=7) # 10 fold CV
modelCV = LogisticRegression()
scoring = 'accuracy'
results = model_selection.cross_val_score(modelCV, X_train, y_train, cv=kfold, scoring=scoring)
print("10-fold cross validation average accuracy: %.3f" % (results.mean()))
# ### 3.3.6 Confusion Matrix
from sklearn.metrics import confusion_matrix
# Store the result under its own name so it does not shadow the imported
# confusion_matrix function (rows: actual class, columns: predicted class).
cm = confusion_matrix(y_test, y_pred)
print(cm)
# ### 3.3.7 Compute precision, recall, F-measure and support
#
# To quote from Scikit Learn:
#
# - The precision is the ratio tp / (tp + fp) where tp is the number of true positives and fp the number of false positives. The precision is intuitively the ability of the classifier to not label a sample as positive if it is negative.
#
# - The recall is the ratio tp / (tp + fn) where tp is the number of true positives and fn the number of false negatives. The recall is intuitively the ability of the classifier to find all the positive samples.
#
# - The F-beta score can be interpreted as a weighted harmonic mean of the precision and recall, where an F-beta score reaches its best value at 1 and worst score at 0.
#
# - The F-beta score weights the recall more than the precision by a factor of beta. beta = 1.0 means recall and precision are equally important.
#
# - The support is the number of occurrences of each class in y_test.
#
from sklearn.metrics import classification_report
print(classification_report(y_test, y_pred))
# ### 3.3.8 ROC curve
from sklearn.metrics import roc_auc_score
from sklearn.metrics import roc_curve
logit_roc_auc = roc_auc_score(y_test, logreg.predict(X_test))
fpr, tpr, thresholds = roc_curve(y_test, logreg.predict_proba(X_test)[:,1])
plt.figure()
plt.plot(fpr, tpr, label='Logistic Regression (area = %0.2f)' % logit_roc_auc)
plt.plot([0, 1], [0, 1],'r--')
plt.xlim([0.0, 1.0])
plt.ylim([0.0, 1.05])
plt.xlabel('False Positive Rate')
plt.ylabel('True Positive Rate')
plt.title('Receiver operating characteristic')
plt.legend(loc="lower right")
plt.show()
# ## Other ML methods
#
# - `KNeighborsClassifier`
# - `DecisionTreeClassifier(max_depth=5)`
# - `RandomForestClassifier(max_depth=5, n_estimators=10, max_features=1)`
# - `QuadraticDiscriminantAnalysis()`
from sklearn.neighbors import KNeighborsClassifier
from sklearn.tree import DecisionTreeClassifier
from sklearn.ensemble import RandomForestClassifier
from sklearn.discriminant_analysis import QuadraticDiscriminantAnalysis
# ### Example: Random Forest
clf = RandomForestClassifier(max_depth=5, n_estimators=10, max_features=1).fit(X_train, y_train)
score = clf.score(X_test, y_test)
logit_roc_auc = roc_auc_score(y_test, clf.predict(X_test))
fpr, tpr, thresholds = roc_curve(y_test, clf.predict_proba(X_test)[:,1])
plt.figure()
plt.plot(fpr, tpr, label='Random Forest (area = %0.2f)' % logit_roc_auc)
plt.plot([0, 1], [0, 1],'r--')
plt.xlim([0.0, 1.0])
plt.ylim([0.0, 1.05])
plt.xlabel('False Positive Rate')
plt.ylabel('True Positive Rate')
plt.title('Receiver operating characteristic')
plt.legend(loc="lower right")
plt.show()
# # 4. Conclusions
Pr_pred = clf.predict_proba(X_test)
Pr_pred = Pr_pred[:,1] # Probability of scoring a 3pt
Pr_pred
plt.figure(figsize=(12,11))
plt.scatter(X_test.LOC_X, X_test.LOC_Y,c=Pr_pred)
h.draw_court(outer_lines=True)
plt.colorbar()
plt.xlim(300,-300)
plt.ylim(-100,500)
plt.show()
| Labs/Lab4.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python [Root]
# language: python
# name: Python [Root]
# ---
drive_path = 'c:/'
import numpy as np
import pandas as pd
import os
import sys
import matplotlib.pyplot as plt
# %matplotlib inline
from scipy.stats import ks_2samp
from scipy.stats import anderson_ksamp
from scipy.stats import kruskal
from scipy.stats import variation
from scipy import signal as sps
import seaborn as sns
import glob
import re
# # Detrended peaks
# +
# (pd.read_csv('C:\\Users\\Annie\\Documents\\Data\\Ca_Imaging\\GoodFiles\\160330_3\\44dtnew.txt'))
# badfile=(pd.read_csv('C:\\Users\\Annie\\Documents\\Data\\Ca_Imaging\\GoodFiles\\160330_3\\44dt.txt'))
# rightfile=badfile.iloc[:,1:16]
# rightfile
# rightfile.to_csv('44dtnew.txt')
# -
date='160525'
os.chdir('C:\\Users\\Annie\\Documents\\Data\\Ca_Imaging\\HabituationFiles\\%s'%date)
# Extract one peak value per ROI ('G PMT' column) per trial file and collect
# them into a single dataframe (one row per trial).
peakdf = pd.DataFrame([])
# date='160626_2'
# os.chdir('C:\\Users\\Annie\\Documents\\Data\\Ca_Imaging\\GoodFiles\\%s'%date)
for filename in glob.glob('*dt.txt'):
    f = pd.read_csv(filename, nrows=175)
    df = f[[col for col in f.columns if 'G PMT' in col]]
    peak = []
    for col in df.columns:
        a = df[col]
        # Baseline window (frames 1-23) used for the noise estimate.
        firsta = 1
        firstb = 24
        # Response window: frames 26-79.  Keep the extremum (min or max,
        # whichever has the larger magnitude) if it exceeds 3 standard
        # deviations of the baseline; record 0 otherwise (no response).
        lo = min(a[26:80])
        hi = max(a[26:80])
        threshold = 3 * np.std(a[firsta:firstb])
        if np.absolute(lo) > np.absolute(hi) and np.absolute(lo) >= threshold:
            peak.append(lo)
        elif np.absolute(hi) > np.absolute(lo) and np.absolute(hi) >= threshold:
            peak.append(hi)
        else:
            peak.append(0)
    peaks = pd.DataFrame(peak).T
    peaks.columns = df.columns
    peaks = pd.concat([pd.DataFrame({'Trial': [int(filename.split('dt')[0])]}), peaks], axis=1)
    # DataFrame.append was removed in pandas 2.0; pd.concat is the
    # version-proof way to accumulate rows.
    peakdf = pd.concat([peakdf, peaks], ignore_index=True)
# peakdf.to_csv('%s_peaks.csv'%date,index=False)
peakdf
trials=pd.read_csv('C:\\Users\\Annie\\Documents\\Data\\Ca_Imaging\\Analysis\\Odor_Panel\\Odor_Trials.csv')
filerow=trials.loc[trials['File']==date]
odortrials={}
for t in filerow.Odor.unique():
y={t:[int(x) for x in filerow.loc[filerow['Odor'] == t][['T1','T2','T3','T4']].values.tolist()[0]]}
odortrials.update(y)
odortrials
#Get average peak across all trials using peakdf dataframe
meandf = pd.DataFrame([])
for key in odortrials:
    odor = odortrials[key]
    # Rows of peakdf belonging to this odor's trials, ROI columns only
    # (hoisted: the original recomputed this subset four times per odor).
    subset = peakdf.loc[peakdf['Trial'].isin(odor)]
    roi_cols = [col for col in subset.columns if 'G PMT' in col]
    # One row: the mean peak of each ROI across this odor's trials.
    mean = pd.DataFrame([subset[col].mean() for col in roi_cols]).T
    mean.columns = roi_cols
    # DataFrame.append was removed in pandas 2.0; pd.concat is equivalent.
    meandf = pd.concat([meandf, mean])
meandf = meandf.reset_index(drop=True)
meandf.columns = [str(col)+'_'+date for col in meandf.columns]
# list() so the dict key view works as a DataFrame column under Python 3.
meandf = pd.concat([pd.DataFrame({'Odor': list(odortrials.keys())}), meandf], axis=1)
# meandf.to_csv('%s_mean.csv'%date,index=False)
meandf
#Get proportion of successful trials
successdf = pd.DataFrame([])
for key in odortrials:
    odor = odortrials[key]
    newdf = peakdf.loc[peakdf['Trial'].isin(odor)]
    roi_cols = [col for col in newdf.columns if 'G PMT' in col]
    # Fraction of this odor's trials in which the ROI produced a nonzero
    # (i.e. threshold-passing) peak.
    s = pd.DataFrame([(newdf.loc[:, col] != 0).sum() / float(len(newdf.loc[:, col])) for col in roi_cols]).T
    s.columns = roi_cols
    # DataFrame.append was removed in pandas 2.0; pd.concat is equivalent.
    successdf = pd.concat([successdf, s])
successdf = successdf.reset_index(drop=True)
successdf.columns = [str(col)+'_'+date for col in successdf.columns]
# list() so the dict key view works as a DataFrame column under Python 3.
successdf = pd.concat([pd.DataFrame({'Odor': list(odortrials.keys())}), successdf], axis=1)
successdf.to_csv('%s_success.csv'%date,index=False)
# # Detrended integral
# +
# # date='160330_3'
# # os.chdir('C:\\Users\\Annie\\Documents\\Data\\Ca_Imaging\\GoodFiles\\%s'%date)
# #example dataframe
# for filename in
# df=pd.read_csv('1dt.txt')
# df=df[[col for col in df.columns if 'G PMT' in col]]
# -
temp = pd.DataFrame([])
date = '160330_3'
os.chdir('C:\\Users\\Annie\\Documents\\Data\\Ca_Imaging\\GoodFiles\\%s'%date)
#Pull the trials that correspond to specific date/odors
trials = pd.read_csv('C:\\Users\\Annie\\Documents\\Data\\Ca_Imaging\\Analysis\\Odor_Panel\\Odor_Trials.csv')
filerow = trials.loc[trials['File']==date]
odortrials = {}
# Iterate over the odors actually run on this date (filerow, not the whole
# trials table) so an odor absent from this session cannot raise an
# IndexError on the empty .tolist()[0].
for t in filerow.Odor.unique():
    y = {t: [int(x) for x in filerow.loc[filerow['Odor'] == t][['T1','T2','T3','T4']].values.tolist()[0]]}
    odortrials.update(y)
#Get the frame rate for a specified date
num = trials.File.unique().tolist().index('%s'%date)
fr = trials.loc[trials['File']==trials.File.unique().tolist()[num]]['FrameRate'].iloc[0]
#Get the integral
intdf = pd.DataFrame([])
for filename in glob.glob('*dt.txt'):
    f = pd.read_csv(filename, nrows=125)
    df = f[[col for col in f.columns if 'G PMT' in col]]
    # Integration window: 4-12 s after trial start, in frames.  np.int was
    # removed in NumPy 1.24; the builtin int is equivalent here.
    winstart = int(4*fr)
    winend = int(12*fr)
    integral = []
    for col in df.columns:
        a = df[col]
        firsta = 1
        firstb = 24
        # Same 3-SD-over-baseline response test as the peak extraction;
        # integrate (Riemann sum, dt = 1/fr) only when a response exists.
        lo = min(a[26:80])
        hi = max(a[26:80])
        threshold = 3 * np.std(a[firsta:firstb])
        if (np.absolute(lo) > np.absolute(hi) and np.absolute(lo) >= threshold) or \
           (np.absolute(hi) > np.absolute(lo) and np.absolute(hi) >= threshold):
            integral.append(sum(df[col][winstart:winend]*(1/fr)))
        else:
            integral.append(0)
    integral = pd.DataFrame(integral).T
    integral.columns = df.columns
    integral = pd.concat([pd.DataFrame({'Trial': [int(filename.split('dt')[0])]}), integral], axis=1)
    # DataFrame.append was removed in pandas 2.0; accumulate with pd.concat.
    intdf = pd.concat([intdf, integral])
# intdf.to_csv('%s_integral.csv'%date,index=False)
#Get average integral across all trials using integral dataframe
meanint = pd.DataFrame([])
for key in odortrials:
    odor = odortrials[key]
    subset = intdf.loc[intdf['Trial'].isin(odor)]
    roi_cols = [col for col in subset.columns if 'G PMT' in col]
    mean = pd.DataFrame([subset[col].mean() for col in roi_cols]).T
    mean.columns = roi_cols
    meanint = pd.concat([meanint, mean])
meanint = meanint.reset_index(drop=True)
meanint.columns = [str(col)+'_'+date for col in meanint.columns]
meanint = pd.concat([pd.DataFrame({'Odor': list(odortrials.keys())}), meanint], axis=1)
# meanint.to_csv('%s_meanint.csv'%date,index=False)
# # Detrended baseline
temp=pd.DataFrame([])
date='160330_3'
os.chdir('C:\\Users\\Annie\\Documents\\Data\\Ca_Imaging\\GoodFiles\\%s'%date)
#Pull the trials that correspond to specific date/odors
trials=pd.read_csv('C:\\Users\\Annie\\Documents\\Data\\Ca_Imaging\\Analysis\\Odor_Panel\\Odor_Trials.csv')
filerow=trials.loc[trials['File']==date]
odortrials={}
for t in trials.Odor.unique():
y={t:[int(x) for x in filerow.loc[filerow['Odor'] == t][['T1','T2','T3','T4']].values.tolist()[0]]}
odortrials.update(y)
#Get the frame rate for a specified date
num=trials.File.unique().tolist().index('%s'%date)
fr=trials.loc[trials['File']==trials.File.unique().tolist()[num]]['FrameRate'].iloc[0]
#Get baseline
# Per-trial baseline fluorescence for every detrended ('*dt.txt') file.
baseline_rows = []
for filename in glob.glob('*dt.txt'):
    f = pd.read_csv(filename, nrows=125)
    df = f[[col for col in f.columns if 'G PMT' in col]]
    # np.int was removed in NumPy 1.24; the builtin int is equivalent.
    # (winstart/winend mirror the integral pass; they are unused here.)
    winstart = int(4 * fr)
    winend = int(12 * fr)
    # Baseline = mean of frames 1..23 of each PMT column.
    firsta = 1
    firstb = 24
    base = df[firsta:firstb].mean().to_frame().T
    base = pd.concat([pd.DataFrame({'Trial': [int(filename.split('dt')[0])]}), base], axis=1)
    baseline_rows.append(base)
# DataFrame.append was removed in pandas 2.0; build the frame in one concat.
baseline = pd.concat(baseline_rows, ignore_index=True)
# baseline.to_csv('%s_baseline.csv'%date,index=False)
baseline
#mean baseline
# Mean baseline per odor: average the PMT columns over each odor's trials.
meanbase_rows = []
for key in odortrials:
    odor = odortrials[key]
    odor_rows = baseline.loc[baseline['Trial'].isin(odor)]
    pmt_cols = [col for col in odor_rows.columns if 'G PMT' in col]
    # One row of per-column means across this odor's trials.
    meanbase_rows.append(odor_rows[pmt_cols].mean().to_frame().T)
# DataFrame.append was removed in pandas 2.0; concatenate once instead.
meanbase = pd.concat(meanbase_rows, ignore_index=True)
meanbase.columns = [str(col) + '_' + date for col in meanbase.columns]
meanbase = pd.concat([pd.DataFrame({'Odor': list(odortrials.keys())}), meanbase], axis=1)
# meanbase.to_csv('%s_meanbase.csv'%date,index=False)
meanbase
# +
# test=pd.read_csv('C:\\Users\\Annie\\Documents\\Data\\Ca_Imaging\\GoodFiles\\160321_1\\160321_1_baseline.csv')
# +
# plt.plot(test.iloc[:,1:-1]);
# +
# test2=pd.read_csv('C:\\Users\\Annie\\Documents\\Data\\Ca_Imaging\\GoodFiles\\160321_1\\160321_1_meanbase.csv')
# +
# plt.plot(test2.iloc[:,1:]);
# +
# test2
# -
# # Untrended baseline dataframe
# +
temp = pd.DataFrame([])
date = '160626_2'
os.chdir('C:\\Users\\Annie\\Documents\\Data\\Ca_Imaging\\Untrended_For_B\\%s' % date)
# Pull the trials that correspond to the specific date/odors.
trials = pd.read_csv('C:\\Users\\Annie\\Documents\\Data\\Ca_Imaging\\Analysis\\Odor_Panel\\Odor_Trials.csv')
filerow = trials.loc[trials['File'] == date]
# Map each odor name to its four trial numbers (columns T1..T4) for this date.
odortrials = {}
for odor_name in trials.Odor.unique():
    trial_row = filerow.loc[filerow['Odor'] == odor_name][['T1', 'T2', 'T3', 'T4']]
    odortrials[odor_name] = [int(x) for x in trial_row.values.tolist()[0]]
# Look up the frame rate recorded for this date.
num = trials.File.unique().tolist().index('%s' % date)
fr = trials.loc[trials['File'] == trials.File.unique().tolist()[num]]['FrameRate'].iloc[0]
#Get baseline
# Per-trial baseline fluorescence for every raw (untrended) '*.txt' file.
baseline_rows = []
for filename in glob.glob('*.txt'):
    f = pd.read_table(filename, skiprows=4)
    df = f[[col for col in f.columns if 'G PMT (' in col]]
    # Baseline = mean of frames 2..23 of each PMT column.
    firsta = 2
    firstb = 24
    base = df[firsta:firstb].mean().to_frame().T
    base = pd.concat([pd.DataFrame({'Trial': [int(filename.split('.txt')[0])]}), base], axis=1)
    baseline_rows.append(base)
# DataFrame.append was removed in pandas 2.0; build the frame in one concat.
baseline = pd.concat(baseline_rows, ignore_index=True)
baseline.to_csv('%s_untrendedbaseline.csv' % date, index=False)
#mean baseline
# Mean baseline per odor for the untrended data; written to CSV.
meanbase_rows = []
for key in odortrials:
    odor = odortrials[key]
    odor_rows = baseline.loc[baseline['Trial'].isin(odor)]
    pmt_cols = [col for col in odor_rows.columns if 'G PMT' in col]
    meanbase_rows.append(odor_rows[pmt_cols].mean().to_frame().T)
# DataFrame.append was removed in pandas 2.0; concatenate once instead.
meanbase = pd.concat(meanbase_rows, ignore_index=True)
meanbase.columns = [str(col) + '_' + date for col in meanbase.columns]
meanbase = pd.concat([pd.DataFrame({'Odor': list(odortrials.keys())}), meanbase], axis=1)
meanbase.to_csv('%s_meanbase.csv' % date, index=False)
# -
#mean baseline
# Duplicate of the cell above, kept so the table is displayed inline.
meanbase_rows = []
for key in odortrials:
    odor = odortrials[key]
    odor_rows = baseline.loc[baseline['Trial'].isin(odor)]
    pmt_cols = [col for col in odor_rows.columns if 'G PMT' in col]
    meanbase_rows.append(odor_rows[pmt_cols].mean().to_frame().T)
# DataFrame.append was removed in pandas 2.0; concatenate once instead.
meanbase = pd.concat(meanbase_rows, ignore_index=True)
meanbase.columns = [str(col) + '_' + date for col in meanbase.columns]
meanbase = pd.concat([pd.DataFrame({'Odor': list(odortrials.keys())}), meanbase], axis=1)
# meanbase.to_csv('%s_meanbase.csv'%date,index=False)
meanbase
# Sanity check: total element count of one detrended trial file.
pd.read_csv('C:\\Users\\Annie\\Documents\\Data\\Ca_Imaging\\GoodFiles\\160325_1\\58dt.txt').size
# badfile=pd.read_table('C:\\Users\\Annie\\Documents\\Data\\Ca_Imaging\\Untrended_For_B\\160401_1\\50.txt',skiprows=4)
# rightfile=badfile.iloc[:,3:18]
# rightfile
# rightfile.to_csv('44new.csv',index=False)
# Display the final mean-baseline table.
meanbase
| Detrended analysis.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# language: python
# name: python3
# ---
# # Miscellaneous Python things
#
# In this session, we'll talk about:
#
# - More control flow tools: [`try/except`](https://docs.python.org/3/tutorial/errors.html), [`break`](https://docs.python.org/3/reference/simple_stmts.html#the-break-statement) and [`continue`](https://docs.python.org/3/reference/simple_stmts.html#the-continue-statement)
# - A few other built-in Python functions
# - Installing third-party modules
# ### Handling errors with try/except
#
# Sometimes your script will throw errors. When it does, sometimes you want the script to continue after handling the error in some way. Let's take a look at some examples.
#
# What happens when we run the code below?
# +
humans = [
    {'name': 'Cody', 'age': 32, 'job': 'Training director', 'height_in': 72},
    {'name': 'Jeff', 'age': 44, 'job': 'Snake charmer', 'height_in': 60},
    {'name': 'Sally', 'age': 55, 'job': 'Fry cook'}
]
# NOTE: Sally's record has no 'height_in' key, so this loop intentionally
# raises a KeyError on the third iteration -- that is the point of the demo.
for human in humans:
    print(human['name'], 'is', human['height_in'], 'inches tall')
# -
# Let's catch the `KeyError`. You could use a bare `except` statement, which would fire if _any_ exception is raised, but it's good practice to specify the class of error that you're controlling for.
# +
humans = [
    {'name': 'Cody', 'age': 32, 'job': 'Training director', 'height_in': 72},
    {'name': 'Jeff', 'age': 44, 'job': 'Snake charmer', 'height_in': 60},
    {'name': 'Sally', 'age': 55, 'job': 'Fry cook'}
]
for human in humans:
    # Attempt the lookup; handle only the specific KeyError raised when a
    # record is missing 'height_in', so other bugs still surface.
    try:
        print(human['name'], 'is', human['height_in'], 'inches tall')
    except KeyError:
        print('We don\'t know how tall', human['name'], 'is')
# -
# ### Break and continue
#
# These statements are frequently used in loops to control the flow of your program. We'll use [`range()`](https://docs.python.org/3/library/functions.html#func-range) to demo how each statement works.
#
# - [`break`](https://docs.python.org/3/reference/simple_stmts.html#the-break-statement) breaks out of the loop
# - [`continue`](https://docs.python.org/3/reference/simple_stmts.html#the-continue-statement) skips to the next iteration
# break: stop the whole loop at 7 -- prints 0..6 only.
for x in range(10):
    if x == 7:
        break
    else:
        print(x)
# continue: skip just the 7 iteration -- prints 0..6 and 8..9.
for x in range(10):
    if x == 7:
        continue
    else:
        print(x)
# ### Other built-in functions
#
# Check out the [full list here](https://docs.python.org/3/library/functions.html#built-in-functions). We're just going to look at a couple.
# #### [`dir()`](https://docs.python.org/3/library/functions.html#dir)
#
# Use the `dir()` function to see all of the attributes and methods available to an object -- this is often how I learn about new ways to manipulate data!
#
# Let's try it out on some different data types.
# dir() lists the attributes/methods available on each object type.
# string
dir('hello!')
# lists
l = [1, 2, 3, 4, 5, 6]
dir(l)
# +
# dicts
d = {'name': 'Cody', 'age': 32, 'job': 'Training director', 'height_in': 72}
dir(d)
# -
# #### [`enumerate()`](https://docs.python.org/3/library/functions.html#enumerate)
#
# Use `enumerate()` in a loop to keep track of _where_ you're at in the loop -- the index. Notice that we then need to use two variables in the loop -- the index and the actual value.
# +
humans = [
    {'name': 'Cody', 'age': 32, 'job': 'Training director', 'height_in': 72},
    {'name': 'Jeff', 'age': 44, 'job': 'Snake charmer', 'height_in': 60},
    {'name': 'Sally', 'age': 55, 'job': 'Fry cook'}
]
# enumerate() yields (index, item) pairs, so we unpack two loop variables.
for idx, human in enumerate(humans):
    print(idx, human['name'])
# -
# #### [`zip()`](https://docs.python.org/3/library/functions.html#zip) and [`dict()`](https://docs.python.org/3/library/functions.html#func-dict)
#
# Use zip to fold multiple iterable objects into one thing. My favorite use of zip is turning two lists of related data into a single dictionary using `dict()` to coerce the zip object:
# +
names = ['Cody', 'Jeff', 'Sally']
ages = [32, 44, 55]
# zip pairs the lists positionally; dict() turns the (name, age) pairs
# into {'Cody': 32, 'Jeff': 44, 'Sally': 55}.
zip_obj = zip(names, ages)
human_dict = dict(zip_obj)
print(human_dict)
# -
# #### [`sum()`](https://docs.python.org/3/library/functions.html#sum), [`max()`](https://docs.python.org/3/library/functions.html#max) and [`min()`](https://docs.python.org/3/library/functions.html#min)
#
# - Sum a list of numbers
# - Find the highest value in a list
# - Find the lowest value in a list
# +
# a list of numbers
l = [1100, 200, 9400, 800, 1000]
# sum
total = sum(l)
# max
max_value = max(l)
# min
min_value = min(l)
# print the results
print(total, max_value, min_value)
# -
# ### Installing third-party packages
#
# Use the `pip` package manager to install third-party packages. The `pip` tool comes bundled with Python 3. To install a module system-wide, you'd run this command from the Terminal app: `pip install name_of_your_package`.
#
# A saner approach would be to use a "virtual environment" and install dependencies specifically for each project. That way, you avoid the problem of "I need version X of `pandas` for this project but version Y for this other project." If `pandas` is installed globally on your computer, you'll quickly run into problems.
#
# For this boot camp, we have installed these packages:
#
# - `jupyter`
# - `bs4` (Beautiful Soup)
# - `requests`
# - `pandas`
# - `matplotlib`
#
# Each of these packages, in turn, has dependencies that are automatically installed.
| completed/17. Miscellaneous.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3 (ipykernel)
# language: python
# name: python3
# ---
# # Clustering de documents
# ## Imports
# +
import collections
import os
import string
import sys
import pandas as pd
from nltk import word_tokenize
from nltk.corpus import stopwords
from pprint import pprint
from sklearn.cluster import KMeans
from sklearn.feature_extraction.text import TfidfVectorizer
from sklearn.decomposition import PCA
import numpy as np
import matplotlib.pyplot as plt
from scipy.spatial.distance import cosine
# +
import nltk
nltk.download('punkt')
# -
data_path = "../data/txt/"
# ## Choose a decade
DECADE = '1950'
# ## Load every file of the decade into a list of texts
files = [f for f in sorted(os.listdir(data_path)) if f"_{DECADE[:-1]}" in f]
# Sample of the matched filenames
files[:5]
# Read each file through a context manager so handles are closed promptly
# (the original `open(...).read()` comprehension leaked every file object).
texts = []
for f in files:
    with open(data_path + f) as src:
        texts.append(src.read())
# Sample of the first text
texts[0][:400]
# ## Vectoriser les documents à l'aide de TF-IDF
# Création d'une fonction de pré-traitement
def preprocessing(text, stem=True):
    """Tokenize *text* after stripping punctuation.

    The original called ``text.translate(string.punctuation)``, which in
    Python 3 is not punctuation removal: ``str.translate`` expects an
    ordinal-keyed mapping, so passing a plain string was effectively a
    no-op (and could corrupt control characters).  Build a proper
    deletion table with ``str.maketrans`` instead.

    ``stem`` is accepted for interface compatibility but currently unused.
    """
    table = str.maketrans('', '', string.punctuation)
    text = text.translate(table)
    tokens = word_tokenize(text)
    return tokens
# ### Instancier le modèle TF-IDF avec ses arguments
# TF-IDF model: custom tokenizer and French stop words; keep only terms
# appearing in at least 10% (min_df) and at most 50% (max_df) of documents.
vectorizer = TfidfVectorizer(
    tokenizer=preprocessing,
    stop_words=stopwords.words('french'),
    max_df=0.5,
    min_df=0.1,
    lowercase=True)
# ### Construire la matrice de vecteurs à l'aide de la fonction `fit_transform`
# %time tfidf_vectors = vectorizer.fit_transform(texts)
# Sparse document-term matrix (documents x vocabulary)
tfidf_vectors
# TF-IDF vector of the first document, largest weights first
pd.Series(
    tfidf_vectors[0].toarray()[0],
    index=vectorizer.get_feature_names_out()
).sort_values(ascending=False)
# ## Comprendre les vecteurs et leurs "distances"
# Cosine distance = 1 - cosine similarity: 0 means identical direction.
cosine([1, 2, 3], [1, 2, 3])
cosine([1, 2, 3], [1, 2, 2])
cosine([1, 2, 3], [2, 2, 2])
# ### Tests on our documents
tfidf_array = tfidf_vectors.toarray()
# Vector of document 0
tfidf_array[0]
# Vector of document 1
tfidf_array[1]
cosine(tfidf_array[0], tfidf_array[1])
# ## Appliquer un algorithme de clustering sur les vecteurs TF-IDF des documents
# Pour en savoir plus sur le KMeans clustering :
# - https://medium.com/dataseries/k-means-clustering-explained-visually-in-5-minutes-b900cc69d175
# ### Définir un nombre de clusters
N_CLUSTERS = 4
# Instantiate K-Means (fixed random_state for reproducible clusters)
km_model = KMeans(n_clusters=N_CLUSTERS, random_state = 42)
# Assign each document to a cluster id in [0, N_CLUSTERS)
clusters = km_model.fit_predict(tfidf_vectors)
# +
clustering = collections.defaultdict(list)
# Group the filenames by their assigned cluster id
for idx, label in enumerate(clusters):
    clustering[label].append(files[idx])
# -
pprint(dict(clustering))
# ## Visualiser les clusters
# ### Réduire les vecteurs à 2 dimensions à l'aide de l'algorithme PCA
# Cette étape est nécessaire afin de visualiser les documents dans un espace 2D
#
# https://fr.wikipedia.org/wiki/Analyse_en_composantes_principales
# Reduce the TF-IDF vectors to 2 components so documents can be plotted
pca = PCA(n_components=2)
reduced_vectors = pca.fit_transform(tfidf_vectors.toarray())
reduced_vectors[:10]
# ### Generate the plot
# +
x_axis = reduced_vectors[:, 0]
y_axis = reduced_vectors[:, 1]
plt.figure(figsize=(10,10))
scatter = plt.scatter(x_axis, y_axis, s=100, c=clusters)
# Add the centroids, projected through the same PCA transform
centroids = pca.transform(km_model.cluster_centers_)
plt.scatter(centroids[:, 0], centroids[:, 1], marker = "x", s=100, linewidths = 2, color='black')
# Add the legend
plt.legend(handles=scatter.legend_elements()[0], labels=set(clusters), title="Clusters")
# -
| module4/s2_clustering.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# language: python
# name: python3
# ---
# # PhysioNet/Computing in Cardiology Challenge 2020
# ## Classification of 12-lead ECGs
# ### Explore Heart Rate Classification
# # Setup Notebook
# +
# Import 3rd party libraries
import os
import sys
import json
import numpy as np
import pandas as pd
import matplotlib.pylab as plt
# Import local Libraries
sys.path.insert(0, os.path.dirname(os.path.dirname(os.path.abspath(os.getcwd()))))
from kardioml import DATA_PATH
# Configure Notebook
import warnings
warnings.filterwarnings('ignore')
# %matplotlib inline
# %load_ext autoreload
# %autoreload 2
# -
# # Dataset A
# +
# Set dataset
dataset = 'A'
# Get list of JSONs
files = [file for file in os.listdir(os.path.join(DATA_PATH, dataset, 'formatted')) if 'json' in file]
# Extract heart rate data
data = list()
# Heart-rate arrhythmia labels we want to distinguish from 'other'
hr_arrhythmias = ['bradycardia', 'sinus tachycardia', 'sinus bradycardia']
# Loop through files
for file in files:
    # Load file (context manager so the handle is closed promptly)
    with open(os.path.join(DATA_PATH, dataset, 'formatted', file)) as fh:
        file = json.load(fh)
    # Pick the first matching arrhythmia label, falling back to 'other'.
    # The original inner loop overwrote `label` on every pass, so only the
    # LAST entry of hr_arrhythmias could ever be reported, and `label`
    # leaked from the previous file whenever 'labels_full' was empty.
    label = 'other'
    if file['labels_full']:
        for hr_arrhythmia in hr_arrhythmias:
            if hr_arrhythmia in file['labels_full']:
                label = hr_arrhythmia
                break
    # np.float was removed in NumPy 1.24; the builtin float is equivalent.
    data.append({'filename': file['filename'], 'dataset': dataset, 'label': label, 'hr': float(file['hr'])})
# Create DataFrame
data_df_a = pd.DataFrame(data)
# View DataFrame
data_df_a.head()
# +
# Plot the heart-rate distribution for each label in dataset A
fig = plt.figure(figsize=(15, 5), facecolor='w')
fig.subplots_adjust(wspace=0, hspace=0.05)
ax = plt.subplot2grid((1, 1), (0, 0))
ax.set_title('Dataset: {}'.format(dataset))
# Overlay one translucent histogram per label (same order/colors as before)
for lbl, shade in [('other', 'g'), ('bradycardia', 'm'),
                   ('sinus tachycardia', 'c'), ('sinus bradycardia', 'b')]:
    ax.hist(data_df_a['hr'][data_df_a['label'] == lbl].values, bins=30,
            facecolor=shade, edgecolor=[0.7, 0.7, 0.7], linewidth=1.2,
            alpha=0.5, label=lbl)
plt.legend()
ax.set_xlabel('Heart Rate, BPM', fontsize=24)
ax.set_ylabel('Frequency', fontsize=24)
plt.xticks(fontsize=14)
plt.yticks(fontsize=14)
# ax.set_xlim([0, 110])
plt.show()
# -
# # Dataset B
# +
# Set dataset
dataset = 'B'
# Get list of JSONs
files = [file for file in os.listdir(os.path.join(DATA_PATH, dataset, 'formatted')) if 'json' in file]
# Extract heart rate data
data = list()
# Heart-rate arrhythmia labels we want to distinguish from 'other'
hr_arrhythmias = ['bradycardia', 'sinus tachycardia', 'sinus bradycardia']
# Loop through files
for file in files:
    # Load file (context manager so the handle is closed promptly)
    with open(os.path.join(DATA_PATH, dataset, 'formatted', file)) as fh:
        file = json.load(fh)
    # Pick the first matching arrhythmia label, falling back to 'other'.
    # The original inner loop overwrote `label` on every pass, so only the
    # LAST entry of hr_arrhythmias could ever be reported, and `label`
    # leaked from the previous file whenever 'labels_full' was empty.
    label = 'other'
    if file['labels_full']:
        for hr_arrhythmia in hr_arrhythmias:
            if hr_arrhythmia in file['labels_full']:
                label = hr_arrhythmia
                break
    # np.float was removed in NumPy 1.24; the builtin float is equivalent.
    data.append({'filename': file['filename'], 'dataset': dataset, 'label': label, 'hr': float(file['hr'])})
# Create DataFrame
data_df_b = pd.DataFrame(data)
# View DataFrame
data_df_b.head()
# +
# Plot the heart-rate distribution for each label in dataset B
fig = plt.figure(figsize=(15, 5), facecolor='w')
fig.subplots_adjust(wspace=0, hspace=0.05)
ax = plt.subplot2grid((1, 1), (0, 0))
ax.set_title('Dataset: {}'.format(dataset))
# Overlay one translucent histogram per label (same order/colors as before)
for lbl, shade in [('other', 'g'), ('bradycardia', 'm'),
                   ('sinus tachycardia', 'c'), ('sinus bradycardia', 'b')]:
    ax.hist(data_df_b['hr'][data_df_b['label'] == lbl].values, bins=30,
            facecolor=shade, edgecolor=[0.7, 0.7, 0.7], linewidth=1.2,
            alpha=0.5, label=lbl)
plt.legend()
ax.set_xlabel('Heart Rate, BPM', fontsize=24)
ax.set_ylabel('Frequency', fontsize=24)
plt.xticks(fontsize=14)
plt.yticks(fontsize=14)
# ax.set_xlim([0, 110])
plt.show()
# -
# # Dataset C
# +
# Set dataset
dataset = 'C'
# Get list of JSONs
files = [file for file in os.listdir(os.path.join(DATA_PATH, dataset, 'formatted')) if 'json' in file]
# Extract heart rate data
data = list()
# Heart-rate arrhythmia labels we want to distinguish from 'other'
hr_arrhythmias = ['bradycardia', 'sinus tachycardia', 'sinus bradycardia']
# Loop through files
for file in files:
    # Load file (context manager so the handle is closed promptly)
    with open(os.path.join(DATA_PATH, dataset, 'formatted', file)) as fh:
        file = json.load(fh)
    # Pick the first matching arrhythmia label, falling back to 'other'.
    # The original inner loop overwrote `label` on every pass, so only the
    # LAST entry of hr_arrhythmias could ever be reported, and `label`
    # leaked from the previous file whenever 'labels_full' was empty.
    label = 'other'
    if file['labels_full']:
        for hr_arrhythmia in hr_arrhythmias:
            if hr_arrhythmia in file['labels_full']:
                label = hr_arrhythmia
                break
    # np.float was removed in NumPy 1.24; the builtin float is equivalent.
    data.append({'filename': file['filename'], 'dataset': dataset, 'label': label, 'hr': float(file['hr'])})
# Create DataFrame
data_df_c = pd.DataFrame(data)
# View DataFrame
data_df_c.head()
# +
# Plot the heart-rate distribution for each label in dataset C
fig = plt.figure(figsize=(15, 5), facecolor='w')
fig.subplots_adjust(wspace=0, hspace=0.05)
ax = plt.subplot2grid((1, 1), (0, 0))
ax.set_title('Dataset: {}'.format(dataset))
# Overlay one translucent histogram per label (same order/colors as before)
for lbl, shade in [('other', 'g'), ('bradycardia', 'm'),
                   ('sinus tachycardia', 'c'), ('sinus bradycardia', 'b')]:
    ax.hist(data_df_c['hr'][data_df_c['label'] == lbl].values, bins=30,
            facecolor=shade, edgecolor=[0.7, 0.7, 0.7], linewidth=1.2,
            alpha=0.5, label=lbl)
plt.legend()
ax.set_xlabel('Heart Rate, BPM', fontsize=24)
ax.set_ylabel('Frequency', fontsize=24)
plt.xticks(fontsize=14)
plt.yticks(fontsize=14)
# ax.set_xlim([0, 110])
plt.show()
# -
# # Dataset D
# +
# Set dataset
dataset = 'D'
# Get list of JSONs
files = [file for file in os.listdir(os.path.join(DATA_PATH, dataset, 'formatted')) if 'json' in file]
# Extract heart rate data
data = list()
# Heart-rate arrhythmia labels we want to distinguish from 'other'
hr_arrhythmias = ['bradycardia', 'sinus tachycardia', 'sinus bradycardia']
# Loop through files
for file in files:
    # Load file (context manager so the handle is closed promptly)
    with open(os.path.join(DATA_PATH, dataset, 'formatted', file)) as fh:
        file = json.load(fh)
    # Pick the first matching arrhythmia label, falling back to 'other'.
    # The original inner loop overwrote `label` on every pass, so only the
    # LAST entry of hr_arrhythmias could ever be reported, and `label`
    # leaked from the previous file whenever 'labels_full' was empty.
    label = 'other'
    if file['labels_full']:
        for hr_arrhythmia in hr_arrhythmias:
            if hr_arrhythmia in file['labels_full']:
                label = hr_arrhythmia
                break
    # np.float was removed in NumPy 1.24; the builtin float is equivalent.
    data.append({'filename': file['filename'], 'dataset': dataset, 'label': label, 'hr': float(file['hr'])})
# Create DataFrame
data_df_d = pd.DataFrame(data)
# View DataFrame
data_df_d.head()
# +
# Plot the heart-rate distribution for each label in dataset D
fig = plt.figure(figsize=(15, 5), facecolor='w')
fig.subplots_adjust(wspace=0, hspace=0.05)
ax = plt.subplot2grid((1, 1), (0, 0))
ax.set_title('Dataset: {}'.format(dataset))
# Overlay one translucent histogram per label (same order/colors as before)
for lbl, shade in [('other', 'g'), ('bradycardia', 'm'),
                   ('sinus tachycardia', 'c'), ('sinus bradycardia', 'b')]:
    ax.hist(data_df_d['hr'][data_df_d['label'] == lbl].values, bins=30,
            facecolor=shade, edgecolor=[0.7, 0.7, 0.7], linewidth=1.2,
            alpha=0.5, label=lbl)
plt.legend()
ax.set_xlabel('Heart Rate, BPM', fontsize=24)
ax.set_ylabel('Frequency', fontsize=24)
plt.xticks(fontsize=14)
plt.yticks(fontsize=14)
# ax.set_xlim([0, 110])
plt.show()
# -
# # Dataset E
# +
# Set dataset
dataset = 'E'
# Get list of JSONs
files = [file for file in os.listdir(os.path.join(DATA_PATH, dataset, 'formatted')) if 'json' in file]
# Extract heart rate data
data = list()
# Heart-rate arrhythmia labels we want to distinguish from 'other'
hr_arrhythmias = ['bradycardia', 'sinus tachycardia', 'sinus bradycardia']
# Loop through files
for file in files:
    # Load file (context manager so the handle is closed promptly)
    with open(os.path.join(DATA_PATH, dataset, 'formatted', file)) as fh:
        file = json.load(fh)
    # Pick the first matching arrhythmia label, falling back to 'other'.
    # The original inner loop overwrote `label` on every pass, so only the
    # LAST entry of hr_arrhythmias could ever be reported, and `label`
    # leaked from the previous file whenever 'labels_full' was empty.
    label = 'other'
    if file['labels_full']:
        for hr_arrhythmia in hr_arrhythmias:
            if hr_arrhythmia in file['labels_full']:
                label = hr_arrhythmia
                break
    # np.float was removed in NumPy 1.24; the builtin float is equivalent.
    data.append({'filename': file['filename'], 'dataset': dataset, 'label': label, 'hr': float(file['hr'])})
# Create DataFrame
data_df_e = pd.DataFrame(data)
# View DataFrame
data_df_e.head()
# +
# Plot the heart-rate distribution for each label in dataset E
fig = plt.figure(figsize=(15, 5), facecolor='w')
fig.subplots_adjust(wspace=0, hspace=0.05)
ax = plt.subplot2grid((1, 1), (0, 0))
ax.set_title('Dataset: {}'.format(dataset))
# Overlay one translucent histogram per label (same order/colors as before)
for lbl, shade in [('other', 'g'), ('bradycardia', 'm'),
                   ('sinus tachycardia', 'c'), ('sinus bradycardia', 'b')]:
    ax.hist(data_df_e['hr'][data_df_e['label'] == lbl].values, bins=30,
            facecolor=shade, edgecolor=[0.7, 0.7, 0.7], linewidth=1.2,
            alpha=0.5, label=lbl)
plt.legend()
ax.set_xlabel('Heart Rate, BPM', fontsize=24)
ax.set_ylabel('Frequency', fontsize=24)
plt.xticks(fontsize=14)
plt.yticks(fontsize=14)
# ax.set_xlim([0, 110])
plt.show()
# -
# # Dataset F
# +
# Set dataset
dataset = 'F'
# Get list of JSONs
files = [file for file in os.listdir(os.path.join(DATA_PATH, dataset, 'formatted')) if 'json' in file]
# Extract heart rate data
data = list()
# Heart-rate arrhythmia labels we want to distinguish from 'other'
hr_arrhythmias = ['bradycardia', 'sinus tachycardia', 'sinus bradycardia']
# Loop through files
for file in files:
    # Load file (context manager so the handle is closed promptly)
    with open(os.path.join(DATA_PATH, dataset, 'formatted', file)) as fh:
        file = json.load(fh)
    # Pick the first matching arrhythmia label, falling back to 'other'.
    # The original inner loop overwrote `label` on every pass, so only the
    # LAST entry of hr_arrhythmias could ever be reported, and `label`
    # leaked from the previous file whenever 'labels_full' was empty.
    label = 'other'
    if file['labels_full']:
        for hr_arrhythmia in hr_arrhythmias:
            if hr_arrhythmia in file['labels_full']:
                label = hr_arrhythmia
                break
    # np.float was removed in NumPy 1.24; the builtin float is equivalent.
    data.append({'filename': file['filename'], 'dataset': dataset, 'label': label, 'hr': float(file['hr'])})
# Create DataFrame
data_df_f = pd.DataFrame(data)
# View DataFrame
data_df_f.head()
# +
# Plot the heart-rate distribution for each label in dataset F
fig = plt.figure(figsize=(15, 5), facecolor='w')
fig.subplots_adjust(wspace=0, hspace=0.05)
ax = plt.subplot2grid((1, 1), (0, 0))
ax.set_title('Dataset: {}'.format(dataset))
# Overlay one translucent histogram per label (same order/colors as before)
for lbl, shade in [('other', 'g'), ('bradycardia', 'm'),
                   ('sinus tachycardia', 'c'), ('sinus bradycardia', 'b')]:
    ax.hist(data_df_f['hr'][data_df_f['label'] == lbl].values, bins=30,
            facecolor=shade, edgecolor=[0.7, 0.7, 0.7], linewidth=1.2,
            alpha=0.5, label=lbl)
plt.legend()
ax.set_xlabel('Heart Rate, BPM', fontsize=24)
ax.set_ylabel('Frequency', fontsize=24)
plt.xticks(fontsize=14)
plt.yticks(fontsize=14)
# ax.set_xlim([0, 110])
plt.show()
| notebooks/Goodfellow/heart_rate_classification.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# language: python
# name: python3
# ---
# # The Role of Economic Factors in Political Shifts #
#
# ### by <NAME> ###
#
# #### Contents ####
# 1. Background
# 2. Data Sources
# 3. Libraries and API
# 4. Economic Analysis & Visualization
# 5. Political Analysis & Visualization
# 6. The Role of Economic Factors in Political Shifts
# 7. Conclusion
# ### 1. Background ###
#
# During the 1992 elections, political strategist <NAME> noted one of the pivotal reasons why he believed his candidate, former Arkansas governor <NAME>, had a chance at beating then-President Bush: ** "The economy, stupid!" **
#
# Carville noted that President Bush had become deeply unpopular thanks to the ongoing recession and his recent raising of taxes despite an earlier pledge not to, and he was defeated by Clinton in the election. The 1992 presidential election affirmed the political axiom that "people vote with their pocketbooks," implying that voters will reward political parties who preside over stronger economic conditions and penalize parties that govern under weaker economic conditions.
#
# The intent of this project is to analyze economic and political trends from 1948 to the present in order to explore the relationship between economic performance and political shifts in the United States.
# +
import sys
import pandas as pd
import pandas_datareader.data as web
import matplotlib.pyplot as plt
import datetime as dt
import seaborn as sns
import numpy as np
from dateutil.relativedelta import relativedelta
# (duplicate 'import datetime as dt' removed)
# %matplotlib inline
print('Python version:', sys.version)
print('Pandas version: ', pd.__version__)
print('Today: ', dt.date.today())
# -
# ### 2. Data Sources ###
#
# #### 2|1 Economic Data Source ####
#
# For the economic data, I have used the authoritative databases of the Federal Reserve Bank of St. Louis' *FRED* dataset. In particular, I am interested in the unemployment rate in the year before a presidential election relative to the average unemployment rate in the previous three years. The *FRED* dataset [is available here](https://fred.stlouisfed.org/series/PAYEMS) but can also be accessed through the DataReader panda I imported above.
# #### 2|2 Political Data Source ####
#
# For the political data, I found a recently-published report from the think-tank Brookings Institute titled *Vital Statistics on Congress* which was published on January 9th, 2017. The Brookings report [can be viewed here](https://www.brookings.edu/multi-chapter-report/vital-statistics-on-congress/). The report features dozens of individual datasources, and I have chosen to use *Data Table 1-20 Political Parties of Senators and Representatives, 34th - 114th Congresses, 1855-2015*, a PDF of which [can be viewed here](https://www.brookings.edu/wp-content/uploads/2017/01/vitalstats_ch1_tbl20.pdf).
# ### 3. Slicing and Cleaning Datasets ###
# As noted above, the economic dataset is readily available through the DataReader panda. The political data is available through Brookings as either a PDF, Excel, or CSV file. In the below cell I retrieve the CSV file and assign it to my DataFrame.
# Brookings 'Vital Statistics on Congress' data table 1-20 (CSV download).
URL1 = 'https://www.brookings.edu/wp-content/uploads/'
URL2 = '2017/01/vitalstats_ch1_tbl20.csv'
poldata = pd.read_csv(URL1 + URL2)
# The original used pd.DataFrame(poldata, index=['Congress']), which
# REINDEXES the frame to a single all-NaN row labelled 'Congress'.  The
# intent was to index the table by the 'Congress' column.
df = poldata.set_index('Congress')
# This DataFrame is a good start, but I need to clean it up to not only narrow the date range to our 1950-2015 period, but to also eliminate third parties (Column 4's 'Other') and 'Vacant' seats, and finally to create a more convenient layout for analysis.
# First, let's drop all of the results in the 'PartyStatus' column that aren't Republican or Democrat.
# Keep only the two major parties (drops 'Other' and 'Vacant').
poldata = poldata[poldata['PartyStatus'].isin(['Republican', 'Democrat'])]
# Next, let's remove the earlier historical data from before our survey (before the 81st Congress took office in 1949).
# Restrict to the 81st Congress (seated 1949) onward.
poldata = poldata[poldata['Congress'] >= 81]
# Check the column dtypes so the later numeric operations behave as expected.
poldata.dtypes
# If we look at what is left, we have all the data in one large DataFrame. For the ease of plotting, we will split these up into four groups later (House Democrats, House Republicans, Senate Democrats, and Senate Republicans).
# The other bit of cleaning we have to do is to convert the index 'Congress' from the Congressional number (which began with the 1st Congress in 1787) to the AD year date in order to compare economic and political data. To do this I will create a yearconverter program and apply it to the 'Congress' column of my dataframe to modify the column 'Years' to not reflect a date range (as it was originally) but to have one year.
# PEP 8 (E731): use a def, not a lambda assignment, for a named function.
def yearconverter(x):
    """Return the first calendar year of the x-th Congress (1787 + 2x)."""
    return x * 2 + 1787

poldata['Years'] = poldata['Congress'].apply(yearconverter)
poldata.dtypes
# Next, we can further slice the DataFrame into both chambers, Senate and House.
# Split the data by chamber, then by party within each chamber.
senate = poldata[poldata['Chamber'] == 'Senate']
house = poldata[poldata['Chamber'] == 'House']
senatedem = senate[senate['PartyStatus'] == 'Democrat']
senategop = senate[senate['PartyStatus'] == 'Republican']
housedem = house[house['PartyStatus'] == 'Democrat']
housegop = house[house['PartyStatus'] == 'Republican']
# Stacked (both-parties) frames, kept for convenient plotting later.
senatevertstack = pd.concat([senatedem, senategop], axis=0)
housevertstack = pd.concat([housedem, housegop], axis=0)
# ### 4. Economic Analysis & Visualization ###
# #### 4|1 Unemployment ####
# First, let's examine the overall unemployment trends from 1950 to 2016. To do this, we take the FRED database panda, set the dates (1950-2016), and adjust the aspect ratio, titles, and other stylistic components.
# +
# Monthly unemployment rate (FRED series UNRATE), Feb 1950 - Jan 2015.
start, end = dt.datetime(1950, 2, 1), dt.datetime(2015, 1, 1)
data = web.DataReader(['UNRATE'],'fred', start, end)
data.columns = ['Unemployment Rate']
plt.figure(figsize=plt.figaspect(0.25))
data['Unemployment Rate'].plot()
# Source attribution placed at the lower-left of the axes.
plt.text(dt.datetime(1950, 1, 1), 1, 'c. FRED Database', fontsize=10, weight='regular')
plt.suptitle('Figure 1. Unemployment Rate, 1950-2015', fontsize=20, weight='bold')
plt.show()
# -
# As we see from this chart, the US unemployment rate has fluctuated from a low of roughly 2% in the mid-1950s to a high of over 10% during the early 1980s, with great variability in these numbers throughout.
# #### 4|2 Recessions ####
# Next, we can use the same FRED database to examine the relationship between unemployment rates and recessions, which are defined by the National Bureau of Economic Research (NBER) as a "significant decline in economic activity spreading across the economy, lasting more than a few months." The below code takes the graph of unemployment rates above and highlights the months where the NBER determined the US was in a recession.
# +
start, end = dt.datetime(1950, 2, 1), dt.datetime(2015, 1, 1)
data = web.DataReader(['UNRATE', 'USREC'],'fred', start, end)
data.columns = ['Unemployment Rate', 'Recession']
plt.figure(figsize=plt.figaspect(0.25))
data['Unemployment Rate'].plot()
plt.text(dt.datetime(1948, 1, 1), 1, 'c. FRED Database', fontsize=10, weight='regular')
plt.suptitle('Figure 2. Unemployment Rate w/Recessions, 1950-2015', fontsize=20, weight='bold')
def recession_months():
    """Return the index entries of months flagged as recessions.

    Reads the module-level ``data`` frame (must contain a 0/1
    'Recession' column) and returns the index values where it is 1.
    """
    rec_flags = data['Recession']
    return rec_flags.index[rec_flags == 1]
def hl_recession(dates):
    # Shade each recession month as a light-grey vertical band one month wide.
    for date in dates:
        plt.axvspan(date, date+relativedelta(months=+1), color='k', alpha=.1, lw=0)

# Overlay the recession bands on the current unemployment figure.
hl_recession(recession_months())
# -
# ### 5. Political Analysis & Visualization ###
# Now, we will take the political datasets we cleaned up and spliced above and visualize them to better see the trends. The political datasets do not have a strict year because Congress serves on a two-year term, but the year 1950 occurred during the 81st Congress and the 115th came into office by 2015.
# Senate seats by party over time (blue = Democrats, red = Republicans).
plt.figure(figsize=plt.figaspect(.2))
p1 = plt.scatter(senatedem['Years'], senatedem['Seats'], color = 'blue')
p2 = plt.scatter(senategop['Years'], senategop['Seats'], color = 'red')
plt.legend((p1,p2),('Senate Democrats', 'Senate Republicans'),numpoints=1, loc='best')
plt.xlabel('Year')
plt.ylabel('Seats')
plt.suptitle('Figure 3. Party Seats in the Senate, 1950-2015', fontsize=20, weight='bold')
plt.show()
# House seats by party over time.
# NOTE(review): both charts are titled "Figure 3" -- presumably the second
# should be Figure 3b or Figure 4; confirm intended numbering.
plt.figure(figsize=plt.figaspect(.2))
p3 = plt.scatter(housedem['Years'], housedem['Seats'], color = 'blue')
p4 = plt.scatter(housegop['Years'], housegop['Seats'], color = 'red')
plt.legend((p3,p4),('House Democrats', 'House Republicans'),numpoints=1, loc='best')
plt.xlabel('Year')
plt.ylabel('Seats')
plt.suptitle('Figure 3. Party Seats in the House, 1950-2015', fontsize=20, weight='bold')
plt.show()
# As we can see in the chart above, there have been ebbs and flows in both the Senate and the House of Representatives, with the Democrats dominating both chambers from the 86th Congress (1959-1961) until the 104th Congress (1995-1997).
# ### 6. The Role of Economic Factors in Political Shifts ###
# Bringing it all together, for the final step we will merge the economic data with the political trend data above.
# House seats overlaid on unemployment + recession shading.
# NOTE(review): seat counts and unemployment percentages share one y-axis,
# so the scales differ by an order of magnitude -- confirm this overlay is
# the intended presentation (a twin axis may have been meant).
start, end = dt.datetime(1950, 2, 1), dt.datetime(2015, 1, 1)
data = web.DataReader(['UNRATE', 'USREC'],'fred', start, end)
data.columns = ['Unemployment Rate', 'Recession']
plt.figure(figsize=plt.figaspect(0.25))
data['Unemployment Rate'].plot()
plt.text(dt.datetime(1948, 1, 1), 1, 'c. FRED Database', fontsize=10, weight='regular')
plt.suptitle('Figure 4. US House Impact of Economic Conditions, 1950-2015', fontsize=20, weight='bold')
p3 = plt.scatter(housedem['Years'], housedem['Seats'], color = 'blue')
p4 = plt.scatter(housegop['Years'], housegop['Seats'], color = 'red')
hl_recession(recession_months())
# Same overlay for the Senate.
# NOTE(review): both figures are titled "Figure 4" -- confirm numbering.
start, end = dt.datetime(1950, 2, 1), dt.datetime(2015, 1, 1)
data = web.DataReader(['UNRATE', 'USREC'],'fred', start, end)
data.columns = ['Unemployment Rate', 'Recession']
plt.figure(figsize=plt.figaspect(0.25))
data['Unemployment Rate'].plot()
plt.text(dt.datetime(1948, 1, 1), 1, 'c. FRED Database', fontsize=10, weight='regular')
plt.suptitle('Figure 4. US Senate Impact of Economic Conditions, 1950-2015', fontsize=20, weight='bold')
p3 = plt.scatter(senatedem['Years'], senatedem['Seats'], color = 'blue')
p4 = plt.scatter(senategop['Years'], senategop['Seats'], color = 'red')
hl_recession(recession_months())
# ### 7. Conclusion ###
# It is difficult to draw from the data too many conclusions about the impact of economics. Indeed, the multitude of factors at play in any given moment, including social upheaval, challenges to cultural orders, and other factors, make it difficult to point to any one element and its impact on an election. However, there is certainly enough political variability occurring during times of economic difficulty to be able to ascribe some degree of causality between the two.
| MBA_S17/170512_PeterMeijer_EconomyAndPolitics.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# language: python
# name: python3
# ---
import pandas as pd
import numpy as nm
import seaborn as sns
import matplotlib.pylab as plt
# +
# Step 1: Loading the csv file
import pandas as pd
df = pd.read_csv('processed_data.csv')
# Step 2.1: Retrieving basic information of the data.
df.head()
# +
# Step 2: Removing a column 'Unnamed:0'
# ('Unnamed: 0' is the leftover index column written by a previous to_csv.)
processed_df = df.drop('Unnamed: 0',axis=1)
processed_df
# -
# Step 3.1: Retrieving basic information of the data (count)
print(processed_df.count())
# Step 3.2: Retrieving basic information of the data (dimension)
print(processed_df.shape)
# Step 3.3: Retrieving basic information of the data (name of coulmns)
print(processed_df.columns)
# Step 4: Generating an overall statistical measure of the dataset.
processed_df.describe().T
# +
# Analysis calculation part 1: Calculating confirmed number of cases happening in each months
# Analysis calculation part 2: Calculating confirmed number of cases happening in each month using location
# Analysis calculation part 3: Calculating confirmed number of cases happening in each month using virus type
# -
# Dropping Status == 0, i.e. keeping only confirmed outbreak records.
processed_DF = processed_df[processed_df.Status != 0]

# Exploratory slices for the first two months (kept from the original analysis).
# NOTE(review): the boolean masks are built on `df` but applied to
# `processed_DF`; this relies on both frames sharing the same index -- confirm.
pdf111 = processed_DF.loc[(df['Month'] == 1) & (df['Status'] == '1')]
pdf112 = processed_DF[(df['Month'] == 2) & (df['Status'] == '1')]
pdf112

# Monthly breakdown, parts 1-3, computed in a single loop instead of twelve
# copy-pasted cells.  The original cells contained copy-paste slips: month 4
# overwrote the month-1 slice, months 11 printed the month-1 results, and
# month 8's part-1 output was a hard-coded literal.  The loop fixes those.
# NOTE(review): Status/Location are compared against the *strings* '1'/'0'
# here while Status was compared to the integer 0 above -- confirm the
# columns' dtypes against the preprocessing step.
monthly_slices = {}
for month in range(1, 13):
    in_month = df['Month'] == month
    confirmed = processed_DF.loc[in_month & (df['Status'] >= '1')]
    home = processed_DF.loc[in_month & (df['Location'] == '1')]
    outside = processed_DF.loc[in_month & (df['Location'] == '0')]
    monthly_slices[month] = (confirmed, home, outside)
    print(confirmed['Status'].value_counts())    # part 1: confirmed cases
    print(home['Location'].value_counts())       # part 2: home-made food
    print(outside['Location'].value_counts())    # part 3: outside food
# Bar chart of reported illnesses per month across the cleaned dataset.
sns.barplot(x="Month", y="Illnesses", data=processed_df)
# # Analysis:
#
# In the above graph, months were plotted on the x-axis and the number of illnesses was
# plotted on the y-axis. This graph was created to plot and visualize the number of illnesses
# and their occurrences in various months.
#
# Monthly illness counts (Jan..Dec) split by where the food was prepared,
# transcribed from the per-month counts computed above.
# NOTE(review): this rebinds `df`, shadowing the loaded dataset -- the name
# is not reused afterwards in this notebook, but confirm before extending.
df = pd.DataFrame({
    'Home made':        [8, 5, 13, 9, 12, 12, 25, 16, 12, 8, 9, 7],
    'Prepared outside': [15, 17, 13, 20, 26, 29, 31, 27, 21, 20, 15, 17],
})
lines = df.plot.line(title='Illnesses by location of consumption of food')
lines.set_xlabel("Months")
lines.set_ylabel("Frequency of occurence")
# # Analysis:
#
# In the above graph, Months were plotted on the x-axis and the number of confirmed Illnesses based on location occuring was
# plotted on the y-axis. This graph was created to plot and visualize the number Illnesses that would have been caused by
# consumption of home made or outside food that occured in various months.
# +
# Hospitalizations per month (drawn on the current axes) and fatalities per
# month (relplot creates its own figure).
sns.lineplot(data=processed_df,x="Month", y="Hospitalizations")
sns.relplot(data=processed_df, kind="line",x="Month", y="Fatalities")
# -
# # Analysis:
#
# In the above graph, Months were plotted on the x-axis and the number of hospitalizations occuring was
# plotted on the y-axis. This graph was created to plot and visualize the number hospitalizations
# that occured in various months.
#
# In the above graph, Months were plotted on the x-axis and the number of Fatality rate occuring was
# plotted on the y-axis. This graph was created to plot and visualize the Fatality rate
# that occured in various months.
#
# # A brief summary of the EDA performed:
#
# In the following milestone, I performed a Data Analysis on the data (outbreaks.csv). The first step was going through an Analysis Pipeline which roughly consisted of cleaning, sorting and renaming the dataset. In my case, firstly, I used various functions to get the dimension, size and overall basic information of the dataset and then removed all of the rows in which atleast one value was either missing or NAN (not a number). Secondly, I then created a replacement which would replace all of the strings to numerical values (it would later on help me in my calculation). After cleaning and renaming, I moved on to the actual Data Analysis part.
#
# In the next part, then further cleaned the data (by cross checking and removing any unecessary rows or columns) and used built in measures to get a rough statistical standpoint of the data. Then, I performed calculations to establish or check whether the data showed or lacked any particular trends. These trends were then later plotted into graphs to get a visual understanding.
# The calculations primarily consisted of obtaining the occurence in numbers of illnesses, location and the virus type in each months. My findings are presented below as answers to research questions.
#
# Q1) Are there any trends in your data? If so, what are they?
#
# Through my first calculation, which was the occurrence of foodborne diseases (numerically) in each month, I found out that a trend was almost inconclusive. This was backed up by the bar graph plotted. There had been an almost linear rise in the number of cases from March to June; however, the number had fallen back in July with an increasing and decreasing trend.
#
# In the second calculation(location), one can see the pattern of consumption of home made food causing more number of illnesses
# any given month than consumption of outside food. There is also a sharp rise in these numbers between the months of may and june with the graph hitting its maxima on June. We can thus conclude that homemade food caused less illnesses than consumption of food from restaurants and especially during the months of may and june.
#
# Q2) What conclusion can be drawn from the following data?
#
# Location of the food consumed (ie home cooked or ordered/purchased from outside) was a pretty significant factor in determining
# the number of illnesses caused. One could conclude that eating food that was cooked from outside ones' house was likely going to cause more probability of illnesses than eating home cooked food . The number of cases was especially high in May and June. Hence these months would be the most riskiest to consume food from outside.
#
# Hospitalization rates graph is not very conclusive in determining any trend since the line is more fluctuating than ever.
# Hence no trend could be gathered from the graph.
#
# All in all, the fatality rate is pretty low overall (mostly), which suggests that the all incidents that requires hospitalizations most certainly are non fatal. However, from the previous trends, we do notice that the rate shoots up particularly in the months of may and june and steadily declines reaching august and then shoots up again till november.
#
# Conclusion:
#
# A) Highest recorded illnesses occurred during the months of May and June
#
# B) Consumption of food prepared from outside caused more illnesses than home cooked meals
#
# C) Could not establish a definite trend from the number of hospitalizations
#
# D) Fatality rate is overall very low, however, most of them occur during the months of May and August
| analysis/Milestone2.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# language: python
# name: python3
# ---
# Copyright (C)
# 2016 - 2019 <NAME>(<EMAIL>)
#
# https://www.cnblogs.com/pinard
#
# Permission given to modify the code as long as you keep this declaration at the top
#
# scikit-learn Adaboost类库使用小结 https://www.cnblogs.com/pinard/p/6136914.html
import numpy as np
import matplotlib.pyplot as plt
# %matplotlib inline
from sklearn.ensemble import AdaBoostClassifier
from sklearn.tree import DecisionTreeClassifier
from sklearn.datasets import make_gaussian_quantiles
# Generate a 2-D Gaussian dataset split into two classes by quantiles:
# 500 samples, 2 features, covariance coefficient 2.
X1, y1 = make_gaussian_quantiles(cov=2.0,n_samples=500, n_features=2,n_classes=2, random_state=1)
# Generate a second 2-D Gaussian dataset, also two classes by quantiles:
# 400 samples, 2 features with both means at 3, covariance coefficient 1.5.
X2, y2 = make_gaussian_quantiles(mean=(3, 3), cov=1.5,n_samples=400, n_features=2, n_classes=2, random_state=1)
# Merge the two groups into one dataset; the second group's labels are
# flipped (0 <-> 1) so the classes interleave.
X = np.concatenate((X1, X2))
y = np.concatenate((y1, - y2 + 1))
plt.scatter(X[:, 0], X[:, 1], marker='o', c=y)
# AdaBoost over depth-2 trees with discrete SAMME boosting:
# 200 estimators, learning rate 0.8.
bdt = AdaBoostClassifier(DecisionTreeClassifier(max_depth=2, min_samples_split=20, min_samples_leaf=5),
                         algorithm="SAMME",
                         n_estimators=200, learning_rate=0.8)
bdt.fit(X, y)
# +
# Build a mesh over the feature space (padding of 1, step 0.02).
x_min, x_max = X[:, 0].min() - 1, X[:, 0].max() + 1
y_min, y_max = X[:, 1].min() - 1, X[:, 1].max() + 1
xx, yy = np.meshgrid(np.arange(x_min, x_max, 0.02),
                     np.arange(y_min, y_max, 0.02))
# Classify every grid point and draw the decision regions under the data.
Z = bdt.predict(np.c_[xx.ravel(), yy.ravel()])
Z = Z.reshape(xx.shape)
cs = plt.contourf(xx, yy, Z)
plt.scatter(X[:, 0], X[:, 1], marker='o', c=y)
plt.show()
# -
# Training-set accuracy of the baseline ensemble (200 trees, lr=0.8).
# Fix: the original used Python 2 `print` statements, which are a
# SyntaxError under this notebook's Python 3 kernel; converted to print().
# (Note: score() on the training data itself, so these are optimistic.)
print("Score:", bdt.score(X, y))
# More weak learners (300) at the same learning rate.
bdt = AdaBoostClassifier(DecisionTreeClassifier(max_depth=2, min_samples_split=20, min_samples_leaf=5),
                         algorithm="SAMME",
                         n_estimators=300, learning_rate=0.8)
bdt.fit(X, y)
print("Score:", bdt.score(X, y))
# Same 300 learners with a lower learning rate (0.5).
bdt = AdaBoostClassifier(DecisionTreeClassifier(max_depth=2, min_samples_split=20, min_samples_leaf=5),
                         algorithm="SAMME",
                         n_estimators=300, learning_rate=0.5)
bdt.fit(X, y)
print("Score:", bdt.score(X, y))
# Even more learners (600) with a moderate learning rate (0.7).
bdt = AdaBoostClassifier(DecisionTreeClassifier(max_depth=2, min_samples_split=20, min_samples_leaf=5),
                         algorithm="SAMME",
                         n_estimators=600, learning_rate=0.7)
bdt.fit(X, y)
print("Score:", bdt.score(X, y))
| ensemble-learning/adaboost-classifier.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .jl
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Julia 1.0.1
# language: julia
# name: julia-1.0
# ---
using Plots
# Target function for the root-finding experiments; plot it on [-2, 2]
# to eyeball where the root lies.
f(x) = cos(x) - x
plot(f, -2, 2)
using Roots
# Bracketed search on (0, 1) with the false-position (regula falsi) method;
# returns true iff the residual at the found point is exactly zero.
function falsePos(f)
    x = find_zero(f, (0, 1), FalsePosition(), verbose=true)
    iszero(f(x))
end
using ForwardDiff
# Newton's method from x0 = 0, with the derivative supplied by
# ForwardDiff automatic differentiation.
function newton(f)
    D(f) = x->ForwardDiff.derivative(f, float(x))
    x = find_zero((f, D(f)),0, Roots.Newton(),verbose=true)
    iszero(f(x))
end
# Derivative-free second-order (Steffensen-like) method from x0 = 0.
function steffensen(f)
    x = find_zero(f, 0, Order2(), verbose=true)
    iszero(f(x))
end
# Compare the three methods on a series of test functions; the verbose
# traces show iteration counts, which are tabulated by hand further below.
# Triple root at 0.
f(x) = x^3
falsePos(f)
newton(f)
steffensen(f)
# Simple root near 0.739.
f(x) = cos(x)- x
falsePos(f)
newton(f)
steffensen(f)
# Double root at 1, simple root at -3.
f(x) =(x + 3)*(x - 1)^2
falsePos(f)
newton(f)
steffensen(f)
# Root of multiplicity 7 at 1 (hard for all methods).
f(x) = (x-1)^7
falsePos(f)
newton(f)
steffensen(f)
# Classic Newton test cubic; bracket widened to (0, 3) for false position.
f(x) = x^3 - 2*x - 5
x = find_zero(f, (0, 3), FalsePosition(), verbose=true)
iszero(f(x))
newton(f)
steffensen(f)
# Function with a pole at 0; Newton/Steffensen restarted from x0 = 1.
f(x) = exp(x) - 1 / (10 * x)^2
falsePos(f)
D(f) = x->ForwardDiff.derivative(f, float(x))
x = find_zero((f, D(f)),1, Roots.Newton(),verbose=true)
iszero(x)
x = find_zero(f, 1, Order2(), verbose=true)
iszero(f(x))
using DataFrames
# Hand-transcribed summary of the verbose traces above: iteration count,
# function-call count, and whether the residual was exactly zero.
df = DataFrame(f=String[], method=String[], iterations=Int64[], calls=Int64[], iszero=Bool[])
# +
push!(df, ["x^3", "FalsePosition", 17, 19, false])
push!(df, ["x^3", "Newton", 0, 1, true])
push!(df, ["x^3", "Steffensen", 0, 1, true])
push!(df, ["cos(x)-x", "FalsePosition", 7, 9, false])
push!(df, ["cos(x)-x", "Newton", 5, 6, true])
push!(df, ["cos(x)-x", "Steffensen", 5, 11, true])
push!(df, ["(x+3)*(x-1)^2", "FalsePosition", 0, 2, true])
push!(df, ["(x+3)*(x-1)^2", "Newton", 26, 27, false])
push!(df, ["(x+3)*(x-1)^2", "Steffensen", 26, 53, false])
push!(df, ["(x-1)^7", "FalsePosition", 0, 2, true])
push!(df, ["(x-1)^7", "Newton", 33, 34, false])
push!(df, ["(x-1)^7", "Steffensen", 33, 67, false])
push!(df, ["x^3-2*x-5", "FalsePosition", 9, 11, false])
push!(df, ["x^3-2*x-5", "Newton", 19, 20, false])
push!(df, ["x^3-2*x-5", "Steffensen", 19, 39, false])
push!(df, ["exp(x)-1/(10*x)^2", "FalsePosition", 10, 12, false])
push!(df, ["exp(x)-1/(10*x)^2", "Newton", 12, 13, false])
push!(df, ["exp(x)-1/(10*x)^2", "Steffensen", 12, 25, false])
# -
# (x-1)^7 again, this time with a wider bracket (0, 3) for false position.
f(x) = (x-1)^7
x = find_zero(f, (0, 3), FalsePosition(), verbose=true)
iszero(f(x))
# Piecewise-linear function with a root at x = 6.25 and slope changes at
# 6.0 and 6.5 (tests Newton on a non-smooth function).
function f(x)
    if ( x < 6.0 )
        fx = 0.75 * ( x - 6.25 ) - 0.3125
    elseif ( x <= 6.50 )
        fx = 2.00 * ( x - 6.25 )
    else
        fx = 0.75 * ( x - 6.25 ) + 0.3125
    end
end
# Newton started far from the root at x0 = 0.001.
D(f) = x->ForwardDiff.derivative(f, float(x))
x = find_zero((f, D(f)),0.001, Roots.Newton(),verbose=true)
iszero(f(x))
# Steffensen from a negative start for exp(x) - 1/(10x)^2.
f(x) = exp(x)-1/(10*x)^2
x = find_zero(f, -1, Order2(), verbose=true)
iszero(f(x))
| lab_6/RootFinding.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# language: python
# name: python3
# ---
# +
# SELECT runtime { local | colab }
# runtime = 'local'
# SELECT Road Network Data { osm | estrada | combined }
road_path = 'osm'
# SELECT Population Data { facebook | worldpop | census }
pop_path = 'worldpop'
# SELECT Distance Threshold in KM
distance_thresholds = [10]
# Daily probability to a stroke
prob_stroke = 0.0000114 # 0.00114% calculated out: 1 out of 3 gets stroke, average person can be 80 years old
# Average days in stroke center
average_days = 4 # 4 days, Processes of Stroke Unit Care and Outcomes at Discharge in Vietnam: Findings from the Registry of Stroke Care Quality (RES-Q) in a Major Public Hospital
# Utilization rate (% of people with a stroke actually go to the hospital)
utilization_rate = 0.8 # 80%, -> TO TEST, NO REFERENCES
# List of number of additional stroke facilities added in the optimization
# (these sweep lists are overridden with single values in the test cell below)
nhospitals = [10,20,30,40,50,60,70,80,90,100,150,200,250,300,400,500,750,1000]
# List of number of additional beds added in the optimization
nbeds = [100, 200, 300, 400, 500, 600]
# SELECT potential stroke facilities { existing hospitals }
potential_facilities = 'existing hospitals'
# +
# %%time
import pandas as pd
import geopandas as gpd
import matplotlib
import matplotlib.pyplot as plt
import plotly.express as px
import folium
import itertools
import geopy.distance
import warnings
warnings.filterwarnings('ignore')
import json
import time
from datetime import datetime
# Local paths to the project data and helper scripts.
data_path = '../../Vietnam project/0906Data/'
code_path = '../Scripts/'
import sys
# Make the project helpers and data importable/locatable.
sys.path.append(code_path)
sys.path.append(data_path)
# Import python functions
from optimization_model import OptimizationModel
from distance_matrix import DistanceCalculation, haversine_vectorize
from network_gpbp import get_nodes_and_edges
from InputDataPreprocessingv1 import CurrentHospitals, NewHospitals, NewHospitalsGrid, NewHospitalsCSV, Population, PopulationFB
from Visualization_GPBP import ParetoCurve, CreateMap
# %load_ext line_profiler
# -
# # Import data
# %%time
# Preprocessing of the road network
# Parse the preprocessed OSM road network into node/edge tables and a graph.
nodes, edges_attr, network = get_nodes_and_edges(data_path+'road_osm_preprocessed.geojson')
#Plot the edges as a quick sanity check of the network extent.
f, ax = plt.subplots(figsize=(10, 10))
edges_attr.plot(ax=ax, color='grey', zorder=1)
ax.set_axis_off()
plt.show()
edges_attr.shape
# %%time
# Current Hospitals -- stroke facilities in this case
health_facilities = pd.read_csv(data_path+'stroke-facs.csv').reset_index()
# Keep only the columns the preprocessing helper expects.
health_facilities = health_facilities[['index','longitude','latitude','Name_English']]
# Snap each facility to its nearest network node.
current_hospitals_ID, current_hospitals = CurrentHospitals(health_facilities, network, nodes)
len(current_hospitals)
# +
# %%time
# Location of new potential hospitals.
# Pick "1kmGrid", "5kmGrid", "10kmGrid" or "existing hospitals"
# Candidate sites for new stroke facilities: all existing hospitals,
# snapped to the road network (current stroke facilities are excluded
# inside NewHospitals -- TODO confirm against the helper's implementation).
hospitals = gpd.read_file(data_path+'hospitals_vietnam.geojson')
new_hospitals_ID, new_hospitals = NewHospitals(current_hospitals, hospitals, network, nodes)
len(new_hospitals)
# +
# %%time
# Population Data
#Round the coordinates to cluster the population. 8 digits: no rounding
digits_rounding = 1
# NOTE(review): only 'facebook' and 'worldpop' are handled; selecting
# 'census' would leave read_population undefined (NameError below).
if pop_path == 'facebook':
    read_population = pd.read_csv(data_path+'pop_fb_2020.csv').reset_index()
    read_population = read_population[['index','longitude','latitude','population_2020']]
elif pop_path == 'worldpop':
    read_population = pd.read_csv(data_path+'WorldPop_2020_1km.csv').reset_index()
array_household, population = PopulationFB(digits_rounding, read_population, network, nodes)
len(population)
# -
# # Distance matrix
# Determine the smallest and largest threshold
distance_threshold_largest = max(distance_thresholds)
distance_threshold_smallest = min(distance_thresholds)
# %%time
# Calculate distance from households to current hospitals
pop_subset = population[['nearest_node','xcoord','ycoord']]
matrix_current = DistanceCalculation(network, current_hospitals, population, pop_subset, distance_threshold_largest)
matrix_current = matrix_current[['ID','pop_dist_road_estrada','household_count','xcoord','ycoord',
                                 'Hosp_ID','hosp_dist_road_estrada','Longitude','Latitude',
                                 'shortest_path_length','euclidean_distance','total_network_distance', 'distance_corrected']]
matrix_current = matrix_current.rename(columns={'ID':'Pop_ID'})
# %%time
# Create a subset of the population.
# This subset contains only the population that is not already within a range of S kilometers
withinSkm = matrix_current[matrix_current.total_network_distance<=distance_threshold_smallest]
withinSkm=withinSkm['Pop_ID'].unique()
population_new = population.drop(population[population.ID.isin(withinSkm)].index.tolist())
# %%time
# Calculate distance to new hospitals
pop_subset = population_new[['nearest_node','xcoord','ycoord']]
# NOTE(review): the full `population` frame is passed here while pop_subset
# comes from population_new -- confirm this is intentional and not a slip.
matrix_new = DistanceCalculation(network, new_hospitals, population, pop_subset, distance_threshold_largest)
matrix_new = matrix_new[['ID','pop_dist_road_estrada','household_count','xcoord','ycoord',
                         'Cluster_ID','hosp_dist_road_estrada','Longitude','Latitude',
                         'shortest_path_length','euclidean_distance','total_network_distance', 'distance_corrected']]
matrix_new = matrix_new.rename(columns={'ID':'Pop_ID'})
# # Optimization
# %%time
# Add matrix with distances of new hospitals to matrix of distances of current hospitals
# Unify the two distance matrices under common column names
# (Hosp_ID / Cluster_ID both become 'HospCluster') and stack them.
matrix_new1 = matrix_new[['Pop_ID','Cluster_ID','distance_corrected']]
matrix_current1 = matrix_current[['Pop_ID','Hosp_ID','distance_corrected']]
matrix_new1.columns = ['Pop_ID','HospCluster','distance']
matrix_current1.columns = ['Pop_ID','HospCluster','distance']
df_combined = pd.concat([matrix_current1, matrix_new1],axis=0)
df_combined= df_combined.sort_values(by='Pop_ID')
# # Model
#
#
#
#
# $\textrm{Maximize } \quad \sum_{i \in I} \sum_{j \in J} v_iy_{ij} $
#
# $\textrm{Subject to } \quad (1) x_j = 1 \quad \quad \quad \quad \quad\forall j = 1, ... m $
#
# $ \quad \quad \quad \quad \quad (2) \sum^{M}_{j=m+1} x_j \leq p $
#
# $ \quad \quad \quad \quad \quad (3) \sum_{j=1}^M z_j \leq q $
#
# $ \quad \quad \quad \quad \quad (4) \sum_{i \in I} y_{ij} \leq nx_j \quad \quad \quad \quad \quad \forall j \in J $
#
# $ \quad \quad \quad \quad \quad (5) z_j \leq Mx_j \quad \quad \quad \quad \quad \forall j \in J $
#
# $ \quad \quad \quad \quad \quad (6) \sum_{j \in J} y_{ij} \leq 1 \quad \quad \quad \quad \quad \forall i \in I$
#
# $ \quad \quad \quad \quad \quad (7) a * s * \sum_{i \in I} v_iy_{ij} \leq u(c_j + z_j) \quad \quad \quad \quad \quad \forall j \in J $
#
# $ \quad \quad \quad \quad \quad (8) y_{ij} = 0 \quad \quad \quad \quad \quad \forall i \in I, \forall j \in J, d_{ij} > S $
#
# $ \quad \quad \quad \quad \quad x_j \in \{0,1\}, y_{ij} \in [0,1] \quad \quad\quad \quad \quad\forall i \in I, \forall j \in J $
#
# where:
#
# - $I$ = the index set of households, or clusters of households, indexed by $i = 1, ..., n$.
#
# - $J$ = the index set of all healthcare sites, where indices $j = 1, ..., m$ are corresponding to the already existing healthcare facilities and indices $j = m+1, ..., M$ are corresponding to potential hospital locations
#
# - $v_i$ = the number of people in (cluster of) household(s) $i$
#
# - $d_{ij}$ = the travel distance from (cluster of) household(s) $i$ to hospital facility $j$
#
# - $S$ = the maximum travel distance from a household (or cluster) to a health care facility
#
# - $p$ = the number of additional stroke centers
#
# - $q$ = the total number of additional capacity (in #beds)
#
# - $c_j$ = the current capacity at stroke center $j$ (in # beds)
#
# - $u$ = maximal allowed utilization rate (between 0 and 1)
#
# - $s$ = average number of days a patient occupies a bed in a stroke center
#
# - $a$ = probability (per days) that a person has to visit a stroke center
#
# Decision variables:
# $
# x_j = \begin{cases} 1 & \text{ if hospital } j \text{ is opened} \\
# 0 & \text{ otherwise}
# \end{cases} \\
# $
# $
# y_i = \begin{cases} 1 & \text{ if there is an opened health facility within } S \\ & \text{ kilometers travel distance away from the (cluster of) household(s) } i\\
# 0 & \text{ otherwise}
# \end{cases} \\
# $
# $
# z_j = \text{extra capacity at stroke center j}
# $
#
def getvariables(n, m, X, Y, Z, II, JJ):
    """Extract solution values from the solved facility-location model.

    Parameters
    ----------
    n : int
        Number of (clusters of) households. Unused; kept for interface
        compatibility with the original signature.
    X, Z : indexable of Gurobi variables
        Open/closed indicators and extra-bed counts, one per site.
    Y : indexable of Gurobi variables
        Assignment variables. Unused; assignments are reported from the
        (II, JJ) pairs instead.
    m : int
        Number of candidate hospital sites.
    II, JJ : sequences
        Parallel sequences of population IDs and hospital-cluster IDs for
        every in-threshold population/hospital pair.

    Returns
    -------
    (Xvalues, Yvalues, Zvalues)
        Solved x_j and z_j values as numpy arrays, plus a dict mapping
        each site index to the list of population IDs paired with it.
    """
    import numpy as np
    Xvalues = np.zeros(m)
    Zvalues = np.zeros(m)
    for j in range(m):
        Xvalues[j] = X[j].x
        Zvalues[j] = Z[j].x
    # Bug fix: the original iterated a module-level `combinations` variable
    # instead of the II/JJ parameters, so the function only worked when that
    # global happened to exist (and described the right model). It also
    # allocated a throwaway np.zeros(n*m) that was immediately overwritten.
    Yvalues = {j: [] for j in range(m)}
    for i, j in zip(II, JJ):
        Yvalues[j].append(i)
    return (Xvalues, Yvalues, Zvalues)
import gurobipy as gb
# Map Gurobi integer termination-status codes to human-readable labels,
# used by Optimization() to report how the solve ended.
gurobicode = { gb.GRB.LOADED : 'loaded',
               gb.GRB.OPTIMAL : 'optimal',
               gb.GRB.INFEASIBLE : 'infeasible',
               gb.GRB.INF_OR_UNBD : 'inf_or_unbd',
               gb.GRB.UNBOUNDED : 'unbounded',
               gb.GRB.CUTOFF : 'cutoff',
               gb.GRB.ITERATION_LIMIT : 'iteration_limit',
               gb.GRB.NODE_LIMIT : 'node_limit',
               gb.GRB.TIME_LIMIT : 'time_limit',
               gb.GRB.SOLUTION_LIMIT : 'solution_limit',
               gb.GRB.INTERRUPTED : 'interrupted',
               gb.GRB.NUMERIC : 'numeric',
               gb.GRB.SUBOPTIMAL : 'suboptimal',
               gb.GRB.INPROGRESS : 'inprogress',
               gb.GRB.USER_OBJ_LIMIT : 'user_obj_limit'}
# +
# Optimize directly for all number of beds in nmax_beds and all additional number
# of hospitals in hosp_count
def Optimization(nmax_beds, array_household, current_hospitals_ID, new_hospitals_ID, distance_matrix, S, hosp_count, maxTimeInSeconds = 100, mipGap = 0.001, trace=False, seed = 2021 ):
    """Build the maximal-covering facility-location MIP and solve it for
    every combination of additional facilities (hosp_count) and additional
    beds (nmax_beds), reusing the same Gurobi model between solves.

    Returns (df_opt_array, Xvalues, Yvalues, Zvalues, modelling_time,
    solving_time, status) where Xvalues/Yvalues/Zvalues come from the LAST
    solve and df_opt_array tabulates covered population per scenario.

    NOTE(review): also reads module-level matrix_current1, population,
    prob_stroke, average_days, utilization_rate and gurobicode -- confirm
    those are defined before calling.
    """
    import time
    import gurobipy as gb
    from gurobipy import GRB
    import numpy as np
    import pandas as pd
    import random
    np.random.seed( seed )
    tStart = time.time()
    obj_val_array = []
    # Only population/hospital pairs within the threshold S can be matched.
    distances = distance_matrix[distance_matrix.distance <= S]
    existinghosp = len(current_hospitals_ID)
    m = len(current_hospitals_ID) + len(new_hospitals_ID)
    n = len(array_household)
    p = existinghosp + 0
    maxbeds = 0
    groot_getal = 1000000000  # big-M constant ("groot getal" is Dutch for "large number")
    # Create existing capacity: people currently served per facility,
    # converted to an approximate bed count via stroke probability,
    # length of stay and allowed utilization.
    currently_served = matrix_current1[matrix_current1.distance <= S]
    currently_served = currently_served.merge(population[['ID','household_count']],how='left', left_on='Pop_ID', right_on = 'ID')
    c=[0]*(len(current_hospitals_ID) + len(new_hospitals_ID))
    for i in currently_served['HospCluster'].unique():
        served_hosp_i = currently_served[currently_served.HospCluster == i]
        c[i] = served_hosp_i['household_count'].sum()
    currently_served = currently_served[['Pop_ID', 'household_count']].drop_duplicates().sum()
    average_served = sum(c) / currently_served['household_count'].sum()
    # Random per-facility factor in [0.8, 1.0) adds heterogeneity to the
    # estimated existing capacities (seeded above for reproducibility).
    ratio = np.random.uniform(low=0.8, high=1.0, size=(len(c))) * prob_stroke * average_days * (1/utilization_rate) / average_served.sum()
    c = np.round(np.multiply(ratio, c), decimals = 0)
    # Existing capacity as large number (no limitations)
    # c = [10000000] * m
    II = distances['Pop_ID']
    JJ = distances['HospCluster']
    combinations = gb.tuplelist(zip(II,JJ))
    M = gb.Model("Facility location problem")
    # Add variables AND objective (negated household counts: Gurobi
    # minimizes by default, so this maximizes covered population).
    X = M.addVars(m, vtype=gb.GRB.BINARY)
    Z = M.addVars(m, lb = 0, vtype=gb.GRB.INTEGER)
    Y = M.addVars(combinations, lb=0, ub=1, vtype=gb.GRB.CONTINUOUS, obj=[-array_household[i] for i,j in combinations])
    # Create lists for I and J (per-site and per-household views of Y).
    ah = { j : [] for j in range(m) }
    JI = { j : [] for j in range(m) }
    IJ = { i : [] for i in range(n) }
    for i,j in combinations:
        ah[j].append(array_household[i])
        JI[j].append(Y[i,j])
        IJ[i].append(Y[i,j])
    #Set parameters
    M.Params.OutputFlag = trace
    M.Params.mipgap = mipGap
    M.Params.timelimit = maxTimeInSeconds
    #Constraints
    # 1 Existing hospitals are opened
    M.addConstrs( (X[j] == 1 for j in range(existinghosp)), name="Existing hospitals" )
    #2 Additional hopsitals less or equal to maximum (kept as s1 so it can
    #  be swapped out per scenario below)
    s1 = M.addLConstr( gb.LinExpr( [ (1,X[j]) for j in range(m) ] ) <= p )
    #3 Extra beds less or equal to maximum (kept as s2, swapped per scenario)
    s2 = M.addLConstr( gb.LinExpr( [ (1,Z[j]) for j in range(m) ] ) <= maxbeds )
    #4 Only assign beneficiaries if the hospital is opened
    #5 Only add beds when the hospital is opened
    #7 Utilization constraint
    for j in range(m):
        M.addLConstr( Z[j] <= groot_getal*X[j] )
        M.addLConstr( gb.LinExpr( [1]*len(JI[j]), JI[j] ) <= n*X[j] )
        M.addLConstr( prob_stroke*average_days*gb.LinExpr( ah[j], JI[j] ) <= utilization_rate*(c[j]+Z[j]) )
    #6 One can only be assigned once
    for i in set(II):
        M.addLConstr( gb.LinExpr( [1]*len(IJ[i]), IJ[i] ) <= 1 )
    modelling_time = time.time() - tStart
    tStart = time.time()
    # Optimize and extract solution: baseline (0 new facilities, 0 beds),
    # then sweep bed counts, then sweep facility counts x bed counts.
    M.optimize()
    obj_val = -M.objVal
    obj_val_array.append([S,0,0,obj_val])
    for each_bed_count in nmax_beds:
        M.remove(s2)
        s2 = M.addLConstr( gb.LinExpr( [ (1,Z[j]) for j in range(m) ] ) <= each_bed_count )
        M.optimize()
        obj_val = -M.objVal
        obj_val_array.append([S, 0,each_bed_count,obj_val])
    # Iterate for multiple additional hospital facilities
    for each_hosp_count in hosp_count:
        M.remove(s1)
        p = existinghosp + each_hosp_count
        s1 = M.addLConstr( gb.LinExpr( [ (1,X[j]) for j in range(m) ] ) <= p )
        for each_bed_count in nmax_beds:
            M.remove(s2)
            s2 = M.addLConstr( gb.LinExpr( [ (1,Z[j]) for j in range(m) ] ) <= each_bed_count )
            M.optimize()
            obj_val = -M.objVal
            obj_val_array.append([S, each_hosp_count,each_bed_count,obj_val])
    solving_time = time.time() - tStart
    # Extract the variable values of the final solve (inlined version of
    # getvariables(); see the commented call below).
    Xvalues = np.zeros(m)
    Yvalues = np.zeros(n*m)
    Zvalues = np.zeros(m)
    for j in range(m):
        Xvalues[j]=X[j].x
        Zvalues[j]=Z[j].x
    Yvalues = { j: [] for j in range(m) }
    for i,j in combinations:
        Yvalues[j].append(i)
    # Xvalues, Yvalues, Zvalues = getvariables(n, m, X, Y, Z, II, JJ)
    # Tabulate the per-scenario results.
    df_opt_array = pd.DataFrame(obj_val_array)
    df_opt_array.columns = ['km','number_of_new_facilities','number_of_extra_beds','count']
    df_opt_array['number_of_facilities'] = df_opt_array['number_of_new_facilities'] + existinghosp
    df_opt_array['%'] = (df_opt_array['count']*100/sum(array_household)).round(1)
    # return obj_val, Xvalues, Yvalues, Zvalues
    return df_opt_array, Xvalues, Yvalues, Zvalues, modelling_time, solving_time, gurobicode[M.status]
# +
# %%time
# nmax_beds = [10,50,100,500,1000,2000]
# nhospitals = [10,20,30,40,50,60,70,80,90,100,150,200,250,300,400,500,750,1000]
nmax_beds = [100]
nhospitals = [100]
df_combined_output = pd.DataFrame()
for each_threshold in distance_thresholds:
    # Run the capacitated model for this distance threshold.
    # BUG FIX: the modelling/solving times were previously discarded as `_`,
    # so the print below referenced tSolving/tModelling that this cell never
    # defined (NameError unless leaked from an earlier run).
    (opt_array, Xvalues, Yvalues, Zvalues,
     tModelling, tSolving, _) = Optimization(
        nmax_beds, array_household, current_hospitals_ID, new_hospitals_ID,
        df_combined, each_threshold, nhospitals,
        maxTimeInSeconds=300, mipGap=0.001, trace=False)
    df_opt_outputs = pd.DataFrame(opt_array)
    # DataFrame.append() was removed in pandas 2.0 -> use pd.concat.
    df_combined_output = pd.concat([df_combined_output, df_opt_outputs])
    print("Threshold distance: " + str(each_threshold))
    print("Solving time in minutes: " + str(tSolving/60) + ", modelling time: " + str(tModelling/60))
# -
# Largest number of extra beds assigned to any single hospital in the last run.
max(Zvalues)
# Rebuild the (population, hospital) pairs within the travel-distance limit.
# NOTE(review): S is not defined in this cell — it presumably leaks from an
# earlier cell or a previous function run; confirm before re-executing.
distances = df_combined[df_combined.distance <= S]
II = distances['Pop_ID']
JJ = distances['HospCluster']
combinations = gb.tuplelist(zip(II,JJ))
combinations
# %lprun -u 1e-3 -T opt.txt -f Optimization Optimization(nmax_beds, array_household, current_hospitals_ID, new_hospitals_ID, df_combined, each_threshold, nhospitals, maxTimeInSeconds = 300, mipGap = 0.001, trace=False)
df_combined_output.head(20)
# # Optimization without capacity
# ## Model
#
# $\textrm{Maximize } \quad \sum_i v_iy_i $
#
# $\textrm{Subject to } \quad x_j = 1 \quad \quad \quad \quad \quad\forall j = 1, ... m $
#
# $ \quad \quad \quad \quad \quad \sum^{M}_{j=m+1} x_j \leq p $
#
# $ \quad \quad \quad \quad \quad y_i \leq \sum_{j|d_{ij}\leq S} x_j \quad \forall i \in I$
#
# $ \quad \quad \quad \quad \quad y_i, x_j \in \{0,1\} \quad \quad\forall i \in I, \forall j \in J $
#
# where:
#
# - $I$ = the index set of households, or clusters of households, indexed by $i = 1, ..., n$.
#
# - $J$ = the index set of all healthcare sites, where indices $j = 1, ..., m$ are corresponding to the already existing healthcare facilities and indices $j = m+1, ..., M$ are corresponding to potential hospital locations
#
# - $v_i$ = the number of people in (cluster of) household(s) $i$
#
# - $d_{ij}$ = the travel distance from (cluster of) household(s) $i$ to hospital facility $j$
#
# - $S$ = the maximum travel distance from a household (or cluster) to a health care facility
#
# - $p$ = the number of additional hospitals located.
#
# Decision variables:
# $
# x_j = \begin{cases} 1 & \text{ if hospital } j \text{ is opened} \\
# 0 & \text{ otherwise}
# \end{cases} \\
# $
# $
# y_i = \begin{cases} 1 & \text{ if there is an opened health facility within } S \\ & \text{ kilometers travel distance away from the (cluster of) household(s) } i\\
# 0 & \text{ otherwise}
# \end{cases}
# $
#
# Interesting to check whether this model gives the same results when you set a huge capacity on the extra number of beds added in the previous model.
#
nhospitals = [10,20,30,40,50,60,70,80,90,100,150,200,250,300,400,500,750,1000]
df_combined_output = pd.DataFrame()
for each_threshold in distance_thresholds:
    # Run the uncapacitated model once per distance threshold.
    opt_array, tModelling, tSolving = OptimizationModel(array_household, current_hospitals_ID, new_hospitals_ID, df_combined, each_threshold, nhospitals)
    df_opt_outputs = pd.DataFrame(opt_array)
    # DataFrame.append() was removed in pandas 2.0 -> use pd.concat.
    df_combined_output = pd.concat([df_combined_output, df_opt_outputs])
    print("Threshold distance: " + str(each_threshold))
    print("Solving time: " + str(tSolving/60) + ", modelling time: " + str(tModelling/60))
df_combined_output
| Analytics Notebooks/22-10 Vietnam capacitated model.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# language: python
# name: python3
# ---
from tensorflow.keras.datasets import mnist
import numpy as np
import matplotlib.pyplot as plt
# Load the MNIST digit images (28x28 grayscale) and their labels.
(train_X, train_y), (test_X, test_y) = mnist.load_data()
# +
# reshape datasets, create toy model for initial experimentation
POC = train_X[0:599]
num,d1,d2 = POC.shape
POC = POC.reshape((num, d1 * d2))
POC_lbl = train_y[0:599]
# NOTE(review): POC_tst (rows 400:599) is a subset of the POC training slice
# (rows 0:599), so any score on it is optimistic — confirm this overlap is
# intended for the toy experiment.
POC_tst = train_X[400:599]
num,d1,d2 = POC_tst.shape
POC_tst = POC_tst.reshape((num, d1 * d2))
POC_lbl_tst = train_y[400:599]
# Flatten the full train/test image stacks to (n_samples, 784) for sklearn.
num,d1,d2 = train_X.shape
train_X_transform = train_X.reshape((num, d1*d2))
num,d1,d2 = test_X.shape
test_X_transform = test_X.reshape((num, d1*d2))
# -
from sklearn.neural_network import MLPClassifier
# Baseline: one hidden layer of 300 units, ReLU activation, SGD training.
nnet = MLPClassifier(hidden_layer_sizes=(300,), activation='relu', max_iter=250, solver='sgd', learning_rate_init=.001)
nnet.fit(train_X_transform,train_y)
print(nnet.score(test_X_transform,test_y))
# Same architecture with a logistic (sigmoid) activation for comparison.
nnet = MLPClassifier(hidden_layer_sizes=(300,), activation='logistic', max_iter=250, solver='sgd', learning_rate_init=.001)
nnet.fit(train_X_transform,train_y)
print(nnet.score(test_X_transform,test_y))
# +
# Sweep the hidden-layer width for a ReLU network and record test accuracy.
sizes = [5,10,20,40,50,60,70,100,150,200,300,400]
scores = []
for size in sizes:
    nnet = MLPClassifier(hidden_layer_sizes=(size,), activation='relu', max_iter=350, solver='sgd', learning_rate_init=.001)
    nnet.fit(train_X_transform[0:5000], train_y[0:5000])
    score = nnet.score(test_X_transform[0:1000], test_y[0:1000])
    scores.append(score)
    print(size, ': ', score)
# -
plt.scatter(sizes, scores)
plt.title('Relu Neural Network Classification Accuracy vs Hidden Layer Size (n = 5000)')
plt.xlabel('Hidden Layer Size')
plt.ylabel('Classifier Accuracy')
plt.show()
# +
# Sweep the hidden-layer width for a logistic-activation network.
log_sizes = [5,10,20,40,50,60,70,100,150,200,300,400]
log_scores = []
for log_size in log_sizes:
    # BUG FIX: the original indexed `sizes[i]` here, silently relying on
    # `sizes` and `log_sizes` holding identical values.
    nnet = MLPClassifier(hidden_layer_sizes=(log_size,), activation='logistic', max_iter=350, solver='sgd', learning_rate_init=.001)
    nnet.fit(train_X_transform[0:5000], train_y[0:5000])
    score = nnet.score(test_X_transform[0:1000], test_y[0:1000])
    log_scores.append(score)
    print(log_size, ': ', score)
# -
plt.scatter(log_sizes, log_scores)
plt.title('Logistic Neural Network Classification Accuracy vs Hidden Layer Size (n = 5000)')
plt.xlabel('Hidden Layer Size')
plt.ylabel('Classifier Accuracy')
plt.show()
# +
# Sweep the hidden-layer width for a tanh-activation network.
tanh_sizes = [5,10,20,40,50,60,70,100,150,200,300,400]
tanh_scores = []
for tanh_size in tanh_sizes:
    # BUG FIX: the original indexed `sizes[i]` here, silently relying on
    # `sizes` and `tanh_sizes` holding identical values.
    nnet = MLPClassifier(hidden_layer_sizes=(tanh_size,), activation='tanh', max_iter=350, solver='sgd', learning_rate_init=.001)
    nnet.fit(train_X_transform[0:5000], train_y[0:5000])
    score = nnet.score(test_X_transform[0:1000], test_y[0:1000])
    tanh_scores.append(score)
    print(tanh_size, ': ', score)
# -
plt.scatter(tanh_sizes, tanh_scores)
plt.title('Hyperbolic Tangent Neural Network Classification Accuracy vs Hidden Layer Size (n = 5000)')
plt.xlabel('Hidden Layer Size')
plt.ylabel('Classifier Accuracy')
plt.show()
# +
# Sweep the SGD learning rate for the 300-unit ReLU network.
rates = [0.0001,0.001,0.01,0.1,1]
relu_learn_scores = []
for rate in rates:
    nnet = MLPClassifier(hidden_layer_sizes=(300,), activation='relu', max_iter=350, solver='sgd', learning_rate_init=rate)
    nnet.fit(train_X_transform[0:5000], train_y[0:5000])
    score = nnet.score(test_X_transform[0:1000], test_y[0:1000])
    relu_learn_scores.append(score)
    print('learning rate of ', rate, ': ', score)
# +
# Sweep the SGD learning rate for the logistic network.
rates = [0.00001,0.0001,0.001,0.01,0.1,1]
log_learn_scores = []
for rate in rates:
    nnet = MLPClassifier(hidden_layer_sizes=(300,), activation='logistic', max_iter=350, solver='sgd', learning_rate_init=rate)
    nnet.fit(train_X_transform[0:5000], train_y[0:5000])
    score = nnet.score(test_X_transform[0:1000], test_y[0:1000])
    log_learn_scores.append(score)
    print('learning rate of ', rate, ': ', score)
# +
# Sweep the SGD learning rate for the tanh network.
rates = [0.0001,0.001,0.01,0.1,1]
tanh_learn_scores = []
for rate in rates:
    nnet = MLPClassifier(hidden_layer_sizes=(300,), activation='tanh', max_iter=350, solver='sgd', learning_rate_init=rate)
    nnet.fit(train_X_transform[0:5000], train_y[0:5000])
    score = nnet.score(test_X_transform[0:1000], test_y[0:1000])
    tanh_learn_scores.append(score)
    print('learning rate of ', rate, ': ', score)
# -
| JupyterNotebooks/.ipynb_checkpoints/NeuralNet-checkpoint.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 2
# language: python
# name: python2
# ---
import stitch_data as sd
import numpy as np
import matplotlib.pyplot as plt
# Load the ROS bag and extract the position track and radiation stream.
f_name = 'drive_18Mar_1.bag'
bag = sd.getbag(f_name)
track = sd.extractTrack(bag)
raddata = sd.extractRadData(bag)
# Align radiation samples with positions and derive per-point color/size.
xpos, ypos = sd.dataMerge(track, raddata)
colors, circles = sd.colorizer(raddata, circles=2.2, data_param='total', log=False)
# +
# Plot the driven path with the radiation counts overlaid.
# NOTE(review): y is negated (-1*ypos) — presumably to match the map
# orientation; confirm.
fig, ax1 = plt.subplots(figsize=(10,6))
line, = ax1.plot(-1*ypos, xpos)
scat = ax1.scatter(-1*ypos, xpos, c=colors, s=circles, cmap='viridis', alpha=.5)
# fig.colorbar(scat, ax=ax1)
cbar = plt.colorbar(scat)
# cbar.ax1.set_yticklabels(['0','1','2','>3'])
cbar.set_label('CPS', rotation=0)
# ax1.set_xlim(15, -2)
plt.title("Localization of 20uCi Cs-137 in Hallway")
ax1.set_xlabel("X Position")
ax1.set_ylabel("Y Position")
# plt.savefig('localization_map.pdf', dpi = 800)
plt.show()
# -
# -
# NOTE(review): this cell re-runs colorizer with arguments identical to the
# cell above, and the plot below duplicates the previous plot verbatim —
# likely a leftover from experimentation; confirm whether it can be removed.
colors, circles = sd.colorizer(raddata, circles=2.2, data_param='total', log=False)
# +
fig, ax1 = plt.subplots(figsize=(10,6))
line, = ax1.plot(-1*ypos, xpos)
scat = ax1.scatter(-1*ypos, xpos, c=colors, s=circles, cmap='viridis', alpha=.5)
# fig.colorbar(scat, ax=ax1)
cbar = plt.colorbar(scat)
# cbar.ax1.set_yticklabels(['0','1','2','>3'])
cbar.set_label('CPS', rotation=0)
# ax1.set_xlim(15, -2)
plt.title("Localization of 20uCi Cs-137 in Hallway")
ax1.set_xlabel("X Position")
ax1.set_ylabel("Y Position")
# plt.savefig('localization_map.pdf', dpi = 800)
plt.show()
# -
# Render the combined map for this bag file.
sd.giveMeMap(f_name)
| scripts/Analysis/CadetMapper.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# language: python
# name: python3
# ---
# Oct 30, 2019
#
# Feasibility test for Sandy Hook in /r/politics.
# +
from tqdm import tqdm
import os
os.chdir('../../')
from convokit import Corpus, User, Utterance
import nltk
from nltk.stem import SnowballStemmer
stemmer = SnowballStemmer('english')
from collections import defaultdict
# +
shooting_timestamps = {
    'Geneva County massacre': (1236657600, 1237953600),
    'Binghamton shootings': (1238731200, 1240027200),
    'Fort Hood shooting': (1257397200, 1258693200),
    'Aurora theater shooting': (1342756800, 1344052800),
    'Sandy Hook Elementary School shooting': (1355461200, 1356757200),
    'Washington Navy Yard shooting': (1379304000, 1380600000),
    'San Bernardino attack': (1449032400, 1450328400),
    'Orlando nightclub shooting': (1465704000, 1467000000),
    'Las Vegas shooting': (1506830400, 1508126400),
    'Sutherland Springs church shooting': (1509854400, 1511150400),
    'Stoneman Douglas High School shooting': (1518584400, 1519880400),
    'Santa Fe High School shooting': (1526616000, 1527912000)}
# Excluded events:
# 'Virginia Tech shooting': (1176696000, 1177387200),
# 'Thousand Oaks shooting': (1541566800, 1542258000),
# 'Pittsburgh synagogue shooting': (1540612800, 1541304000)
# -
def identify_timestamp_category(timestamp):
    """Return the shooting event whose (start, end) window contains
    *timestamp*, or None when it falls outside every window."""
    return next(
        (event for event, (start, end) in shooting_timestamps.items()
         if start <= timestamp <= end),
        None,
    )
subreddits = ['news', 'politics', 'worldnews', 'Liberal', 'progressive', 'democrats', 'Conservative', 'The_Donald', 'Republican']

def tokenize_and_stem(sentence):
    """Tokenize *sentence* with NLTK and return the set of stemmed tokens."""
    return {stemmer.stem(token) for token in nltk.word_tokenize(sentence)}
# ## Generate specific words dictionaries for each event
generic_words = {stemmer.stem(word) for word in ['shooting', 'survivor', 'wounded', 'shot', 'deaths',
'died', 'injured', 'guns', 'killing', 'attack',
'massacre', 'victim']}
specific_words_raw = {
'Fort Hood shooting': ["<NAME>", "killeen", "texas"],
'Binghamton shootings': ["jiverly antares wong"],
'Geneva County massacre': ['<NAME>', 'kinston', 'samson', 'alabama'],
'Sandy Hook Elementary School shooting': ["<NAME>", "newton", "connecticut"],
'Aurora theater shooting': ['<NAME>', 'colorado'],
'Washington Navy Yard shooting': ['<NAME>'],
'San Bernardino attack': ['<NAME>', '<NAME>'],
'Orlando nightclub shooting': ['<NAME>'],
'Las Vegas shooting': ['<NAME>'],
'Sutherland Springs church shooting': ['<NAME>', "baptist", "texas"],
'Stoneman Douglas High School shooting': ['Marjory', 'Parkland', 'Florida'],
'Santa Fe High School shooting': ['<NAME>', "texas"]
}
# Build, per event, the set of stemmed event-specific keywords:
# split each phrase (plus the event name itself) into words, stem them,
# and drop any word already covered by the generic shooting vocabulary.
specific_words_list = {}
for k, v in specific_words_raw.items():
    specific_words_list[k] = []
    for phrase in v + [k]:
        specific_words_list[k].extend(phrase.split())
    specific_words_list[k] = set([stemmer.stem(w) for w in set(specific_words_list[k])])
    specific_words_list[k] -= generic_words
# +
def get_utt_convo_counts(corpus):
    """Count valid conversations and utterances per event.

    Args:
        corpus: a Corpus whose conversations and utterances carry
            'valid' and 'event' metadata.

    Returns:
        (convo_counts, utt_counts): two defaultdicts mapping event name to
        the number of valid conversations / utterances.

    Bug fix: the original computed both dicts but never returned them, so
    calling the function had no observable effect (and the later cells that
    display `convo_counts` / `utt_counts` could never work).
    """
    convo_counts = defaultdict(int)
    utt_counts = defaultdict(int)
    for convo in corpus.iter_conversations():
        if convo.meta['valid']:
            convo_counts[convo.meta['event']] += 1
    for utt in corpus.iter_utterances():
        if utt.meta['valid']:
            utt_counts[utt.meta['event']] += 1
    return convo_counts, utt_counts
# -
for subreddit in subreddits:
    corpus = Corpus(filename='/Users/calebchiam/Documents/{}-filtered-corpus'.format(subreddit))
    # Tag each conversation (and its utterances) with the event window it falls into.
    for convo in corpus.iter_conversations():
        convo_event = identify_timestamp_category(convo.meta['timestamp'])
        convo.meta['event'] = convo_event
        for utt in convo.iter_utterances():
            utt.meta['event'] = convo_event
    # Stem each thread title once up front.
    for convo in corpus.iter_conversations():
        convo.meta['stem_tokens'] = tokenize_and_stem(convo.meta['title'])
    # A thread is 'valid' when its title mentions the shooting either
    # generically or via event-specific keywords; propagate to utterances.
    for convo in corpus.iter_conversations():
        convo_event = convo.meta['event']
        title_tokens = convo.meta['stem_tokens']
        is_valid = (bool(title_tokens.intersection(generic_words))
                    or bool(title_tokens.intersection(specific_words_list.get(convo_event, {}))))
        convo.meta['valid'] = is_valid
        for utt in convo.iter_utterances():
            utt.meta['valid'] = is_valid
# ## Let's see a distribution of the counts
# NOTE(review): convo_counts / utt_counts are never assigned at top level —
# get_utt_convo_counts() above is never called (and returns nothing), so the
# next two cells raise NameError as written; confirm intent.
convo_counts
utt_counts
from tqdm import tqdm
# Stem the full text of every valid utterance (titles were handled above);
# skip utterances already processed in a previous run.
for utt in tqdm(list(corpus.iter_utterances())):
    if "stem_tokens" in utt.meta: continue
    if utt.meta['valid']:
        utt.meta['stem_tokens'] = tokenize_and_stem(utt.text)
    else:
        utt.meta['stem_tokens'] = None
# Persist the labelled corpus, then reload it to verify the round trip.
corpus.dump("politics-filtered-labelled", base_path="/Users/calebchiam/Documents")
corpus = Corpus(filename='/Users/calebchiam/Documents/politics-filtered-labelled')
| examples/politicization/corpus_filter_by_event.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3 (ipykernel)
# language: python
# name: python3
# ---
# # Config
# +
'''
Standard Configuration:
- use_gpu: (Bool) If False, training on CPU
- num_classes: (Int) Number of classes on
dataset + 1 (Class 0 represents
backround)
- test_batch_size: (Int) Batch size used
while evaluating over test_set
- dataset_path: Annotations file directory path.
Directory must contain files:
File Name: File Column Data:
- 'test.txt': (String) Path
- 'test_bbox.txt': (Int, Int, Int, Int) X1, Y1, X2, Y2
- 'test_cate.txt': (Int) Class
- 'train.txt': (String) Path
- 'train_bbox.txt': (Int, Int, Int, Int) X1, Y1, X2, Y2
- 'train_cate.txt': (Int) Class
- 'val.txt': (String) Path
- 'val_bbox.txt': (Int, Int, Int, Int) X1, Y1, X2, Y2
- 'val_cate.txt': (Int) Class
- load_model: (String) Saved model path.
Locates the model to evaluate.
'''
use_gpu = True  # evaluate on GPU; set False to run on CPU
num_classes = 50  # dataset classes + 1 (class 0 is background)
test_batch_size = 16  # batch size used while evaluating the test set
dataset_path = '../data/Annotations/'  # annotation layout described above
load_model = './checkpoints/RetinaNet_Jitter_and_Flip_lr_scheduled_9.pt'  # checkpoint to evaluate
# -
# # Dataset
from utils import dataset
# Build the test split from the annotation files, then sanity-check its
# size and visualize a few samples.
test_set = dataset.ClothingDataset(dataset_path, train='test')
len(test_set.imgs)
test_set.plot()
# # Evaluate
# +
import torch
import numpy as np
import matplotlib.pyplot as plt
from utils import trainer, evaluator
# -
model = torch.load(load_model)
# +
'''
Evaluator Builder
Creates an Evaluator Object with the given model and parameters.
An Evaluator Object is a simple implementation of NN evaluator for
object detection. It implements the ability plot real and predicted
class and box values. It also allows to calculate the classification
accuracy and the mean box IoU.
'''
evaluate = evaluator.Evaluator(model, test_set, n_classes=num_classes, batch_size=test_batch_size, use_gpu=use_gpu)
# +
'''
Show 10 images with their real and
predicted values.
'''
evaluate.plot()
# +
'''
Run over test_set to obtain the
classification accuracy and mean
box IoU.
'''
evaluate.evaluate()
| Test.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# language: python
# name: python3
# ---
# + [markdown] slideshow={"slide_type": "slide"}
# ## Correlation between Installs (log10) and Reviews (log10)
# + slideshow={"slide_type": "skip"}
df = pd.read_csv('3_new.csv')
df.tail()
# + slideshow={"slide_type": "skip"}
df['Reviews_log'] = np.log10(df['reviews'])
# NOTE(review): this assigns 0 to EVERY column of rows where Reviews_log is
# -inf (i.e. reviews == 0) — the whole row is clobbered, not just the log
# column; confirm this is intended.
df.loc[df.Reviews_log == float('-inf')] = 0
# + slideshow={"slide_type": "skip"}
col = ['Category', 'Rating', 'reviews', 'Installs', 'Reviews_log', 'Installs_log']
df1 = df[col]
# + [markdown] slideshow={"slide_type": "subslide"}
# ### Top 10 rows after sorting by Installs (log10) and Reviews (log10)
# + slideshow={"slide_type": "fragment"}
df1 = df1.sort_values(by=['Installs_log', 'Reviews_log'], ascending=False)
df1.head(10)
# + slideshow={"slide_type": "skip"}
df_or = pd.read_csv('googleplaystore.csv')
df_or.tail()
# + [markdown] slideshow={"slide_type": "subslide"}
# ### App names for the top 10 rows
# + slideshow={"slide_type": "fragment"}
df_or.iloc[[2544, 3943, 336, 381, 3904, 2604, 2545, 2611, 3909, 382]].App
# + slideshow={"slide_type": "skip"}
df_r = pd.read_csv('googleplaystore_user_reviews.csv')
df_r.tail()
# + [markdown] slideshow={"slide_type": "subslide"}
# ### Review data for Facebook (the app with the most installs and reviews), sorted by sentiment
# + slideshow={"slide_type": "fragment"}
df_facebook = df_r.loc[df_r["App"] == 'Facebook']
df_facebook_f = df_facebook.sort_values(by='Sentiment_Polarity', ascending=False)
df_facebook_f.head(10)
# + [markdown] slideshow={"slide_type": "subslide"}
# ### Sentiment distribution of Facebook reviews
# + slideshow={"slide_type": "skip"}
g = sns.pairplot(df_facebook, hue="Sentiment")
# + slideshow={"slide_type": "fragment"}
df_facebook["Sentiment"].value_counts().plot.pie(label='Sentiment', autopct='%1.0f%%', figsize=(2, 2))
# + [markdown] slideshow={"slide_type": "subslide"}
# ### Review data for the other apps was unavailable, so they could not be analyzed.
| python/EDA_practice/google-play-store-apps/0508_Review_Analysis.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3 (ipykernel)
# language: python
# name: python3
# ---
# # Glottolog stats dashboard
# ## report stats, monitor change
#
# record properties of ``treedb.sqlite3``
# loaded from [Glottolog](https://glottolog.org)
# [master repo](https://github.com/glottolog/glottolog) commit
# and monitor changes by diffing this
#
# <div style="text-align: right">
# <a href="https://nbviewer.jupyter.org/github/glottolog/treedb/blob/master/Stats.ipynb">
# latest version from GitHub in nbviewer
# </a>
# </div>
#
# ### Contents
# 1. [Clone the data repository](#Clone-the-data-repository)
# 2. [Use the repository root as source](#Use-the-repository-root-as-source)
# 3. [Set the database file and load it](#Set-the-database-file-and-load-it)
# 4. [Check the database](#Check-the-database)
# 5. [treedb.sqlite3](#treedb.sqlite3)
# 6. [print_dataset()](#print_dataset())
# 7. [\_\_dataset\_\_](#__dataset__)
# 8. [\_\_producer\_\_](#__producer__)
# 9. [treedb.raw](#treedb.raw)
# 10. [languoid](#languoid)
# 11. [macroarea](#macroarea)
# 12. [country](#country)
# 13. [altname](#altname)
# 14. [source](#source)
# 15. [classification](#classification)
# 16. [link](#link)
# 17. [endangerment](#endangerment)
# 18. [Example query](#example-query)
# +
# %matplotlib inline
import collections
import os
os.environ['SQLALCHEMY_WARN_20'] = 'true'
import pandas as pd
import sqlalchemy as sa
import matplotlib as mpl
import matplotlib.pyplot as plt
import treedb
import treedb.raw
treedb.configure_logging(log_sql=False)
treedb.print_versions()
# -
# ## Clone the data repository
# +
# %%time
GLOTTOLOG_TAG = 'v4.4'
TARGET = '../glottolog/'
treedb.checkout_or_clone(GLOTTOLOG_TAG, target=TARGET)
# -
# ## Use the repository root as source
treedb.set_root(TARGET)
next(treedb.iterfiles())
dict(treedb.iterlanguoids(limit=1))
# %time treedb.checksum(source='files')
# ## Set the database file and load it
treedb.set_engine('treedb.sqlite3')
# +
# %%time
engine = treedb.load(rebuild=False, exclude_raw=False)
engine
# -
# ## Check the database
engine.file_mtime()
engine.file_size(as_megabytes=True)
# %time engine.file_sha256()
# %time treedb.raw.checksum()
# %time treedb.raw.checksum(weak=True)
# %time treedb.checksum(source='tables')
# %time treedb.checksum(source='raw')
# %time treedb.check()
# ## treedb.sqlite3
# +
treedb.configure_logging(level='INFO', log_sql=True)
treedb.scalar(sa.select(sa.func.sqlite_version()))
# +
application_id = treedb.scalar(sa.text('PRAGMA application_id'))
assert application_id == 1122 == 0x462
assert application_id == sum(ord(c) for c in treedb.Dataset.__tablename__)
assert treedb.Dataset.__tablename__ == '__dataset__'
application_id
# +
from treedb import pd_read_sql as read_sql
read_sql(treedb.select_tables_nrows(), index_col='table_name')
# -
treedb.print_rows(treedb.backend.sqlite_master.select_views(),
format_='{name}')
# +
from treedb import print_table_sql as print_sql
print_sql('sqlite_master')
# -
# ## print_dataset()
treedb.print_dataset()
# ## \_\_dataset\_\_
# +
from treedb import Dataset
print_sql(Dataset)
# +
dataset, = treedb.iterrows(sa.select(Dataset), mappings=True)
pd.DataFrame.from_dict(dataset, orient='index',
columns=['__dataset__'])
# -
# ## \_\_producer\_\_
# +
from treedb import Producer
print_sql(Producer)
# +
producer, = treedb.iterrows(sa.select(Producer), mappings=True)
pd.DataFrame.from_dict(producer, orient='index',
columns=['__producer__'])
# -
# ## `treedb.raw`
# +
from treedb.raw import File, Option, Value
for model in (File, Option, Value):
print_sql(model)
# -
read_sql(sa.select(File).limit(5), index_col='id')
read_sql(sa.select(Option).limit(5), index_col='id')
read_sql(sa.select(Value).limit(5), index_col=['file_id', 'option_id'])
# +
select_file_values = (sa.select(Option.section, Option.option, Value.line, Value.value)
.select_from(File)
.filter_by(glottocode=sa.bindparam('glottocode'))
.join(Value).join(Option))
read_sql(select_file_values, params={'glottocode': 'abin1243'},
index_col=['section', 'option', 'line'])
# +
path_depth = File.path_depth()
select_path_depths = (sa.select(path_depth,
treedb.Languoid.level, sa.func.count().label('n_files'))
.join_from(File, treedb.Languoid, File.glottocode == treedb.Languoid.id)
.group_by(path_depth, treedb.Languoid.level)
.order_by('path_depth', 'level'))
_ = (read_sql(select_path_depths, index_col=['path_depth', 'level'])
.unstack(fill_value=0).droplevel(0, axis='columns')[list(treedb.LEVEL)])
_.plot.bar(stacked=True, figsize=(12, 3))
(100 * _.div(_.sum(axis='columns'), axis='rows')).plot.bar(stacked=True, figsize=(12, 3));
# +
file_size = File.size.label('file_size')
select_file_sizes = (sa.select(file_size, sa.func.count().label('n_files'))
.group_by(file_size)
.order_by('file_size'))
_ = read_sql(select_file_sizes, index_col='file_size')
(_.plot.area(figsize=(12, 3), logx=True)
.xaxis.set_major_formatter(mpl.ticker.ScalarFormatter()))
(_.index.to_series().describe()
.to_frame().transpose()[['count', 'min', 'max']])
# +
file_nvalues = (sa.select(File.glottocode,
sa.func.count(Value.option_id.distinct()).label('n_values'))
.join_from(File, Value)
.group_by(File.glottocode)
.alias('file_nvalues')
.c.n_values)
select_nvalues = (sa.select(file_nvalues, sa.func.count().label('n_files'))
.group_by(file_nvalues)
.order_by(file_nvalues))
_ = read_sql(select_nvalues, index_col='n_values')
_.plot.bar(figsize=(12, 3))
(_.index.to_series().describe()
.to_frame().transpose()[['count', 'min', 'max']])
# +
value_length = sa.func.length(Value.value).label('value_length')
select_value_length = (sa.select(value_length, sa.func.count().label('n_values'))
.group_by(value_length)
.order_by('value_length'))
_ = read_sql(select_value_length, index_col='value_length')
(_.plot.area(figsize=(12, 3), logx=True)
.xaxis.set_major_formatter(mpl.ticker.ScalarFormatter()))
(_.index.to_series().describe()
.to_frame().transpose()[['count', 'min', 'max']])
# -
# %time treedb.raw.print_stats()
# ## languoid
# +
from treedb import Languoid
print_sql(Languoid)
# +
from treedb import LEVEL
from treedb.models import LanguoidLevel
FAMILY, LANGUAGE, DIALECT = LEVEL
print_sql(LanguoidLevel)
read_sql(sa.select(LanguoidLevel).order_by('ordinal'), index_col='name')
# +
from treedb.models import PseudoFamily
print_sql(PseudoFamily)
read_sql(sa.select(PseudoFamily).order_by('name'), index_col='config_section')
# -
# %time treedb.print_languoid_stats()
read_sql(sa.select(Languoid).limit(5), index_col='id')
read_sql(sa.select(Languoid).order_by('id').limit(5), index_col='id')
read_sql(sa.select(Languoid).order_by('name').limit(5), index_col='id')
# +
# Cross-tabulate child level vs. parent level (languoid counts per pair).
Child, Parent = (sa.orm.aliased(Languoid, name=n) for n in ('child', 'parent'))
select_parent_levels = (sa.select(Child.level.label('child_level'),
                                  Parent.level.label('parent_level'),
                                  sa.func.count().label('n_languoids'))
                        .outerjoin_from(Child, Parent, Child.parent_id == Parent.id)
                        .group_by(Child.level, Parent.level)
                        .order_by('child_level', 'parent_level'))
# Pivot to a level-by-level table and add 'all' margins as a column and row.
# DataFrame.append() was removed in pandas 2.0 -> append the totals row with
# pd.concat instead.
(read_sql(select_parent_levels, index_col=['child_level', 'parent_level'])
 .unstack(fill_value=0).assign(all=lambda x: x.sum(axis='columns'))
 .pipe(lambda x: pd.concat([x, x.sum().rename('all').to_frame().T])))
# +
select_lang_nisos = (sa.select(Languoid.level.label('level'),
sa.func.count().label('n_languoids'),
sa.func.count(Languoid.iso639_3).label('n_isos'))
.group_by(Languoid.level)
.order_by('level'))
(read_sql(select_lang_nisos, index_col='level')
.assign(ratio=lambda x: 100 * x['n_isos'] / x['n_languoids']))
# +
select_lang_nlocations = (sa.select(Languoid.level.label('level'),
sa.func.count().label('n_languoids'),
sa.func.count(Languoid.latitude).label('n_locations'))
.group_by(Languoid.level)
.order_by('level'))
(read_sql(select_lang_nlocations, index_col='level')
.assign(ratio=lambda x: 100 * x['n_locations'] / x['n_languoids']))
# +
select_latlon = (sa.select(Languoid.latitude, Languoid.longitude)
.select_from(Languoid)
.filter_by(level=LANGUAGE))
latitudes, longitudes = zip(*treedb.iterrows(select_latlon))
plt.figure(figsize=(12, 6))
plt.axis([-180, 180, -90, 90])
plt.xticks(range(-180, 181, 60))
plt.yticks(range(-90, 91, 30))
plt.scatter(longitudes, latitudes, 1, 'black');
# +
Family, Child, family_child = treedb.Languoid.parent_descendant(parent_root=True,
parent_level=FAMILY)
n_languages = sa.func.count(Child.id).label('n_languages')
select_family_nlanguages = (sa.select(Family.id.label('family_id'), n_languages)
.select_from(family_child)
.where(Child.level == LANGUAGE)
.group_by(Family.id))
select_top_families = (select_family_nlanguages
.having(n_languages >= 100)
.order_by('n_languages'))
(read_sql(select_top_families, index_col='family_id')
.plot.barh());
# +
family_size = (select_family_nlanguages
.alias('family_nlanguages')
.c.n_languages.label('family_size'))
select_family_sizes = (sa.select(family_size, sa.func.count().label('n_families'))
.group_by(family_size)
.order_by('family_size'))
_ = read_sql(select_family_sizes, index_col='family_size')
(_.plot.area(figsize=(12, 3), logx=True)
.xaxis.set_major_formatter(mpl.ticker.ScalarFormatter()))
(_.index.to_series().describe()
.to_frame().transpose()[['count', 'min', 'max']])
# +
levels = (sa.union_all(*[sa.select(sa.literal(l).label('level')) for l in LEVEL])
.cte(name='levels'))
select_parent_nchildren = (sa.select(Parent.id.label('parent_id'),
Parent.level.label('parent_level'),
levels.c.level.label('child_level'),
sa.func.count(Child.id).label('n_children'))
.join_from(Parent, levels, sa.true())
.outerjoin(Child, sa.and_(Parent.id == Child.parent_id,
Child.level == levels.c.level))
.group_by(Parent.id, Parent.level, levels.c.level))
(read_sql(select_parent_nchildren)
.pivot_table(index='parent_level', columns='child_level', values='n_children',
aggfunc=['sum', 'max', 'mean'], fill_value=0))
# +
Parent, Child, parent_child = treedb.Languoid.parent_descendant()
select_parent_ndescendants = (sa.select(Parent.id.label('parent_id'),
Parent.level.label('parent_level'),
sa.func.count(Child.id).label('n_descendants'))
.select_from(parent_child)
.group_by(Parent.id, Parent.level)
.alias('parent_ndescendants'))
parent_level = select_parent_ndescendants.c.parent_level
n_descendants = select_parent_ndescendants.c.n_descendants
d_min = sa.func.min(n_descendants).label('min')
d_max = sa.func.max(n_descendants).label('max')
d_mean = (sa.func.sum(n_descendants).cast(sa.Float) / sa.func.count(n_descendants)).label('mean')
select_level_mean_descendants = (sa.select(parent_level, d_min, d_max, d_mean)
.group_by(parent_level)
.order_by(parent_level))
select_total_mean_descendants = sa.select(sa.literal('total').label('parent_level'),
d_min, d_max, d_mean)
select_mean_descendants = [select_level_mean_descendants, select_total_mean_descendants]
_ = pd.concat([read_sql(q, index_col='parent_level') for q in select_mean_descendants])
_.columns = pd.MultiIndex.from_product([['n_descendants'], _.columns])
_
# -
# ## macroarea
# +
from treedb.models import Macroarea, languoid_macroarea
print_sql(Macroarea)
print_sql(languoid_macroarea)
read_sql(sa.select(Macroarea).order_by('name'), index_col='config_section')
# +
select_macroarea_nlanguages = (sa.select(Macroarea.name.label('macroarea'),
sa.func.count().label('n_languages'))
.outerjoin_from(Macroarea, languoid_macroarea).join(Languoid)
.filter_by(level=LANGUAGE)
.group_by(Macroarea.name)
.order_by(sa.desc('n_languages')))
_ = read_sql(select_macroarea_nlanguages, index_col='macroarea')
_.plot.pie(y='n_languages')
_
# +
macroareas = sa.func.group_concat(Macroarea.name, ', ').label('macroareas')
select_multiarea_languages = (sa.select(Languoid.id, Languoid.name, macroareas)
.select_from(Languoid)
.filter_by(level=LANGUAGE)
.join(languoid_macroarea).join(Macroarea)
.group_by(Languoid.id)
.having(sa.func.count() > 1)
.order_by('id'))
assert read_sql(select_multiarea_languages).empty
# -
# ## country
# +
from treedb.models import Country, languoid_country
print_sql(Country)
print_sql(languoid_country)
# +
# Top 10 countries by number of languages spoken there.
select_country_nlanguages = (sa.select(Country.name.label('country'),
                                       sa.func.count().label('n_languages'))
                             .outerjoin_from(Country, languoid_country).join(Languoid)
                             .filter_by(level=LANGUAGE)
                             .group_by(Country.id)
                             .order_by(sa.desc('n_languages'))
                             .limit(10))
read_sql(select_country_nlanguages, index_col='country')
# +
# Flat (language, country) pairs, one row per association, ordered for a
# deterministic group_concat below.
select_lang_country = (sa.select(Languoid.id, Languoid.name, Country.id.label('country'))
                       .select_from(Languoid)
                       .filter_by(level=LANGUAGE)
                       .join(languoid_country).join(Country)
                       .order_by(Languoid.id, 'country')
                       .alias('lang_country'))
# Top 10 languages spoken in the most countries, with the country codes joined
# into a single comma-separated string.
countries = sa.func.group_concat(select_lang_country.c.country, ', ').label('countries')
select_multicountry_languages = (sa.select(select_lang_country.c.id,
                                           select_lang_country.c.name,
                                           sa.func.count().label('n_countries'),
                                           countries)
                                 .group_by(select_lang_country.c.id,
                                           select_lang_country.c.name)
                                 .having(sa.func.count() > 1)
                                 .order_by(sa.desc('n_countries'),
                                           select_lang_country.c.id)
                                 .limit(10))
read_sql(select_multicountry_languages, index_col='id')
# +
# Distribution of countries-per-language: the subquery counts countries for
# each language (outer join keeps languages with zero countries).
n_countries = (sa.select(Languoid.id, sa.func.count().label('n_countries'))
               .select_from(Languoid)
               .filter_by(level=LANGUAGE)
               .outerjoin(languoid_country)
               .group_by(Languoid.id)
               .alias('language_ncountries')
               .c.n_countries)
# Histogram: how many languages have exactly k countries.
select_lc_dist = (sa.select(n_countries, sa.func.count().label('n_languages'))
                  .group_by(n_countries)
                  .order_by('n_countries'))
_ = read_sql(select_lc_dist, index_col='n_countries')
_.plot.bar(figsize=(12, 3))
# Summary statistics of the same per-language counts, as a one-row frame.
(read_sql(sa.select(sa.literal('n_countries').label('value'),
                    sa.func.count().label('count'),
                    sa.func.sum(n_countries).label('sum'),
                    sa.func.min(n_countries).label('min'),
                    sa.func.max(n_countries).label('max'),
                    sa.func.avg(n_countries).label('mean')),
          index_col='value')
 .rename_axis(None))
# -
# ## altname
# +
from treedb.models import Altname, AltnameProvider
# Number of distinct languoids covered by each alternative-name provider.
select_provider_nlanguoids = (sa.select(AltnameProvider.name.label('altname_provider'),
                                        sa.func.count(sa.distinct(Altname.languoid_id)).label('n_languoids'))
                              .join_from(AltnameProvider, Altname)
                              .group_by(AltnameProvider.name)
                              .order_by('n_languoids', 'altname_provider'))
(read_sql(select_provider_nlanguoids, index_col='altname_provider')
 .plot.barh());
# +
# Distribution of altnames-per-languoid (outer join keeps languoids with none).
n_altnames = (sa.select(Languoid.id, sa.func.count().label('n_altnames'))
              .outerjoin_from(Languoid, Altname)
              .group_by(Languoid.id)
              .alias('languoid_naltnames')
              .c.n_altnames)
select_la_dist = (sa.select(n_altnames, sa.func.count().label('n_languoids'))
                  .group_by(n_altnames)
                  .order_by(n_altnames))
_ = read_sql(select_la_dist, index_col='n_altnames')
_.plot.area(figsize=(12, 3))
# One-row summary statistics of the per-languoid altname counts.
(read_sql(sa.select(sa.literal('n_altnames').label('value'),
                    sa.func.count().label('count'),
                    sa.func.sum(n_altnames).label('sum'),
                    sa.func.min(n_altnames).label('min'),
                    sa.func.max(n_altnames).label('max'),
                    sa.func.avg(n_altnames).label('mean')),
          index_col='value')
 .rename_axis(None))
# -
# ## source
# +
from treedb.models import Source, SourceProvider
# Source counts broken down by provider and by the languoid's level.
select_provider_nsources = (sa.select(SourceProvider.name.label('provider'),
                                      Languoid.level,
                                      sa.func.count().label('n_sources'))
                            .join_from(SourceProvider, Source).join(Languoid)
                            .group_by(SourceProvider.name, Languoid.level)
                            .order_by('provider', sa.desc('n_sources')))
read_sql(select_provider_nsources, index_col=['provider', 'level'])
# +
# Distribution of sources-per-languoid (count(Source.languoid_id) so that
# languoids without sources count as 0 under the outer join).
n_sources = (sa.select(Languoid.id,
                       sa.func.count(Source.languoid_id).label('n_sources'))
             .outerjoin_from(Languoid, Source)
             .group_by(Languoid.id)
             .alias('lang_nsources')
             .c.n_sources)
select_nsources_nlangs = (sa.select(n_sources, sa.func.count().label('n_languoids'))
                          .group_by(n_sources)
                          .order_by('n_languoids'))
_ = read_sql(select_nsources_nlangs, index_col='n_sources')
# Bar chart: languoids with no sources vs. at least one source.
(_.groupby(_.index != 0).sum().rename_axis('n_sources')
 .rename(index={False: '= 0', True: '> 0'}).plot.bar(figsize=(4, 3)))
# Log-x area plot of the nonzero part of the distribution.
(_.drop(0).plot.area(figsize=(8, 3), logx=True)
 .xaxis.set_major_formatter(mpl.ticker.ScalarFormatter()))
# One-row summary statistics of the per-languoid source counts.
(read_sql(sa.select(sa.literal('n_sources').label('value'),
                    sa.func.count().label('count'),
                    sa.func.sum(n_sources).label('sum'),
                    sa.func.min(n_sources).label('min'),
                    sa.func.max(n_sources).label('max'),
                    sa.func.avg(n_sources).label('mean')),
          index_col='value')
 .rename_axis(None))
# +
# Same sources-per-languoid distribution, but split by languoid level.
select_lang_nsources = (sa.select(Languoid.id, Languoid.level,
                                  sa.func.count(Source.languoid_id).label('n_sources'))
                        .outerjoin_from(Languoid, Source)
                        .group_by(Languoid.id, Languoid.level)
                        .alias('lang_nsources'))
select_ln_nlangs = (sa.select(select_lang_nsources.c.level,
                              select_lang_nsources.c.n_sources,
                              sa.func.count().label('n_languoids'))
                    .group_by(select_lang_nsources.c.level, select_lang_nsources.c.n_sources)
                    .order_by('n_languoids'))
# Pivot to one column per level; list(treedb.LEVEL) fixes the column order.
_ = read_sql(select_ln_nlangs).pivot(index='n_sources', columns='level', values='n_languoids')
(_.groupby(_.index != 0).sum()[list(treedb.LEVEL)].rename_axis('n_sources')
 .rename(index={False: '= 0', True: '> 0'}).plot.bar(figsize=(4, 3)))
(_.drop(0)[list(treedb.LEVEL)].plot.area(figsize=(8, 3), logx=True)
 .xaxis.set_major_formatter(mpl.ticker.ScalarFormatter()))
# +
from treedb.models import Bibfile, Bibitem
# How often each bibfile is referenced by a bibitem.
select_bibfile_nused = (sa.select(Bibfile.name.label('bibfile'), sa.func.count().label('n_used'))
                        .join_from(Bibfile, Bibitem)
                        .group_by(Bibfile.name)
                        .order_by(sa.desc('n_used')))
(read_sql(select_bibfile_nused, index_col='bibfile')
 .plot.bar(figsize=(12, 3)));
# -
# ## classification
# +
from treedb.models import ClassificationRef
# Classification references broken down by kind and languoid level.
select_cr_levels = (sa.select(ClassificationRef.kind, Languoid.level,
                              sa.func.count().label('n_classificationrefs'))
                    .join_from(ClassificationRef, Languoid)
                    .group_by(ClassificationRef.kind, Languoid.level)
                    .order_by('kind', 'level'))
read_sql(select_cr_levels, index_col=['kind', 'level']).unstack()
# +
# Distribution of classification refs per languoid (outer join keeps zeros).
n_crefs = (sa.select(Languoid.id, sa.func.count().label('n_crefs'))
           .outerjoin_from(Languoid, ClassificationRef)
           .group_by(Languoid.id)
           .alias('lang_ncrefs')
           .c.n_crefs)
select_lcr_dist = (sa.select(n_crefs, sa.func.count().label('n_languoids'))
                   .group_by(n_crefs)
                   .order_by(n_crefs))
_ = read_sql(select_lcr_dist, index_col='n_crefs')
_.plot.area(figsize=(12, 3))
# One-row summary statistics of the per-languoid ref counts.
(read_sql(sa.select(sa.literal('n_crefs').label('value'),
                    sa.func.count().label('count'),
                    sa.func.sum(n_crefs).label('sum'),
                    sa.func.min(n_crefs).label('min'),
                    sa.func.max(n_crefs).label('max'),
                    sa.func.avg(n_crefs).label('mean')),
          index_col='value')
 .rename_axis(None))
# -
# ## link
# +
from treedb.models import Link
print_sql(Link)
# +
# Number of links per URL scheme (http, https, ...), largest first.
select_scheme_nlinks = (sa.select(Link.scheme.label('link_scheme'), sa.func.count().label('n_links'))
                        .group_by(Link.scheme)
                        .order_by(sa.desc('n_links')))
(read_sql(select_scheme_nlinks, index_col='link_scheme')
 .plot.pie(y='n_links'));
# +
from urllib.parse import urlparse
# Count link target hostnames client-side (urlparse is not available in SQL).
hosts = collections.Counter(urlparse(url).hostname for url, in treedb.iterrows(sa.select(Link.url)))
(pd.DataFrame.from_dict(hosts, orient='index', columns=['n_links'])
 .sort_values(by='n_links')
 .plot.barh());
# -
# ## endangerment
# +
from treedb.models import Endangerment, EndangermentStatus, EndangermentSource
print_sql(Endangerment)
print_sql(EndangermentStatus)
print_sql(EndangermentSource)
read_sql(sa.select(EndangermentStatus).order_by('ordinal'), index_col='config_section')
# +
# Number of endangerment records per source, smallest first (barh reads
# bottom-up, so the largest bar ends on top).
e_source = EndangermentSource.name.label('source')
select_source_nendangerments = (sa.select(e_source, sa.func.count().label('n_endangerments'))
                                .join_from(Endangerment, EndangermentSource)
                                .group_by(e_source)
                                .order_by('n_endangerments'))
(read_sql(select_source_nendangerments, index_col='source')
 .plot.barh());
# -
# ## Example query
# Time the canonical example query by hashing its CSV export.
# %time treedb.hash_csv(treedb.get_example_query())
| Stats.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# name: python3
# ---
# + [markdown] id="view-in-github" colab_type="text"
# <a href="https://colab.research.google.com/github/alfafimel/IPWK9-CORE/blob/main/IPWK9_CORE_The_Naive_Bayes_Classifier_ELIZABETH_JOSEPHINE.ipynb" target="_parent"><img src="https://colab.research.google.com/assets/colab-badge.svg" alt="Open In Colab"/></a>
# + [markdown] id="PzyiSh09V1UE"
# # **DEFINING THE QUESTION**
# + [markdown] id="AOAnpTqyXj8J"
# ## **a) Specifying the Question**
#
# Implementing a Naive Bayes classifier on the provided dataset.
#
# From the given dataset, we can predict whether a given email message is spam or not.
# + [markdown] id="UDM4QNnoXsGc"
# ## **b) Defining the metrics for success**
#
# Successfully implementing a Naive Bayes classifier on the provided dataset and measuring its classification accuracy.
# + [markdown] id="L8ngyeNGX8Dj"
# ## **c) Understanding the context**
#
# Implementing a Naive Bayes classifier on the provided dataset.
# + [markdown] id="78AjZ_RgYITQ"
# ## **d) Recording the Experimental Design**
#
# 1. Define the question, the metric for success, the context, experimental design taken.
# 2. Read and explore the given dataset.
# 3. Define the appropriateness of the available data to answer the given question.
# 4. Find and deal with outliers, anomalies, and missing data within the dataset.
# 5. Perform univariate, and bivariate analysis recording your observations.
# 6. Randomly partition each dataset into two parts i.e 80 - 20 sets
# 7. Perform classification of the testing set samples using the Naive Bayes Classifier.
# Compute the accuracy (percentage of correct classification).
#
# > Report the confusion matrix of each classifier.
#
# 8. Repeat step 6 to step 7 twice, each time splitting the datasets differently i.e. 70-30, 60-40, then note the outcomes of your modeling.
# 9. Suggest and apply at least one of the optimization techniques that you learned earlier this week.
# Provide further recommendations to improve both classifiers.
# 10. Provide a recommendation based on your analysis.
#
# + [markdown] id="RbCQdWNqYR41"
# ## **e) Relevance of the data**
#
# The data used for this project is necessary for building a model that implements the Naive Bayes classifier
#
# [https://archive.ics.uci.edu/ml/machine-learning-databases/spambase/spambase.data].
# + [markdown] id="afVKIlhcWTYy"
# # **DATA ANALYSIS**
# + [markdown] id="Ihbtr2Xeqxqi"
# ### **Data Checking**
# + id="nfHhHUEXWWGe"
# importing libraries
import pandas as pd
import numpy as np
import seaborn as sns
import matplotlib
from matplotlib import pyplot as plt
# %matplotlib inline
# + id="NF2L72Qgv0IN" outputId="ea09e4fe-1a47-4687-e2a5-5df061570852" colab={"base_uri": "https://localhost:8080/", "height": 224}
# getting the dataset (UCI spambase: 57 numeric features + a spam/ham label)
# !wget https://archive.ics.uci.edu/ml/machine-learning-databases/spambase/spambase.data
# + id="L3zdMo9YY0_F" outputId="8532c26c-4bc6-445f-d429-a1983b9dd52a" colab={"base_uri": "https://localhost:8080/", "height": 224}
# reading datasets; the file ships without a header row, so columns are
# numbered 0..57 (57 is the class label)
df = pd.read_csv('spambase.data', header=None)
# previewing the top of the dataset
df.head()
# + id="levrJ5VDa09C" outputId="5b34fb04-8a8e-4651-b5ca-5c05b5afefd3" colab={"base_uri": "https://localhost:8080/", "height": 224}
# previewing the tail of the dataset
df.tail()
# + id="ee3TnljiZs3b" outputId="4535d32e-a54d-4229-ad33-985472cba03f" colab={"base_uri": "https://localhost:8080/", "height": 34}
# previewing the dataset dimensions (rows, columns)
df.shape
# + id="Now4SvbPhzmh" outputId="341122c8-1262-48a2-c344-c3ed85ae72f9" colab={"base_uri": "https://localhost:8080/", "height": 1000}
# printing the the number of unique values of the columns
print(df.nunique())
# + id="-KRbNRDuZ2MO" outputId="863abcaf-91f0-4346-be0c-3684223fdaf5" colab={"base_uri": "https://localhost:8080/", "height": 102}
# previewing the columns of the datasets
df.columns
# + id="VsAy7EFJqONi" outputId="2f4e9a48-9086-4e6c-db87-61fad871dfaa" colab={"base_uri": "https://localhost:8080/", "height": 1000}
# view summary of dataset (dtypes, non-null counts, memory)
df.info()
# + [markdown] id="TgnXOwbybVk0"
# ### **Data Cleaning**
# + id="r1t5fMIBaC2G" outputId="8cf96fb3-e312-4a51-9b5c-3296b4af4039" colab={"base_uri": "https://localhost:8080/", "height": 1000}
# checking count of null values per column
df.isnull().sum()
# + id="ayk88SdeZgiq" outputId="c2ccda5e-1aa8-4924-deb4-9b8b356a2750" colab={"base_uri": "https://localhost:8080/", "height": 34}
# checking for duplicates (True if any duplicated row exists)
df.duplicated().any()
# + id="EX729vCBaSlI" outputId="1f5ac154-c160-4d17-d145-c46b5e0b0f1b" colab={"base_uri": "https://localhost:8080/", "height": 34}
# checking count of duplicate entries
df.duplicated().sum()
# + id="mebJLnZwTCK3" outputId="4b03ecc0-ad33-49e8-a98b-760d037d99b5" colab={"base_uri": "https://localhost:8080/", "height": 34}
# dropping duplicates; df1 is the deduplicated frame used from here on
df1 = df.drop_duplicates()
# checking count of duplicate entries (should now be 0)
df1.duplicated().sum()
# + id="ZiUoOC1Da-Pm" outputId="f7ed639d-69a9-401a-955f-19b2c11045a9" colab={"base_uri": "https://localhost:8080/", "height": 34}
# previewing the dataset dimensions after deduplication
df1.shape
# + id="TFjjfmJxrf1u" outputId="7f6d5a8c-8089-4ba3-8cdf-dc677e0e20da" colab={"base_uri": "https://localhost:8080/", "height": 102}
# previewing the columns
df1.columns
# + id="OUhVqsXDQt4h" outputId="d2f22fd9-32e8-45fa-8f59-19dc8b408566" colab={"base_uri": "https://localhost:8080/", "height": 317}
# describing the numerical features
df1.describe()
# + id="IsqE00SxfUQ2" outputId="a07193ec-c96e-40ca-d9f7-34728c8f0c9c" colab={"base_uri": "https://localhost:8080/", "height": 34}
# checking for anomalies in column 1: cross-check the IQR computed with
# pandas .quantile() against the one from numpy.percentile()
q11 = df1[1].quantile(.25)
q31 = df1[1].quantile(.75)
iqr11 = q31 - q11
iqr11
##
q11, q31 = np.percentile(df1[1], [25, 75])
iqr = q31 - q11
# Tukey fences for column 1
l_bound = q11 - (1.5*iqr)
u_bound = q31 + (1.5 * iqr)
print(iqr11, iqr)
# there are no anomalies in the data
# NOTE(review): l_bound/u_bound are computed but never compared against the
# data here, so the "no anomalies" conclusion is not actually verified — confirm.
# + id="OrTAc8jjRSxL" outputId="b33552d8-008d-4e30-92b6-17f3544c9e95" colab={"base_uri": "https://localhost:8080/", "height": 221}
# previewing the column dtypes
df1.dtypes
# + id="4Pa5h1nkbicy"
# checking for outliers
# The per-column boxplot sketch below was left commented out (the dataset has
# too many unnamed columns for a readable grid of boxplots).
# saving the column names onto a dictionary
#columns_dict = {1: 1, 2: 2, 3: 3, 4: 4,...}
#plt.figure(figsize=(20,30))
# make a boxplot for each numerical column
#for variable,i in columns_dict.items():
#plt.subplot(5,4,i)
#plt.boxplot(df1[variable])
#plt.title(variable)
#plt.show()
# + id="5_hXZ3BEqsQ2" outputId="43a9bd65-5e55-4bd2-cb34-82e91b74305d" colab={"base_uri": "https://localhost:8080/", "height": 204}
# view summary statistics in numerical variables
# Outliers in numerical variables
print(round(df1.describe(),2))
# + id="rCfJQQixbsYY" outputId="243a5b9c-8a4e-47a0-8d66-62a637c3de9c" colab={"base_uri": "https://localhost:8080/", "height": 51}
# Outlier handling via Tukey's IQR fences: a row counts as an outlier when any
# of its values falls outside [Q1 - 1.5*IQR, Q3 + 1.5*IQR] for its column.
q_low = df1.quantile(0.25)
q_high = df1.quantile(0.75)
spread = q_high - q_low
lower_fence = q_low - 1.5 * spread
upper_fence = q_high + 1.5 * spread
is_outlier_row = ((df1 < lower_fence) | (df1 > upper_fence)).any(axis=1)
df2 = df1[~is_outlier_row]
# Compare the filtered shape against the original one.
print(df2.shape)
print(df1.shape)
# + [markdown] id="nCU3cKdt2NSG"
# without the outliers, the dataset is greatly reduced and would not be useful for the analysis and proper conclusions. as such, i will not work with the dataset having no outliers.
#
# >the dataset in use is df1 ~ the data with outliers
#
#
#
#
# + [markdown] id="rOpN6rL2WOYG"
# # **EXPLORATORY DATA ANALYSIS**
# + [markdown] id="HGR6EmNRWZcP"
# ## **a) Univariate Analysis**
# + id="83MhYytcqXP-" outputId="d736bd88-001c-47e9-e522-a366a75f3dd4" colab={"base_uri": "https://localhost:8080/", "height": 1000}
# Frequency distribution of values in variables.
# BUG FIX: count on the deduplicated frame df1 — the loop iterated df1.columns
# but printed counts from the raw df, whose duplicate rows inflate the tallies.
for var in df1.columns:
    print(df1[var].value_counts())
# + id="0lmPzC2XcG1q" outputId="9fd9c18b-5c62-4695-c5e0-2cc78c37fff6" colab={"base_uri": "https://localhost:8080/", "height": 317}
# describing the statistical features of the data
df1.describe()
# + [markdown] id="Y2SvoEW-6hRJ"
# >plotting histograms was inappropriate for this dataset considering the number of columns and the untitled columns.
# + id="wfngrstu_E9_"
# since there were no column heads in the dataset, interpreting the correlation coefficients and plotted bivariates would be difficult
# especially with the missing headers. therefore i did not perform both the multivariate and bivariate analysis of the unstructured data.
# + [markdown] id="vGDWSTWwgOZz"
# # **IMPLEMENTING THE SOLUTION**
# + id="KWLD_MCaDWzf"
# the exercise expects us to implement a Naive Bayes classifier.
# it is an experiment that demands the metrics be calculated carefully and
# all observations noted.
# therefore, after splitting the dataset into two parts i.e 80 - 20 sets,
# we have to further make conclusions based on second and third experiments with
# different partitioning schemes 70-30, 60-40.
# this experiment expects a computation of the accuracy matrix which is the
# percentage of correct classification
# it is then required that the confusion matrix be calculated and
# optimization done on the models.
# the whole process is as below
# + id="MIWcvpzXEGrd"
# gaussian: plotting imports for the standard-normal illustration below
import matplotlib.pyplot as plt
import seaborn as sns
# %matplotlib inline
from scipy.stats import norm
# + id="zt4Vaa34EKvh" outputId="445eb70e-a3a9-4e48-b25c-6c23eda7dbb0" colab={"base_uri": "https://localhost:8080/", "height": 282}
# Standard normal pdf with a vertical marker at x = 1.
x = np.linspace(-5, 5)
y = norm.pdf(x)
plt.plot(x, y)
plt.vlines(ymin=0, ymax=0.4, x=1, colors=['red'])
# + id="1loiXbvBIB-w" outputId="85b5214d-1b43-4818-88b0-15255780522e" colab={"base_uri": "https://localhost:8080/", "height": 439}
# Cast every column to int.
# NOTE(review): the spambase word-frequency features are fractional, so
# astype(int) truncates most of them to 0 — confirm this is intended before
# trusting the downstream models.
df3 = df1.astype(int)
df3
# + [markdown] id="hC0su-SNDFbG"
# ## **PART 1: 80:20 partition**
# + id="vfy7gXOg81_Z"
# importing the required libraries
from sklearn.model_selection import train_test_split
from sklearn import feature_extraction, model_selection, naive_bayes, metrics, svm
import numpy as np
from sklearn.naive_bayes import BernoulliNB
# + id="9PcgrJgm9lB0"
# preprocessing: features are every column except the last; the target is the
# final column of the spambase data (1 = spam, 0 = not spam).
# BUG FIX: the target used to be column 5, which is a word-frequency feature
# (and part of X) — the class label lives in the last column.
X = df3.iloc[:, :-1].values
y = df3.iloc[:, -1].values
# + id="4jRblsMN9lB8"
# splitting the dataset into training and test sets
# Train using 80% of the data (hold out 20% for evaluation).
from sklearn.model_selection import train_test_split
X_train, X_test, y_train, y_test = train_test_split(X, y, test_size=0.20)
# + id="GruSoOVc9lCA" outputId="28558009-92ec-4457-d383-57e4a7bf5d86" colab={"base_uri": "https://localhost:8080/", "height": 85}
# check the shapes of the train and test sets
print(X_train.shape)
print(y_train.shape)
print(X_test.shape)
print(y_test.shape)
# + id="Nek1mfaa9lCJ" outputId="0445215d-333d-441f-94d0-22c8e42dc77e" colab={"base_uri": "https://localhost:8080/", "height": 238}
# feature scaling
# BUG FIX: StandardScaler centers the data, producing negative values, and
# MultinomialNB (fitted below) raises ValueError on negative input. MinMaxScaler
# rescales each feature to [0, 1] instead, keeping the data non-negative.
from sklearn.preprocessing import MinMaxScaler
scaler = MinMaxScaler()
# learn per-feature min/max from the training set only (no test-set leakage)
scaler.fit(X_train)
X_train = scaler.transform(X_train)
X_test = scaler.transform(X_test)
X_train
# + id="FT34nr0s9lCO"
# Fitting a Multinomial Naive Bayes classifier to the training set
from sklearn.naive_bayes import MultinomialNB
# instantiating the model
model = MultinomialNB()
# fitting the model to the training set
model.fit(X_train, y_train)
# + id="QZ94gJku9lCU" outputId="48237d22-2bc9-45e6-dce1-7dd3fe5939bc" colab={"base_uri": "https://localhost:8080/", "height": 680}
# Predicting the test set results.
# BUG FIX: the fitted estimator is bound to `model`; the name `classifier`
# was never defined and raised NameError.
y_pred = model.predict(X_test)
y_pred
# + id="sJzpcE7f9lCZ" outputId="142c1523-cb43-4992-8c4c-e89f07f8b02b" colab={"base_uri": "https://localhost:8080/", "height": 51}
# evaluating the algorithm: confusion matrix (rows = true class, cols = predicted)
from sklearn.metrics import classification_report, confusion_matrix
print(confusion_matrix(y_test, y_pred))
# keep the matrix for later inspection
cm = confusion_matrix(y_test, y_pred)
# + id="s_3vK7dI9lCd" outputId="4ef296e6-7b9e-4295-d0c5-1e8a67c59749" colab={"base_uri": "https://localhost:8080/", "height": 170}
# Classification metrics: per-class precision, recall and F1.
print(classification_report(y_test, y_pred))
# + id="zAmDkj2_9lCl" outputId="aac4c6d2-d850-4ef6-d9e4-b77747842bde" colab={"base_uri": "https://localhost:8080/", "height": 51}
# Checking the accuracy score (fraction of correctly classified test samples).
from sklearn.metrics import accuracy_score
print('Accuracy: ' + str(accuracy_score(y_test, y_pred)))
print('Model accuracy score: {0:0.4f}'.format(accuracy_score(y_test, y_pred)))
## the accuracy of this evaluation is less than 1
# + [markdown] id="ir19M-U2K1qa"
# ## **PART 2: 70:30 partition**
# + id="TFFBP_UbKju3"
# preprocessing: features are every column except the last; the target is the
# final column of the spambase data (1 = spam, 0 = not spam).
# BUG FIX: the target used to be column 5 (a word-frequency feature already in
# X) — the class label lives in the last column.
X = df3.iloc[:, :-1].values
y = df3.iloc[:, -1].values
# + id="SSogo79ZKju-"
# splitting the dataset into training and test sets
# Train using 70% of the data (hold out 30% for evaluation).
from sklearn.model_selection import train_test_split
X_train, X_test, y_train, y_test = train_test_split(X, y, test_size=0.30)
# + id="kDCn396CKjvJ" outputId="d7b87422-2546-4036-9b6d-be46065f96ea" colab={"base_uri": "https://localhost:8080/", "height": 85}
# check the shapes of the train and test sets
print(X_train.shape)
print(y_train.shape)
print(X_test.shape)
print(y_test.shape)
# + id="MStlbTExKjvP" outputId="7e6d7ab3-ae64-428d-d22f-2429d8f1b832" colab={"base_uri": "https://localhost:8080/", "height": 238}
# feature scaling
# BUG FIX: StandardScaler centers the data, producing negative values, and
# MultinomialNB (fitted below) raises ValueError on negative input. MinMaxScaler
# rescales each feature to [0, 1] instead, keeping the data non-negative.
from sklearn.preprocessing import MinMaxScaler
scaler = MinMaxScaler()
# learn per-feature min/max from the training set only (no test-set leakage)
scaler.fit(X_train)
X_train = scaler.transform(X_train)
X_test = scaler.transform(X_test)
X_train
# + id="KqN_BCIAKjvV"
# Fitting a Multinomial Naive Bayes classifier to the training set
from sklearn.naive_bayes import MultinomialNB
# instantiating the model
model = MultinomialNB()
# fitting the model to the training set
model.fit(X_train, y_train)
# + id="tzIUwJGDKjva" outputId="03b1c302-cb99-4a45-c942-602fa7824252" colab={"base_uri": "https://localhost:8080/", "height": 34}
# Predicting the test set results.
# BUG FIX: the fitted estimator is bound to `model`; the name `classifier`
# was never defined and raised NameError.
y_pred = model.predict(X_test)
y_pred
# + id="zlptmvUZKjvk" outputId="0b8ccac3-5241-4f19-9c41-9bda80110778" colab={"base_uri": "https://localhost:8080/", "height": 68}
# evaluating the algorithm: confusion matrix (rows = true class, cols = predicted)
from sklearn.metrics import classification_report, confusion_matrix
print(confusion_matrix(y_test, y_pred))
# keep the matrix for later inspection
cm = confusion_matrix(y_test, y_pred)
# + id="TzLaO_lBKjvv" outputId="8ab87b7a-18fa-44ca-c3bd-e9bfa48448f8" colab={"base_uri": "https://localhost:8080/", "height": 187}
# Classification metrics: per-class precision, recall and F1.
print(classification_report(y_test, y_pred))
# + id="jX40bzI9Kjv1" outputId="b9272d20-9ac0-4d73-be20-0335d02ccaa6" colab={"base_uri": "https://localhost:8080/", "height": 51}
# Checking the accuracy score (fraction of correctly classified test samples).
from sklearn.metrics import accuracy_score
print('Accuracy: ' + str(accuracy_score(y_test, y_pred)))
print('Model accuracy score: {0:0.4f}'.format(accuracy_score(y_test, y_pred)))
## the accuracy of this evaluation is less than 1
# + [markdown] id="xMyO5jtJK5Ee"
# ## **PART 3: 60:40 partition**
# + id="uagdC_WtKjAh"
# importing the required libraries
from sklearn.model_selection import train_test_split
from sklearn import feature_extraction, model_selection, naive_bayes, metrics, svm
import numpy as np
from sklearn.naive_bayes import BernoulliNB
# + id="IzsTMaCsKjAz"
# preprocessing: features are every column except the last; the target is the
# final column of the spambase data (1 = spam, 0 = not spam).
# BUG FIX: the target used to be column 5 (a word-frequency feature already in
# X) — the class label lives in the last column.
X = df3.iloc[:, :-1].values
y = df3.iloc[:, -1].values
# + id="g5NCdiE-KjBB"
# splitting the dataset into training and test sets
# Train using 60% of the data (hold out 40% for evaluation).
from sklearn.model_selection import train_test_split
X_train, X_test, y_train, y_test = train_test_split(X, y, test_size=0.40)
# + id="5VjN9LgJKjBK" outputId="6394267b-9c09-4313-9689-c6518cb709d4" colab={"base_uri": "https://localhost:8080/", "height": 85}
# check the shapes of the train and test sets
print(X_train.shape)
print(y_train.shape)
print(X_test.shape)
print(y_test.shape)
# + id="_bKPmzv6KjBX" outputId="d1172e1c-49f7-4ef2-fd2a-1eabcce30376" colab={"base_uri": "https://localhost:8080/", "height": 238}
# feature scaling
# BUG FIX: StandardScaler centers the data, producing negative values, and
# MultinomialNB (fitted below) raises ValueError on negative input. MinMaxScaler
# rescales each feature to [0, 1] instead, keeping the data non-negative.
from sklearn.preprocessing import MinMaxScaler
scaler = MinMaxScaler()
# learn per-feature min/max from the training set only (no test-set leakage)
scaler.fit(X_train)
X_train = scaler.transform(X_train)
X_test = scaler.transform(X_test)
X_train
# + id="bZ0O-ipBKjBh"
# Fitting a Multinomial Naive Bayes classifier to the training set
from sklearn.naive_bayes import MultinomialNB
# instantiating the model
model = MultinomialNB()
# fitting the model to the training set
model.fit(X_train, y_train)
# + id="l7BEBM2WKjBp" outputId="023f8853-cde9-4f43-95a8-9f99ba90ec02" colab={"base_uri": "https://localhost:8080/", "height": 34}
# Predicting the test set results.
# BUG FIX: the fitted estimator is bound to `model`; the name `classifier`
# was never defined and raised NameError.
y_pred = model.predict(X_test)
y_pred
# + id="2aOmBHGWKjB1" outputId="9d209668-69d7-43e9-855b-d3b82585e6e6" colab={"base_uri": "https://localhost:8080/", "height": 85}
# evaluating the algorithm: confusion matrix (rows = true class, cols = predicted)
from sklearn.metrics import classification_report, confusion_matrix
print(confusion_matrix(y_test, y_pred))
# keep the matrix for later inspection
cm = confusion_matrix(y_test, y_pred)
# + id="SK3ZspjdKjCA" outputId="9a319c4d-800d-4841-d033-752f173317dc" colab={"base_uri": "https://localhost:8080/", "height": 258}
# Classification metrics: per-class precision, recall and F1.
print(classification_report(y_test, y_pred))
# + id="J6fl5atlKjCX" outputId="6bbb23a9-a286-4fa3-ede5-1680e5f94ae4" colab={"base_uri": "https://localhost:8080/", "height": 51}
# Checking the accuracy score (fraction of correctly classified test samples).
from sklearn.metrics import accuracy_score
print('Accuracy: ' + str(accuracy_score(y_test, y_pred)))
print('Model accuracy score: {0:0.4f}'.format(accuracy_score(y_test, y_pred)))
## the accuracy of this evaluation is less than 1
# + [markdown] id="o_WyzsZ6xqZN"
# ## **`Hyperparameter Optimization using GridSearch CV`**
# + id="38KWR3OXzTCe" outputId="cc177329-34fa-4075-d937-54d70847760b" colab={"base_uri": "https://localhost:8080/", "height": 275}
# import GridSearchCV
from sklearn.model_selection import GridSearchCV
# BUG FIX: the original searched `n_neighbors` (a KNN parameter) on the
# undefined name `classifier`. The estimator in this notebook is MultinomialNB
# (bound to `model`), whose tunable hyperparameter is the additive smoothing
# strength `alpha`.
param_grid = {'alpha': [0.001, 0.01, 0.1, 0.5, 1.0, 2.0, 5.0, 10.0]}
print(param_grid)
# 10-fold cross-validated search over alpha, scored by accuracy
grid_search = GridSearchCV(model, param_grid, cv=10, scoring='accuracy')
grid_search.fit(X_train, y_train)
# + id="RG8dfYOO7Epk" outputId="f904ba89-f7fd-41a7-9923-328ab87fb13c" colab={"base_uri": "https://localhost:8080/"}
# examining the best model
# best score achieved during the GridSearchCV (mean CV accuracy on the
# training folds, not the held-out test set)
print('GridSearch CV best score : {:.4f}\n\n'.format(grid_search.best_score_))
# print parameters that give the best results
print('Parameters that give the best results :','\n\n', (grid_search.best_params_))
# print estimator that was chosen by the GridSearch
print('\n\nEstimator that was chosen by the search :','\n\n', (grid_search.best_estimator_))
# + id="k-_silxj7nSh" outputId="621d6a51-5d49-4488-85a3-f597f8c0ee53" colab={"base_uri": "https://localhost:8080/", "height": 34}
# calculating GridSearch CV score on the held-out test set
print('GridSearch CV score on test set: {0:0.4f}'.format(grid_search.score(X_test, y_test)))
# + [markdown] id="LHDmNbHsx6aF"
# ## **Results and Conclusion**
# + id="ZyJgE3kV1His"
| IPWK9_CORE_The_Naive_Bayes_Classifier_ELIZABETH_JOSEPHINE.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python [default]
# language: python
# name: python3
# ---
# + nbpresent={"id": "6633f94f-8510-4c68-883a-69c232acb251"}
# %pylab inline
# Default to wide figures for the plots in this notebook.
pylab.rcParams['figure.figsize'] = (16.0, 8.0)
# + [markdown] nbpresent={"id": "94199ad0-72b2-4e96-bb82-54469231dfd2"} slideshow={"slide_type": "slide"}
# # Drawing from multivariate distributions
# + [markdown] nbpresent={"id": "d15badd1-3763-4623-9720-b27c52b4c18c"} slideshow={"slide_type": "slide"}
# ## Draws from the multivariate normal distribution
# + [markdown] nbpresent={"id": "afe305e4-cc23-4dcf-b361-7bb59405973c"} slideshow={"slide_type": "subslide"}
# Draws from a multivariate normal distribution
# $$ N(\mathbf{\mu}, \mathbf{\Sigma}) $$
# can be generated by
#
# 1) Calculate the Cholesky decomposition $\mathbf{\Sigma} = \mathbf{R}^T\mathbf{R}$
# ``` python
# from numpy.linalg import cholesky
# R = cholesky(Sigma)
# ```
#
# 2) Generate standard normally distributed values $\mathbf{Z}$
#
# 3) Evaluate $$ \mathbf{X} = \mathbf{\mu 1} + \mathbf{RZ}$$
#
# + [markdown] nbpresent={"id": "560ba18b-a397-427e-900d-c125cb782314"} slideshow={"slide_type": "subslide"}
# ### Exercise 2.1
#
# Draw 1000 samples from the bivariate distribution
# $$ N\left(\left( \begin{array}{c}
# 0.2 \\ -1.0
# \end{array}\right),
# \left(\begin{array}{cc}
# 0.01 & -0.014 \\ -0.014 & 0.04
# \end{array}\right)
# \right)
# $$
# + nbpresent={"id": "9a6bb5bb-bd69-4ad9-8a5e-8157fc83bb45"}
# Number of samples to draw in the exercises below.
draws = 1000
# + [markdown] nbpresent={"id": "a44c6e96-2cf8-47f5-ba9d-3751d8e2a3ce"}
# Draws from the multivariate normal distribution can more easily generated using the built-in **scipy** functions
#
# ```python
# from scipy.stats import multivariate_normal
# dist = multivariate_normal(mu, Sigma)
# X = dist.rvs(size)
# ```
# **Note**
# Scipy *rvs* functions return arrays of shape (number of draws, size of mean)
# + [markdown] nbpresent={"id": "422e5b41-f127-447e-af89-2fa297116aee"}
# ### Exercise 2.2
#
# Repeat Exercise 2.1 with the built-in scipy function and compare the results.
# + nbpresent={"id": "58b57b12-034f-4d2f-9478-402383c537b2"}
from scipy.stats import multivariate_normal
# Mean vector and covariance matrix from Exercise 2.1.
mu = array([0.2, -1.0])
Sigma = array([[0.01, -0.014],
               [-0.014, 0.04]])
dist = multivariate_normal(mu, Sigma)
# CONSISTENCY FIX: Exercise 2.2 repeats Exercise 2.1 (1000 draws) for
# comparison, so reuse the `draws` count defined above instead of the
# hard-coded 10000 which made the two scatter plots incomparable.
X = dist.rvs(draws)
scatter(X[:,0], X[:,1]);
# + [markdown] nbpresent={"id": "114eed08-abb9-4974-84b9-840b6946c17a"}
# ## Draws using a copula function
# + [markdown] nbpresent={"id": "deb5d6f6-54f3-41a2-8695-daaf6e2e0746"}
# In many practical cases knowledge about the input quantities is available in terms of their individual distributions and a correlation coefficient. This is insufficient to assign a unique multivariate distribution. Therefore, a copula function can be defined
# $$
# C(\mu_1,\ldots,\mu_N) = \mathbb{P} \left[ X_1\leq G_{X_1}^{-1}(\mu_1)\ldots,X_N\leq G_{X_N}^{-1}(\mu_N) \right]
# $$
# + [markdown] nbpresent={"id": "916a715f-8ed5-4e9a-947e-80870201f832"}
# ### Example copula functions
#
# * all input quantities are mutually independent
# $$ C(\mu_1,\ldots,\mu_N) = \prod_{k=1}^N \mu_k $$
#
# * the input quantities are correlated with $\rho\equiv 1$
# $$ C(\mu_1,\ldots,\mu_N) = \min_{k} \mu_k $$
#
# * two input quantities are correlated with $\rho$
# $$ C(\mu_1,\mu_2) = F_2(G_{X_1}^{-1}(\mu_1),G_{X_2}^{-1}(\mu_2),\rho) $$
# + [markdown] nbpresent={"id": "26926f88-c7af-498c-a3f0-ef345f148616"}
# The copula can be used to incorporate the correlation coefficient and the individual distributions $g_{X_i}$ to formally define a multivariate distribution.
# + [markdown] nbpresent={"id": "6ae6845d-dd4d-4a58-bd45-af205d165290"}
# #### Example
# Input quantities $X_1,X_2$ with correlation coefficient $\rho$ and
# \begin{align}
# X_1 \sim & N(\mu, \sigma) \\
# X_2 \sim & U(a, b)
# \end{align}
#
# Use bivariate normal copula function:
#
# 1) Draw from bivariate standard normal distribution
# $$ z \sim N\left(\mathbf{0}, \left(\begin{array}{cc}
# 1.0 & \rho \\ \rho & 1.0
# \end{array}\right) \right)
# $$
# + [markdown] nbpresent={"id": "790d690a-73a9-450c-9b80-07b15e384f7f"}
# 2) Evaluate cumulative distribution function of the copula
# \begin{align}
# \zeta_1 =& G_N(z_1) \\
# \zeta_2 =& G_N(z_2)
# \end{align}
#
# 3) Evaluate inverse cumulative distribution functions
# \begin{align}
# x_1 =& G_{X_1}^{-1}(\zeta_1) \\
# x_2 =& G_{X_2}^{-1}(\zeta_2)
# \end{align}
# + [markdown] nbpresent={"id": "8f40ee48-ae2b-4921-a0d4-82c441e7afd1"}
# ### Exercise 2.3
#
# Consider the input quantities $X_1,X_2$ with
# * $X_1$ has best estimate 0.2 with uncertainty of 50%
# * $X_2$ has best estimate -1.0 with uncertainty of 20%
# * correlation between $X_1$ and $X_2$ is $\rho=-0.7$
#
# Generate 1000 random draws using a bivariate normal copula function.
# + nbpresent={"id": "6defb90f-0ebb-4427-bf98-a8bcdc3a7f95"}
from scipy.stats import norm, multivariate_normal

# Exercise 2.3: draw 1000 correlated samples for X1 ~ N(0.2, 0.1) and
# X2 ~ N(-1.0, 0.2) with correlation rho = -0.7 via a bivariate normal copula.
mu1 = 0.2
umu1 = 0.1   # 50% uncertainty of the best estimate 0.2
mu2 = -1.0
umu2 = 0.2   # 20% uncertainty of the best estimate -1.0
rho = -0.7   # (a duplicate assignment of rho was removed here)

# 1) Draw from the bivariate standard normal distribution
dist_z = multivariate_normal(array([0,0]), array([[1, rho], [rho, 1]]))
z = dist_z.rvs(1000) # shape(z) = (1000,2)
# 2) Evaluate the standard-normal CDF componentwise (copula step)
zeta_1 = norm.cdf(z[:,0])
zeta_2 = norm.cdf(z[:,1])
# 3) Push through the inverse CDFs of X1 and X2
x1 = norm.ppf(zeta_1, loc=mu1, scale=umu1)
x2 = norm.ppf(zeta_2, loc=mu2, scale=umu2)

# Visualize the joint scatter and the marginal histograms
# (figure/scatter/hist come from the notebook's %pylab inline).
figure(1)
scatter(x1,x2)
figure(2)
subplot(211)
hist(x1)
subplot(212)
hist(x2);
# + [markdown] nbpresent={"id": "00917eae-205a-4172-975d-cf1ec81053c9"}
# ### Exercise 2.4
#
# Consider the input quantities $X_1, X_2$ with
#
# * $X_1$ has best estimate $x_1=2.4$ with expanded uncertainty $U=0.4 (k=2)$ under normality assumption
#
# * $X_2$ is located in $[-1.5, 1.5]$
#
# * $X_1, X_2$ are correlated with $\rho = 0.4$
#
# Draw 1000 samples from their joint probability distribution using a normal distribution copula function.
# + nbpresent={"id": "500da934-0b4d-41f7-a065-5d55162fc05e"}
from scipy.stats import norm, uniform, multivariate_normal

# Correlation between X1 and X2 as stated in Exercise 2.4.
# BUG FIX: was 0.7, apparently copied from the previous exercise;
# the exercise text specifies rho = 0.4.
rho = 0.4
| .ipynb_checkpoints/03 Drawing from multivariate distributions-checkpoint.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# language: python
# name: python3
# ---
# Demo of the Dota 2 knowledge base used for entity linking.
from dota2_knowledge_base import Dota2KnowledgeBase

# Build the knowledge base from local JSON dumps of players, teams and tournaments.
dota2_kb = Dota2KnowledgeBase(
    player_file_name="dota2_players.json",
    team_file_name="dota2_teams.json",
    tournament_file_name="dota2_tournaments.json"
)

# Look up players by handle / name fragment.
# NOTE(review): matching semantics (exact vs. fuzzy) are defined in the
# project module Dota2KnowledgeBase — confirm there.
dota2_kb.get_matching_player('1437')

dota2_kb.get_matching_player('Artour')
| Entity_Linking/dota2_kb_demo.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# language: python
# name: python3
# ---
# ## Module Import
import numpy as np
import pandas as pd
import matplotlib.pyplot as plt
import sklearn
# ## Assignment 1.
# 우리는 붓꽃의 **꽃받침의 길이, 꽃받침의 너비, 꽃잎의 길이, 꽃잎의 너비**를 통해서, **꽃의 종류**를 구분 해 볼 것입니다. **Input**으로 주어 지는 데이터는 다음과 같습니다.
#
# - Sepal Length: 꽃받침의 길이 정보이다.
# - Sepal Width: 꽃받침의 너비 정보이다.
# - Petal Length: 꽃잎의 길이 정보이다.
# - Petal Width: 꽃잎의 너비 정보이다.
# - Species: 꽃의 종류 정보이다. **setosa / versicolor / virginica** 의 3종류로 구분된다.
# +
from sklearn.datasets import load_iris
from sklearn.model_selection import train_test_split

# Load the iris dataset and hold out 33% as the test split.
# random_state is fixed so the split is reproducible across runs.
data = load_iris()
X_train, X_test, y_train, y_test = train_test_split(data.data, data.target, test_size=0.33, random_state=42, shuffle=True)

# Report the resulting split shapes (150 samples total, 4 features each).
print("X_train의 shape:", X_train.shape)
print("X_test의 shape:", X_test.shape)
print("y_train의 shape:", y_train.shape)
print("y_test의 shape:", y_test.shape)
# -
# ### 1-1. Data Normalization
# 첫 번째로, 데이터를 정규화 하는 것이 중요 할 것 같습니다. 데이터를 정규화 해 보세요.
#
# (Min - Max 정규화를 이용하면 될 것 같죠? hint: ndarray.min(), ndarray.max())
X_train = # Your Code
X_test = # Your Code
print(X_train[:10])
# ## 1-2. Data Training
# 그 다음으로는 이제 데이터를 학습 시킬 시간입니다! SVM 모듈을 import 한 후, 학습을 시켜 보도록 하겠습니다.
# +
from sklearn.svm import SVC
svm = SVC(C=2)
# <your code: 학습 과정을 넣어 주세요>
accuracy = (sum(svm.predict(X_test) == y_test) / 50) * 100
print(f'정확도 (accuracy): {accuracy}%')
# -
# ## Assignment 2.
# 다음은 **MNIST 데이터**에 대해 분류를 해보는 연습을 해 보겠습니다. **MNIST**는 손글씨 데이터로, **Input Data**는 [28 x 28]의 데이터로 이루어져 있습니다. 일단, 우리가 이를 학습 시키기 전에 한번 데이터를 확인 해 볼까요?
#
# 이 예제는 DNN을 이용하기 때문에, **Pytorch**로 진행 하겠습니다. 실습은 **Hyperparameter**만 고치면 됩니다.
#
# **학습률 95%에 도전 해 보세요!**
import time
import random
import torch
import torch.nn as nn
from torchvision.datasets import MNIST
from torchvision import transforms
from torch.utils.data import DataLoader
import torch.nn.functional as F
# +
# 이 부분은 절대 변경하지 마세요.
RANDOM_SEED = 123
DEVICE = torch.device('cuda:0' if torch.cuda.is_available() else 'cpu')
random.seed(RANDOM_SEED)
torch.manual_seed(RANDOM_SEED)
# -
# ## 2-0. Data Load
# 데이터를 불러와 보겠습니다. `transforms.Compose`를 이용하여, 데이터를 pytorch에서 사용하는 **Tensor**형으로 바꾸고, 이를 **Gaussian Distribution**으로 정규화합니다.
# 수정 가능한 셀입니다.
BATCH_SIZE = 64 # 60000을 사용하면, Full-Batch 학습을 진행 합니다.
# +
# Don't Touch!
custom_train_transform = transforms.Compose([
transforms.ToTensor(),
transforms.Normalize(mean=(0.5,), std=(0.5,))
])
custom_test_transform = transforms.Compose([
transforms.ToTensor(),
transforms.Normalize(mean=(0.5,), std=(0.5,))
])
# -
# `MNIST`를 이용하여 **MNIST** 데이터를 불러 오고, 이를 transform 해 줍니다. 또한, `DataLoader`를 이용하여, 셔플을 해준 후, 미니 배치를 생성 합니다.
# +
train_dataset = MNIST(".", train=True, download=True, transform=custom_train_transform)
train_loader = DataLoader(dataset=train_dataset,
batch_size=BATCH_SIZE,
shuffle=True,
drop_last=True,
num_workers=2)
test_dataset = MNIST(".", train=False, download=True, transform=custom_test_transform)
test_loader = DataLoader(dataset=test_dataset,
batch_size=BATCH_SIZE,
shuffle=False,
num_workers=2)
# -
# 데이터가 잘 들어 왔는지 확인 합니다.
# Sanity-check the dataloader: inspect one minibatch and display a sample image.
for batch_idx, (x, y) in enumerate(train_loader):
    print(' | Batch index:', batch_idx, end='')
    print(' | Batch size:', y.size()[0])
    x = x.to(DEVICE)
    y = y.to(DEVICE)
    # BUG FIX: Tensor.numpy() raises a TypeError on CUDA tensors;
    # copy to host memory with .cpu() before converting.
    x_numpy = x.cpu().numpy()
    y_numpy = y.cpu().numpy()
    print('input batch의 모양:', x_numpy.shape)
    # MNIST images are 1x28x28; reshape drops the channel axis for imshow.
    plt.imshow(x_numpy[0].reshape(28, 28))
    plt.title(f'Label {y_numpy[0]}')
    plt.show()
    print('break minibatch for-loop')
    break
# ## 2-1. Deep Neural Network
# 아래 셀은 Deep Neural Network를 정의 합니다.
# +
# 변경 가능 합니다.
class DNN(nn.Module):
    """Fully-connected classifier with three hidden ReLU layers."""

    def __init__(self, num_features, num_hidden_1, num_hidden_2, num_hidden_3, num_classes):
        """
        num_features: number of input features
        num_hidden_1: nodes in the first hidden layer
        num_hidden_2: nodes in the second hidden layer
        num_hidden_3: nodes in the third hidden layer
        num_classes: number of target classes
        """
        super(DNN, self).__init__()
        self.num_classes = num_classes
        # Editable: layers may be added or removed here (nn.Linear only).
        self.linear_1 = nn.Linear(num_features, num_hidden_1)
        self.linear_2 = nn.Linear(num_hidden_1, num_hidden_2)
        self.linear_3 = nn.Linear(num_hidden_2, num_hidden_3)
        self.linear_out = nn.Linear(num_hidden_3, num_classes)

    def forward(self, x):
        """Return (logits, probas) for input batch x of shape (N, num_features)."""
        out = self.linear_1(x)
        out = torch.relu(out)
        out = self.linear_2(out)
        out = torch.relu(out)
        out = self.linear_3(out)
        out = torch.relu(out)
        logits = self.linear_out(out)
        # BUG FIX: sigmoid produces independent per-class scores; for mutually
        # exclusive classes the probabilities must come from softmax so rows
        # sum to 1. Argmax predictions are unchanged (both are monotonic).
        probas = F.softmax(logits, dim=1)
        return logits, probas
# 수정 가능!
model = DNN(num_features=28*28,
num_hidden_1=100,
num_hidden_2=50,
num_hidden_3=20,
num_classes=10)
model = model.to(DEVICE)
# -
# ## 2-2. Training
# 여기서는 **Optimizer**와, **Epoch**를 설정 합니다.
# Editable cell!
# CONSISTENCY FIX: constant renamed from the misspelled LEARNING_LATE
# (used only in this cell, so the rename is self-contained).
LEARNING_RATE = 0.3  # NOTE: very large for SGD — the exercise hint suggests lowering it.
NUM_EPOCHS = 5
optimizer = torch.optim.SGD(model.parameters(), lr=LEARNING_RATE)
# +
def compute_accuracy_and_loss(model, data_loader, device):
    """Evaluate *model* over *data_loader*.

    Returns a tuple (accuracy_percent, mean cross-entropy per example),
    flattening each image batch to 784-vectors before the forward pass.
    """
    n_correct = 0
    n_seen = 0
    ce_total = 0.
    for batch_features, batch_targets in data_loader:
        batch_features = batch_features.view(-1, 28*28).to(device)
        batch_targets = batch_targets.to(device)
        logits, probas = model(batch_features)
        ce_total += F.cross_entropy(logits, batch_targets).item()
        _, predictions = torch.max(probas, 1)
        n_seen += batch_targets.size(0)
        n_correct += (predictions == batch_targets).sum()
    return n_correct.float() / n_seen * 100, ce_total / n_seen
start_time = time.time()

# Per-epoch metric history on both splits (used by the plots below).
train_acc_lst, test_acc_lst = [], []
train_loss_lst, test_loss_lst = [], []

for epoch in range(NUM_EPOCHS):

    model.train()  # enable training-mode behaviour (dropout/batchnorm, if any)
    for batch_idx, (features, targets) in enumerate(train_loader):

        ### PREPARE MINIBATCH
        # Flatten 28x28 images to 784-vectors and move the batch to DEVICE.
        features = features.view(-1, 28*28).to(DEVICE)
        targets = targets.to(DEVICE)

        ### FORWARD AND BACK PROP
        logits, probas = model(features) # model forward pass
        cost = F.cross_entropy(logits, targets) # cross-entropy on the raw logits
        optimizer.zero_grad() # reset accumulated gradients
        cost.backward() # backpropagation

        ### UPDATE MODEL PARAMETERS
        optimizer.step() # apply one optimizer step

        ### LOGGING
        # Print progress every 40 minibatches.
        if not batch_idx % 40:
            print (f'Epoch: {epoch+1:03d}/{NUM_EPOCHS:03d} | '
                   f'Batch {batch_idx:03d}/{len(train_loader):03d} |'
                   f' Cost: {cost:.4f}')

    # Evaluate after every epoch; tracking both losses over time reveals
    # underfitting vs. overfitting.
    model.eval()
    with torch.set_grad_enabled(False): # disable gradient tracking for evaluation
        train_acc, train_loss = compute_accuracy_and_loss(model, train_loader, device=DEVICE) # train acc, loss
        test_acc, test_loss = compute_accuracy_and_loss(model, test_loader, device=DEVICE) # test acc, loss

        # Record the epoch's metrics.
        train_acc_lst.append(train_acc)
        test_acc_lst.append(test_acc)
        train_loss_lst.append(train_loss)
        test_loss_lst.append(test_loss)

        # Epoch summary.
        print(f'Epoch: {epoch+1:03d}/{NUM_EPOCHS:03d} Train Acc.: {train_acc:.2f}%'
              f' | Test Acc.: {test_acc:.2f}%')

    # Wall time elapsed since training started (per-epoch report, minutes).
    elapsed = (time.time() - start_time)/60
    print(f'Time elapsed: {elapsed:.2f} min')

# Total training wall time (minutes).
elapsed = (time.time() - start_time)/60
print(f'Total Training Time: {elapsed:.2f} min')
# -
# ## 2-3. Evaluation
# 테스트 데이터와 학습 데이터의 Loss 변화를 확인 합니다.
# Loss curves: training vs. test cross-entropy per epoch.
plt.plot(range(1, NUM_EPOCHS+1), train_loss_lst, label='Training loss')
plt.plot(range(1, NUM_EPOCHS+1), test_loss_lst, label='Test loss')
plt.legend(loc='upper right')
plt.ylabel('Cross entropy')
plt.xlabel('Epoch')
plt.show()

# Accuracy curves: training vs. test accuracy per epoch.
plt.plot(range(1, NUM_EPOCHS+1), train_acc_lst, label='Training accuracy')
plt.plot(range(1, NUM_EPOCHS+1), test_acc_lst, label='Test accuracy')
plt.legend(loc='upper left')
plt.ylabel('Accuracy (%)')  # BUG FIX: axis was mislabeled 'Cross entropy'
plt.xlabel('Epoch')
plt.show()
model.eval()
with torch.set_grad_enabled(False): # save memory during inference
    test_acc, test_loss = compute_accuracy_and_loss(model, test_loader, DEVICE)
print(f'Test accuracy: {test_acc:.2f}%')

# Show 5 random test images from the first batch with their predicted labels.
for batch_idx, (x, y) in enumerate(test_loader):
    x = x.to(DEVICE)
    y = y.to(DEVICE)
    # BUG FIX: Tensor.numpy() raises on CUDA tensors; copy to host first.
    x_numpy = x.cpu().numpy()
    fig, axes = plt.subplots(1, 5)
    for ax in axes:
        # BUG FIX: random.randint is inclusive on both ends, so randint(0, 64)
        # could return 64 and index past the batch; bound by the actual size.
        index = random.randint(0, x.shape[0] - 1)
        logits, probas = model(x[index].view(-1, 28*28))
        _, predicted_labels = torch.max(probas, 1)
        ax.imshow(x_numpy[index].reshape(28, 28))
        ax.set_title(f'Label {predicted_labels[0]}')
    plt.show()
    break
# ## 2-4. Discussion
# 1. Train Data에 대한 정확도와, Test Data에 대한 정확도가 왜 다를까요?
# - 입력 해 주세요!
#
# 2. 다른 사람들은 정확도가 99퍼가 넘는 모델도 만들던데, DNN의 한계가 있다면 어떤 점이 있을까요? (Hint: 우리는 28x28의 이미지를 768x1로 쫙 펴서 넣어 줬습니다.)
# - 입력 해 주세요!
| assignments/04~08/04~09.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# language: python
# name: python3
# ---
# +
# %pylab inline
import sys
import os.path as op
from pathlib import Path
import shutil
# sys.path.insert(0, "/home/mjirik/projects/pyseg_base/")
sys.path.insert(0, op.abspath("../"))
import scipy
import time
import pandas as pd
from imcut import pycut
import sed3
import itertools
import data2tex as dtt
import io3d
import imma
import lisa
import traceback
latex_dir = Path("../../papers/cmbbeiv19/tmp/")
dtt.set_output(latex_dir)
dtt.use_pure_latex = True
# sh 155,160, r10, dpoff 3, seeds 3
# dp_ircad_id = [1, 11]
# dp_ircad_id = [1, 5, 6, 7]
dp_ircad_id = [1, 5, 6, 7, 11, 20]
# dp_ircad_id = [11, 20]
# dp_ircad_id = [1]
dp_keys = ["left_kidney"]
working_voxelsize_mm = None
# working_voxelsize_mm = [1.5, 1.5, 1.5]
working_voxelsize_mm = [1.3, 1.3, 1.3]
# working_voxelsize_mm = [1.7, 1.7, 1.7]
# working_voxelsize_mm = "orig*2"
# working_voxelsize_mm=[2, 2, 2]
# working_voxelsize_mm=[2.2, 2.5, 2.5]
fname = "exp062-multiscale_delme.csv"
fnamenew = "msgc_experiment_ct.csv"
rnd_seed=1
dpi = 400
lisa.__version__
# -
# dry_run = True
dry_run = False
force_rewrite = False
# force_rewrite = True
# ## Methods setup
# +
# block size bylo 10
def _base_segparams(method):
    """Return the common graph-cut parameter dict, varying only the method name.

    The three experiment configurations below share every setting except
    'method', so they are generated from this single template to keep them
    consistent.
    """
    return {
        'method': method,
        'use_boundary_penalties': True,
        'boundary_dilatation_distance': 2,
        'boundary_penalties_weight': 1,
        'block_size': 10,
        'tile_zoom_constant': 1,
        "pairwise_alpha_per_square_unit": 45,
        'return_only_object_with_seeds': True,
    }

# Single-scale and the two multiscale graph-cut variants under comparison.
segparams0 = _base_segparams('graphcut')
segparams1 = _base_segparams('multiscale_graphcut_hi2lo')
segparams2 = _base_segparams('multiscale_graphcut_lo2hi')

# Display labels, index-aligned with segparams0..2 (trailing space is used
# as the key prefix separator downstream).
labels = [
    "ssgc ",
    "msgc_hi2lo ",
    "msgc_lo2hi ",
]
# +
data_seeds_path = Path(io3d.datasets.join_path("medical", "orig", "ircad1b_seeds", get_root=True))
d01_pth = data_seeds_path / "ircadb1-01.pklz"
datap = io3d.read(d01_pth)
datap
str(d01_pth)
datap.keys()
# +
# io3d.write(datap, data_seeds_path / "ircad1b01.hdf5")
# io3d.read(data_seeds_path / "ircad1b01.hdf5")
# +
# datap['saved_seeds']["left_kidney"]
# +
# pth_data3d = Path(io3d.datasets.join_path("medical", "orig", "3Dircadb1.{}", "PATIENT_DICOM", get_root=True))
# pth_ground_true = Path(io3d.datasets.join_path("medical", "orig", "3Dircadb1.{}", "MASKS_DICOM", "{}" get_root=True))
# pth_seeds = Path(io3d.datasets.join_path("medical", "orig", "ircad1b_seeds", "ircad1b{:02d}.pklz", get_root=True))
# print(pth_data3d)
# print(pth_seeds)
# +
# import imma
# help(imma.image_manipulation.resize_to_mm)
# +
def prepare_data(i, seeds_key):
    """Load IRCAD case *i*: CT volume, ground-truth mask and seeds for *seeds_key*.

    Returns (data3d, seg_true, seeds, working_voxelsize, original_voxelsize),
    resampling all three volumes when the module-level working_voxelsize_mm
    is set.
    """
    # Ground-truth directory names drop the underscore ("left_kidney" -> "leftkidney").
    ground_true_key = seeds_key.replace("_", "")
    pth_data3d = Path(io3d.datasets.join_path("medical", "orig", "3Dircadb1.{}", "PATIENT_DICOM", get_root=True))
    pth_ground_true = Path(io3d.datasets.join_path("medical", "orig", "3Dircadb1.{}", "MASKS_DICOM", "{}", get_root=True))
    pth_seeds = Path(io3d.datasets.join_path("medical", "orig", "ircad1b_seeds", "ircadb1-{:02d}.pklz", get_root=True))
    pth_data3d = str(pth_data3d).format(i)
    pth_seeds = str(pth_seeds).format(i)
    pth_ground_true = str(pth_ground_true).format(i, ground_true_key)
    print(pth_data3d)
    print(pth_ground_true)
    print(pth_seeds)
    datap_data3d = io3d.read(pth_data3d)
    datap_seeds = io3d.read(pth_seeds)
    datap_ground_true = io3d.read(pth_ground_true)
    seeds = datap_seeds["saved_seeds"][seeds_key]
    data3d = datap_data3d["data3d"]
    seg_true = datap_ground_true["data3d"]
    vs = datap_data3d["voxelsize_mm"]
    # BUG FIX: wvs was unbound (NameError at the return statement) whenever
    # working_voxelsize_mm was None; default to the original voxel size.
    wvs = vs
    if working_voxelsize_mm is not None:
        if working_voxelsize_mm == "orig*2":
            wvs = np.asarray(vs) * 2
        else:
            wvs = working_voxelsize_mm
        data3d = imma.image_manipulation.resize_to_mm(data3d, vs, wvs)
        # order=0 (nearest neighbour) keeps label/seed volumes categorical.
        seg_true = imma.image_manipulation.resize_to_mm(seg_true, vs, wvs, order=0)
        seeds = imma.image_manipulation.resize_to_mm(seeds, vs, wvs, order=0)
    return data3d, seg_true, seeds, wvs, vs
# -
# ### LaTeX export functions
# +
def to_latex_file(df, fn):
    """Write the LaTeX rendering of DataFrame *df* to the file *fn*."""
    rendered = df.to_latex()
    with open(fn, "w") as out:
        out.write(rendered)
def latex_float(f, precision=4):
    """Format number *f* for LaTeX, rendering exponents as ``\\times 10^{e}``."""
    formatted = ("{0:." + str(int(precision)) + "g}").format(f)
    if "e" not in formatted:
        return formatted
    mantissa, _, exponent = formatted.partition("e")
    return r"{0} \times 10^{{{1}}}".format(mantissa, int(exponent))
def float_to_latex_file(fl, fn, precision=4):
    """Format *fl* with latex_float and write the result to the file *fn*."""
    rendered = latex_float(fl, precision=precision)
    with open(fn, "w") as out:
        out.write(rendered)
def num2latex(num, filename=None, precision=4):
    """Render *num* as a LaTeX ``\\num{...}`` expression.

    Strings are passed through as-is; anything already starting with
    ``\\num`` is not wrapped again. When *filename* is given, the result
    is also written to that file. Returns the rendered string.
    """
    if type(num) is str:
        rendered = num
    else:
        fmt = "{0:." + str(int(precision)) + "g}"
        rendered = fmt.format(num)
    if not rendered.startswith(r"\num"):
        rendered = "\\num{" + rendered + "}"
    if filename is not None:
        with open(filename, "w") as fh:
            fh.write(rendered)
    return rendered
def to_file(text, fn):
    """Write *text* verbatim to the file *fn*."""
    with open(fn, "w") as out:
        out.write(text)
# -
# ## CT data, opakovaný experiment
# +
def process_gc_stats(stats1, prefix=None):
    """Prefix graph-cut stat keys and replace n/t-link shape lists by counts.

    The 'nlinks shape' / 'tlinks shape' entries are summed into scalar
    '... number' entries plus a combined 'edge number'; the shape lists
    themselves are dropped. Returns a new dict, leaving *stats1* untouched.
    """
    prefix = "" if prefix is None else prefix
    out = {prefix + key: value for key, value in stats1.items()}
    # First column of the summed shape array is the total link count.
    out[prefix + "nlinks number"] = np.sum(np.asarray(out[prefix + "nlinks shape"]), axis=0)[0]
    out[prefix + "tlinks number"] = np.sum(np.asarray(out[prefix + "tlinks shape"]), axis=0)[0]
    del out[prefix + "tlinks shape"]
    del out[prefix + "nlinks shape"]
    out[prefix + "edge number"] = out[prefix + "nlinks number"] + out[prefix + "tlinks number"]
    return out
def merge_stats(stats0, stats1, stats2, labels=None):
    """Merge three per-method stat dicts into one, prefixing keys by *labels*.

    Each dict is first normalized by process_gc_stats with its label as
    the key prefix; defaults to empty prefixes when *labels* is None.
    """
    if labels is None:
        labels = [""] * 3
    merged = {}
    for stats, label in zip((stats0, stats1, stats2), labels):
        merged.update(process_gc_stats(stats, label))
    return merged
def run_gc_with_defined_setup(img, segparams, seeds, true_seg, voxelsize_mm, dry_run=False, fn_debug_prefix="", true_seg2=None):
    """Run one graph-cut segmentation and collect timing/quality statistics.

    Returns (stats_dict, segmentation). Writes debug volumes to disk and,
    when *true_seg2* is given, adds dice/jaccard/voe against that second
    reference under '... gc' keys.
    """
    start = time.time()
    gc = pycut.ImageGraphCut(img, segparams=segparams, voxelsize=voxelsize_mm)
    gc.set_seeds(seeds)
    if dry_run:
        # Cheap stand-in for a real cut: threshold at the mid intensity, with
        # dummy link-shape stats so downstream processing still works.
        thr = np.mean([np.min(img), np.max(img)])
        sg1 = (img < thr).astype(np.uint8)
        stats1 = {"nlinks shape": [[5, 5]], "tlinks shape": [[5, 5]]}
    else:
        gc.run()
        sg1 = gc.segmentation
        print("segparams: ", gc.segparams)
        print("modelparams: ", gc.modelparams)
        stats1 = gc.stats
    elapsed1 = (time.time() - start)
    try:
        # Best-effort debug dump; failures are logged but must not abort the run.
        print("unique true seg: {}, unique sg1: {}".format(np.unique(true_seg), np.unique(sg1)))
        io3d.write(sg1, "sg1.pklz")
        io3d.write(true_seg, "true_seg.pklz")
    except:
        print("vyjimka")
        traceback.print_exc()
    # NOTE(review): label 0 appears to mark the seeded object in gc.segmentation,
    # so it is inverted here to 1=object — confirm with pycut's conventions.
    sg1 = (sg1==0).astype(np.int8)
    true_seg = (true_seg > 0).astype(np.int8)
    io3d.write(sg1, fn_debug_prefix + "_" +segparams["method"] + "_seg.pklz")
    io3d.write(true_seg, fn_debug_prefix + "_" +segparams["method"] +"_true_seg.pklz")
    io3d.write(np.abs(true_seg - sg1), fn_debug_prefix + "_" +segparams["method"] +"_err.pklz")
    # Voxel-wise disagreement with the ground truth.
    err1 = np.sum(np.abs(true_seg - sg1))
    stats1["time"] = elapsed1
    stats1["error"] = err1
    stats1["data segmentation size px"] = np.sum(sg1 > 0)
    stats1["data size px"] = np.prod(img.shape)
    # Volumetric overlap metrics (dice, jaccard, voe, ...) vs. ground truth.
    stats2 = lisa.volumetry_evaluation.compare_volumes(sg1, true_seg, voxelsize_mm)
    stats1.update(stats2)
    if true_seg2 is not None:
        # Same metrics against a second reference segmentation, suffixed ' gc'.
        stats3 = lisa.volumetry_evaluation.compare_volumes(sg1, true_seg2, voxelsize_mm)
        stats1["dice gc"] = stats3["dice"]
        stats1["jaccard gc"] = stats3["jaccard"]
        stats1["voe gc"] = stats3["voe"]
    return stats1, sg1
def add_data_and_algoritm_info(stats, data_params_dict, segparams, start, true_segmentation, voxelsize_mm, orig_vs_mm):
    """Attach dataset and algorithm metadata to *stats* (mutates and returns it).

    NOTE(review): key spelling 'voxesize' (sic) is kept — downstream CSVs and
    analyses rely on these exact column names.
    """
    # Historical per-index fields kept for reference:
    # stats['msgc time'] = elapsed1
    # stats['normal time'] = elapsed2
    # stats['data id'] = data_params[0]
    # stats['data offset'] = data_params[1]
    # stats['target organ'] = data_params[1]
    # stats['data radius'] = data_params[2]
    # stats['data size'] = data_params[0]
    stats.update(data_params_dict)
    # Volume geometry of the ground-truth segmentation.
    stats["data size 0"] = true_segmentation.shape[0]
    stats["data size 1"] = true_segmentation.shape[1]
    stats["data size 2"] = true_segmentation.shape[2]
    stats["data size px"] = np.prod(true_segmentation.shape)
    stats["data target size px"] = np.sum(true_segmentation > 0)
    # Working and original voxel sizes (per-axis and volume in mm^3).
    stats["data voxesize mm^3"] = np.prod(voxelsize_mm)
    stats["data voxesize mm 0"] = voxelsize_mm[0]
    stats["data voxesize mm 1"] = voxelsize_mm[1]
    stats["data voxesize mm 2"] = voxelsize_mm[2]
    stats["data orig voxesize mm 0"] = orig_vs_mm[0]
    stats["data orig voxesize mm 1"] = orig_vs_mm[1]
    stats["data orig voxesize mm 2"] = orig_vs_mm[2]
    stats["block size"] = segparams["block_size"]
    # stats["data seedsz"] = data_params[3]
    # stats["GC error"] = err2
    # stats["MSGC error"] = err1
    # machine_hostname is a module-level global defined elsewhere in the
    # notebook — TODO confirm it is set before this is called.
    stats['machine hostname'] = machine_hostname
    stats['experiment iteration start time'] = start
    return stats
def add_data_seaborn(stats, data_params_dict, segparams, start, i, label, true_segmentation, voxelsize_mm, orig_vs_mm):
    """Build a one-row DataFrame of stats for method *label* at experiment index *i*.

    Combines process_gc_stats (link-shape flattening) with the dataset/
    algorithm metadata, for later concatenation into the long-format
    results table.
    """
    stats = process_gc_stats(stats, "")
    stats = add_data_and_algoritm_info(stats, data_params_dict, segparams, start, true_segmentation, voxelsize_mm, orig_vs_mm)
    stats["method"] = label
    # Index i*3 leaves room for the three methods of one experiment iteration.
    dfinew = pd.DataFrame(stats, index=[i*3 + 0])
    #dfnew = dfnew.append(dfinew, sort=True)
    return dfinew
| examples/msgc_experiments_ct_init.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# language: python
# name: python3
# ---
# # PyGeM
# ## Tutorial 5: Inverse Distance Weighting interpolation technique on a cube
# In this tutorial we will show how to use the Inverse Distance Weighting interpolation technique to deform a cube.
#
# First of all, we import the required class, the numpy package and we set matplotlib for the notebook.
# +
# %matplotlib inline
import numpy as np
import matplotlib.pyplot as plt
from pygem import IDW
# -
# We need to set the deformation parameters: we can set manually, by editing the `IDW` attributes, or we can read them by parsing a file. We remark that it is possible to save the parameters (for example, after set them manually) to a file in order to edit this for the future deformations.
# +
parameters_file = '../tests/test_datasets/parameters_idw_cube.prm'
idw = IDW()
idw.read_parameters(filename=parameters_file)
# -
# The following is the parameters file for this particular case. The Inverse Distance Weighting section describes the power parameter (see the documentation of the [IDW](http://mathlab.github.io/PyGeM/idw.html) class for more details). As control points we consider the 8 vertices of the cube (the first one is not exactly the vertex), and we move 3 of them. In the Control points section there are all the coordinates of the control points.
# %cat '../tests/test_datasets/parameters_idw_cube.prm'
# Here we create a $10 \times 10 \times 10$ lattice to mimic a cube.
# +
# Build a 10x10x10 lattice of points in the unit cube.
nx, ny, nz = (10, 10, 10)
xv = np.linspace(0, 1, nx)
yv = np.linspace(0, 1, ny)
zv = np.linspace(0, 1, nz)
z, y, x = np.meshgrid(zv, yv, xv)
# Stack the flattened coordinates into an (nx*ny*nz, 3) array of points.
# (A dead pre-allocation `mesh = np.zeros(...)` that was immediately
# overwritten has been removed.)
mesh = np.array([x.ravel(), y.ravel(), z.ravel()]).T
# -
# Now we plot the points to see what we are doing.
fig = plt.figure(1)
ax = fig.add_subplot(111, projection='3d')
ax.scatter(mesh[:, 0], mesh[:, 1], mesh[:, 2], c='blue', marker='o')
ax.set_xlabel('X axis')
ax.set_ylabel('Y axis')
ax.set_zlabel('Z axis')
plt.show()
# Finally we perform the IDW interpolation using the IDW class.
new_mesh = idw(mesh)
# We can plot the new points in order to see the deformation. Try different powers to better fit your specific problem.
fig = plt.figure(2)
ax = fig.add_subplot(111, projection='3d')
ax.scatter(new_mesh[:, 0], new_mesh[:, 1], new_mesh[:, 2], c='red', marker='o')
ax.set_xlabel('X axis')
ax.set_ylabel('Y axis')
ax.set_zlabel('Z axis')
plt.show()
| tutorials/tutorial4/tutorial-4-idw.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# language: python
# name: python3
# ---
# # <font color='blue'>Data Science Academy - Python Fundamentos - Capítulo 7</font>
#
# ## Download: http://github.com/dsacademybr
# Versão da Linguagem Python
from platform import python_version
print('Versão da Linguagem Python Usada Neste Jupyter Notebook:', python_version())
# ## Missão: Implementar o Algoritmo de Ordenação "Selection sort".
# ## Nível de Dificuldade: Alto
# ## Premissas
#
# * As duplicatas são permitidas?
# * Sim
# * Podemos assumir que a entrada é válida?
# * Não
# * Podemos supor que isso se encaixa na memória?
# * Sim
# ## Teste Cases
#
# * None -> Exception
# * [] -> []
# * One element -> [element]
# * Two or more elements
# ## Algoritmo
#
# Animação do Wikipedia:
# 
#
# Podemos fazer isso de forma recursiva ou iterativa. Iterativamente será mais eficiente, pois não requer sobrecarga de espaço extra com as chamadas recursivas.
#
# * Para cada elemento
# * Verifique cada elemento à direita para encontrar o min
# * Se min < elemento atual, swap
# ## Solução
class SelectionSort(object):
    """Selection sort implementation for the mission (see spec above)."""

    def sort(self, data):
        """Sort *data* in ascending order in place and return it.

        Duplicates are allowed. Raises TypeError when *data* is None,
        matching the notebook's test harness expectations.
        (The original body was only a placeholder comment, which is a
        SyntaxError — a function body needs at least one statement.)
        """
        if data is None:
            raise TypeError('data cannot be None')
        for i in range(len(data)):
            # Find the minimum of the unsorted suffix data[i:].
            min_idx = i
            for j in range(i + 1, len(data)):
                if data[j] < data[min_idx]:
                    min_idx = j
            # Swap it into position i.
            if min_idx != i:
                data[i], data[min_idx] = data[min_idx], data[i]
        return data
# ## Teste da Solução
# +
# %%writefile missao4.py
from nose.tools import assert_equal, assert_raises


class TestSelectionSort(object):
    """Notebook test harness for the SelectionSort mission."""

    def test_selection_sort(self, func):
        # func: the sort callable under test (SelectionSort().sort).
        print('None input')
        assert_raises(TypeError, func, None)
        print('Input vazio')
        assert_equal(func([]), [])
        print('Um elemento')
        assert_equal(func([5]), [5])
        print('Dois ou mais elementos')
        data = [5, 1, 7, 2, 6, -3, 5, 7, -10]
        assert_equal(func(data), sorted(data))
        print('Sua solução foi executada com sucesso! Parabéns!')


def main():
    test = TestSelectionSort()
    try:
        selection_sort = SelectionSort()
        test.test_selection_sort(selection_sort.sort)
    except NameError:
        # SelectionSort is defined earlier in the notebook; silently skip
        # when the exercise has not been solved yet.
        pass


if __name__ == '__main__':
    main()
# -
# %run -i missao4.py
# ## Fim
#
# ### Obrigado
#
# ### Visite o Blog da Data Science Academy - <a href="http://blog.dsacademy.com.br">Blog DSA</a>
#
| Data Science Academy/PythonFundamentos/Cap07/DesafioDSA/Missao4/missao4.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python [conda env:variants]
# language: python
# name: conda-env-variants-py
# ---
# View of the raw mutations
def missense_log_plot(Uniprot_name):
    """Bar-plot allele counts per sequence position for one class-A GPCR,
    with a logarithmic y-axis. Returns the matplotlib figure.

    Relies on the module-level DataFrames missense_variants_classA and
    residue_labels_classA being loaded beforehand.
    """
    gene_missense_variants = missense_variants_classA[missense_variants_classA['Uniprot_name'] == Uniprot_name]
    # NOTE(review): gene_residue_labels is computed but never used below —
    # presumably intended for axis labelling; confirm before removing.
    gene_residue_labels = residue_labels_classA[residue_labels_classA['Uniprot_name'] == Uniprot_name]
    fig, ax1 = plt.subplots(nrows=1,figsize=(8,3))
    ax1.bar(x=gene_missense_variants['sequence_position'],height=gene_missense_variants['allele_count'],)
    ax1.semilogy()
    return fig
missense_variants_classA['consequence'].value_counts()
# g = sns.countplot(data=missense_variants_classA, x='consequence')
# g.set_xticklabels(g.xaxis.get_majorticklabels(),rotation=30)
# g.set_xlabel('VEP categorical label')
missense_variants_classA[['SIFT_cat','SIFT_number']] = missense_variants_classA['SIFT'].str.split('(',expand=True)
g = sns.countplot(data=missense_variants_classA, x='SIFT_cat')
g.set_xticklabels(g.xaxis.get_majorticklabels(),rotation=30)
g.set_xlabel('SIFT categorical label')
missense_variants_classA[['PolyPhen_cat','PolyPhen_number']] = missense_variants_classA['PolyPhen'].str.split('(',expand=True)
g = sns.countplot(data=missense_variants_classA, x='PolyPhen_cat')
g.set_xticklabels(g.xaxis.get_majorticklabels(),rotation=30)
g.set_xlabel('Polyphen categorical label')
# What amino acids is R3x50 mutated to?
pd.DataFrame(missense_variants_classA[missense_variants_classA.GPCRdb_alignment_number =='3.50x50'].alternate_amino_acid.value_counts()).reset_index()
# What is the fraction of singleton mutations to R3x50?
(missense_variants_classA[missense_variants_classA.GPCRdb_alignment_number =='3.50x50'].allele_count == 1).sum() / \
missense_variants_classA[missense_variants_classA.GPCRdb_alignment_number =='3.50x50'].shape[0]
# What is the fraction of singleton mutations over all mutations?
(missense_variants_classA.allele_count == 1).sum() / \
missense_variants_classA.shape[0]
# How many homozygous individuals for R3x50 mutations are observed?
missense_variants_classA[(missense_variants_classA.GPCRdb_alignment_number =='3.50x50') & \
(missense_variants_classA.num_alternate_homozygous > 0)]\
[['Uniprot_name','reference_amino_acid','alternate_amino_acid','num_alternate_homozygous']]
| scripts/old_analysis_scripts/gnomad_exploration.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .r
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: R
# language: R
# name: ir
# ---
# # Extract delays from PubMed history dates
library(dplyr, warn=F)
# Read history dates for all articles
path = file.path('data', 'history-dates.tsv.bz2')
col_types = list(
accepted_0 = readr::col_date(),
received_0 = readr::col_date(),
pubmed_0 = readr::col_date(),
medline_0 = readr::col_date(),
date_online = readr::col_date()
)
all_df = readr::read_tsv(path, col_types = col_types) %>%
dplyr::rename(received = received_0, accepted = accepted_0, pubmed = pubmed_0, medline = medline_0)
head(all_df, 2)
# Set time constraints to eliminate erroneous records
earliest = readr::parse_date('1960-01-01')
latest = readr::parse_date('2015-12-31')
# +
# Count journals and articles by PubMed year
year_df = all_df %>%
dplyr::filter(pubmed >= earliest) %>%
dplyr::filter(pubmed <= latest) %>%
dplyr::mutate(year = lubridate::year(pubmed)) %>%
dplyr::group_by(year) %>%
dplyr::summarize(
n_journals = n_distinct(journal_nlm_id),
n_articles = n()
)
path = file.path('data', 'yearly-pubmed-totals.tsv')
year_df %>%
readr::write_tsv(path)
head(year_df, 2)
# +
# Create an acceptance-delay dataset: one row per article with the delay
# (in days) between manuscript receipt and acceptance. Rows with missing,
# non-positive, or implausibly long (> 5 years) delays are dropped, as are
# acceptance dates outside the [earliest, latest] window.
accept_df = all_df %>%
  dplyr::mutate(delay_type = 'Acceptance') %>%
  dplyr::mutate(delay = as.numeric(accepted - received, units='days')) %>%
  dplyr::rename(date = accepted) %>%
  dplyr::select(journal_nlm_id, pubmed_id, delay_type, date, delay) %>%
  dplyr::filter(! is.na(delay)) %>%
  dplyr::filter(delay > 0) %>%
  dplyr::filter(delay <= 365 * 5) %>%
  dplyr::filter(date >= earliest) %>%
  dplyr::filter(date <= latest)
nrow(accept_df)
# -
head(accept_df, 2)
# +
# Create a publication delay dataset
publish_df = all_df %>%
dplyr::mutate(delay_type = 'Publication') %>%
dplyr::mutate(delay = as.numeric(date_online - accepted, units='days')) %>%
dplyr::rename(date = date_online) %>%
dplyr::select(journal_nlm_id, pubmed_id, delay_type, date, delay) %>%
dplyr::filter(! is.na(delay)) %>%
dplyr::filter(delay >= 0) %>%
dplyr::filter(delay <= 365 * 3) %>%
dplyr::filter(date >= earliest) %>%
dplyr::filter(date <= latest)
nrow(publish_df)
# -
head(publish_df, 2)
# Bind acceptance and publication dataframes
delay_df = dplyr::bind_rows(accept_df, publish_df) %>%
dplyr::arrange(journal_nlm_id, pubmed_id, delay_type)
head(delay_df)
tail(delay_df)
# Save as a gzipped TSV
path = file.path('data', 'delays.tsv')
delay_df %>%
readr::write_tsv(path)
system2('gzip', c('--force', path))
| extract-delays.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# name: python3
# ---
# + [markdown] id="view-in-github" colab_type="text"
# <a href="https://colab.research.google.com/github/ayulockin/DLshots/blob/master/Intro_to_covolutional_neural_network_with_PyTorch.ipynb" target="_parent"><img src="https://colab.research.google.com/assets/colab-badge.svg" alt="Open In Colab"/></a>
# + [markdown] id="CufETwmZN-vF" colab_type="text"
# ## Setups, Imports and Installations
# + id="HESv1l--3AAa" colab_type="code" colab={}
# %%capture
# !pip install wandb -q
# + id="vg0iv_RPT3YG" colab_type="code" colab={"base_uri": "https://localhost:8080/", "height": 108} outputId="6add660b-2ff7-4cb5-8016-d0526c967e03"
import wandb
wandb.login()
# + id="_RFZ81yY3Rfl" colab_type="code" colab={}
import torch
from torch import nn
from torch import optim
from torch.nn import functional as F
import torchvision
from torchvision import datasets, transforms
from torch.utils.data import DataLoader
import matplotlib.pyplot as plt
import numpy as np
# + id="NiEHlMTh4ecC" colab_type="code" colab={"base_uri": "https://localhost:8080/", "height": 35} outputId="d177b172-db5f-487d-fa93-e10b0cd19637"
device = torch.device("cuda:0" if torch.cuda.is_available() else "cpu")
print(device)
# + [markdown] id="XogH13qlOF-Q" colab_type="text"
# # Download Dataset and Prepare Dataloader
# + id="FtM4FMPP3imH" colab_type="code" colab={"base_uri": "https://localhost:8080/", "height": 104, "referenced_widgets": ["638adc7d167b4719bf064bfec4ca6da5", "878540bd6a5b4a49ac861cfea5acacd7", "53334ecf4fe3416d835de7d428e2534d", "<KEY>", "708849d0361d41b4941fb03e3085f942", "84c3a2c0506c42b9af67d7f88f9cccd4", "5bc03505e10d46869f8700a05e1ea9e8", "969764b235204f89aacf70f55a4651ed"]} outputId="4e7e97c6-4791-4a3c-b239-6522f1ca5b5a"
BATCH_SIZE = 32
transform = transforms.Compose(
[transforms.ToTensor(),
transforms.Normalize((0.5, 0.5, 0.5), (0.5, 0.5, 0.5))])
trainset = torchvision.datasets.CIFAR10(root='./data', train=True,
download=True, transform=transform)
trainloader = torch.utils.data.DataLoader(trainset, batch_size=BATCH_SIZE,
shuffle=True, num_workers=2)
testset = torchvision.datasets.CIFAR10(root='./data', train=False,
download=True, transform=transform)
testloader = torch.utils.data.DataLoader(testset, batch_size=BATCH_SIZE,
shuffle=False, num_workers=2)
CLASS_NAMES = ('plane', 'car', 'bird', 'cat',
'deer', 'dog', 'frog', 'horse', 'ship', 'truck')
# + [markdown] id="XyG3USzkON-d" colab_type="text"
# # Visualize Data
# + id="NWEveFVYN9SE" colab_type="code" colab={}
def show_batch(image_batch, label_batch):
    """Plot the first 25 images of a batch in a 5x5 grid with class-name titles."""
    plt.figure(figsize=(10, 10))
    for idx in range(25):
        plt.subplot(5, 5, idx + 1)
        # Undo the (0.5, 0.5) normalisation, then move channels last for imshow.
        unnormalized = image_batch[idx] / 2 + 0.5
        plt.imshow(np.transpose(unnormalized.numpy(), (1, 2, 0)))
        plt.title(CLASS_NAMES[label_batch[idx]])
        plt.axis('off')
# + id="0XVa6VnyOlej" colab_type="code" colab={"base_uri": "https://localhost:8080/", "height": 591} outputId="56e7b870-5212-4f45-cf54-069dce5b5fff"
# Grab one batch from the training loader and preview it in a grid.
sample_images, sample_labels = next(iter(trainloader))
show_batch(sample_images, sample_labels)
# + [markdown] id="VgJjMMT1PrZN" colab_type="text"
# # Model
# + id="CUOU5phlFoZn" colab_type="code" colab={}
class Net(nn.Module):
    """LeNet-style CNN producing 10 class logits from 3x32x32 images (CIFAR-10)."""

    def __init__(self):
        super(Net, self).__init__()
        # Two conv/pool stages: 3x32x32 -> 6x14x14 -> 16x5x5.
        self.conv1 = nn.Conv2d(3, 6, 5)
        self.pool = nn.MaxPool2d(2, 2)
        self.conv2 = nn.Conv2d(6, 16, 5)
        # Fully connected classifier head on the flattened 16*5*5 feature map.
        self.fc1 = nn.Linear(16 * 5 * 5, 120)
        self.fc2 = nn.Linear(120, 84)
        self.fc3 = nn.Linear(84, 10)

    def forward(self, x):
        """Return raw (unsoftmaxed) class logits of shape (batch, 10)."""
        out = self.pool(F.relu(self.conv1(x)))
        out = self.pool(F.relu(self.conv2(out)))
        out = out.view(-1, 16 * 5 * 5)
        out = F.relu(self.fc1(out))
        out = F.relu(self.fc2(out))
        return self.fc3(out)
# + [markdown] id="qysHUTEdhcxU" colab_type="text"
# # Train Step
#
# + id="Z3Cv0XLX54Xo" colab_type="code" colab={}
def train(model, device, train_loader, optimizer, criterion, epoch, steps_per_epoch=20):
    """Run one training epoch and log mean loss/accuracy to wandb.

    Args:
        model: network to optimize (already moved to `device` by the caller).
        device: torch.device that inputs/targets are transferred to.
        train_loader: iterable yielding (data, target) batches.
        optimizer: optimizer updating `model`'s parameters.
        criterion: loss function (e.g. CrossEntropyLoss).
        epoch: current epoch index, used only for logging.
        steps_per_epoch: unused; kept for backward compatibility.
            TODO confirm whether truncating the epoch was intended.
    """
    # Training mode matters for layers like dropout and batchnorm.
    model.train()
    train_loss = 0
    train_total = 0
    train_correct = 0
    for batch_idx, (data, target) in enumerate(train_loader, start=0):
        # Move the batch to the training device.
        data, target = data.to(device), target.to(device)
        # Reset gradients once per batch (the original cleared them twice,
        # which was redundant).
        optimizer.zero_grad()
        # Forward pass: predict class scores for the batch.
        output = model(data)
        loss = criterion(output, target)
        train_loss += loss.item()
        # Accumulate accuracy statistics from the arg-max predictions.
        scores, predictions = torch.max(output.data, 1)
        train_total += target.size(0)
        train_correct += (predictions == target).sum().item()
        # Backward pass and weight update.
        loss.backward()
        optimizer.step()
    acc = round((train_correct / train_total) * 100, 2)
    print('Epoch [{}], Loss: {}, Accuracy: {}'.format(epoch, train_loss/train_total, acc), end='')
    wandb.log({'Train Loss': train_loss/train_total, 'Train Accuracy': acc, 'Epoch': epoch})
# + [markdown] id="KQHvH3dphf_Z" colab_type="text"
# # Test Step
#
# + id="vHcCrrPJ65p-" colab_type="code" colab={}
def test(model, device, test_loader, criterion, epoch, classes):
    """Evaluate `model` on the test set and log loss/accuracy to wandb.

    `classes` is accepted for interface compatibility but is not used here.
    """
    # Evaluation mode: disables dropout, uses running batchnorm statistics.
    model.eval()
    test_loss = 0
    test_total = 0
    test_correct = 0
    example_images = []
    # Gradients are not needed for evaluation.
    with torch.no_grad():
        for data, target in test_loader:
            data, target = data.to(device), target.to(device)
            # Predict class scores and accumulate the batch loss.
            output = model(data)
            test_loss += criterion(output, target).item()
            # Arg-max over class scores gives the predicted labels.
            _, predicted = torch.max(output.data, 1)
            test_total += target.size(0)
            test_correct += int(sum(predicted == target))
    acc = round((test_correct / test_total) * 100, 2)
    print(' Test_loss: {}, Test_accuracy: {}'.format(test_loss/test_total, acc))
    wandb.log({'Test Loss': test_loss/test_total, 'Test Accuracy': acc, 'Epoch': epoch})
# + [markdown] id="5NjWrnP8hjlc" colab_type="text"
# # Initialize Model
# + id="KYSE0Psw38w0" colab_type="code" colab={"base_uri": "https://localhost:8080/", "height": 164} outputId="d00331bf-58f3-45fb-a76b-c89cb11b9e1d"
# Instantiate the CNN on the chosen device; Adam with default hyperparameters.
net = Net().to(device)
print(net)
criterion = nn.CrossEntropyLoss()
optimizer = optim.Adam(net.parameters())
# + [markdown] id="QnoBEQDnhnSc" colab_type="text"
# # Train
# + id="KJMOZ1Qt69f3" colab_type="code" colab={"base_uri": "https://localhost:8080/", "height": 271} outputId="ff820db9-4fd8-407f-fabf-9941aeb46074"
# Start a wandb run and stream gradients/parameters, then train for 10 epochs,
# evaluating on the test set after each epoch.
wandb.init(entity='authors', project='seo')
wandb.watch(net, log='all')
for epoch in range(10):
    train(net, device, trainloader, optimizer, criterion, epoch)
    test(net, device, testloader, criterion, epoch, CLASS_NAMES)
print('Finished Training')
| Intro_to_covolutional_neural_network_with_PyTorch.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# name: python3
# ---
# + id="ZKi5k5ZJ1NDV" colab_type="code" outputId="c42d5c52-735c-4aca-8707-037267f3bb51" executionInfo={"status": "ok", "timestamp": 1570049287090, "user_tz": 420, "elapsed": 25494, "user": {"displayName": "<NAME>", "photoUrl": "", "userId": "09622378695568340769"}} colab={"base_uri": "https://localhost:8080/", "height": 122}
# Mount Google Drive so the dataset archive can be copied into the VM.
from google.colab import drive
drive.mount('/content/drive')
# + id="gkzoUfGg1qzE" colab_type="code" outputId="f51a0e05-1597-42e9-dd83-e255e92407ab" executionInfo={"status": "ok", "timestamp": 1569427635233, "user_tz": 420, "elapsed": 1587, "user": {"displayName": "<NAME>", "photoUrl": "", "userId": "09622378695568340769"}} colab={"base_uri": "https://localhost:8080/", "height": 34}
# !pwd
# + id="lOYwz9Fm1JW-" colab_type="code" colab={}
# !cp drive/My\ Drive/Insight/data.zip /content
# + id="x8vXv1nz19aY" colab_type="code" colab={}
import zipfile
# Extract the dataset archive into the Colab filesystem.
with zipfile.ZipFile("data.zip", 'r') as zip_ref:
    zip_ref.extractall('/content')
# + id="qSlunUhp2ALg" colab_type="code" colab={}
from __future__ import print_function, division
import torch
import torch.nn as nn
import torch.optim as optim
from torch.optim import lr_scheduler
import numpy as np
import torchvision
from torchvision import datasets, models, transforms
import matplotlib.pyplot as plt
import time
import os
import copy
plt.ion()  # interactive mode
# + id="HLq7z0WX2EUG" colab_type="code" colab={}
# data augmentation and normalization for training
# normalization for validation
data_transforms = {
    'training_set': transforms.Compose([
        transforms.Resize(256),
        transforms.CenterCrop(224),
        # Strong colour jitter for augmentation.
        transforms.ColorJitter(1,1,1,0.5),
        # transforms.RandomResizedCrop(224),
        transforms.RandomHorizontalFlip(),
        transforms.ToTensor(),
        # ImageNet channel statistics, matching the pretrained backbone.
        transforms.Normalize([0.485, 0.456, 0.406], [0.229, 0.224, 0.225])
    ]),
    'test_set': transforms.Compose([
        transforms.Resize(256),
        transforms.CenterCrop(224),
        transforms.ToTensor(),
        transforms.Normalize([0.485, 0.456, 0.406], [0.229, 0.224, 0.225])
    ]),
}
data_dir = './data/'
# One ImageFolder dataset and DataLoader per split, keyed by directory name.
image_datasets = {x: datasets.ImageFolder(os.path.join(data_dir, x),
                                          data_transforms[x])
                  for x in ['training_set', 'test_set']}
dataloaders = {x: torch.utils.data.DataLoader(image_datasets[x], batch_size=4,
                                              shuffle=True, num_workers=4)
               for x in ['training_set', 'test_set']}
dataset_sizes = {x: len(image_datasets[x]) for x in ['training_set',
                                                     'test_set',
                                                     ]}
# Class names are inferred from the training-set directory structure.
class_names = image_datasets['training_set'].classes
device = torch.device("cuda:0" if torch.cuda.is_available() else "cpu")
# + id="STbLIEbc2HO0" colab_type="code" outputId="42cea6b6-3d43-4bd7-e51a-30b3db01711d" executionInfo={"status": "ok", "timestamp": 1570049438842, "user_tz": 420, "elapsed": 1766, "user": {"displayName": "<NAME>", "photoUrl": "", "userId": "09622378695568340769"}} colab={"base_uri": "https://localhost:8080/", "height": 148}
# Visualize some training images
def image_disp(image, title=None):
    """Display a (C, H, W) tensor as an image, undoing ImageNet normalization."""
    # Channels-last layout for matplotlib, then invert Normalize(mean, std).
    arr = image.numpy().transpose((1, 2, 0))
    imagenet_mean = np.array([0.485, 0.456, 0.406])
    imagenet_std = np.array([0.229, 0.224, 0.225])
    arr = np.clip(imagenet_std * arr + imagenet_mean, 0, 1)
    plt.imshow(arr)
    if title is not None:
        plt.title(title)
    plt.pause(0.001)  # brief pause so the figure gets a chance to render
# load some training data
inputs, classes = next(iter(dataloaders['training_set']))
# make a grid from batch and display it with the per-image class names
out = torchvision.utils.make_grid(inputs)
image_disp(out, title=[class_names[x] for x in classes])
# + id="mlp1erUc2Jsd" colab_type="code" colab={}
def train_model(model, criterion, optimizer, scheduler, num_epochs=25):
    """Train `model`, validating each epoch, and return it with the best weights.

    Alternates a 'training_set' and a 'test_set' phase per epoch, steps the LR
    scheduler after each training phase, and checkpoints the model whenever
    test accuracy improves. Relies on the module-level `dataloaders`,
    `dataset_sizes` and `device` defined earlier in this notebook.

    Args:
        model: network to fine-tune (already on `device`).
        criterion: loss function.
        optimizer: optimizer over the model's parameters.
        scheduler: learning-rate scheduler, stepped once per epoch.
        num_epochs: number of epochs to run.

    Returns:
        The model with the best test-accuracy weights loaded.
    """
    since = time.time()
    # Keep a copy of the best-performing weights seen so far.
    best_model_wts = copy.deepcopy(model.state_dict())
    best_acc = 0.0
    for epoch in range(num_epochs):
        print('Epoch {}/{}'.format(epoch, num_epochs - 1))
        print('-' * 10)
        # Each epoch has a training and validation phase
        for phase in ['training_set', 'test_set']:
            if phase == 'training_set':
                model.train()  # Set model to training mode
            else:
                model.eval()  # Set model to evaluate mode
            running_loss = 0.0
            running_corrects = 0
            # Iterate over data.
            for inputs, labels in dataloaders[phase]:
                inputs = inputs.to(device)
                labels = labels.to(device)
                # zero the parameter gradients
                optimizer.zero_grad()
                # forward
                # track history if only in train
                with torch.set_grad_enabled(phase == 'training_set'):
                    outputs = model(inputs)
                    _, preds = torch.max(outputs, 1)
                    loss = criterion(outputs, labels)
                    # backward + optimize only if in training phase
                    if phase == 'training_set':
                        loss.backward()
                        optimizer.step()
                # statistics (loss weighted by batch size for a correct mean)
                running_loss += loss.item() * inputs.size(0)
                running_corrects += torch.sum(preds == labels.data)
            if phase == 'training_set':
                scheduler.step()
            epoch_loss = running_loss / dataset_sizes[phase]
            epoch_acc = running_corrects.double() / dataset_sizes[phase]
            print('{} Loss: {:.4f} Acc: {:.4f}'.format(
                phase, epoch_loss, epoch_acc))
            # deep copy the model and checkpoint it whenever test accuracy improves
            if phase == 'test_set' and epoch_acc > best_acc:
                best_acc = epoch_acc
                best_model_wts = copy.deepcopy(model.state_dict())
                torch.save(model, f'/content/torch_transfer_resnet_27CAT_100219_2{best_acc:.2f}.pt')
        print()
    time_elapsed = time.time() - since
    print('Training complete in {:.0f}m {:.2f}s'.format(
        time_elapsed // 60, time_elapsed % 60))
    print('Best val Acc: {:4f}'.format(best_acc))
    # load best model weights
    model.load_state_dict(best_model_wts)
    return model
# + id="GuYEwliW2OoH" colab_type="code" colab={}
def visualize_model(model, num_images=6):
    """Show predictions for up to `num_images` test images in a 2-column grid.

    Uses the module-level `dataloaders`, `device` and `class_names`. The
    model's original train/eval mode is restored before returning.
    """
    # Remember the current mode so it can be restored on exit.
    was_training = model.training
    model.eval()
    images_so_far = 0
    fig = plt.figure()
    with torch.no_grad():
        for i, (inputs, labels) in enumerate(dataloaders['test_set']):
            inputs = inputs.to(device)
            labels = labels.to(device)
            outputs = model(inputs)
            _, preds = torch.max(outputs, 1)
            for j in range(inputs.size()[0]):
                images_so_far += 1
                ax = plt.subplot(num_images//2, 2, images_so_far)
                ax.axis('off')
                ax.set_title('predicted: {}'.format(class_names[preds[j]]))
                image_disp(inputs.cpu().data[j])
                # Stop early once enough images are shown, restoring the mode.
                if images_so_far == num_images:
                    model.train(mode=was_training)
                    return
        # Fewer than num_images available: still restore the original mode.
        model.train(mode=was_training)
# + id="AXhXXq422nxm" colab_type="code" outputId="fddba211-d28d-465a-d5b5-7fe2f269031e" executionInfo={"status": "ok", "timestamp": 1570049471615, "user_tz": 420, "elapsed": 12807, "user": {"displayName": "<NAME>", "photoUrl": "", "userId": "09622378695568340769"}} colab={"base_uri": "https://localhost:8080/", "height": 51}
# Load an ImageNet-pretrained ResNet-50 as the transfer-learning backbone.
model_ft = models.resnet50(pretrained=True)
num_ftrs = model_ft.fc.in_features
# Replace the final layer with a fresh classifier sized to our classes.
# Alternatively, it can be generalized to nn.Linear(num_ftrs, len(class_names))
model_ft.fc = nn.Linear(num_ftrs, len(class_names))
# Single device-aware move. (The original also called model_ft.to('cuda:0')
# unconditionally, which crashes on CPU-only machines; `.to(device)` covers
# both cases.)
model_ft = model_ft.to(device)
criterion = nn.CrossEntropyLoss()
# observe that all params are being optimized
optimizer_ft = optim.SGD(model_ft.parameters(), lr=0.001, momentum=0.9)
# optimizer_ft = torch.optim.Adam(model_ft.parameters(), lr=0.001, betas=(0.9, 0.999),
#                        eps=1e-08, weight_decay=0, amsgrad=False)
# decay lr by a factor of 0.1 every default(7 epochs)
exp_lr_scheduler = lr_scheduler.StepLR(optimizer_ft, step_size=8, gamma=0.1)
# + id="OFGqqj417xg2" colab_type="code" colab={}
# # !pip uninstall pillow -y
# # !pip uninstall albumentations -y
# + id="8vNNMuOh77rx" colab_type="code" colab={}
# # !pip install pillow==6
# # !pip install albumentations==0.3.3
# + id="-oqAEqp02qTB" colab_type="code" outputId="02665c62-62b0-4812-da6c-82314ef8d2b0" executionInfo={"status": "ok", "timestamp": 1570063845762, "user_tz": 420, "elapsed": 2588553, "user": {"displayName": "<NAME>", "photoUrl": "", "userId": "09622378695568340769"}} colab={"base_uri": "https://localhost:8080/", "height": 1000}
# Fine-tune the ResNet-50 for 25 epochs with step-decayed SGD.
model_ft = train_model(model_ft, criterion, optimizer_ft, exp_lr_scheduler, num_epochs=25)
# + id="gN4bZHbX2k2t" colab_type="code" outputId="1de874d7-3b5d-476c-b142-7843bc76034e" executionInfo={"status": "ok", "timestamp": 1570048894033, "user_tz": 420, "elapsed": 2599, "user": {"displayName": "<NAME>", "photoUrl": "", "userId": "09622378695568340769"}} colab={"base_uri": "https://localhost:8080/", "height": 683}
# Show a few test images with their predicted class names.
visualize_model(model_ft)
# + id="jlRoFIiQF5Zo" colab_type="code" colab={}
# Persist the fine-tuned model (whole module, not just the state_dict).
torch.save(model_ft, '/content/torch_transfer_resnet_27CAT__20B_100219_no_crop_ClJit.pt')
# + id="7NchGjmJ1bmw" colab_type="code" outputId="ca67e753-d293-452a-b6b5-6dfe9abed378" executionInfo={"status": "ok", "timestamp": 1569438684226, "user_tz": 420, "elapsed": 1699, "user": {"displayName": "<NAME>", "photoUrl": "", "userId": "09622378695568340769"}} colab={"base_uri": "https://localhost:8080/", "height": 34}
# !pwd
# + id="RUr2ZXnmIWGJ" colab_type="code" colab={}
# !cp -r torch_transfer_resnet_25CAT__20B_092819.pt drive/My\ Drive/
# + id="GvGREJ_kJ136" colab_type="code" outputId="b1369e1b-e7b1-46bc-c073-a1d0ccf722bb" executionInfo={"status": "ok", "timestamp": 1569439131169, "user_tz": 420, "elapsed": 2565, "user": {"displayName": "<NAME>", "photoUrl": "", "userId": "09622378695568340769"}} colab={"base_uri": "https://localhost:8080/", "height": 1000}
# Print model's state_dict (parameter names and their tensor shapes)
print("Model's state_dict:")
for param_tensor in model_ft.state_dict():
    print(param_tensor, "\t", model_ft.state_dict()[param_tensor].size())
# Print optimizer's state_dict (hyperparameters and per-parameter state)
print("Optimizer's state_dict:")
for var_name in optimizer_ft.state_dict():
    print(var_name, "\t", optimizer_ft.state_dict()[var_name])
# + id="fP8lRige3aav" colab_type="code" colab={}
| Pytorch_ResNet50_Colab_Notebooks/27_category_transfer_learning_pytorch_100219_ImAug_no_crop_ClJit.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# language: python
# name: python3
# ---
# # Introduction
# +
import time
import numpy as np
import msaview
import proteinsolver
# -
# Create and display the multiple-sequence-alignment viewer widget.
w = msaview.MSAView()
w
# +
# Stream batches of 100 random 300-residue sequences into the widget,
# pausing between updates so the UI can redraw.
batch_size = 100
for i in range(0, 1_000, batch_size):
    values = []
    for j in range(batch_size):
        idx = i + j
        values.append(
            {
                "id": str(idx),
                "name": f"gen-{idx:05d}",
                "seq": "".join(np.random.choice(proteinsolver.utils.AMINO_ACIDS, 300)),
            }
        )
    w.value = values
    time.sleep(0.5)
# -
# Exercise the `ref` flag with different value shapes (list vs single dict)
# and types (bool, string, int).
w.value = [{"id": "5", "name": "boo", "seq": "DDDCCFFF", "ref": True}]
w.value = {"id": "0", "name": "hello", "seq": "CCC", "ref": False}
w.value = {"id": "0", "name": "boo", "seq": "CCC", "ref": True}
w.value = {"id": "2", "name": "boo", "seq": "CCC"}
w.value = {"id": "3", "name": "boo", "seq": "CCC", "ref": "true"}
w.value = {"id": "4", "name": "boo", "seq": "CCC", "ref": 1}
| examples/introduction.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# language: python
# name: python3
# ---
# # Window Splitters in Sktime
#
# In this notebook we describe the window splitters included in the [`sktime.forecasting.model_selection`](https://github.com/alan-turing-institute/sktime/blob/master/sktime/forecasting/model_selection/_split.py) module. These splitters can be combined with `ForecastingGridSearchCV` for model selection (see [forecasting notebook](https://github.com/alan-turing-institute/sktime/blob/master/examples/01_forecasting.ipynb)).
#
# **Remark:** It is important to emphasize that for cross-validation in time series we can not randomly shuffle the data as we would be leaking information.
#
# **References:**
# - [Cross-validation: evaluating estimator performance](https://scikit-learn.org/stable/modules/cross_validation.html#cross-validation)
# - [Cross-validation for time series](https://robjhyndman.com/hyndsight/tscv/)
# ## Prepare Notebook
# +
import matplotlib.pyplot as plt
import numpy as np
import seaborn as sns
from matplotlib.ticker import MaxNLocator
from sktime.datasets import load_airline
from sktime.forecasting.base import ForecastingHorizon
from sktime.forecasting.model_selection import (
CutoffSplitter,
SingleWindowSplitter,
SlidingWindowSplitter,
temporal_train_test_split,
)
from sktime.utils.plotting import plot_series
sns.set_style("whitegrid")
plt.rcParams["figure.figsize"] = [12, 6]
plt.rcParams["figure.dpi"] = 100
# -
# ## Data
#
# We use a fraction of the Box-Jenkins univariate airline data set, which shows the number of international airline passengers per month from 1949 - 1960.
# We are interested on a portion of the total data set.
# (for visualisatiion purposes)
# First 30 monthly observations of the airline series (small, for plotting).
y = load_airline().iloc[:30]
y.head()
fig, ax = plot_series(y)
# ## Window Splitters
#
# Now we describe each of the splitters.
# ### - `temporal_train_test_split`
#
# This one splits the data into a traininig and test sets. You can either (i) set the size of the training or test set or (ii) use a forecasting horizon.
# setting test set size (last 25% of the series becomes the test set)
y_train, y_test = temporal_train_test_split(y=y, test_size=0.25)
fig, ax = plot_series(y_train, y_test, labels=["y_train", "y_test"]);
# using forecasting horizon: the test set covers the 5 steps after the cutoff
fh = ForecastingHorizon([1, 2, 3, 4, 5])
y_train, y_test = temporal_train_test_split(y, fh=fh)
plot_series(y_train, y_test, labels=["y_train", "y_test"]);
# ### - `SingleWindowSplitter`
#
# This class splits the time series once into a training and test window. Note that this is very similar to `temporal_train_test_split`.
#
# Let us define the parameters of our fold:
# Splitter Parameters.
window_length = 10  # training-window size, in observations
fh = ForecastingHorizon([1, 2, 3])  # forecast 1-3 steps ahead
fh_length = len(fh)
# +
# A single train/test fold over the series.
cv = SingleWindowSplitter(window_length=window_length, fh=fh)
n_splits = cv.get_n_splits(y)
print(f"Number of Folds = {n_splits}")
# -
# Let us plot the unique fold generated. First we define some helper functions:
# +
def get_folds_arrays(y, cv):
    """Return (windows, fhs): per-fold training-window and horizon index arrays.

    Each row of `windows` holds one fold's training-window positions and each
    row of `fhs` the matching forecasting-horizon positions. Relies on the
    module-level `window_length` and `fh_length` matching the splitter's
    configuration.
    """
    n_splits = cv.get_n_splits(y)
    # `np.int` was removed in NumPy 1.24 -- use the builtin `int` dtype.
    windows = np.empty((n_splits, window_length), dtype=int)
    fhs = np.empty((n_splits, fh_length), dtype=int)
    for i, (train_idx, fh_idx) in enumerate(cv.split(y)):
        windows[i] = train_idx
        fhs[i] = fh_idx
    return windows, fhs
def get_y(length, split):
    """Return a length-`length` float vector filled with the value `split`."""
    return np.full(length, split, dtype=float)
# -
# Now we generate the plot:
# +
windows, fhs = get_folds_arrays(y, cv)
# One colour for training windows, one for forecasting horizons.
window_color, fh_color = sns.color_palette("colorblind")[:2]
fig, ax = plt.subplots()
for i in range(n_splits):
    # Grey baseline marks the full series; coloured points mark the fold.
    ax.plot(np.arange(len(y)), get_y(len(y), i), marker="o", c="lightgray")
    ax.plot(
        windows[i], get_y(window_length, i), marker="o", c=window_color, label="Window"
    )
    ax.plot(
        fhs[i], get_y(fh_length, i), marker="o", c=fh_color, label="Forecasting horizon"
    )
ax.invert_yaxis()
ax.yaxis.set_major_locator(MaxNLocator(integer=True))
ax.set(
    title="SingleWindowSplitter Fold",
    ylabel="Window number",
    xlabel="Time",
    xticklabels=y.index,
    ylim=(-1, 1),
)
# remove duplicate labels/handles
handles, labels = [(leg[:2]) for leg in ax.get_legend_handles_labels()]
ax.legend(handles, labels);
# -
# ### - `SlidingWindowSplitter`
#
# This splitter generates folds which move with time. The length of the training and test sets for each fold remains constant.
# +
# Folds slide forward in time; train/test lengths stay constant.
cv = SlidingWindowSplitter(window_length=window_length, fh=fh, start_with_window=True)
n_splits = cv.get_n_splits(y)
print(f"Number of Folds = {n_splits}")
# +
windows, fhs = get_folds_arrays(y, cv)
fig, ax = plt.subplots()
for i in range(n_splits):
    # Grey baseline marks the full series; coloured points mark each fold.
    ax.plot(np.arange(len(y)), get_y(len(y), i), marker="o", c="lightgray")
    ax.plot(
        windows[i], get_y(window_length, i), marker="o", c=window_color, label="Window"
    )
    ax.plot(
        fhs[i], get_y(fh_length, i), marker="o", c=fh_color, label="Forecasting horizon"
    )
ax.invert_yaxis()
ax.yaxis.set_major_locator(MaxNLocator(integer=True))
ax.set(
    title="SlidingWindowSplitter Folds",
    ylabel="Window number",
    xlabel="Time",
    xticklabels=y.index,
)
# remove duplicate labels/handles
handles, labels = [(leg[:2]) for leg in ax.get_legend_handles_labels()]
ax.legend(handles, labels);
# -
# ### - `CutoffSplitter`
#
# With this splitter we can manually select the cutoff points.
# +
# Specify cutoff points (by array index).
cutoffs = np.array([10, 15, 20, 25])
# Each cutoff defines one fold: a window ending there plus its horizon.
cv = CutoffSplitter(cutoffs=cutoffs, window_length=window_length, fh=fh)
n_splits = cv.get_n_splits(y)
print(f"Number of Folds = {n_splits}")
# +
windows, fhs = get_folds_arrays(y, cv)
fig, ax = plt.subplots()
for i in range(n_splits):
    # Grey baseline marks the full series; coloured points mark each fold.
    ax.plot(np.arange(len(y)), get_y(len(y), i), marker="o", c="lightgray")
    ax.plot(
        windows[i], get_y(window_length, i), marker="o", c=window_color, label="Window"
    )
    ax.plot(
        fhs[i], get_y(fh_length, i), marker="o", c=fh_color, label="Forecasting horizon"
    )
ax.invert_yaxis()
ax.yaxis.set_major_locator(MaxNLocator(integer=True))
ax.set(
    title="CutoffSplitter Folds",
    ylabel="Window number",
    xlabel="Time",
    xticklabels=y.index,
)
# remove duplicate labels/handles
handles, labels = [(leg[:2]) for leg in ax.get_legend_handles_labels()]
ax.legend(handles, labels);
# -
| examples/window_splitters.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# language: python
# name: python3
# ---
# +
# Simple equality conditional.
language = 'Python'
if language == 'Python':
    print('Conditional was True')
# +
# if/elif/else chain dispatching on the language name.
language = 'Java'
if language == 'Python':
    print('Language is Python')
elif language == 'Java':
    print('Language is Java')
elif language == 'JavaScript':
    # Fixed copy-paste bug: this branch previously printed 'Language is Java'.
    print('Language is JavaScript')
else:
    print('No Match')
# +
# Boolean operators: `or` short-circuits on the first truthy operand.
user = 'Admin'
logged_in = False
if user == 'Admin' or logged_in:
    print('Admin Page')
else:
    print('Bad Creds')
# +
user = 'Admin'
logged_in = False
if not logged_in:
    print('Please Log In')  # typo fixed ('Plese' -> 'Please')
else:
    print('Welcome')
# + tags=[]
# `id()` shows both names reference the same list object.
a = [1,2,3]
b = a
print(id(a))
print(id(b))
print(id(a) == id(b))
# +
# Non-empty strings are truthy.
condition = 'Test'
if condition:
    print('Evaluated to True')
else:
    print('Evaluated to False')
# -
| notebooks/Conditionals_and_Booleans.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# name: python3
# ---
# + id="3UxE7A9mDc1B"
import numpy as np
import pandas as pd
import matplotlib.pyplot as plt
# + id="Q5nH3nenDmZr" outputId="15feaf25-a396-4d01-a391-b067e6894851" colab={"base_uri": "https://localhost:8080/", "height": 419}
# Load the car-evaluation data and give the columns explicit names.
dataset=pd.read_csv('car_evaluation.csv')
dataset.columns =['buying', 'maint', 'doors', 'persons', 'lug_boot', 'safety', 'class']
# Map the textual categories '5more'/'more' onto the numeric value 5.
dataset['doors']=dataset['doors'].replace(['5more'],5)
dataset['persons']=dataset['persons'].replace(['more'],5)
df=dataset.dropna(how='any')
df
# + id="in9cwRp9FLfg"
# Features are every column but the last; the last column is the label.
X=df.iloc[:,:-1].values
y=df.iloc[:,-1].values
# + id="4g2LV65WFTVW"
from sklearn.preprocessing import LabelEncoder
# NOTE(review): a single LabelEncoder is re-fit per column, so the fitted
# mappings are not retained for inverse transforms -- confirm that is intended.
le=LabelEncoder()
X[:,0]=le.fit_transform(X[:,0])
X[:,1]=le.fit_transform(X[:,1])
X[:,4]=le.fit_transform(X[:,4])
X[:,5]=le.fit_transform(X[:,5])
y=le.fit_transform(y)
# + id="MQEuKvl3GOfV"
# Reshape labels to a column vector.
y=y.reshape(len(y),1)
# + id="6cizE9F6GcRR"
from sklearn.model_selection import train_test_split
# 75/25 train/test split with a fixed seed for reproducibility.
X_train,X_test,y_train,y_test=train_test_split(X,y,test_size=0.25,random_state=0)
# + id="CR91rK5MG3Fa" outputId="93be79ce-10e5-4dd3-a9b3-fb2c1486445f" colab={"base_uri": "https://localhost:8080/"}
from sklearn.tree import DecisionTreeClassifier
# Entropy (information-gain) split criterion; fixed seed for reproducibility.
DTC=DecisionTreeClassifier(criterion='entropy',random_state=0)
DTC.fit(X_train,y_train)
# + id="cKyY6hw3HWGN" outputId="f63816d3-43cd-4b1f-96bf-f09efca1c5c0" colab={"base_uri": "https://localhost:8080/"}
# Predict on the held-out set and show predictions next to the true labels.
y_pred = DTC.predict(X_test)
print(np.concatenate((y_pred.reshape(len(y_pred),1), y_test.reshape(len(y_test),1)),1))
# + id="xzeovoy9Hr6q" outputId="50906b9c-a26e-4929-ef7e-055031f51fe7" colab={"base_uri": "https://localhost:8080/"}
from sklearn.metrics import confusion_matrix, accuracy_score
# Confusion matrix and accuracy (as a percentage) on the test set.
cm = confusion_matrix(y_test, y_pred)
print(cm)
print(accuracy_score(y_test, y_pred)*100)
| DTC.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# language: python
# name: python3
# ---
# +
import scipy.ndimage as snd
import numpy as np
import scipy
import matplotlib.pyplot as plt
import skimage.measure
import os
import json
from bimmquant.utils import tools
# -
# # Artificial data generation
#
# In this notebook, we generate an artificial dataset that can be used as input for the notebook `examples/art_data_quantification_and_segmentation.ipynb`. The data size is 300 x 300 x 50 voxels and contains two phases (materials).
#
# **Parameters:**
#
# * `sigma_b_0`: Determines image feature size (blob sizes)
# * `V1_target`: Target volume fraction
# * `I1`, `I2`: Phase intensities
# * `sigma_b`: blur level (image resolution)
# * `sigma_n`: noise level
#
# **Data generation:**
#
# First, a Gaussian filter (kernel std `sigma_b_0`) is applied to standard Gaussian noise (mean 0, std 1) (see the figure "Blurred Gaussian noise" below). This volume is then binarized to acheive the target volume fraction `V1_target` (figure "Binarized" below) and intensities are assigned to the two phases according to `I1` and `I2`. This is the final ground truth phantom, where ground truth volume fractions are found by counting voxels, and interface area is measured using marching cubes.
#
# Finally, to mimic the Gaussian blurring and noise resulting from a 3D X-ray CT scan, a Gaussian filter (kernel std `sigma_b`) is applied and Gaussian noise (std `sigma_n`) is added (figure "Final artificial data" below).
#
def generate_2phased_randomstruct(sigma_b_0, Nx, Ny, Nz, sigma_b, sigma_n, I1, I2, V1_target,
                                  plot = False, seed = 999):
    """Generate an artificial two-phase volume mimicking a blurred, noisy CT scan.

    Args:
        sigma_b_0: Gaussian kernel std controlling ground-truth feature size.
        Nx, Ny, Nz: volume dimensions in voxels.
        sigma_b: blur level applied to the phantom (image resolution).
        sigma_n: std of the additive Gaussian noise.
        I1, I2: intensities assigned to the two phases.
        V1_target: target volume fraction of phase 1.
        plot: if True, show intermediate volumes and an intensity histogram.
        seed: random seed for reproducibility.

    Returns:
        (vol, ground_truth_dict): the final noisy volume and a dict with the
        ground-truth intensities, blur/noise levels, volume fractions, and
        interface area per volume.
    """
    # Set random seed for reproducible structure and noise
    np.random.seed(seed)
    # Blurred Gaussian noise defines the random structure
    vol_init = np.random.normal(0, 1, size=(Nx, Ny, Nz))
    vol_blur=snd.gaussian_filter(vol_init, sigma=sigma_b_0)
    if plot:
        tools.plot_center_slices(vol_blur, colorbar=True, title='Blurred Gaussian noise')
    # Binarize to reach the target volume fraction (threshold = inverse cdf)
    binarize_level=scipy.stats.norm.ppf(V1_target, loc=0, scale=np.std(vol_blur))
    vol_bin=(vol_blur>binarize_level)*1
    if plot:
        tools.plot_center_slices(vol_bin, colorbar=True, title='Binarized')
    # Measure final volume fractions by voxel counting
    V1 = np.sum(vol_bin==0)/len(vol_bin.ravel())
    V2 = np.sum(vol_bin==1)/len(vol_bin.ravel())
    print('Volume fractions:')
    print('V1= ', V1, '\n V2 = ', V2)
    # Measure interface area with marching cubes.
    # `marching_cubes_lewiner` was removed in scikit-image 0.19; use the
    # current `marching_cubes` API and fall back for older versions.
    try:
        verts, faces, normals, values = skimage.measure.marching_cubes(vol_bin, binarize_level)
    except AttributeError:
        verts, faces, normals, values = skimage.measure.marching_cubes_lewiner(vol_bin, binarize_level)
    A_march=skimage.measure.mesh_surface_area(verts, faces)
    print('Interface area, marching cubes: \n A_march = ', A_march)
    print('Interface area per volume: A_march/(Nx*Ny*Nz) = ', A_march/(Nx*Ny*Nz))
    # Assign phase intensities to the binary phantom
    vol_I = vol_bin.astype(np.float64)
    vol_I[vol_bin == 0] = I1
    vol_I[vol_bin == 1] = I2
    # Blur and add noise to mimic a 3D X-ray CT acquisition
    vol = snd.gaussian_filter(vol_I, sigma=sigma_b) + np.random.normal(0, sigma_n, size=np.shape(vol_bin))
    if plot:
        tools.plot_center_slices(vol, title='Final artificial data')
        plt.figure()
        _=plt.hist(vol.ravel(), bins=100)
    # Collect ground-truth parameters for later quantitative comparison
    ground_truth_dict = {}
    ground_truth_dict['I'] = [I1, I2]
    ground_truth_dict['sigma_b'] = sigma_b
    ground_truth_dict['sigma_n'] = sigma_n
    ground_truth_dict['V'] = [V1, V2]
    ground_truth_dict['A_per_volume'] = A_march/(Nx*Ny*Nz)
    return vol, ground_truth_dict
# +
Nx, Ny, Nz = 300, 300, 50 #volume size
sigma_b_0 = 20  # ground-truth feature size (blob size)
V1_target = 0.5  # target volume fraction of phase 1
I1 = 0.1  # phase-1 intensity
I2 = 0.9  # phase-2 intensity
sigma_b = 3.  # blur level (image resolution)
sigma_n = 0.1  # noise level
# -
# Generate the artificial volume and its ground-truth parameter dict.
vol, ground_truth_dict = generate_2phased_randomstruct(sigma_b_0, Nx, Ny, Nz, sigma_b, sigma_n, I1, I2, V1_target,
                                                       plot=True, seed = 999)
# ## Save dataset and ground truth values
#
# +
savepath = os.path.join('example_data', 'artificial_data')
# exist_ok avoids a FileExistsError when the notebook is re-run.
os.makedirs(savepath, exist_ok=True)
# Artificial data, .npy
np.save(os.path.join(savepath, 'art_data_2phases.npy'), vol)
# Ground truth parameters, .json -- context manager closes the file handle
# (the original passed a bare open() to json.dump and never closed it).
with open(os.path.join(savepath, 'art_data_2phases_GT.json'), 'w') as fp:
    json.dump(ground_truth_dict, fp)
| examples/art_data_generation.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernel_info:
# name: python3
# kernelspec:
# display_name: Python 3
# language: python
# name: python3
# ---
# # Mutual Funds
# https://www.nerdwallet.com/blog/investing/what-are-the-different-types-of-mutual-funds/
# Equity funds
# Bond funds
# Money market funds
# Balanced funds
# Index funds
# + outputHidden=false inputHidden=false
import numpy as np
import pandas as pd
import matplotlib.pyplot as plt
import seaborn as sns
import math
import warnings
warnings.filterwarnings("ignore")
# fix_yahoo_finance is used to fetch data
import fix_yahoo_finance as yf
yf.pdr_override()
# + outputHidden=false inputHidden=false
# input
symbols = ['SPY','FIHBX','FBTAX','DBC']
start = '2014-01-01'
end = '2019-01-01'
# Read data (adjusted close prices over the date range)
# NOTE(review): fix_yahoo_finance is the deprecated predecessor of yfinance.
dataset = yf.download(symbols,start,end)['Adj Close']
# View Columns
dataset.head()
# + outputHidden=false inputHidden=false
dataset.tail()
# -
# ## Starting Cash with 100k to invest in Mutual Funds
# + outputHidden=false inputHidden=false
# Simulate investing $100k split equally (25% each) across the four funds.
Cash = 100000
print('Percentage of invest:')
percent_invest = [0.25, 0.25, 0.25, 0.25]
for i, x in zip(dataset.columns, percent_invest):
    cost = x * Cash
    print('{}: {}'.format(i, cost))
# + outputHidden=false inputHidden=false
# Whole shares purchasable at the first day's price.
print('Number of Shares:')
percent_invest = [0.25, 0.25, 0.25, 0.25]
for i, x, y in zip(dataset.columns, percent_invest, dataset.iloc[0]):
    cost = x * Cash
    shares = int(cost/y)
    print('{}: {}'.format(i, shares))
# + outputHidden=false inputHidden=false
# Value of those shares at the first day's price.
print('Beginning Value:')
percent_invest = [0.25, 0.25, 0.25, 0.25]
for i, x, y in zip(dataset.columns, percent_invest, dataset.iloc[0]):
    cost = x * Cash
    shares = int(cost/y)
    Begin_Value = round(shares * y, 2)
    print('{}: ${}'.format(i, Begin_Value))
# + outputHidden=false inputHidden=false
# Value of those shares at the last day's price.
print('Current Value:')
percent_invest = [0.25, 0.25, 0.25, 0.25]
for i, x, y, z in zip(dataset.columns, percent_invest, dataset.iloc[0], dataset.iloc[-1]):
    cost = x * Cash
    shares = int(cost/y)
    Current_Value = round(shares * z, 2)
    print('{}: ${}'.format(i, Current_Value))
# + outputHidden=false inputHidden=false
# Total portfolio value at the last day's price.
result = []
for i, x, y, z in zip(dataset.columns, percent_invest, dataset.iloc[0], dataset.iloc[-1]):
    cost = x * Cash
    shares = int(cost/y)
    Current_Value = round(shares * z, 2)
    result.append(Current_Value)
print('Total Value: $%s' % round(sum(result),2))
# + outputHidden=false inputHidden=false
# Calculate Daily Returns
returns = dataset.pct_change()
returns = returns.dropna()
# + outputHidden=false inputHidden=false
# Calculate mean returns
meanDailyReturns = returns.mean()
print(meanDailyReturns)
# + outputHidden=false inputHidden=false
# Calculate std returns
stdDailyReturns = returns.std()
print(stdDailyReturns)
# + outputHidden=false inputHidden=false
# Define weights for the portfolio
weights = np.array([0.50, 0.10, 0.20, 0.20])
# + outputHidden=false inputHidden=false
# Calculate the covariance matrix on daily returns
cov_matrix = (returns.cov())*250
print (cov_matrix)
# + outputHidden=false inputHidden=false
# Calculate expected portfolio performance
portReturn = np.sum(meanDailyReturns*weights)
# + outputHidden=false inputHidden=false
# Print the portfolio return
print(portReturn)
# + outputHidden=false inputHidden=false
# Create portfolio returns column
returns['Portfolio'] = returns.dot(weights)
# + outputHidden=false inputHidden=false
returns.head()
# + outputHidden=false inputHidden=false
returns.tail()
# + outputHidden=false inputHidden=false
# Calculate cumulative returns
daily_cum_ret=(1+returns).cumprod()
print(daily_cum_ret.tail())
# + outputHidden=false inputHidden=false
returns['Portfolio'].hist()
plt.show()
# + outputHidden=false inputHidden=false
import matplotlib.dates
# Plot the portfolio cumulative returns only
fig, ax = plt.subplots()
ax.plot(daily_cum_ret.index, daily_cum_ret.Portfolio, color='purple', label="portfolio")
# One x-axis major tick per year.
ax.xaxis.set_major_locator(matplotlib.dates.YearLocator())
plt.legend()
plt.show()
# + outputHidden=false inputHidden=false
# First four moments of the portfolio's daily return distribution.
# Print the mean
print("mean : ", returns['Portfolio'].mean()*100)
# Print the standard deviation
print("Std. dev: ", returns['Portfolio'].std()*100)
# Print the skewness
print("skew: ", returns['Portfolio'].skew())
# Print the kurtosis
print("kurt: ", returns['Portfolio'].kurtosis())
# + outputHidden=false inputHidden=false
# Calculate the standard deviation by taking the square root
# (w . (w . C) equals wT C w here because the covariance matrix is symmetric,
# so this matches the port_variance formula below)
port_standard_dev = np.sqrt(np.dot(weights.T, np.dot(weights, cov_matrix)))
# Print the results
print(str(np.round(port_standard_dev, 4) * 100) + '%')
# + outputHidden=false inputHidden=false
# Calculate the portfolio variance
port_variance = np.dot(weights.T, np.dot(cov_matrix, weights))
# Print the result
print(str(np.round(port_variance, 4) * 100) + '%')
# + outputHidden=false inputHidden=false
# Calculate total return and annualized return from price data
# NOTE(review): this subtracts the first *daily return* from the last one,
# not the cumulative return over the period -- verify the intent.
total_return = returns['Portfolio'][-1] - returns['Portfolio'][0]
# Annualize the total return over 5 year
annualized_return = ((total_return + 1)**(1/5)) - 1
# + outputHidden=false inputHidden=false
annualized_return
# + outputHidden=false inputHidden=false
# Calculate annualized volatility from the standard deviation
# NOTE(review): 252 trading days here vs 250 for the covariance above.
vol_port = returns['Portfolio'].std() * np.sqrt(252)
# + outputHidden=false inputHidden=false
# Calculate the Sharpe ratio
rf = 0.01  # assumed annual risk-free rate
sharpe_ratio = ((annualized_return - rf) / vol_port)
print(sharpe_ratio)
# + outputHidden=false inputHidden=false
# Calculate the Sharpe ratio
# Different way
# NOTE(review): mixes a *daily* mean return with an *annual* risk-free rate
# and annualized volatility -- units are inconsistent; confirm before use.
rf = 0.01
sharpe_ratio = (returns['Portfolio'].mean() - rf) / (returns['Portfolio'].std()*np.sqrt(252))
print(round(sharpe_ratio,4))
# + outputHidden=false inputHidden=false
# Create a downside return column with the negative returns only
target = 0  # minimum acceptable return (MAR)
downside_returns = returns.loc[returns['Portfolio'] < target]
# Calculate expected return and std dev of downside
expected_return = returns['Portfolio'].mean()
# NOTE: downside_returns.std() is a Series (one value per column), so the
# Sortino ratio below is printed per column rather than as a single number.
down_stdev = downside_returns.std()
# Calculate the sortino ratio
rf = 0.01
sortino_ratio = (expected_return - rf)/down_stdev
# Print the results
print("Expected return: ", expected_return*100)
print('-' * 50)
print("Downside risk:")
print(down_stdev*100)
print('-' * 50)
print("Sortino ratio:")
print(sortino_ratio)
# + outputHidden=false inputHidden=false
# Calculate the max value
# NOTE(review): the rolling max is taken over *daily returns*, not over
# cumulative wealth, so this is not the conventional drawdown definition --
# confirm intent before interpreting the plot.
roll_max = returns['Portfolio'].rolling(center=False,min_periods=1,window=252).max()
# Calculate the daily draw-down relative to the max
daily_draw_down = returns['Portfolio']/roll_max - 1.0
# Calculate the minimum (negative) daily draw-down
max_daily_draw_down = daily_draw_down.rolling(center=False,min_periods=1,window=252).min()
# Plot the results
plt.figure(figsize=(15,15))
plt.plot(returns.index, daily_draw_down, label='Daily drawdown')
plt.plot(returns.index, max_daily_draw_down, label='Maximum daily drawdown in time-window')
plt.legend()
plt.show()
# + outputHidden=false inputHidden=false
# Correlation heatmap of daily returns (including the Portfolio column).
plt.figure(figsize=(7,7))
corr = returns.corr()
# plot the heatmap
sns.heatmap(corr,
        xticklabels=corr.columns,
        yticklabels=corr.columns,
        cmap="Blues")
# + outputHidden=false inputHidden=false
# Box plot
returns.plot(kind='box')
# + outputHidden=false inputHidden=false
# Risk/return scatter: mean daily return (x) vs std dev (y) per column.
rets = returns.dropna()
plt.scatter(rets.mean(), rets.std(),alpha = 0.5)
plt.title('Stocks Risk & Returns')
plt.xlabel('Expected returns')
plt.ylabel('Risk')
plt.grid(which='major')
for label, x, y in zip(rets.columns, rets.mean(), rets.std()):
    plt.annotate(
        label,
        xy = (x, y), xytext = (50, 50),
        textcoords = 'offset points', ha = 'right', va = 'bottom',
        arrowprops = dict(arrowstyle = '-', connectionstyle = 'arc3,rad=-0.3'))
# + outputHidden=false inputHidden=false
# Same scatter, styled with seaborn and boxed labels.
area = np.pi*20.0
sns.set(style='darkgrid')
plt.figure(figsize=(12,8))
plt.scatter(rets.mean(), rets.std(), s=area)
plt.xlabel("Expected Return", fontsize=15)
plt.ylabel("Risk", fontsize=15)
plt.title("Return vs. Risk for Core and Satellite", fontsize=20)
for label, x, y in zip(rets.columns, rets.mean(), rets.std()) :
    plt.annotate(label, xy=(x,y), xytext=(50, 0), textcoords='offset points',
                arrowprops=dict(arrowstyle='-', connectionstyle='bar,angle=180,fraction=-0.2'),
                bbox=dict(boxstyle="round", fc="w"))
# + outputHidden=false inputHidden=false
print("Stock returns: ")
print(rets.mean())
print('-' * 50)
print("Stock risk:")
print(rets.std())
# + outputHidden=false inputHidden=false
# Summary table of per-column return and risk, plus annualized Sharpe ratio.
table = pd.DataFrame()
table['Returns'] = rets.mean()
table['Risk'] = rets.std()
table.sort_values(by='Returns')
# + outputHidden=false inputHidden=false
table.sort_values(by='Risk')
# + outputHidden=false inputHidden=false
rf = 0.001
# NOTE(review): daily mean minus rf, scaled by sqrt(252) -- same unit-mixing
# caveat as the Sharpe calculation earlier in this notebook.
table['Sharpe_Ratio'] = ((table['Returns'] - rf) / table['Risk']) * np.sqrt(252)
table
| src/reference/Python_Stock/Portfolio_Strategies/Mutual_Funds_Portfolio.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# language: python
# name: python3
# ---
import requests as rq
import json

# Query the Open Notify API for the current ISS position.
# A timeout is set so the request cannot hang indefinitely
# (requests applies no default timeout).
response = rq.get('http://api.open-notify.org/iss-now.json', timeout=10)
print(response.status_code)
print(response.text)
# NOTE(review): the iss-now endpoint ignores query parameters; lat/lon were
# used by the (retired) iss-pass endpoint -- confirm which API was intended.
parameters = {"lat": 38.81, "lon": -77.04}
response_new = rq.get('http://api.open-notify.org/iss-now.json', params = parameters, timeout=10)
response_new.text
| module2-consuming-data-from-an-api/space.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# language: python
# name: python3
# ---
# ## Band Excitation data processing using pycroscopy
# #### <NAME>, <NAME>, <NAME>
# The Center for Nanophase Materials Science and The Institute for Functional Imaging for Materials <br>
# Oak Ridge National Laboratory<br>
# 2/10/2017
#
#
# ## cKPFM analysis
# #### <NAME>
# University College Dublin<br>
# 7/4/2017
# 
#
# Image courtesy of <NAME> from the [neutron imaging](https://github.com/neutronimaging/python_notebooks) GitHub repository.
# ## Configure the notebook
# Make sure needed packages are installed and up-to-date
import sys
# !conda install --yes --prefix {sys.prefix} numpy scipy matplotlib scikit-learn Ipython ipywidgets h5py
# !{sys.executable} -m pip install -U --no-deps sidpy bglib
# +
# Ensure python 3 compatibility
from __future__ import division, print_function, absolute_import
# Import necessary libraries:
# General utilities:
import sys
import os
import math
# Computation:
import numpy as np
import h5py
# Visualization:
import matplotlib.pyplot as plt
import matplotlib.colors as colors
import ipywidgets as widgets
from IPython.display import display, HTML
# Finally, BGlib itself
sys.path.append('..')
from BGlib import be as belib
import pyUSID as usid
import sidpy
# Make Notebook take up most of page width
display(HTML(data="""
<style>
div#notebook-container { width: 95%; }
div#menubar-container { width: 65%; }
div#maintoolbar-container { width: 99%; }
</style>
"""))
# -
# set up notebook to show plots within the notebook
# %matplotlib notebook
# ## Set some basic parameters for computation
# This notebook performs some functional fitting whose duration can be substantially decreased by using more memory and CPU cores. We have provided default values below but you may choose to change them if necessary.
max_mem = 1024*8 # Maximum memory to use, in Mbs. Default = 1024
max_cores = None # Number of logical cores to use in fitting. None uses all but 2 available cores.
# ## Make the data pycroscopy compatible
# Converting the raw data into a pycroscopy compatible hierarchical data format (HDF or .h5) file gives you access to the fast fitting algorithms and powerful analysis functions within pycroscopy
#
# #### H5 files:
# * are like smart containers that can store matrices with data, folders to organize these datasets, images, metadata like experimental parameters, links or shortcuts to datasets, etc.
# * are readily compatible with high-performance computing facilities
# * scale very efficiently from few kilobytes to several terabytes
# * can be read and modified using any language including Python, Matlab, C/C++, Java, Fortran, Igor Pro, etc.
#
# #### You can load either of the following:
# * Any .mat or .txt parameter file from the original experiment
# * A .h5 file generated from the raw data using pycroscopy - skips translation
#
# You can select desired file type by choosing the second option in the pull down menu on the bottom right of the file window
# +
# Path to the raw measurement; edit this for your own data.
input_file_path = r'C:/Users/yla/Google Drive/PD time files/User Work/SimonFraser_ZuoGuangYe/sample2/BEcKPFM_2um_0012.h5'
(data_dir, filename) = os.path.split(input_file_path)
if input_file_path.endswith('.h5'):
    # No translation here
    # Already an h5 file: patch it in place so it is pycroscopy-compatible.
    h5_path = input_file_path
    force = False # Set this to true to force patching of the datafile.
    tl = belib.translators.LabViewH5Patcher()
    tl.translate(h5_path, force_patch=force)
else:
    # Set the data to be translated
    data_path = input_file_path
    (junk, base_name) = os.path.split(data_dir)
    # Check if the data is in the new or old format. Initialize the correct translator for the format.
    if base_name == 'newdataformat':
        (junk, base_name) = os.path.split(junk)
        translator = belib.translators.BEPSndfTranslator(max_mem_mb=max_mem)
    else:
        translator = belib.translators.BEodfTranslator(max_mem_mb=max_mem)
    if base_name.endswith('_d'):
        base_name = base_name[:-2]
    # Translate the data
    h5_path = translator.translate(data_path, show_plots=True, save_plots=False)
# Open read-write: later cells modify datasets in this file in place.
h5_file = h5py.File(h5_path, 'r+')
print('Working on:\n' + h5_path)
h5_main = usid.hdf_utils.find_dataset(h5_file, 'Raw_Data')[0]
# -
# ##### Inspect the contents of this h5 data file
# The file contents are stored in a tree structure, just like files on a conventional computer.
# The data is stored as a 2D matrix (position, spectroscopic value) regardless of the dimensionality of the data. Thus, the positions will be arranged as row0-col0, row0-col1.... row0-colN, row1-col0.... and the data for each position is stored as it was chronologically collected
#
# The main dataset is always accompanied by four ancillary datasets that explain the position and spectroscopic value of any given element in the dataset.
# +
# Walk the HDF5 tree and show the main dataset plus its ancillary
# position/spectroscopic index and value datasets and group attributes.
print('Datasets and datagroups within the file:\n------------------------------------')
usid.hdf_utils.print_tree(h5_file)
print('\nThe main dataset:\n------------------------------------')
print(h5_main)
print('\nThe ancillary datasets:\n------------------------------------')
print(h5_file['/Measurement_000/Channel_000/Position_Indices'])
print(h5_file['/Measurement_000/Channel_000/Position_Values'])
print(h5_file['/Measurement_000/Channel_000/Spectroscopic_Indices'])
print(h5_file['/Measurement_000/Channel_000/Spectroscopic_Values'])
print('\nMetadata or attributes in a datagroup\n------------------------------------')
for key in h5_file['/Measurement_000'].attrs:
    print('{} : {}'.format(key, h5_file['/Measurement_000'].attrs[key]))
# -
# ## Get some basic parameters from the H5 file
# This information will be vital for further analysis and visualization of the data
# +
# Pull basic experiment geometry and cKPFM step counts out of the file.
h5_pos_inds = h5_main.h5_pos_inds
pos_dims = h5_main.pos_dim_sizes
pos_labels = h5_main.pos_dim_labels
print(pos_labels, pos_dims)
parm_dict = h5_file['/Measurement_000'].attrs
is_ckpfm = True
num_write_steps = parm_dict['VS_num_DC_write_steps']
num_read_steps = parm_dict['VS_num_read_steps']
num_fields = 2  # in-field / out-of-field measurement pair per step
# -
# ## Visualize the raw data
# Use the sliders below to visualize spatial maps (2D only for now), and spectrograms.
# For simplicity, all the spectroscopic dimensions such as frequency, excitation bias, cycle, field, etc. have been collapsed to a single slider.
fig = belib.viz.be_viz_utils.jupyter_visualize_be_spectrograms(h5_main)
h5_main
# ## Fit the Band Excitation (BE) spectra
# Fit each of the acquired spectra to a simple harmonic oscillator (SHO) model to extract the following information regarding the response:
# * Oscillation amplitude
# * Phase
# * Resonance frequency
# * Quality factor
#
# By default, the cell below will take any previous result instead of re-computing the SHO fit
# +
sho_fit_points = 5 # The number of data points at each step to use when fitting
sho_override = False # Force recompute if True
results_to_new_file = False
h5_sho_targ_grp = None
if results_to_new_file:
    # Write the SHO fits to a sibling file next to the source data.
    # (Fixed: the original referenced undefined names 'folder_path' and
    # 'h5_raw_file_name'; 'data_dir' and 'filename' were set when the
    # input file path was split earlier in the notebook.)
    h5_sho_file_path = os.path.join(data_dir,
                                    filename.replace('.h5', '_sho_fit.h5'))
    print('\n\nSHO Fits will be written to:\n' + h5_sho_file_path + '\n\n')
    # Append to an existing results file, otherwise create a new one.
    f_open_mode = 'w'
    if os.path.exists(h5_sho_file_path):
        f_open_mode = 'r+'
    h5_sho_file = h5py.File(h5_sho_file_path, mode=f_open_mode)
    h5_sho_targ_grp = h5_sho_file
# Fit every BE spectrum to a simple harmonic oscillator model
# (amplitude, phase, resonance frequency, quality factor).
sho_fitter = belib.analysis.BESHOfitter(h5_main, cores=max_cores, verbose=False, h5_target_group=h5_sho_targ_grp)
sho_fitter.set_up_guess(guess_func=belib.analysis.be_sho_fitter.SHOGuessFunc.complex_gaussian,
                        num_points=sho_fit_points)
h5_sho_guess = sho_fitter.do_guess(override=sho_override)
sho_fitter.set_up_fit()
h5_sho_fit = sho_fitter.do_fit(override=sho_override)
h5_sho_grp = h5_sho_fit.parent
# -
# # cKPFM data analysis
#
# - Specify the output file path where the figures should be saved to.
# - Set high_voltage_amplf to 10 if a high voltage amplifier was used, otherwise set to 1
# +
# Specify output file path, default is the same directory as the data
output_file_path = data_dir
# If HV amplifier was used set high_voltage_amplf to 10, else to 1
# (all bias vectors derived below are multiplied by this factor)
high_voltage_amplf = 1
# -
# ## Remove phase offset
#
# If removing the instrumental phase offset is not desired, set phase_offset = 0
# +
# N-dimensional view of the SHO fit results: a structured array indexed as
# (Y, X, field, write step, read step).
Nd_mat = h5_sho_fit.get_n_dim_form()
print('Nd_mat shape = ', Nd_mat.shape)
# Instrumental phase offset taken from the first pixel at field index 1,
# first write/read step (set phase_offset = 0 to skip the correction).
phase_offset = Nd_mat[0, 0, 1, 0, 0]['Phase [rad]']
print('Phase offset [rad] = ', phase_offset)
# Subtract the offset from every phase value in place.
Nd_mat[:,:,:,:,:]['Phase [rad]'] = Nd_mat[:,:,:,:,:]['Phase [rad]'] - phase_offset
# -
# ## Display cKPFM data
# The figure shows:
#
# - Read and write voltage steps with the selected step marked as a red dot
# - Response map at read and write voltage steps selected with sliders
# - Real component of the response at selected read and write steps and x/y coordinates <b> -> select option</b>
# - Response at each write step (x-axis) after each write pulse (color coded)
#
# The 'Save figure' button saves the displayed figure in the above specified output file path as tiff, png and eps file. Alternatively, figures can be copy pasted into other programs like Powerpoint or Word.
#
# #### Select display option by setting 'option = 1' or 'option = 2' in the second line of the code
# - Option 1: display response curves from the maximum write step to the minimum write step
# - Option 2: display response curves from all write voltages
def plot_line_family(axis, x_vec, line_family, line_names=None, label_prefix='', label_suffix='',
                     y_offset=0, show_cbar=False, **kwargs):
    """
    Plots a family of curves on a single axis, color coded along a
    discretized 'jet' colormap.

    Parameters
    ----------
    axis : matplotlib.axes.Axes object
        Axis to plot the curves on
    x_vec : array-like
        Abscissa values shared by all curves
    line_family : 2D numpy array
        family of curves arranged as [curve_index, features]
    line_names : array-like
        array of string or numbers that represent the identity of each curve in the family
    label_prefix : string / unicode
        prefix for the legend (before the index of the curve)
    label_suffix : string / unicode
        suffix for the legend (after the index of the curve)
    y_offset : (optional) number
        quantity by which the lines are offset from each other vertically (useful for spectra)
    show_cbar : (optional) bool
        Whether or not to show a colorbar (instead of a legend)
    """
    x_vec = np.array(x_vec)
    #assert x_vec.ndim == 1, 'x_vec must be a 1D array'
    if not isinstance(line_family, list):
        line_family = np.array(line_family)
    assert line_family.ndim == 2, 'line_family must be a 2D array'
    num_lines = line_family.shape[0]
    if line_names is None:
        line_names = [str(line_ind) for line_ind in range(num_lines)]
    line_names = ['{} {} {}'.format(label_prefix, cur_name, label_suffix) for cur_name in line_names]
    print("Line family shape is {}".format(line_family.shape))
    # Callers may pass cmap= for the colorbar; pop it so the name is defined
    # below (the original referenced an undefined 'cmap' in the cbar branch).
    cmap = kwargs.pop('cmap', plt.cm.jet)
    # Hoisted out of the loop (loop-invariant) and sized by the number of
    # lines; the original rebuilt it per iteration and sized it by the
    # feature count (shape[-1]), compressing the color range whenever the
    # two differed.
    colors = plt.cm.get_cmap('jet', num_lines)
    for line_ind in range(num_lines):
        axis.plot(x_vec, line_family[line_ind] + line_ind * y_offset,
                  color = colors(line_ind),
                  )
    if show_cbar:
        # put back the cmap parameter:
        kwargs.update({'cmap': cmap})
        # NOTE(review): cbar_for_line_plot is not defined or imported in this
        # file (it exists in the sidpy/pyUSID plot utilities) -- import it
        # before calling with show_cbar=True.
        _ = cbar_for_line_plot(axis, num_lines, **kwargs)
# +
global option, save_figure
# Option 1: only show curves from maximum to minimum write voltage
# Option 2: Show all curves from the whole write voltage waveform
option = 1
save_figure = False # Save the figure after being generated if True
# Spectroscopic bookkeeping from the SHO fit dataset.
h5_sho_spec_inds = h5_sho_fit.h5_spec_inds
h5_sho_spec_vals = h5_sho_fit.h5_spec_vals
sho_spec_labels = h5_sho_fit.spec_dim_labels
pos_labels = h5_sho_fit.pos_dim_labels
num_fields = h5_sho_fit.spec_dim_sizes[h5_sho_fit.spec_dim_labels.index('Field')]
num_write_steps = h5_sho_fit.spec_dim_sizes[h5_sho_fit.spec_dim_labels.index('write_bias')]
num_read_steps = h5_sho_fit.spec_dim_sizes[h5_sho_fit.spec_dim_labels.index('read_bias')]
# It turns out that the read voltage index starts from 1 instead of 0
# Also the VDC indices are NOT repeating. They are just rising monotonically
write_volt_index = sho_spec_labels.index('write_bias')
read_volt_index = sho_spec_labels.index('read_bias')
# NOTE: these assignments rewrite the spectroscopic indices *in the h5 file*
# (it was opened 'r+'), normalizing read indices to start at 0 and making
# write indices repeat per read step.
h5_sho_spec_inds[read_volt_index, :] -= np.min(h5_sho_spec_inds[read_volt_index, :])
h5_sho_spec_inds[write_volt_index, :] = np.tile(np.repeat(np.arange(num_write_steps), num_fields), num_read_steps)
# Get the bias matrix:
bias_mat, _ = usid.hdf_utils.reshape_to_n_dims(h5_sho_spec_vals, h5_spec=h5_sho_spec_inds)
bias_vec_r_all = bias_mat[read_volt_index]*high_voltage_amplf
bias_vec_w_all = bias_mat[write_volt_index]*high_voltage_amplf
# Collapse to per-step bias vectors (index 1 selects one field).
bias_vec_w = bias_vec_w_all.reshape(h5_sho_fit.spec_dim_sizes)[1, :, 1]
bias_vec_r = bias_vec_r_all.reshape(h5_sho_fit.spec_dim_sizes)[1, :, :]
# Option 1: only show curves from maximum to minimum write voltage:
if option == 1:
    write_step_start = np.argmax(bias_vec_w)
    write_step_end = np.argmin(bias_vec_w)
# Option 2: show all curves from the whole write voltage waveform
if option == 2:
    write_step_start = 0
    write_step_end = num_write_steps-1
bias_vec_r_display = np.transpose(bias_vec_r[write_step_start:write_step_end+1,:])
print('These are the labels', sho_spec_labels)
# Grid dimensions from the N-dimensional SHO results.
nd_labels = h5_sho_fit.n_dim_labels
num_read_steps = Nd_mat.shape[nd_labels.index('read_bias')]
num_write_steps = Nd_mat.shape[nd_labels.index('write_bias')]
num_row = Nd_mat.shape[nd_labels.index('Y')]
num_col = Nd_mat.shape[nd_labels.index('X')]
# Indices of initial data points to plot
row = 0
col = 0
field = 1
dc_step_read = 0
dc_step_write = 0
# Select color scale range for map here
cmin = -0.0001
cmax = 0.0001
# Get the read and write data to plot
# Response = amplitude * cos(phase), scaled by 1000.
resp_vec_w = Nd_mat[row,col,field,:,dc_step_read]['Amplitude [V]'] * np.cos(Nd_mat[row,col,field,:,dc_step_read]['Phase [rad]'])*1000
resp_mat_r = Nd_mat[:,:,1,dc_step_write,dc_step_read]['Amplitude [V]'] * np.cos(Nd_mat[:,:,1,dc_step_write,dc_step_read]['Phase [rad]'])*1000
resp_vec_r = np.squeeze(Nd_mat[:,:,1,write_step_start:write_step_end+1,:]['Amplitude [V]']
                        * np.cos(Nd_mat[:,:,1,write_step_start:write_step_end+1,:]['Phase [rad]'])*1000)
def make_figure(resp_mat_r, resp_vec_r, resp_vec_w, dc_step_read, dc_step_write, col, row):
    """
    Draw the interactive cKPFM overview figure.

    Panels: write/read bias waveforms (selected step marked in red), a
    spatial response map at the selected write/read step, the
    response-vs-write loop at the selected pixel, and color-coded
    response-vs-read curves.

    Reads module-level state (bias vectors, option, output_file_path).
    When the global save_figure flag is set, saves png/eps/tif copies of
    the figure to output_file_path and clears the flag.
    """
    global save_figure
    plt.clf();
    fig = plt.figure(figsize=(13, 9))
    fig.set_facecolor('white')
    ax_bias_w = plt.subplot2grid((20, 2), (0, 0), colspan=1, rowspan=3)
    ax_bias_r = plt.subplot2grid((20, 2), (5, 0), colspan=1, rowspan=3)
    ax_loop_w = plt.subplot2grid((20, 2), (0, 1), colspan=1, rowspan=7)
    ax_loop_r = plt.subplot2grid((20, 2), (9, 1), colspan=1, rowspan=8)
    ax_colorbar = plt.subplot2grid((20, 2), (19, 1), colspan=1, rowspan=1)
    ax_map = plt.subplot2grid((20, 2), (10, 0), colspan=1, rowspan=11)
    ax_bias_w.set_xlabel('Step', fontsize = 12)
    ax_bias_w.set_ylabel('Write voltage [V]', fontsize = 12)
    ax_bias_r.set_xlabel('Step', fontsize = 12)
    ax_bias_r.set_ylabel('Read voltage [V]', fontsize = 12)
    ax_loop_w.set_ylabel('Response [a.u.]', fontsize = 12)
    ax_loop_w.set_xlabel('Write voltage [V]', fontsize = 12)
    ax_loop_r.set_ylabel('Response [a.u.]', fontsize = 12)
    ax_loop_r.set_xlabel('Read voltage [V]', fontsize = 12)
    # Title saying read and write voltages
    fig.suptitle('Read voltage = '+str(bias_vec_r[1,dc_step_read])+' V, Write voltage = ' +str(bias_vec_w[dc_step_write])+' V'
                 ', x = '+str(col)+', y = '+str(row), fontsize = 14);
    co_b = ax_map.imshow(resp_mat_r, cmap = sidpy.plot_utils.cmap_jet_white_center(), origin='upper',
                         interpolation='none');
    cb = fig.colorbar(co_b)
    #Graph of DC write voltage
    ax_bias_w.plot(bias_vec_w,'b.');
    ax_bias_w.plot(dc_step_write,bias_vec_w[dc_step_write],'r.');
    ax_bias_w.set_ylim([np.min(bias_vec_w)-0.5, np.max(bias_vec_w)+0.5])
    #Graph of DC read voltage
    ax_bias_r.plot(np.transpose(bias_vec_r[1]),'b.');
    ax_bias_r.plot(dc_step_read,np.transpose(bias_vec_r[1,dc_step_read]),'r.');
    ax_bias_r.set_ylim([np.min(bias_vec_r)-0.5, np.max(bias_vec_r)+0.5])
    #Graph of response loop (amplitude * cos(phase)) vs write voltage at selected x, y and read step
    ax_loop_w.plot(bias_vec_w, resp_vec_w,'.-');
    #Response loops (amplitude * cos(phase)) of all write voltage steps (color coded) vs read voltage at selected x, y
    plot_line_family(ax_loop_r, bias_vec_r_display[:, :],
                     resp_vec_r, line_names='None',
                     label_prefix='Line', label_suffix='')
    if option == 1:
        colorbar_mat = np.column_stack((range(write_step_start,write_step_end+1),range(write_step_start,write_step_end+1)))
        ax_colorbar.imshow(np.transpose(colorbar_mat[:,:]), cmap=plt.cm.jet, origin='lower', interpolation='none')
        ax_colorbar.set_yticklabels('')
        ax_colorbar.tick_params(axis = 'y', left = 'off', right = 'off')
        plt.sca(ax_colorbar)
        plt.xticks([0,write_step_end-write_step_start],[bias_vec_w[write_step_start],bias_vec_w[write_step_end]])
        ax_colorbar.set_xlabel('Write voltage [V]', fontsize = 12)
    if option == 2:
        colorbar_mat = np.column_stack((range(0,num_write_steps-1),range(0,num_write_steps-1)))
        ax_colorbar.imshow(np.transpose(colorbar_mat[:,:]), cmap=plt.cm.jet, origin='lower', interpolation='none')
        ax_colorbar.set_yticklabels('')
        ax_colorbar.tick_params(axis = 'y', left = 'off', right = 'off')
        ax_colorbar.set_xlabel('Write voltage step', fontsize = 12)
    if save_figure == True:
        # Build the output path portably; the original concatenated a literal
        # backslash ("path + '\...'"), which only worked on Windows.
        fig_base = 'cb_cKPFM_Vr'+str(dc_step_read)+'_Vw'+str(dc_step_write)+'_x='+str(col)+'_y='+str(row)
        fig.savefig(os.path.join(output_file_path, fig_base+'.png'), format='png')
        fig.savefig(os.path.join(output_file_path, fig_base+'.eps'), format='eps')
        fig.savefig(os.path.join(output_file_path, fig_base+'.tif'), format='tiff')
        save_figure = False
def update_sho_plots(dc_step_read, dc_step_write, col, row, **kwargs):
    """Recompute the response slices for the selected read/write steps and pixel, then redraw.

    Response = amplitude * cos(phase), scaled by 1000.
    (Fixed: the original computed resp_vec_w twice on consecutive,
    identical lines.)
    """
    # Response vs write voltage at the selected pixel and read step.
    resp_vec_w = Nd_mat[row,col,field,:,dc_step_read]['Amplitude [V]'] * np.cos(Nd_mat[row,col,field,:,dc_step_read]['Phase [rad]'])*1000
    # Spatial response map at the selected write/read step.
    resp_mat_r = Nd_mat[:,:,1,dc_step_write,dc_step_read]['Amplitude [V]'] * np.cos(Nd_mat[:,:,1,dc_step_write,dc_step_read]['Phase [rad]'])*1000
    # Response vs read voltage for the displayed write steps at the pixel.
    resp_vec_r = (Nd_mat[row,col,field,write_step_start:write_step_end+1,:]['Amplitude [V]']
                  * np.cos(Nd_mat[row,col,field,write_step_start:write_step_end+1,:]['Phase [rad]'])*1000)
    make_figure(resp_mat_r, resp_vec_r, resp_vec_w, dc_step_read, dc_step_write, col, row)
def on_save_button_clicked(b):
    """Button callback: flag the next redraw for saving, then redraw at the current slider values."""
    global save_figure
    save_figure = True
    update_sho_plots(dc_step_read_slider.value,
                     dc_step_write_slider.value,
                     x_slider.value,
                     y_slider.value)
# Interactive controls: read/write step and pixel sliders drive
# update_sho_plots; the button saves the current figure.
slider_dict = dict()
# NOTE(review): the initial value of 4 assumes at least 5 read steps -- confirm.
dc_step_read_slider = widgets.IntSlider(min = 0, max = num_read_steps-1, step = 1,value = 4,
                                        description = 'Read step',continuous_update = False);
dc_step_write_slider = widgets.IntSlider(min = 0, max = num_write_steps-1, step = 1,value = 0,
                                         description = 'Write step', continuous_update = False);
x_slider = widgets.IntSlider(min = 0, max = num_col-1,step = 1,value = 0,
                             description='x',continuous_update = False);
y_slider = widgets.IntSlider(min = 0,max = num_row-1,step = 1,value = 0,
                             description = 'y',continuous_update = False);
widgets.interact(update_sho_plots,dc_step_read=dc_step_read_slider,dc_step_write=dc_step_write_slider, col = x_slider, row = y_slider, **slider_dict);
button = widgets.Button(description = 'Save figure')
display(button)
button.on_click(on_save_button_clicked)
# -
# ## Display averaged cKPFM data
#
# Figures show response at each write step (x-axis) after each write pulse (color coded) averaged over the whole map (left) and averaged over the area selected with x- and y-range sliders (right).
#
# #### Select display option by setting 'option = 1' or 'option = 2' in the first line of the code
# - Option 1: display response curves from the maximum write step to the minimum write step
# - Option 2: display response curves from all write voltages
# +
option = 1
# Option 1: only show curves from maximum to minimum write voltage:
if option == 1:
    write_step_start = np.argmax(bias_vec_w)
    write_step_end = np.argmin(bias_vec_w)
# Option 2: show all curves from the whole write voltage waveform
if option == 2:
    write_step_start = 0
    write_step_end = num_write_steps-1
bias_vec_r_display = np.transpose(bias_vec_r[write_step_start:write_step_end+1,:])
num_display_steps = bias_vec_r_display.shape[1]
# NOTE: 'global' at module level is a no-op; kept for notebook readability.
global save_figure_vsr
save_figure_vsr = False
# Response (amplitude * cos(phase), x1000) for the displayed write steps.
resp_mat_vsr = np.squeeze(Nd_mat[:,:,1,write_step_start:write_step_end+1,:]['Amplitude [V]']
                          * np.cos(Nd_mat[:,:,1,write_step_start:write_step_end+1,:]['Phase [rad]'])*1000)
# Calculate response at 0 V read voltage averaged over all pixels
# (averaged over rows, then columns; NOTE(review): the 'std' is a
# std-of-std, not the pixel-wise standard deviation -- confirm intent.)
resp_vsr_mean1 = np.nanmean(resp_mat_vsr,axis = 0)
resp_vsr_std1 = np.nanstd(resp_mat_vsr,axis = 0)
resp_vsr_mean = np.nanmean(resp_vsr_mean1,axis = 0)
resp_vsr_std = np.nanstd(resp_vsr_std1,axis = 0)
def make_figure_respvsr(resp_vsr_mean_range, resp_vsr_std_range, x_range, y_range):
    """
    Plot response-vs-read-voltage curves (color coded by write step),
    averaged over the whole map (left) and over the selected x/y window
    (right), with write-voltage colorbars underneath.

    Reads module-level state (bias vectors, option, resp_vsr_mean,
    output_file_path). When the global save_figure_vsr flag is set, saves
    png/eps/tif copies to output_file_path and clears the flag.
    """
    global save_figure_vsr
    fig_vsr = plt.figure(figsize=(12,5))
    fig_vsr.clf()
    fig_vsr.set_facecolor('white')
    ax_plot = plt.subplot2grid((5, 2), (0, 0), colspan=1, rowspan=4)
    ax_plot_range = plt.subplot2grid((5, 2), (0, 1), colspan=1, rowspan=4)
    ax_colorbar = plt.subplot2grid((5, 2), (4, 0), colspan=1, rowspan=1)
    ax_colorbar2 = plt.subplot2grid((5, 2), (4, 1), colspan=1, rowspan=1)
    # Whole-map average (left panel)
    plot_line_family(ax_plot, bias_vec_r_display[:, :],
                     resp_vsr_mean[:, :], line_names='None',label_prefix='Line',
                     label_suffix='', cmap=plt.cm.jet)
    # Selected-window average (right panel)
    plot_line_family(ax_plot_range, bias_vec_r_display[:, :],
                     resp_vsr_mean_range[:, :], line_names='None',
                     label_prefix='Line', label_suffix='', cmap=plt.cm.jet)
    ax_plot.set_xlabel('Read voltage [V]', fontsize = 12)
    ax_plot.set_ylabel('Response [a.u.]', fontsize = 12)
    ax_plot.set_title('Averaged over whole map', fontsize = 12)
    ax_plot_range.set_xlabel('Read voltage [V]', fontsize = 12)
    ax_plot_range.set_ylabel('Response [a.u.]', fontsize = 12)
    ax_plot_range.set_title('Averaged x = '+str(x_range[0])+' - '+str(x_range[1])
                            +', y = '+str(y_range[0])+' - '+str(y_range[1]), fontsize = 12)
    if option == 1:
        colorbar_mat = np.column_stack((range(write_step_start,write_step_end+1),range(write_step_start,write_step_end+1)))
        ax_colorbar.imshow(np.transpose(colorbar_mat[:,:]), cmap=plt.cm.jet, origin='lower', interpolation='none')
        ax_colorbar.set_yticklabels('')
        ax_colorbar.tick_params(axis = 'y', left = 'off', right = 'off')
        plt.sca(ax_colorbar)
        plt.xticks([0,write_step_end-write_step_start],[bias_vec_w[write_step_start],bias_vec_w[write_step_end]])
        ax_colorbar.set_xlabel('Write voltage [V]', fontsize = 12)
        ax_colorbar2.imshow(np.transpose(colorbar_mat[:,:]), cmap=plt.cm.jet, origin='lower', interpolation='none')
        ax_colorbar2.set_yticklabels('')
        ax_colorbar2.tick_params(axis = 'y', left = 'off', right = 'off')
        plt.sca(ax_colorbar2)
        plt.xticks([0,write_step_end-write_step_start],[bias_vec_w[write_step_start],bias_vec_w[write_step_end]])
        ax_colorbar2.set_xlabel('Write voltage [V]', fontsize = 12)
    if option == 2:
        colorbar_mat = np.column_stack((range(0,num_write_steps-1),range(0,num_write_steps-1)))
        ax_colorbar.imshow(np.transpose(colorbar_mat[:,:]), cmap=plt.cm.jet, origin='lower', interpolation='none')
        ax_colorbar.set_yticklabels('')
        ax_colorbar.tick_params(axis = 'y', left = 'off', right = 'off')
        ax_colorbar.set_xlabel('Write voltage step', fontsize = 12)
        ax_colorbar2.imshow(np.transpose(colorbar_mat[:,:]), cmap=plt.cm.jet, origin='lower', interpolation='none')
        ax_colorbar2.set_yticklabels('')
        ax_colorbar2.tick_params(axis = 'y', left = 'off', right = 'off')
        ax_colorbar2.set_xlabel('Write voltage step', fontsize = 12)
    fig_vsr.tight_layout();
    if save_figure_vsr == True:
        # Build the output path portably; the original concatenated a literal
        # backslash ("path + '\...'"), which only worked on Windows.
        fig_base = 'cbColor_Response_vsReadVoltage_x'+str(x_range)+'_y'+str(y_range)
        fig_vsr.savefig(os.path.join(output_file_path, fig_base+'.png'), format='png')
        fig_vsr.savefig(os.path.join(output_file_path, fig_base+'.eps'), format='eps')
        fig_vsr.savefig(os.path.join(output_file_path, fig_base+'.tif'), format='tiff')
        save_figure_vsr = False
def update_xyrange(x_range, y_range):
    """Average the cKPFM response over the selected x/y pixel window and redraw."""
    rows = slice(y_range[0], y_range[1])
    cols = slice(x_range[0], x_range[1])
    # Response = amplitude * cos(phase), x1000, over the displayed write steps.
    window = Nd_mat[rows, cols, 1, write_step_start:write_step_end+1, :]
    resp_window = np.squeeze(window['Amplitude [V]'] * np.cos(window['Phase [rad]']) * 1000)
    # Average over rows first, then over columns (std is a std-of-std,
    # matching the whole-map calculation above).
    mean_over_rows = np.nanmean(resp_window, axis = 0)
    std_over_rows = np.nanstd(resp_window, axis = 0)
    make_figure_respvsr(np.nanmean(mean_over_rows, axis = 0),
                        np.nanstd(std_over_rows, axis = 0),
                        x_range, y_range)
# x/y window sliders for the averaged display.
x_range = widgets.IntRangeSlider(min = 0,max = num_col-1,step = 1,value = 0,
                          description = 'x-range',continuous_update = False)
# Fixed: the y slider's maximum was num_col-1 (copy-paste); it must span rows,
# otherwise non-square grids select the wrong y extent.
y_range = widgets.IntRangeSlider(min = 0,max = num_row-1,step = 1,value = 0,
                          description = 'y-range',continuous_update = False)
widgets.interact(update_xyrange,x_range = x_range, y_range = y_range);
def on_save_button_clicked_vsr(b):
    """Button callback: flag the next redraw for saving and redraw the current window."""
    global save_figure_vsr
    save_figure_vsr = True
    update_xyrange(x_range.value, y_range.value)
button_vsr = widgets.Button(description = 'Save figure')
display(button_vsr)
button_vsr.on_click(on_save_button_clicked_vsr)
# -
# ## Export response after all write steps for all pixels
#
# Response data (= amplitude * cos(phase)) is exported into separate tab-delimited text files for each read voltage (see file names). The columns in the text file represent write voltage steps, the rows represent individual pixels.
# +
#Export response of all write steps (columns) for all pixels (rows) to one
# delimited text file for each read step.
# NOTE(fix): previously range(0, num_read_steps-1) silently skipped the last
# read voltage, even though Read_voltage_vec.txt lists all of them.
for dc_step_read in range(num_read_steps):
    # Response = amplitude * cos(phase) * 1000 at this read step.
    resp_mat = np.squeeze(Nd_mat[:,:,1,:,dc_step_read]['Amplitude [V]']
                          * np.cos(Nd_mat[:,:,1,:,dc_step_read]['Phase [rad]'])*1000)
    # Rows = pixels (row-major over the grid), columns = write-voltage steps.
    resp_vec = np.reshape(resp_mat, (num_row*num_col, num_write_steps))
    np.savetxt(os.path.join(output_file_path,'Response_at_Vread_'+str(bias_vec_r[0,dc_step_read])+'V.txt'),resp_vec, fmt='%f', delimiter='\t')
# Companion voltage vectors so the per-read-step files can be interpreted.
np.savetxt(os.path.join(output_file_path,'Read_voltage_vec.txt'), np.squeeze(bias_vec_r[0,:]), fmt='%f', delimiter='\t')
np.savetxt(os.path.join(output_file_path,'Write_voltage_vec.txt'), np.squeeze(bias_vec_w[:]), fmt='%f', delimiter='\t')
# -
# ## Display averaged response at 0 V read voltage
#
# The left figure shows response at 0 V read voltage averaged over the whole grid, the right figure shows response at 0 V read voltage averaged over the x- and y- range specified with sliders.
# +
# Plot response at 0 V read voltage averaged over all pixels
# ('global' at module level is a no-op; kept for notebook-cell symmetry.)
global save_figure_0V
save_figure_0V = False
# Index of the read step whose bias is (numerically) closest to 0 V.
dc_step_read = np.argwhere(np.isclose(bias_vec_r[0,:], 0)).squeeze()
# Response = amplitude * cos(phase) * 1000 at the 0 V read step, per pixel.
resp_mat_0Vr = np.squeeze(Nd_mat[:,:,1,:,dc_step_read]['Amplitude [V]']
                          * np.cos(Nd_mat[:,:,1,:,dc_step_read]['Phase [rad]'])*1000)
# Calculate response at 0 V read voltage averaged over all pixels
# (first over rows, then over columns; NaNs ignored).
resp_0V_mean1 = np.nanmean(resp_mat_0Vr,axis = 0)
resp_0V_std1 = np.nanstd(resp_mat_0Vr,axis = 0)
resp_0V_mean = np.nanmean(resp_0V_mean1,axis = 0)
resp_0V_std = np.nanstd(resp_0V_std1,axis = 0)
def make_figure_resp0V(resp_0V_mean_range, resp_0V_std_range, x_range, y_range):
    """Two-panel plot of the response at 0 V read voltage.

    Parameters
    ----------
    resp_0V_mean_range, resp_0V_std_range : array-like
        Mean/std response per write step, averaged over the selected window.
    x_range, y_range : sequence of two ints
        Pixel window the right-hand panel was averaged over (labels only).

    Uses the module-level resp_0V_mean / resp_0V_std for the whole-map panel.
    When the save_figure_0V flag is armed, writes png/eps/tif files and
    clears the flag.
    """
    global save_figure_0V
    fig_r0V = plt.figure(figsize=(10,4))
    fig_r0V.clf()
    fig_r0V.set_facecolor('white')
    ax_plot = plt.subplot2grid((1, 2), (0, 0), colspan=1, rowspan=1)
    ax_plot_range = plt.subplot2grid((1, 2), (0, 1), colspan=1, rowspan=1)
    # Whole-map average: line plus errorbar overlay.
    ax_plot.plot(bias_vec_w[:], resp_0V_mean, '.-')
    ax_plot.errorbar(bias_vec_w[:], resp_0V_mean, yerr = resp_0V_std, fmt = '.-')
    ax_plot.set_xlabel('Write voltage [V]', fontsize = 12)
    ax_plot.set_ylabel('Response [a.u.]', fontsize = 12)
    ax_plot.set_title('Averaged over whole map', fontsize = 12)
    # Window average supplied by the caller.
    ax_plot_range.errorbar(bias_vec_w[:], resp_0V_mean_range, yerr = resp_0V_std_range, fmt = '.-')
    ax_plot_range.set_xlabel('Write voltage [V]', fontsize = 12)
    ax_plot_range.set_ylabel('Response [a.u.]', fontsize = 12)
    ax_plot_range.set_title('Averaged from x = '+str(x_range[0])+'-'+str(x_range[1])
                            +', y = '+str(y_range[0])+'-'+str(y_range[1]), fontsize = 12)
    fig_r0V.tight_layout();
    fig_r0V.suptitle('Spatially averaged response at read voltage = 0 V', y = 1.05, x=0.55, fontsize = 12)
    if save_figure_0V == True:
        # os.path.join instead of '\'-concatenation: portable, and avoids the
        # invalid '\R' escape sequence in the old string literals.
        base = 'Response_0V_x'+str(x_range)+'_y'+str(y_range)
        fig_r0V.savefig(os.path.join(output_file_path, base+'.png'), format='png')
        fig_r0V.savefig(os.path.join(output_file_path, base+'.eps'), format='eps')
        fig_r0V.savefig(os.path.join(output_file_path, base+'.tif'), format='tiff')
        save_figure_0V = False
def update_xyrange(x_range, y_range):
    """Recompute the 0 V-read-voltage response averaged over the selected
    x/y pixel window and redraw via make_figure_resp0V.

    Fix: leftover debug print of the intermediate results removed (it
    spammed the widget output area on every slider move).
    """
    # Slice the selected window once; axis order is (row=y, col=x).
    window = Nd_mat[y_range[0]:y_range[1], x_range[0]:x_range[1], 1, :, dc_step_read]
    resp_mat_0Vr_range = np.squeeze(window['Amplitude [V]']
                                    * np.cos(window['Phase [rad]']) * 1000)
    # Collapse the two spatial axes (rows first, then columns), ignoring NaNs.
    resp_0V_mean1_range = np.nanmean(resp_mat_0Vr_range, axis = 0)
    resp_0V_std1_range = np.nanstd(resp_mat_0Vr_range, axis = 0)
    resp_0V_mean_range = np.nanmean(resp_0V_mean1_range, axis = 0)
    resp_0V_std_range = np.nanstd(resp_0V_std1_range, axis = 0)
    make_figure_resp0V(resp_0V_mean_range, resp_0V_std_range, x_range, y_range)
# Pixel-window range sliders driving update_xyrange.
# NOTE(fix): IntRangeSlider is an integer widget; the previous step = 0.1 was
# a float step (the identical wiring in the vs-read-voltage cell uses step = 1).
# NOTE(review): y-range max uses num_col -- assumes a square grid; confirm
# whether num_row was intended.
x_range = widgets.IntRangeSlider(min = 0, max = num_col-1, step = 1, value = 0,
                                 description = 'x-range', continuous_update = False)
y_range = widgets.IntRangeSlider(min = 0, max = num_col-1, step = 1, value = 0,
                                 description = 'y-range', continuous_update = False)
# Redraw the figure whenever either slider changes.
widgets.interact(update_xyrange, x_range = x_range, y_range = y_range);
def on_save_button_clicked_0V(b):
    # Arm the save flag, then redraw so make_figure_resp0V writes the files.
    global save_figure_0V
    save_figure_0V = True
    update_xyrange(x_range.value, y_range.value)
button_0V = widgets.Button(description = 'Save figure')
display(button_0V)
button_0V.on_click(on_save_button_clicked_0V)
# -
# ## Display response, amplitude and phase data at 0 V read voltage for a single pixel
#
# Select the pixel of interest below by setting x_select and y_select to the desired coordinates.
# +
# Pixel of interest (column index x, row index y) for the single-pixel plots.
x_select = 3
y_select = 2
# Response / amplitude / phase at the 0 V read step (dc_step_read was set in
# an earlier cell) for every pixel and write step.
resp_mat_0Vr = np.squeeze(Nd_mat[:,:,1,:,dc_step_read]['Amplitude [V]']
                          * np.cos(Nd_mat[:,:,1,:,dc_step_read]['Phase [rad]'])*1000)
amp_mat_0Vr = np.squeeze(Nd_mat[:,:,1,:,dc_step_read]['Amplitude [V]'])*1000
phase_mat_0Vr = np.squeeze(Nd_mat[:,:,1,:,dc_step_read]['Phase [rad]'])
# Three side-by-side panels: response, amplitude, phase vs. write voltage.
fig_r0V = plt.figure(figsize=(12,4))
fig_r0V.clf()
fig_r0V.set_facecolor('white')
ax_resp = plt.subplot2grid((1, 3), (0, 0), colspan=1, rowspan=1)
ax_amp = plt.subplot2grid((1, 3), (0, 1), colspan=1, rowspan=1)
ax_phase = plt.subplot2grid((1, 3), (0, 2), colspan=1, rowspan=1)
# Note the (row, col) = (y_select, x_select) indexing into the maps.
ax_resp.plot(bias_vec_w[:], resp_mat_0Vr[y_select, x_select, :],'.-')
ax_resp.set_xlabel('Write voltage [V]', fontsize = 12)
ax_resp.set_ylabel('Response [a.u.]', fontsize = 12)
ax_amp.plot(bias_vec_w[:], amp_mat_0Vr[y_select, x_select, :],'.-')
ax_amp.set_xlabel('Write voltage [V]', fontsize = 12)
ax_amp.set_ylabel('Amplitude [a.u.]', fontsize = 12)
ax_phase.plot(bias_vec_w[:], phase_mat_0Vr[y_select, x_select, :],'.-')
ax_phase.set_xlabel('Write voltage [V]', fontsize = 12)
ax_phase.set_ylabel('Phase [rad]', fontsize = 12)
# Phase axis fixed to roughly +/- pi for comparability between pixels.
ax_phase.set_ylim([-4, 4])
fig_r0V.tight_layout()
# -
# ## Display on-field loops for all read steps
#
# The figure shows on-field response vs. write voltage for all read voltages (color coded) averaged over the whole map.
# +
# On-field (index 0 of the field axis) response for every pixel, write step
# and read step; response = amplitude * cos(phase) * 1000.
resp_mat_IF = np.squeeze(Nd_mat[:,:,0,:,:]['Amplitude [V]']
                         * np.cos(Nd_mat[:,:,0,:,:]['Phase [rad]'])*1000)
# Average over both spatial axes (rows, then columns); NaNs ignored.
resp_IF_mean1 = np.nanmean(resp_mat_IF,axis = 0)
resp_IF_std1 = np.nanstd(resp_mat_IF,axis = 0)
resp_IF_mean = np.nanmean(resp_IF_mean1,axis = 0)
resp_IF_std = np.nanstd(resp_IF_std1,axis = 0)
# One copy of the write-voltage vector per read step for the line-family plot.
bias_vec_w_graph = np.matlib.repmat(bias_vec_w,num_read_steps, 1)
fig_IF = plt.figure(figsize=(6,6))
fig_IF.clf()
fig_IF.set_facecolor('white')
ax_plot = plt.subplot2grid((15, 5), (0, 0), colspan=5, rowspan=12)
ax_colorbar = plt.subplot2grid((15, 5), (14, 0), colspan=5, rowspan=1)
# One line per read voltage, color coded with the jet colormap.
px.plot_utils.plot_line_family(ax_plot, np.transpose(bias_vec_w_graph), np.transpose(resp_IF_mean), line_names='None',label_prefix='Line', label_suffix='', cmap=plt.cm.jet)
# Manual colorbar: a two-row image of the read-step indices.
colorbar_mat = np.column_stack((range(0,num_read_steps),range(0,num_read_steps)))
ax_colorbar.imshow(np.transpose(colorbar_mat[:,:]), cmap=plt.cm.jet, origin='lower', interpolation='none', aspect = 0.25)
ax_colorbar.set_yticklabels('')
# NOTE(review): newer matplotlib expects booleans here (left=False); the
# string 'off' is legacy syntax -- confirm against the pinned matplotlib.
ax_colorbar.tick_params(axis = 'y', left = 'off', right = 'off')
ax_plot.set_xlabel('Write voltage [V]', fontsize = 12)
ax_plot.set_ylabel('In field response [a.u.]', fontsize = 12)
ax_plot.set_title('Averaged over whole map', fontsize = 12)
# Label the colorbar ticks with the actual read voltages.
labels = [str(bias_vec_r[0,i]) for i in range(num_read_steps)]
ax_colorbar.set_xticks(np.arange(num_read_steps))
ax_colorbar.set_xticklabels(labels, fontsize = 10)
ax_colorbar.set_xlabel('Read voltage [V]', fontsize = 12)
def on_save_button4_clicked(b):
    # os.path.join keeps paths portable; the previous '\IF...' literals only
    # worked because '\I' is not a recognized escape (Windows-only behavior).
    fig_IF.savefig(os.path.join(output_file_path,'IFresponse.png'), format='png')
    fig_IF.savefig(os.path.join(output_file_path,'IFresponse.eps'), format='eps')
    fig_IF.savefig(os.path.join(output_file_path,'IFresponse.tif'), format='tiff')
button4 = widgets.Button(description = 'Save figure')
display(button4)
button4.on_click(on_save_button4_clicked)
# -
# ## Calculate the jCPD
#
# The junction contact potential difference (jCPD) is extracted for each dc write voltage step by linear fitting of the response vs. read bias and calculating the x-intercept.
#
# If only a certain (linear) regime of the cKPFM curves should be fitted, set v_read_start_index and v_read_end_index to the first and last index of the data that should be considered for the fit.
# +
#Calculate x intercept -> jCPD
# Fit response vs. read voltage with a straight line for every pixel and
# write step; the x-intercept (-intercept/slope) is the junction CPD.
v_read_start_index = 0
v_read_end_index = num_read_steps-1
print('v_read_start_index = ', v_read_start_index, ', v_read_end_index = ', v_read_end_index)
# Response (amplitude * cos(phase) * 1000) restricted to the read-voltage
# window that should enter the linear fit.
resp_mat_r_all = Nd_mat[:,:,1,:,v_read_start_index : v_read_end_index+1]['Amplitude [V]'] * np.cos(Nd_mat[:,:,1,:,v_read_start_index : v_read_end_index+1]['Phase [rad]'])*1000
fit_slope = np.zeros((num_col, num_row, num_write_steps))
fit_yintercept = np.zeros((num_col, num_row, num_write_steps))
jCPD_mat = np.zeros((num_col, num_row, num_write_steps))
print(bias_vec_r[1,v_read_start_index : v_read_end_index+1])
for row in range(num_row):
    for col in range(num_col):
        # NOTE(fix): iterate over *all* write steps; the previous
        # range(0, num_write_steps-1) left the last write step as the zeros
        # from np.zeros, corrupting the final jCPD/slope/intercept maps.
        for vw in range(num_write_steps):
            # First-order polyfit: coeff[0] = slope, coeff[1] = y-intercept.
            fit_coeff = np.polyfit(bias_vec_r[1,v_read_start_index : v_read_end_index+1], resp_mat_r_all[col, row, vw,:],1)
            fit_slope[col, row, vw] = fit_coeff[0]
            fit_yintercept[col, row, vw] = fit_coeff[1]
            # x-intercept of the fitted line = jCPD for this pixel/step.
            jCPD_mat[col, row, vw] = -fit_coeff[1]/ fit_coeff[0]
# -
# ## Display jCPD data
# Figure shows
#
# - The jCPD map for the write voltage step selected with the slider
# - Graphs of jCPD cross sections along the selected x- and y- axes for the selected write voltage step
# +
global save_figure
save_figure = False
#Color scale minimum and maximum (used by the commented fixed-clim variant).
cmin = -0.2
cmax = 0.02
def make_jCPD_figure(dc_step_write, x_select, y_select):
    """Draw the jCPD map for one write step plus x/y cross sections.

    When the module-level save_figure flag is armed, the figure is written
    (png/eps) to output_file_path and the flag is cleared.
    """
    global save_figure
    fig2 = plt.figure(figsize=(14,8))
    fig2.set_facecolor('white')
    ax_jCPD_xcross_section = plt.subplot2grid((10, 10), (0, 7), colspan=4, rowspan=3)
    plt.title('jCPD cross section at x ='+str(x_select), fontsize = 12)
    ax_jCPD_ycross_section = plt.subplot2grid((10, 10), (4, 7), colspan=4, rowspan=3)
    plt.title('jCPD cross section at y ='+str(y_select), fontsize = 12)
    ax_jCPD_plot = plt.subplot2grid((10, 10), (0, 0), colspan=6, rowspan=7)
    plt.title('jCPD [V] at write voltage = '+str(bias_vec_w[dc_step_write])+' V', fontsize = 12)
    ax_jCPD_xcross_section.plot(range(num_col),jCPD_mat[:,x_select,dc_step_write],'.-')
    ax_jCPD_xcross_section.set_xlabel('Pixel #', fontsize = 12)
    ax_jCPD_xcross_section.set_ylabel('jCPD [V]', fontsize = 12)
    ax_jCPD_ycross_section.plot(range(num_row),jCPD_mat[y_select,:,dc_step_write],'.-')
    ax_jCPD_ycross_section.set_xlabel('Pixel #', fontsize = 12)
    ax_jCPD_ycross_section.set_ylabel('jCPD [V]', fontsize = 12)
    co_b2 = ax_jCPD_plot.imshow(jCPD_mat[:,:,dc_step_write],
                                cmap=usid.plot_utils.cmap_jet_white_center(),
                                origin='upper', interpolation='none');
    cb2 = fig2.colorbar(co_b2)
    if save_figure == True:
        # Fixes: join with os.path.join (the old concatenation was missing a
        # path separator) and name the file after the *selected* coordinates
        # instead of the stale module-level 'row' variable.
        base = 'jCPD_map_WriteStep'+str(dc_step_write)+'_x'+str(x_select)+'_y'+str(y_select)
        fig2.savefig(os.path.join(output_file_path, base+'.png'), format='png')
        fig2.savefig(os.path.join(output_file_path, base+'.eps'), format='eps')
        save_figure = False
slider_dict = dict()
# NOTE(fix): slider maxima must be size-1; max=num_col / num_row previously
# allowed selecting an out-of-range pixel index at the top of the range.
# (Assumes the usual square grid; the cross-section axes swap row/col.)
dc_step_write_slider = widgets.IntSlider(min = 0, max = num_write_steps-1, step = 1,value = 0,description='Write Step');
x_slider = widgets.IntSlider(min = 0, max = num_col-1, step = 1, value = 0, description = 'x')
y_slider = widgets.IntSlider(min = 0, max = num_row-1, step = 1, value = 0, description = 'y')
widgets.interact(make_jCPD_figure, dc_step_write = dc_step_write_slider, x_select = x_slider, y_select = y_slider, **slider_dict);
def on_save_button2_clicked(b):
    # Arm the save flag and redraw with the current slider values so the
    # figure is written to disk.
    global save_figure
    save_figure = True
    make_jCPD_figure(dc_step_write_slider.value, x_slider.value, y_slider.value)
button2 = widgets.Button(description = 'Save figure')
display(button2)
button2.on_click(on_save_button2_clicked)
# -
# ## Display jCPD and fitting parameters
#
# Figure shows maps of
# - jCPD
# - slope
# - y-intercept
#
# for the write voltage step selected with slider.
# +
global save_figure
save_figure = False
# Color scale limits for the (commented-out) fixed-clim imshow variant.
cmin = -3
cmax = 3
def make_fitting_figure(dc_step_write):
    """Show jCPD, fit slope and fit y-intercept maps for one write step.

    Saves the figure (png/eps/tif) when the module-level save_figure flag
    is armed, then clears the flag.
    """
    global save_figure
    fig3 = plt.figure(figsize=(10,3))
    fig3.set_facecolor('white')
    ax_jCPD_plot = plt.subplot(131)
    ax_slope_plot = plt.subplot(132)
    ax_yintercept_plot = plt.subplot(133)
    ax_slope_plot.set_title('Slope [a.u.]', fontsize = 12)
    im3 = ax_jCPD_plot.imshow(jCPD_mat[:,:,dc_step_write],
                              cmap=usid.plot_utils.cmap_jet_white_center(),
                              origin='upper', interpolation='none');
    fig3.colorbar(im3, ax = ax_jCPD_plot)
    ax_jCPD_plot.set_title('jCPD [V]', fontsize = 12)
    im = ax_slope_plot.imshow(fit_slope[:,:,dc_step_write],
                              cmap=usid.plot_utils.cmap_jet_white_center(),
                              origin='upper', interpolation='none');
    fig3.colorbar(im, ax = ax_slope_plot)
    im2 = ax_yintercept_plot.imshow(fit_yintercept[:,:,dc_step_write],
                                    cmap=usid.plot_utils.cmap_jet_white_center(),
                                    origin='upper', interpolation='none');
    ax_yintercept_plot.set_title('y-intercept [a.u.]', fontsize = 12)
    fig3.colorbar(im2, ax = ax_yintercept_plot)
    fig3.suptitle('Write voltage = '+str(bias_vec_w[dc_step_write])+' V', fontsize = 14)
    plt.tight_layout()
    # Leave headroom for the suptitle.
    plt.subplots_adjust(top=0.8)
    if save_figure == True:
        # os.path.join instead of '\'-concatenation: portable and avoids the
        # literal '\j' pseudo-escape in the old string literals.
        base = 'jCPD_map_WriteStep'+str(dc_step_write)
        fig3.savefig(os.path.join(output_file_path, base+'.png'), format='png')
        fig3.savefig(os.path.join(output_file_path, base+'.eps'), format='eps')
        fig3.savefig(os.path.join(output_file_path, base+'.tif'), format='tif')
        save_figure = False
slider_dict = dict()
# Write-step slider drives make_fitting_figure via interact.
dc_step_write_slider = widgets.IntSlider(min = 0, max = num_write_steps-1, step = 1, value = 0, description='Write Step');
widgets.interact(make_fitting_figure, dc_step_write = dc_step_write_slider, **slider_dict);
def on_save_button3_clicked(b):
    # Arm the module-level save flag, then redraw so the figure is saved.
    global save_figure
    save_figure = True
    dc_step_write = dc_step_write_slider.value
    # x_select = x_slider.value
    # y_select = y_slider.value
    make_fitting_figure(dc_step_write)
button3 = widgets.Button(description = 'Save figure')
display(button3)
button3.on_click(on_save_button3_clicked)
# -
# ## jCPD plots
#
# - Left graph depicts the jCPD averaged over all pixels as a function of write voltage
# - Specify a pixel that jCPD data should be displayed from in graph on the right by setting x_select and y_select.
# +
# specify x select and y select
x_select = 3
y_select = 2
# Window over which the mean jCPD is computed (defaults: nearly the full map).
x_range2 = [0, num_col-1]
y_range2 = [0, num_row-1]
# x_range2[0] = 0
# x_range2[1] = num_col-1
# y_range2[0] = 0
# y_range2[1] = num_row-1
# NOTE(review): the half-open slices below stop at index num_*-2, so the last
# row/column is excluded from the averages -- confirm intent.
jCPD_mean1 = np.nanmean(jCPD_mat[y_range2[0]:y_range2[1], x_range2[0]:x_range2[1],:],axis = 0)
jCPD_std1 = np.nanstd(jCPD_mat[y_range2[0]:y_range2[1], x_range2[0]:x_range2[1],:],axis = 0)
jCPD_mean_w = np.nanmean(jCPD_mean1,axis = 0)
# Spread of the per-row means (not the full per-pixel std).
jCPD_std_w = np.nanstd(jCPD_mean1,axis = 0)
#jCPD_mean_w[2] = np.nan
# Grand mean/std over all write steps as a single summary number.
jCPD_mean_all = np.nanmean(jCPD_mean_w,axis = 0)
jCPD_std_all = np.nanstd(jCPD_std_w,axis = 0)
fig3 = plt.figure(figsize=(12,7))
fig3.set_facecolor('white')
ax_jCPD_mean = plt.subplot2grid((10, 10), (0, 0), colspan=4, rowspan=5)
ax_jCPD_xy = plt.subplot2grid((10, 10), (0, 5), colspan=4, rowspan=5)
plt.title('Mean jCPD vs write voltage', fontsize = 12)
# ax_jCPD_mean.errorbar(bias_vec_w, jCPD_mean_w, yerr = jCPD_std_w, fmt = '.-')
ax_jCPD_mean.plot(bias_vec_w, jCPD_mean_w, '.-')
ax_jCPD_mean.set_xlabel('Write voltage [V]', fontsize = 12)
ax_jCPD_mean.set_ylabel('Mean jCPD [V]', fontsize = 12)
# ax_jCPD_mean.set_ylim([-10,10])
# Single-pixel jCPD vs write voltage; note (row, col) = (y, x) indexing.
ax_jCPD_xy.plot(bias_vec_w, jCPD_mat[y_select,x_select, : ], '.-')
ax_jCPD_xy.set_xlabel('Write voltage [V]', fontsize = 12)
ax_jCPD_xy.set_ylabel('jCPD [V]', fontsize = 12)
# ax_jCPD_xy.set_ylim([-10,15])
plt.title('jCPD at x = '+str(x_select)+' y = '+str(y_select))
print('jCPD averaged over all pixels and write voltage steps = '+ str(jCPD_mean_all)+' +/- '+str(jCPD_std_all)+' V')
# -
# ## PCA analysis
#
# ### Singular value decomposition performed on raw data:
# Do SVD on raw data
print(h5_main.shape)
# pycroscopy SVD process on the full raw dataset; compute() returns the
# HDF5 results group holding U, S and V.
do_svd = px.svd_utils.SVD(h5_main, num_components=40)
SVD_data = do_svd.compute()
# ### SVD on jCPD, slope, y-intercept data and response at 0 V read voltage:
# +
# Do SVD on jCPD, slope and y-intercept data
from sklearn.utils.extmath import randomized_svd
num_components = 20;
# # #Filter outliers from jCPD matrix, (-> improve code)
# # for r in range(1, num_row):
# #     for c in range(1, num_col):
# #         for s in range(1, num_write_steps):
# #             if jCPD_mat[r-1, c-1, s-1] > 5 or jCPD_mat[r-1, c-1, s-1] < -5:
# #                 jCPD_mat[r-1, c-1, s-1] = np.nan
# #                 # jCPD_mat[r-1, c-1, s-1] = 0
# Pixels to exclude from the SVD (flattened index = num_col*y + x).
x_delete = np.array([])
y_delete = np.array([])
index_delete = (num_col)*y_delete+x_delete
# Flatten the jCPD stack to (pixels, write steps) for the SVD.
jCPD_mat_SVD = np.reshape(jCPD_mat,(num_row*num_col, num_write_steps))
# Rows containing any NaN are removed before the SVD and re-inserted as NaN
# rows into U afterwards so the score maps keep the grid shape.
nan_pos = np.squeeze((np.argwhere(np.isnan(jCPD_mat_SVD).any(axis=1))))
index_delete = np.insert(index_delete, index_delete.shape[0], nan_pos, axis = 0)
# Delete from the highest index down so earlier deletions do not shift later ones.
index_delete = -np.sort(-index_delete)
jCPD_mat_SVD_clean = jCPD_mat_SVD
for i in range(0,index_delete.shape[0]):
    jCPD_mat_SVD_clean = np.delete(jCPD_mat_SVD_clean, index_delete[i], axis = 0)
U_SVD_jCPD, S_SVD_jCPD, V_SVD_jCPD = randomized_svd(jCPD_mat_SVD_clean, n_components = num_components)
# Re-insert NaN rows (ascending order) at the positions removed above.
index_delete = np.sort(index_delete)
for i in range(0,index_delete.shape[0]):
    U_SVD_jCPD = np.insert(U_SVD_jCPD, index_delete[i], np.nan ,axis = 0)
    print(i,index_delete[i])
print(U_SVD_jCPD.shape)
# SVD of the fit slope and y-intercept maps, same (pixels, write steps) layout.
slope_mat_SVD = np.reshape(fit_slope,(num_row*num_col, num_write_steps))
U_SVD_slope, S_SVD_slope, V_SVD_slope = randomized_svd(slope_mat_SVD, n_components = num_components)
yintercept_mat_SVD = np.reshape(fit_yintercept,(num_row*num_col, num_write_steps))
U_SVD_yintercept, S_SVD_yintercept, V_SVD_yintercept = randomized_svd(yintercept_mat_SVD, n_components = num_components)
# Do SVD on cKPFM signal at 0 V read voltage (= PFM)
dc_step_read = np.argwhere(np.isclose(bias_vec_r[0,:], 0)).squeeze()
resp_mat_0Vr = np.squeeze(Nd_mat[:,:,1,:,dc_step_read]['Amplitude [V]'] *
                          np.cos(Nd_mat[:,:,1,:,dc_step_read]['Phase [rad]'])*1000)
resp_mat_0Vr = np.reshape(resp_mat_0Vr,(num_row*num_col, num_write_steps))
U_SVD_resp0Vr, S_SVD_resp0Vr, V_SVD_resp0Vr = randomized_svd(resp_mat_0Vr, n_components = num_components)
# -
# ### Display SVD score maps and eigenvectors of jCPD data:
# +
# Visualize PCA of jCPD data
# Score maps (U columns reshaped onto the pixel grid) and eigenvectors (V rows
# vs. write voltage).
U_SVD_jCPD = np.reshape(U_SVD_jCPD,(num_row, num_col, num_components))
fig5 = plt.figure(figsize=(12,12))
fig5.clf()
fig5.set_facecolor('white')
n_components = 20
for component in range(1, n_components+1):
    ax_SVD_plot = plt.subplot(5, 4, component)
    image = ax_SVD_plot.imshow(U_SVD_jCPD[:,:,component-1], cmap=px.plot_utils.cmap_jet_white_center(),
                               origin='upper', interpolation='none');
    ax_SVD_plot.set_title('#'+str(component), fontsize = 10)
    fig5.colorbar(image, ax = ax_SVD_plot)
fig5.suptitle('PCA scores of jCPD data', weight = 'bold', fontsize = 12, y=1)
fig5.tight_layout()
fig55 = plt.figure(figsize=(12,12))
fig55.clf()
fig55.set_facecolor('white')
for component in range(1, n_components+1):
    ax_SVD_plot = plt.subplot(5, 4, component)
    image = ax_SVD_plot.plot(bias_vec_w, V_SVD_jCPD[component-1,:],'.-');
    ax_SVD_plot.set_title('#'+str(component), fontsize = 10)
    ax_SVD_plot.set_xlabel('Write voltage [V]')
fig55.suptitle('PCA eigenvectors of jCPD data', weight = 'bold', fontsize = 12, y=1);
fig55.tight_layout()
def on_save_button4_clicked(b):
    # Portable save paths via os.path.join; the previous '\PCA...' literals
    # relied on Windows-only literal backslashes.
    for fig, tag in ((fig5, 'PCA_jCPD_scores'), (fig55, 'PCA_jCPD_ev')):
        for ext, fmt in (('.png', 'png'), ('.eps', 'eps'), ('.tif', 'tif')):
            fig.savefig(os.path.join(output_file_path, tag+ext), format=fmt)
button4 = widgets.Button(description = 'Save figure')
display(button4)
button4.on_click(on_save_button4_clicked)
# -
# ### Display SVD score maps and eigenvectors of slope data:
# +
# Visualize PCA of slope data
# Score maps (U columns reshaped onto the pixel grid) and eigenvectors (V rows
# vs. write voltage).
U_SVD_slope = np.reshape(U_SVD_slope,(num_row, num_col, num_components))
fig6 = plt.figure(figsize=(12,12))
fig6.clf()
fig6.set_facecolor('white')
n_components = 20
for component in range(1, n_components+1):
    ax_SVD_plot = plt.subplot(5, 4, component)
    image = ax_SVD_plot.imshow(U_SVD_slope[:,:,component-1], cmap=px.plot_utils.cmap_jet_white_center(),
                               origin='upper', interpolation='none');
    ax_SVD_plot.set_title('#'+str(component), fontsize = 10)
    fig6.colorbar(image, ax = ax_SVD_plot)
fig6.tight_layout()
fig6.suptitle('PCA scores of slope data', weight = 'bold', fontsize = 12, y = 1);
fig66 = plt.figure(figsize=(12,12))
fig66.clf()
fig66.set_facecolor('white')
for component in range(1, n_components+1):
    ax_SVD_plot = plt.subplot(5, 4, component)
    image = ax_SVD_plot.plot(bias_vec_w, V_SVD_slope[component-1,:],'.-');
    ax_SVD_plot.set_title('#'+str(component), fontsize = 10)
    ax_SVD_plot.set_xlabel('Write voltage [V]')
fig66.tight_layout()
fig66.suptitle('PCA eigenvectors of slope data', weight = 'bold', fontsize = 12, y = 1);
def on_save_button5_clicked(b):
    # Portable save paths via os.path.join; the previous '\PCA...' literals
    # relied on Windows-only literal backslashes.
    for fig, tag in ((fig6, 'PCA_slope_scores'), (fig66, 'PCA_slope_ev')):
        for ext, fmt in (('.png', 'png'), ('.eps', 'eps'), ('.tif', 'tif')):
            fig.savefig(os.path.join(output_file_path, tag+ext), format=fmt)
button5 = widgets.Button(description = 'Save figure')
display(button5)
button5.on_click(on_save_button5_clicked)
# -
# ### Display SVD score maps and eigenvectors of y-intercept data:
# +
# Visualize PCA of y-intercept data
# Score maps (U columns reshaped onto the pixel grid) and eigenvectors (V rows
# vs. write voltage).
U_SVD_yintercept = np.reshape(U_SVD_yintercept,(num_row, num_col, num_components))
fig7 = plt.figure(figsize=(12,12))
fig7.clf()
fig7.set_facecolor('white')
n_components = 20
for component in range(1, n_components+1):
    ax_SVD_plot = plt.subplot(5, 4, component)
    image = ax_SVD_plot.imshow(U_SVD_yintercept[:,:,component-1], cmap=px.plot_utils.cmap_jet_white_center(),
                               origin='upper', interpolation='none');
    ax_SVD_plot.set_title('#'+str(component), fontsize = 10)
    fig7.colorbar(image, ax = ax_SVD_plot)
fig7.tight_layout()
fig7.suptitle('PCA scores of y-intercept data', weight = 'bold', fontsize = 12, y = 1);
fig77 = plt.figure(figsize=(12,12))
fig77.clf()
fig77.set_facecolor('white')
for component in range(1, n_components+1):
    ax_SVD_plot = plt.subplot(5, 4, component)
    image = ax_SVD_plot.plot(bias_vec_w, V_SVD_yintercept[component-1,:],'.-');
    ax_SVD_plot.set_title('#'+str(component), fontsize = 10)
    ax_SVD_plot.set_xlabel('Write voltage [V]')
fig77.tight_layout()
fig77.suptitle('PCA eigenvectors of y-intercept data', weight = 'bold', fontsize = 12, y = 1);
def on_save_button6_clicked(b):
    # Portable save paths via os.path.join; the previous '\PCA...' literals
    # relied on Windows-only literal backslashes.
    for fig, tag in ((fig7, 'PCA_yintercept_scores'), (fig77, 'PCA_yintercept_ev')):
        for ext, fmt in (('.png', 'png'), ('.eps', 'eps'), ('.tif', 'tif')):
            fig.savefig(os.path.join(output_file_path, tag+ext), format=fmt)
button6 = widgets.Button(description = 'Save figure')
display(button6)
button6.on_click(on_save_button6_clicked)
# -
# ### Display SVD score maps and eigenvectors of response at 0 V read voltage:
# +
# Visualize PCA of response at 0 V read voltage
# Score maps (U columns reshaped onto the pixel grid) and eigenvectors (V rows
# vs. write voltage).
U_SVD_resp0Vr = np.reshape(U_SVD_resp0Vr,(num_row, num_col, num_components))
fig8 = plt.figure(figsize=(12,12))
fig8.clf()
fig8.set_facecolor('white')
n_components = 20
for component in range(1, n_components+1):
    ax_SVD_plot = plt.subplot(5, 4, component)
    image = ax_SVD_plot.imshow(U_SVD_resp0Vr[:,:,component-1], cmap=px.plot_utils.cmap_jet_white_center(),
                               origin='upper', interpolation='none');
    ax_SVD_plot.set_title('#'+str(component), fontsize = 10)
    fig8.colorbar(image, ax = ax_SVD_plot)
fig8.tight_layout()
fig8.suptitle('PCA scores of response at V$_{read}$ = 0 V', weight = 'bold', fontsize = 12, y = 1);
fig88 = plt.figure(figsize=(12,12))
fig88.clf()
fig88.set_facecolor('white')
for component in range(1, n_components+1):
    ax_SVD_plot = plt.subplot(5, 4, component)
    image = ax_SVD_plot.plot(bias_vec_w, V_SVD_resp0Vr[component-1,:],'.-');
    ax_SVD_plot.set_xlabel('Write voltage [V]')
    ax_SVD_plot.set_title('#'+str(component), fontsize = 10)
fig88.tight_layout()
fig88.suptitle('PCA eigenvectors of response at V$_{read}$ = 0 V', weight = 'bold', fontsize = 12, y = 1);
def on_save_button7_clicked(b):
    # Portable save paths via os.path.join; the previous '\PCA...' literals
    # relied on Windows-only literal backslashes.
    for fig, tag in ((fig8, 'PCA_resp0V_scores'), (fig88, 'PCA_resp0V_ev')):
        for ext, fmt in (('.png', 'png'), ('.eps', 'eps'), ('.tif', 'tif')):
            fig.savefig(os.path.join(output_file_path, tag+ext), format=fmt)
button7 = widgets.Button(description = 'Save figure')
display(button7)
button7.on_click(on_save_button7_clicked)
# -
# ### Display SVD score maps and eigenvectors of raw data:
# +
# Visualize PCA of raw data
S_mat = SVD_data['S'][:]
U_mat = SVD_data['U'][:]
V_mat = SVD_data['V'][:]
# Never plot more components than the SVD actually returned.
n_components = min(40, S_mat.size)
U_mat = np.reshape(U_mat,(num_row, num_col, n_components))
fig4 = plt.figure(figsize=(12,20))
fig4.clf()
fig4.set_facecolor('white')
for component in range(1, n_components+1):
    ax_SVD_plot = plt.subplot(10, 4, component)
    image = ax_SVD_plot.imshow(U_mat[:,:,component-1],
                               cmap=px.plot_utils.cmap_jet_white_center(),
                               origin='upper', interpolation='none');
    ax_SVD_plot.set_title('#'+str(component), fontsize = 10)
    fig4.colorbar(image, ax = ax_SVD_plot)
fig4.tight_layout()
fig4.suptitle('PCA scores of raw data', weight = 'bold', fontsize = 12, y = 1);
fig44 = plt.figure(figsize=(12,20))
fig44.clf()
fig44.set_facecolor('white')
for component in range(1, n_components+1):
    ax_SVD_plot = plt.subplot(10, 4, component)
    image = ax_SVD_plot.plot(V_mat[component-1,:],'.-');
    ax_SVD_plot.set_title('#'+str(component), fontsize = 10)
fig44.tight_layout()
# (typo fix: 'of of' -> 'of' in the displayed title)
fig44.suptitle('PCA eigenvectors of raw data', weight = 'bold', fontsize = 12, y = 1);
def on_save_button8_clicked(b):
    # Portable save paths via os.path.join; the previous '\PCA...' literals
    # relied on Windows-only literal backslashes.
    for fig, tag in ((fig4, 'PCA_rawdata_scores'), (fig44, 'PCA_rawdata_ev')):
        for ext, fmt in (('.png', 'png'), ('.eps', 'eps'), ('.tif', 'tif')):
            fig.savefig(os.path.join(output_file_path, tag+ext), format=fmt)
button8 = widgets.Button(description = 'Save figure')
display(button8)
button8.on_click(on_save_button8_clicked)
# -
# # BEPS/ FORC viewer
# +
# ('global' at module level is a no-op; kept for notebook-cell symmetry.)
global count
count = 0
# Initial viewer selections (cycle / row / col / write step).
cycle = 0
row = 0
col = 0
dc_step_write = 0
# Spectroscopic dimension to bring to the front (HDF5 labels are bytes).
step_chan = b'read_bias'
# Reshape the SHO fit results to full N-dimensional form.
guess_3d_data, success = px.hdf_utils.reshape_to_Ndims(h5_sho_fit)
h5_sho_spec_inds = px.hdf_utils.getAuxData(h5_sho_fit, auxDataName='Spectroscopic_Indices')[0]
h5_sho_spec_vals = px.hdf_utils.getAuxData(h5_sho_fit, auxDataName='Spectroscopic_Values')[0]
spec_nd, _ = px.hdf_utils.reshape_to_Ndims(h5_sho_spec_inds, h5_spec=h5_sho_spec_inds)
# NOTE(review): duplicated call below -- redundant but harmless.
spec_nd, _ = px.hdf_utils.reshape_to_Ndims(h5_sho_spec_inds, h5_spec=h5_sho_spec_inds)
# sho_spec_sort = get_sort_order(h5_sho_spec_inds)
sho_spec_dims = np.array(spec_nd.shape[1:])
sho_spec_labels = h5_sho_spec_inds.attrs['labels']
h5_pos_inds = px.hdf_utils.getAuxData(h5_sho_fit, auxDataName='Position_Indices')[-1];
pos_nd, _ = px.hdf_utils.reshape_to_Ndims(h5_pos_inds, h5_pos=h5_pos_inds)
pos_dims = list(pos_nd.shape[:h5_pos_inds.shape[1]])
pos_labels = h5_pos_inds.attrs['labels']
# reshape to X, Y, step, all others
spec_step_dim_ind = np.where(sho_spec_labels == step_chan)[0]
step_dim_ind = len(pos_dims) + spec_step_dim_ind
# move the step dimension to be the first after all position dimensions
rest_sho_dim_order = list(range(len(pos_dims), len(guess_3d_data.shape)))
rest_sho_dim_order.remove(step_dim_ind)
new_order = list(range(len(pos_dims))) + step_dim_ind.tolist() + rest_sho_dim_order
# Transpose the 3D dataset to this shape:
sho_guess_Nd_1 = np.transpose(guess_3d_data, new_order)
# Now move the step dimension to the front for the spec labels as well
new_spec_order = list(range(len(sho_spec_labels)))
new_spec_order.remove(spec_step_dim_ind)
new_spec_order = spec_step_dim_ind.tolist() + new_spec_order
# new_spec_labels = sho_spec_labels[new_spec_order]
new_spec_dims = np.array(sho_spec_dims)[new_spec_order]
# Now collapse all additional dimensions
final_guess_shape = pos_dims + [new_spec_dims[0]] + [-1]
sho_dset_collapsed = np.reshape(sho_guess_Nd_1, final_guess_shape)
# Get the bias matrix:
bias_mat, _ = px.hdf_utils.reshape_to_Ndims(h5_sho_spec_vals, h5_spec=h5_sho_spec_inds)
bias_mat = np.transpose(bias_mat[spec_step_dim_ind].squeeze(), new_spec_order).reshape(sho_dset_collapsed.
                                                                                       shape[len(pos_dims):])
# Scale by the external high-voltage amplifier gain.
bias_mat = bias_mat*high_voltage_amplf
num_read_steps = sho_dset_collapsed.shape[1]
num_write_steps = sho_dset_collapsed.shape[2]
# NOTE(review): num_row is taken from shape[1], the same axis as
# num_read_steps above -- confirm this is intentional for this dataset.
num_row = sho_dset_collapsed.shape[1]
num_col = sho_dset_collapsed.shape[0]
print(sho_dset_collapsed.shape)
print('bias mat shape', bias_mat.shape)
num_loops = bias_mat.shape[1]
num_loops_h = int(num_loops/2)
# plt.figure()
# plt.plot(bias_mat[:,:]);
# print(sho_dset_collapsed.attrs)
# print(getattr(obj))
# Maps at the initial write step / cycle selection above.
amp_map = sho_dset_collapsed[:, :, dc_step_write, cycle]['Amplitude [V]']*1000
phase_map = sho_dset_collapsed[:, :, dc_step_write, cycle]['Phase [rad]']
frequency_map = sho_dset_collapsed[:, :, dc_step_write, cycle]['Frequency [Hz]']/1000
Q_map = sho_dset_collapsed[:, :, dc_step_write, cycle]['Quality Factor']
# Real (mixed) component = amplitude * cos(phase).
mixed_map = amp_map*np.cos(phase_map)
mixed_mat_h = sho_dset_collapsed[:, :, :, :]['Amplitude [V]'] * np.cos(sho_dset_collapsed[:, :, :, :]['Phase [rad]'])*1000
print('mixed shape = ', mixed_mat_h.shape)
def make_figure_beps_forc(amp_map, phase_map, mixed_map, Q_map, frequency_map, mixed_mat_h, col, row, cycle, dc_step_write):
    """Render the 2x3 SHO summary figure: five spatial maps (amplitude, phase,
    mixed/real component, quality factor, resonance frequency) plus the
    hysteresis response at pixel (col, row).

    Relies on module-level state: plt, px, bias_mat, num_loops, num_loops_h.
    The figure is published through the global `fig_bf` so the save-button
    callback can reach it.
    """
    global fig_bf
    fig_bf = plt.figure(figsize=(12, 6))
    fig_bf.clf()
    fig_bf.set_facecolor('white')
    # (data, title, optional color limits) for the five image panels
    panels = [
        (amp_map, 'Amplitude [a.u.]', None),
        (phase_map, 'Phase [rad]', [-3.5, 3.5]),
        (mixed_map, 'Real component [a.u.]', None),
        (Q_map, 'Quality factor', None),
        (frequency_map, 'Resonance frequency [kHz]', None),
    ]
    for slot, (img_data, title, limits) in enumerate(panels, start=1):
        axis = plt.subplot(2, 3, slot)
        extra = {} if limits is None else {'clim': limits}
        image = axis.imshow(img_data, cmap=px.plot_utils.cmap_jet_white_center(),
                            origin='upper', interpolation='none', **extra)
        axis.set_title(title, fontsize=10)
        fig_bf.colorbar(image, ax=axis)
    # Panel 6: forward and reverse loop branches at the selected pixel
    axis_h = plt.subplot(2, 3, 6)
    axis_h.plot(bias_mat[:, 0:num_loops//2], mixed_mat_h[row, col, :, 0:num_loops//2], '-.', marker='*')
    axis_h.plot(bias_mat[:, num_loops_h:num_loops], mixed_mat_h[row, col, :, num_loops_h:num_loops], '.-')
    axis_h.set_ylabel('Response [a.u.]')
    axis_h.set_xlabel('Write voltage [V]')
    axis_h.set_title('x = ' + str(col) + ' y = ' + str(row), fontsize=10)
    fig_bf.suptitle('Step# = ' + str(dc_step_write) + ', cycle# = ' + str(cycle)
                    + ', V$_{write}$ = ' + str(bias_mat[dc_step_write, 0]) + ' V',
                    fontsize=12)
    fig_bf.tight_layout()
    plt.subplots_adjust(top=0.9)
# Draw the initial figure once using the maps computed above.
make_figure_beps_forc(amp_map, phase_map, mixed_map, Q_map, frequency_map, mixed_mat_h, col, row, cycle, dc_step_write)
def update_beps_forc_plot(dc_step_write, cycle, col, row):
    """ipywidgets callback: recompute the SHO maps for the selected write step
    and cycle, then redraw the summary figure at pixel (col, row)."""
    frame = sho_dset_collapsed[:, :, dc_step_write, cycle]
    amplitude = frame['Amplitude [V]'] * 1000
    phase = frame['Phase [rad]']
    frequency = frame['Frequency [Hz]'] / 1000
    quality = frame['Quality Factor']
    mixed = amplitude * np.cos(phase)
    everything = sho_dset_collapsed[:, :, :, :]
    mixed_full = everything['Amplitude [V]'] * np.cos(everything['Phase [rad]']) * 1000
    make_figure_beps_forc(amplitude, phase, mixed, quality, frequency, mixed_full, col, row, cycle, dc_step_write)
# NOTE(review): this rebinds num_loops_h (earlier set to num_loops//2) to
# num_loops//2 - 1 — confirm which half-loop split is intended.
num_loops_h =num_loops//2-1
# Interactive controls: write-step, cycle, and (x, y) pixel selectors.
dc_step_write_slider_bf = widgets.IntSlider(min = 0, max = num_write_steps-1, step = 1,value = 0,
                                            description = 'Write step',continuous_update = False)
cycle_slider = widgets.IntSlider(min = 2, max = num_loops-1, step = 1,value = 2,
                                 description = 'Cycle', continuous_update = False)
x_slider_bf = widgets.IntSlider(min = 0, max = num_col-1,step = 1,value = 0,
                                description='x',continuous_update = False)
y_slider_bf = widgets.IntSlider(min = 0,max = num_row-1,step = 1,value = 0, description = 'y',continuous_update = False)
# NOTE(review): slider_dict is not defined in this chunk — it must come from an
# earlier cell; verify it exists before running.
widgets.interact(update_beps_forc_plot, dc_step_write = dc_step_write_slider_bf, cycle = cycle_slider,
                 col = x_slider_bf, row = y_slider_bf, **slider_dict);
def on_save_button9_clicked(b):
    """Save the current SHO-parameter figure (global fig_bf) as png/eps/tif.

    Fix: the original built paths with a literal backslash ('\\SHO_parms...'),
    which is an invalid escape sequence in a normal string literal and a
    Windows-only separator; os.path.join is portable. The global counter
    keeps successive saves from overwriting each other.
    """
    import os  # local import keeps this notebook cell self-contained
    global count
    base = os.path.join(output_file_path, 'SHO_parms' + str(count))
    for fmt in ('png', 'eps', 'tif'):
        fig_bf.savefig(base + '.' + fmt, format=fmt)
    count += 1
# Wire a save button to the callback above.
button9 = widgets.Button(description = 'Save figure')
display(button9)
button9.on_click(on_save_button9_clicked)
# +
# Average the SHO responses over both position axes to get mean loops, then
# plot the mean mixed response, phase and amplitude for loop columns 2:4.
amp_data = sho_dset_collapsed[:, :, :, :]['Amplitude [V]']*1000
phase_data = sho_dset_collapsed[:, :, :, :]['Phase [rad]']
# nanmean applied twice: first over rows (axis 0), then over columns.
amp_mean1 = np.nanmean(amp_data, axis = 0)
amp_mean = np.nanmean(amp_mean1, axis = 0)
phase_mean1 = np.nanmean(phase_data, axis = 0)
phase_mean = np.nanmean(phase_mean1, axis = 0)
mixed_mat_mean1 = np.nanmean(mixed_mat_h, axis = 0)
mixed_mat_mean = np.nanmean(mixed_mat_mean1, axis = 0)
fig_loops = plt.figure(figsize=(16,4))
fig_loops.clf()
fig_loops.set_facecolor('white')
ax_mixed = plt.subplot(1, 3, 1)
# NOTE(review): columns 2:4 hard-code the third and fourth loop cycles —
# confirm these are the cycles of interest.
img_a = ax_mixed.plot(bias_mat[:, 2:4], mixed_mat_mean[:, 2:4],'.-');
ax_mixed.set_xlabel('DC voltage [V]', fontsize = 12)
ax_mixed.set_ylabel('Mixed response [a.u.]', fontsize = 12)
ax_phase = plt.subplot(1, 3, 2)
img_c = ax_phase.plot(bias_mat[:,2:4], phase_mean[:, 2:4],'.-');
ax_phase.set_xlabel('DC voltage [V]', fontsize = 12)
ax_phase.set_ylabel('Phase [rad]', fontsize = 12)
ax_amp = plt.subplot(1, 3, 3)
img_b = ax_amp.plot(bias_mat[:,2:4], amp_mean[:,2:4],'.-');
ax_amp.set_xlabel('DC voltage [V]', fontsize = 12)
ax_amp.set_ylabel('Amplitude [a.u.]', fontsize = 12)
def on_save_button10_clicked(b):
    """Save the mean-loop figure (global fig_loops) as png/eps/tif.

    Fix: replaced the literal-backslash path ('\\Loops...') — an invalid
    escape sequence and a Windows-only separator — with portable os.path.join.
    """
    import os  # local import keeps this notebook cell self-contained
    base = os.path.join(output_file_path, 'Loops')
    for fmt in ('png', 'eps', 'tif'):
        fig_loops.savefig(base + '.' + fmt, format=fmt)
# Wire a save button to the callback above.
button10 = widgets.Button(description = 'Save figure')
display(button10)
button10.on_click(on_save_button10_clicked)
# +
# PCA on real component
from sklearn.utils.extmath import randomized_svd
# NOTE(review): this bare expression has no effect — it slices the second half
# of the loops and discards the result; it looks like a leftover from the
# commented-out SVD experiment below.
mixed_mat_h[:, :, :, num_loops_h:num_loops]
# resp_mat_0Vr = np.reshape(resp_mat_0Vr,(num_row*num_col, num_write_steps))
# U_SVD_resp0Vr, S_SVD_resp0Vr, V_SVD_resp0Vr = randomized_svd(resp_mat_0Vr, n_components = num_components)
# +
# Stand-alone demo (unrelated to the BE analysis above): scatter 100 random
# points and attach an interactive crosshair cursor.
from matplotlib.widgets import Cursor
import numpy as np
import matplotlib.pyplot as plt
fig = plt.figure(figsize=(8, 6))
ax = fig.add_subplot(111)
# 100 points drawn uniformly from [-2, 2) on each axis
x, y = 4*(np.random.rand(2, 100) - .5)
ax.plot(x, y, 'o')
ax.set_xlim(-2, 2)
ax.set_ylim(-2, 2)
# set useblit = True on gtkagg for enhanced performance
cursor = Cursor(ax, useblit=True, color='red', linewidth=2)
plt.show()
# -
# ## Visualize the SHO results of BEline, BEPS and FORC data
# Here, we visualize the parameters for the SHO fits. BE-line (3D) data is visualized via simple spatial maps of the SHO parameters while more complex BEPS datasets (4+ dimensions) can be visualized using a simple interactive visualizer below.
#
# You can choose to visualize the guesses for SHO function or the final fit values from the first line of the cell below.
#
# Use the sliders below to inspect the BE response at any given location.
# +
# Choose which SHO results (guess vs. fit) to visualize, then pick the
# interactive or static visualizer depending on the dataset type.
use_sho_guess = False
use_static_viz_func = False
if use_sho_guess:
    sho_dset = h5_sho_guess
else:
    sho_dset = h5_sho_fit
# BE-line data and non-2D position grids only support the static plots.
if hdf.file.attrs['data_type'] == 'BELineData' or len(pos_dims) != 2:
    use_static_viz_func = True
    step_chan = None
else:
    if h5_main.parent.parent.attrs['VS_mode'] not in ['AC modulation mode with time reversal',
                                                      'DC modulation mode']:
        use_static_viz_func = True
    else:
        # The step channel follows the voltage-spectroscopy mode.
        if h5_main.parent.parent.attrs['VS_mode'] == 'DC modulation mode':
            step_chan = 'DC_Offset'
        else:
            step_chan = 'AC_Amplitude'
if not use_static_viz_func:
    try:
        # use interactive visualization
        px.be_viz_utils.jupyter_visualize_beps_sho(sho_dset, step_chan)
    except Exception:  # fix: was a bare `except:` — don't swallow KeyboardInterrupt/SystemExit
        print('There was a problem with the interactive visualizer')
        use_static_viz_func = True
if use_static_viz_func:
    # show plots of SHO results vs. applied bias
    px.be_viz_utils.visualize_sho_results(sho_dset, show_plots=True,
                                          save_plots=False)
# +
# Reshape the chosen SHO dataset (guess or fit) to N dimensions and collapse
# everything after the voltage-step axis, mirroring the earlier cell.
h5_sho_dset = sho_dset
resp_func = None
guess_3d_data, success = px.io.hdf_utils.reshape_to_Ndims(h5_sho_dset)
h5_sho_spec_inds = px.io.hdf_utils.getAuxData(h5_sho_dset, 'Spectroscopic_Indices')[0]
h5_sho_spec_vals = px.io.hdf_utils.getAuxData(h5_sho_dset, 'Spectroscopic_Values')[0]
spec_nd, _ = px.io.hdf_utils.reshape_to_Ndims(h5_sho_spec_inds, h5_spec=h5_sho_spec_inds)
# sho_spec_sort = get_sort_order(h5_sho_spec_inds)
sho_spec_dims = np.array(spec_nd.shape[1:])
sho_spec_labels = h5_sho_spec_inds.attrs['labels']
h5_pos_inds = px.io.hdf_utils.getAuxData(h5_sho_dset, auxDataName='Position_Indices')[-1]
pos_nd, _ = px.io.hdf_utils.reshape_to_Ndims(h5_pos_inds, h5_pos=h5_pos_inds)
print(pos_nd.shape)
pos_dims = list(pos_nd.shape[:h5_pos_inds.shape[1]])
print(pos_dims)
pos_labels = h5_pos_inds.attrs['labels']
print(pos_labels)
# reshape to X, Y, step, all others
# NOTE(review): assumes the entries of sho_spec_labels compare equal to the
# str step_chan; h5py attrs may yield bytes labels — confirm.
spec_step_dim_ind = np.argwhere(sho_spec_labels == step_chan)[0][0]
step_dim_ind = len(pos_dims) + spec_step_dim_ind
# move the step dimension to be the first after all position dimensions
rest_sho_dim_order = list(range(len(pos_dims), len(guess_3d_data.shape)))
rest_sho_dim_order.remove(step_dim_ind)
new_order = list(range(len(pos_dims))) + [step_dim_ind] + rest_sho_dim_order
# Transpose the N-dimensional dataset to this shape:
sho_guess_Nd_1 = np.transpose(guess_3d_data, new_order)
# Now move the step dimension to the front for the spec labels as well
new_spec_order = list(range(len(sho_spec_labels)))
new_spec_order.remove(spec_step_dim_ind)
new_spec_order = [spec_step_dim_ind] + new_spec_order
# new_spec_labels = sho_spec_labels[new_spec_order]
new_spec_dims = np.array(sho_spec_dims)[new_spec_order]
# Now collapse all additional dimensions into the trailing axis
final_guess_shape = pos_dims + [new_spec_dims[0]] + [-1]
sho_dset_collapsed = np.reshape(sho_guess_Nd_1, final_guess_shape)
# Get the bias matrix, reordered to match the collapsed dataset:
bias_mat, _ = px.io.hdf_utils.reshape_to_Ndims(h5_sho_spec_vals, h5_spec=h5_sho_spec_inds)
bias_mat = np.transpose(bias_mat[spec_step_dim_ind], new_spec_order).reshape(sho_dset_collapsed.shape[len(pos_dims):])
# This is just the visualizer: default quantity and pixel/step selection
sho_quantity = 'Amplitude [V]'
step_ind = 0
row_ind = 1
col_ind = 1
def dc_spectroscopy_func(resp_vec):
    """Mixed (in-phase) DC response: amplitude * cos(phase), scaled by 1e3."""
    amplitude = resp_vec['Amplitude [V]']
    phase = resp_vec['Phase [rad]']
    return 1E+3 * amplitude * np.cos(phase)
def ac_spectroscopy_func(resp_vec):
    """AC spectroscopy response: simply the SHO amplitude field."""
    field_name = 'Amplitude [V]'
    return resp_vec[field_name]
# Select the response transform for the loop plots based on the step channel.
if resp_func is None:
    if step_chan == 'DC_Offset':
        resp_func = dc_spectroscopy_func
        # Fix: raw string — '\p' is an invalid escape sequence in a normal
        # string literal (DeprecationWarning; SyntaxWarning in newer Pythons).
        # The rendered bytes are identical.
        resp_label = r'A cos($\phi$) (a. u.)'
    else:
        resp_func = ac_spectroscopy_func
        resp_label = 'Amplitude (a. u.)'
# One spatial map at the chosen step, and the full response at one pixel.
spatial_map = sho_dset_collapsed[:, :, step_ind, 0][sho_quantity]
resp_vec = sho_dset_collapsed[row_ind, col_ind, :, :]
resp_vec = resp_func(resp_vec)
# -
# ## Fit loops to a function
# This is applicable only to DC voltage spectroscopy datasets from BEPS. The PFM hysteresis loops in this dataset will be projected to maximize the loop area and then fitted to a function.
#
# Note: This computation generally takes a while for reasonably sized datasets.
# Do the Loop Fitting on the SHO Fit dataset
loop_success = False
# Reuse previously computed loop fits if a Loop_Fit group already exists.
# NOTE(review): this cell uses px.hdf_utils while earlier cells use
# px.io.hdf_utils — confirm both aliases exist in this pycroscopy version.
h5_loop_group = px.hdf_utils.findH5group(h5_sho_fit, 'Loop_Fit')
if len(h5_loop_group) == 0:
    try:
        loop_fitter = px.BELoopModel(h5_sho_fit, variables=['read_bias'], parallel=True)
        print('No loop fits found. Fitting now....')
        h5_loop_guess = loop_fitter.do_guess(processors=max_cores, max_mem=max_mem)
        h5_loop_fit = loop_fitter.do_fit(processors=max_cores, max_mem=max_mem)
        loop_success = True
    except ValueError:
        # Loop fitting only applies to DC spectroscopy; surface the error.
        print('Loop fitting is applicable only to DC spectroscopy datasets!')
        raise
else:
    loop_success = True
    print('Taking previously computed loop fits')
    h5_loop_guess = h5_loop_group[-1]['Guess']
    h5_loop_fit = h5_loop_group[-1]['Fit']
# ## Prepare datasets for visualization
# Prepare some variables for plotting loops fits and guesses
# Prepare variables for plotting the loop guesses and fits.
if loop_success:
    h5_projected_loops = h5_loop_guess.parent['Projected_Loops']
    h5_proj_spec_inds = px.hdf_utils.getAuxData(h5_projected_loops,
                                                auxDataName='Spectroscopic_Indices')[-1]
    h5_proj_spec_vals = px.hdf_utils.getAuxData(h5_projected_loops,
                                                auxDataName='Spectroscopic_Values')[-1]
    # reshape the vdc_vec into DC_step by Loop
    sort_order = px.hdf_utils.get_sort_order(h5_proj_spec_inds)
    dims = px.hdf_utils.get_dimensionality(h5_proj_spec_inds[()],
                                           sort_order[::-1])
    vdc_vec = np.reshape(h5_proj_spec_vals[h5_proj_spec_vals.attrs['read_bias']], dims).T
    # Also reshape the projected loops to Positions-DC_Step-Loop
    proj_nd, _ = px.hdf_utils.reshape_to_Ndims(h5_projected_loops)
    proj_3d = np.reshape(proj_nd, [h5_projected_loops.shape[0],
                                   proj_nd.shape[2], -1])
# ## Visualize Loop fits
# Plot the loop fits: try the interactive viewer first, fall back to static.
use_static_plots = False
if loop_success:
    if not use_static_plots:
        try:
            px.be_viz_utils.jupyter_visualize_beps_loops(h5_projected_loops, h5_loop_guess, h5_loop_fit)
        except Exception:  # fix: was a bare `except:` — don't swallow KeyboardInterrupt/SystemExit
            print('There was a problem with the interactive visualizer')
            use_static_plots = True
    if use_static_plots:
        # One static guess-vs-fit figure per loop, aggregated over positions.
        for iloop in range(h5_loop_guess.shape[1]):
            fig, ax = px.be_viz_utils.plot_loop_guess_fit(vdc_vec[:, iloop], proj_3d[:, :, iloop],
                                                          h5_loop_guess[:, iloop], h5_loop_fit[:, iloop],
                                                          title='Loop {} - All Positions'.format(iloop))
# ## Save and close
# * Save the .h5 file that we are working on by closing it. <br>
# * Also, consider exporting this notebook as a notebook or an html file. <br> To do this, go to File >> Download as >> HTML
# * Finally consider saving this notebook if necessary
# Close the HDF5 file; all computed results were written as the analyses ran.
hdf.close()
| notebooks/be/BE_Processing_cKPFM.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# language: python
# name: python3
# ---
# # Variable
# Three basic types: int, float, str.
x=1
y=3.14159
z='Hello'
print(x)
print(y)
print(z)
# type() reveals each variable's class (int / float / str).
type(x)
type(y)
type(z)
# # Basic operator
# Multiplication binds tighter than addition, so x+y*2 == 5.
x=1
y=2
print(x+y*2)
# The same operators on strings: + concatenates, * repeats.
x="Hello "
y="World! "
print(x+y*2)
# # List and operator
x=['a','b','c']
x.append('d')
print(x)
# pop(0) removes the first element; pop() with no argument removes the last.
x.pop(0)
x.pop()
x
# # List Access by Index
x=['a','b','c','d','e','f']
x[0]
x[1]
# Negative indices count from the end (-1 is the last element).
x[-1]
x[-2]
# # List Slicing
x[0:3]
x[2:5]
x[2:-1]
# A third slice component is the step; [::-1] reverses the list.
x[0:-1:2]
x[::-1]
x
x[1:]
# # Dictionary and operator
x={'wasit':217, 'einstein':218}
x['wasit']
# Assigning to an existing key overwrites; assigning a new key inserts.
x['wasit']=224
x
x['david']=225
x
# NOTE: this line raises KeyError ('bob' is not a key yet) — it deliberately
# motivates the safe .get() lookups below, which return a default instead.
x['bob']
x.get('bob','not found')
x.get('david','not found')
x={
    'wasit':224,
    'david':225,
    'bob':226,
    'einstein':227
}
# Interactive lookup: blocks waiting for user input when run.
name=input('Please enter name:')
number=x.get(name,'not found')
print('{} number is {}'.format(name,number))
| dsi200_demo/03 Python Variables and Operators.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 2
# language: python
# name: python2
# ---
# +
from Block import *
from Day import *
# Build two journal days, each holding free-text blocks, then extract keywords.
# (This notebook runs a Python 2 kernel — note the print statements.)
first_block = Block()
second_block = Block()
tomorrow_block = Block()
today = Day()
tomorrow = Day()
today.add_block(first_block)
today.add_block(second_block)
today.set_date("20180915")
tomorrow.add_block(tomorrow_block)
tomorrow.set_date("20180916")
first_block.set_text("We went on a trip to Seattle, the Space Needle was amazing!")
first_block.set_keywords()
second_block.set_text("Bob went on a run in Central Park")
second_block.set_keywords()
tomorrow_block.set_text("Under the Space Needle, Max scored 30 points in the basketball game")
tomorrow_block.set_keywords()
print tomorrow_block.get_keywords()
# Roll the per-block keywords up to day level.
today.add_all_keywords()
tomorrow.add_all_keywords()
print(today.get_keywords())
# print(today.blocks[0].get_keywords())
# -
# Quick checks of dict printing and the Block string/JSON representations.
asdf = {"Alpha": 2,
        "Beta": 30,
        "Theta": 123}
print str(asdf)
print first_block
print first_block.return_json()
# +
# +
from corpus_dictionary import *
# Aggregate the words from both days into a shared corpus.
corpus = Corpus()
corpus.add_words(today)
corpus.add_words(tomorrow)
print corpus.words
# +
# Add the raw text and compute TF-IDF weights over the two-day corpus.
corpus.add_text_to_corpus(today)
corpus.add_text_to_corpus(tomorrow)
corpus.calculate_tfidf()
print corpus.tfidf_weights
print corpus.corpus
# -
import json
# Serialize the Day object by recursively dumping each object's __dict__.
s = json.dumps(today, default=lambda o: o.__dict__)
print s
import requests
# SECURITY: a live Azure subscription key is hard-coded in source here.
# Revoke this key and load it from an environment variable or a config file
# that is excluded from version control.
headers = {
    'Ocp-Apim-Subscription-Key': '59bf3be460ad434585a4b4143c470a92',
}
azure_url = "https://eastus.api.cognitive.microsoft.com/text/analytics/v2.0/KeyPhrases"
# Single-document payload in the Text Analytics v2.0 KeyPhrases format.
document = {'documents': [
    {'id': '1', 'text': 'We went on a trip to Seattle, the Space Needle was amazing!'}
]}
response = requests.post(azure_url, headers=headers, json=document)
keywords = response.json()
print keywords
# Extract the key phrases of the first (only) document as UTF-8 byte strings.
[keyword.encode('utf-8') for keyword in keywords['documents'][0]['keyPhrases']]
# +
from sklearn.feature_extraction.text import TfidfVectorizer
import operator
# Toy TF-IDF: fit on two sentences and rank terms by IDF, highest first.
corpus = ["This is very strange", "This is very nice"]
vectorizer = TfidfVectorizer(min_df=1)
X = vectorizer.fit_transform(corpus)
idf = vectorizer.idf_
weights = dict(zip([word.encode('utf-8') for word in vectorizer.get_feature_names()], idf))
# sorted(...) returns a list of (term, idf) pairs in descending idf order.
weights = sorted(weights.items(), key=operator.itemgetter(1), reverse=True)
print weights[0][0]
| backend/nlp playground.ipynb |
# -*- coding: utf-8 -*-
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .jl
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Julia 1.4.0
# language: julia
# name: julia-1.4
# ---
# # JuliaでUnionFindを実装する
# > Julia言語でUnionFindを実装します
#
# - toc: true
# - badges: true
# - comments: true
# - categories: [AtCoder]
# - image: images/chart-preview.png
# + active=""
# fieldの値が更新されているのにもかかわらず、structとなっていますが、fieldがmutableなオブジェクトであればmutable structにせずとも中身が更新できます。
# また、rootは経路圧縮を、uniteは統合する処理を行う破壊的な関数なので、Juliaの慣習にしたがって!をつけています
# +
# Union-Find (disjoint set) over nodes 1..N with union-by-size.
# The parent vector starts as the identity (every node is its own root).
struct UnionFind
    par :: Array{Int, 1}
    size :: Array{Int, 1}
    # Fix: every singleton set has size 1. The original seeded size with
    # collect(1:N) (sizes 1..N), which skews union-by-size balancing.
    UnionFind(N) = new(collect(1:N), ones(Int, N))
end
# Return the representative of x's set, compressing the path as the
# recursion unwinds (parents are rewired directly to the root).
function root!(uf::UnionFind, x::Int)
    # Fix: the recursive call must be root! — the original called `root`,
    # which is never defined (UndefVarError at first non-root lookup).
    if uf.par[x] == x
        return x
    else
        return uf.par[x] = root!(uf, uf.par[x])
    end
end
# x and y belong to the same set iff they share a representative.
function issame(uf::UnionFind, x::Int, y::Int)
    # Fix: call root! — the file defines root!, not root (UndefVarError).
    return root!(uf, x) == root!(uf, y)
end
# Merge the sets containing x and y (union by size); returns true always.
function unite!(uf::UnionFind, x::Int, y::Int)
    # Fix: the representatives must come from root! — the original called
    # the undefined name `root` (UndefVarError).
    x = root!(uf, x)
    y = root!(uf, y)
    (x == y) && (return true)
    # Attach the smaller tree under the larger one to keep depth low.
    if (uf.size[x] < uf.size[y])
        uf.par[x] = y
        uf.size[y] += uf.size[x]
    else
        uf.par[y] = x
        uf.size[x] += uf.size[y]
    end
    return true
end
# -
# Demo: merge {2,3} and {1,4,5}; the final call shows 2's representative.
# Fix: use the `!`-suffixed names actually defined above — `unite` and
# `root` do not exist (UndefVarError).
uf = UnionFind(5)
unite!(uf, 2, 3)
unite!(uf, 1, 4)
unite!(uf, 1, 5)
root!(uf, 2)
# [https://atcoder.jp/contests/atc001/submissions/18284457]
| _notebooks/2020-12-20-UnionFind.ipynb |