code stringlengths 38 801k | repo_path stringlengths 6 263 |
|---|---|
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# language: python
# name: python3
# ---
# + language="bash"
# ls -lrt
# + language="bash"
# # RAM
# free -m
# + language="bash"
# # CPU and processed
# # top
# # htop
# + language="bash"
# # processes
# ps aux | grep "jupy"
# echo
# pgrep -fa "jupy"
# + language="bash"
# du -h ..
# + language="bash"
# # du --help
# # man du
# + language="bash"
# df -hT
# + language="bash"
# which ls
# which du
# + language="bash"
# ip addr
# + language="bash"
# # nload -u H wlp58s0
# + language="bash"
# echo ${PATH}
# + language="bash"
# echo ciao
# + language="bash"
# echo $?
# + language="bash"
# false
# echo $?
# echo $?
# + language="bash"
# echo $0
# # works for bash and zsh, not for fish
# + language="bash"
# echo $PIPPO
# export PIPPO="pluto"
# echo $PIPPO
# + language="bash"
# cat /dev/urandom | tr -dc '[:graph:]' | fold -w 100 | head -n 1
# -
# Bash
| step0/.ipynb_checkpoints/bash-checkpoint.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .jl
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Julia 1.0.0
# language: julia
# name: julia-1.0
# ---
# # The Path Problem
# The path problem is to determine whether there is a path between each pair of vertices of a graph
# ## Random Graphs
# In order to test algorithms for the path problem, we first write code to generate random graphs.
#
# The function below generates a random graph
#
# A new function is defined in julia with the following elements:
#
# 1. The ```function``` keyword
# 2. The name of the function
# 3. The names of the arguments to the function in parentheses and separated by commas
# 4. The body or code of the function
# 5. The ```end``` keyword
#
# The function below generates the incidence matrix of a graph with ```n``` vertices and probability ```p``` that each edge exists
# Build a random directed graph on `n` vertices, returned as a Bool
# incidence matrix: each entry is independently true with probability `p`.
function randomgraph( n, p )
    weights = rand(n, n)          # uniform draws in [0, 1)
    return weights .< p           # edge exists where the draw falls below p
end
# The following code shows how to call this function to generate a random graph
using Random
Random.seed!(1)
graph = randomgraph(5, 0.25)
# ## The Brute Force Approach
# The following code implements the brute force approach to the path problem. Some of the features of this code include:
#
# 1. The default arguments for the function
# 1. The ```if``` statement
# 2. The ```isempty``` function
# 3. The ```push!``` and ```pop!``` functions
# 4. The function is called recursively (calls itself)
# Brute-force path search by recursive backtracking.
#
# Enumerates every simple path in the graph and records, for each path found,
# that its first vertex reaches its last vertex.
#
# Arguments:
#   edge     - Bool incidence matrix; edge[i,j] true when edge i -> j exists
#   path     - output matrix, filled in place; path[i,j] true when a path
#              from i to j was found (diagonal entries become true as well)
#   currpath - scratch stack of the vertices on the path being extended
function findpaths!( edge, path = falses(size(edge,1),size(edge,1)), currpath = Int[] )
    nverts = size(edge, 1)
    for candidate in 1:nverts
        # skip vertices already on the current path (simple paths only)
        candidate in currpath && continue
        if isempty(currpath) || edge[currpath[end], candidate]
            push!(currpath, candidate)
            # the path's start vertex reaches the newly appended vertex
            path[currpath[1], candidate] = true
            findpaths!(edge, path, currpath)
            pop!(currpath)        # backtrack
        end
    end
    return path
end
# findpaths! can be called as follows:
findpaths!( graph )
# ## The dynamic programming approach
# Dynamic-programming (Floyd–Warshall style) transitive closure.
#
# Returns a new Bool matrix `reach` where reach[i,j] is true when vertex j is
# reachable from vertex i (every vertex reaches itself). The input `edge`
# matrix is not modified; a copy is taken first.
function findpaths2!( edge )
    reach = copy(edge)
    nverts = size(edge, 1)
    for via = 1:nverts
        reach[via, via] = true                # each vertex reaches itself
        for src = 1:nverts, dst = 1:nverts
            # src -> dst holds whenever both legs through `via` hold
            if reach[src, via] && reach[via, dst]
                reach[src, dst] = true
            end
        end
    end
    return reach
end
# Generate a bunch of random graphs to see if the two functions generate the same results
Random.seed!(1)
n = 10
for i = 1:10
edge = randomgraph(n, 0.3)
path = findpaths!( edge )
path2 = findpaths2!( edge )
@assert( path == path2 )
end
# Let's time the code for the fully connected graph with 1 through 10 vertices
# Note that the ```time_ns``` function gives a measure of the current time in nanoseconds
times = Float64[]
for n = 1:10
edge = trues(n,n)
start = time_ns()
findpaths!( edge )
push!( times, (time_ns() - start)/10^9 )
end
# Notice how rapidly the time increases
times
# Let's plot the times
using Plots
plot(times, legend=:none, title="Time to Compute Paths")
# Save the last plot
savefig("time.png")
# To calculate the time per iteration, we take a look at the time divided by the factorial
times ./ factorial.(1:10)
timeperiteration = times[end]/factorial(10)
methods(factorial)
factorial(BigFloat(25))
# This is how long we think it would take (in years) to perform this calculation for 25 vertices
#
# Note that the age of the universe is believed to be about 1.4e10 years
time25 = timeperiteration * factorial(BigFloat(25))/(365 * 24 * 60 * 60)
# How many years would an exponential time algorithm take for 100 vertices
timeperiteration*BigFloat(2)^100/(365*24*60*60)
# Let's calculate the times for the dynamic programming version
#
# For this one, we can go to 100
times2 = Float64[]
for n = 1:100
edge = trues(n,n)
start = time_ns()
findpaths2!( edge )
push!( times2, (time_ns() - start)/10^9 )
end
# The dynamic programming algorithm takes roughly 300 times less time for 100 vertices than the brute force algorithm took for just 10 vertices
times2
# Here is the speedup factor
#
# The dynamic programming algorithm is over 200,000 times faster for 10 vertices
times./times2[1:10]
# Let's work out how big we can run with the dynamic programming algorithm
#
# First, we look at the amount of time per iteration
times2 ./ (1:100).^3
# The time per iteration is about 4e-9
timeperiteration2 = 4e-9
# How long do we think it would take to calculate for 10,000 vertices
timeperiteration2*(10000^3)/60
# How long would it take the brute force approach for 14 vertices
timeperiteration*factorial(14)/60
# # Communicating Classes
# Recall the random graph we created earlier
graph
# Find all vertices which are connected via paths in this graph
path = findpaths2!( graph )
# Calculate the communicating classes
# Partition the vertices into communicating classes.
#
# Two vertices communicate when each is reachable from the other according to
# the reachability matrix `path`. Each class is labeled by its lowest-numbered
# member, and the returned vector maps every vertex to its class label.
function classes( path )
    nverts = size(path, 1)
    reps = Int[]                    # representative vertex of each class so far
    label = zeros(Int, nverts)      # label[v] = class label of vertex v (0 = unset)
    for v = 1:nverts
        for rep in reps
            # v joins rep's class when paths exist in both directions
            if path[v, rep] && path[rep, v]
                label[v] = rep
            end
        end
        if label[v] == 0
            # v communicates with no existing class: start a new one
            push!(reps, v)
            label[v] = v
        end
    end
    return label
end
# Check the classes
graph
classes( path )
# A topic called "Random Graph Theory" studies this kind of random graph
#
# It is known that when the probability of edges $\gt\frac{1}{n}$, there is a high probability of the graph being connected and when it is less, there are only small components
Random.seed!(1)
graph = randomgraph( 1000, 2/1000 )
path = [
true false true;
false true false;
true true true;
]
path = findpaths2!( graph )
classes( path )
sum( classes(path) .== 3 )
| lessons/Dynamic Programming.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3 (ipykernel)
# language: python
# name: python3
# ---
# + [markdown] papermill={"duration": 0.014055, "end_time": "2021-09-11T18:52:00.267751", "exception": false, "start_time": "2021-09-11T18:52:00.253696", "status": "completed"} tags=[]
# # 1. Parameters
# + papermill={"duration": 0.023587, "end_time": "2021-09-11T18:52:00.299644", "exception": false, "start_time": "2021-09-11T18:52:00.276057", "status": "completed"} tags=["parameters"]
# Defaults
simulation_dir = 'simulations/unset'
ncores = 48
# + papermill={"duration": 0.015215, "end_time": "2021-09-11T18:52:00.321998", "exception": false, "start_time": "2021-09-11T18:52:00.306783", "status": "completed"} tags=["injected-parameters"]
# Parameters
read_coverage = 30
mincov = 10
simulation_dir = "simulations/alpha-1.0-cov-30"
iterations = 3
sub_alpha = 1.0
# + papermill={"duration": 0.018258, "end_time": "2021-09-11T18:52:00.345768", "exception": false, "start_time": "2021-09-11T18:52:00.327510", "status": "completed"} tags=[]
from pathlib import Path
import os
simulation_data_dir = Path(simulation_dir) / 'simulated_data'
initial_reads_dir = simulation_data_dir / 'reads_initial'
reads_dir = simulation_data_dir / 'reads'
assemblies_dir = simulation_data_dir / 'assemblies'
if not reads_dir.exists():
os.mkdir(reads_dir)
# + [markdown] papermill={"duration": 0.005644, "end_time": "2021-09-11T18:52:00.357003", "exception": false, "start_time": "2021-09-11T18:52:00.351359", "status": "completed"} tags=[]
# # 2. Fix reads
#
# Fix read file names and data so they can be indexed.
# + papermill={"duration": 0.187785, "end_time": "2021-09-11T18:52:00.549045", "exception": false, "start_time": "2021-09-11T18:52:00.361260", "status": "completed"} tags=[]
import os
# Fix warning about locale unset
os.environ['LANG'] = 'en_US.UTF-8'
# !pushd {initial_reads_dir}; prename 's/data_//' *.fq.gz; popd
# + [markdown] papermill={"duration": 0.008182, "end_time": "2021-09-11T18:52:00.569903", "exception": false, "start_time": "2021-09-11T18:52:00.561721", "status": "completed"} tags=[]
# Jackalope produces reads with non-standard identifiers where pairs of reads don't have matching identifiers. For example:
#
# * Pair 1: `@SH08-001-NC_011083-3048632-R/1`
# * Pair 2: `@SH08-001-NC_011083-3048396-F/2`
#
# In order to run snippy, these paired identifiers need to match (except for the `/1` and `/2` suffix).
#
# So, I have to replace them all with something unique, but which matches in each pair of files. I do this by replacing the position (I think) with the read number (as it appears in the file). So the above identifiers become:
#
# * Pair 1: `@SH08-001-NC_011083-1/1`
# * Pair 2: `@SH08-001-NC_011083-1/2`
# + papermill={"duration": 1.010042, "end_time": "2021-09-11T18:52:01.585822", "exception": false, "start_time": "2021-09-11T18:52:00.575780", "status": "completed"} tags=[]
import glob
import os
files = [os.path.basename(f) for f in glob.glob(f'{initial_reads_dir}/*.fq.gz')]
# !parallel -j {ncores} -I% 'gzip -d --stdout {initial_reads_dir}/% | perl scripts/replace-fastq-header.pl | gzip > {reads_dir}/%' \
# ::: {' '.join(files)}
# + papermill={"duration": 0.033416, "end_time": "2021-09-11T18:52:01.633684", "exception": false, "start_time": "2021-09-11T18:52:01.600268", "status": "completed"} tags=[]
import shutil
shutil.rmtree(initial_reads_dir)
# + [markdown] papermill={"duration": 0.009069, "end_time": "2021-09-11T18:52:01.656136", "exception": false, "start_time": "2021-09-11T18:52:01.647067", "status": "completed"} tags=[]
# # 3. Fix assemblies
#
# Fix assembly genome names
# + papermill={"duration": 0.184968, "end_time": "2021-09-11T18:52:01.847741", "exception": false, "start_time": "2021-09-11T18:52:01.662773", "status": "completed"} tags=[]
# !pushd {assemblies_dir}; prename 's/data__//' *.fa.gz; popd
| evaluations/simulation/2-fix-simulated-files.simulation-alpha-1.0.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# name: python3
# ---
# + [markdown] id="view-in-github" colab_type="text"
# <a href="https://colab.research.google.com/github/walassicoelho/My-first-web-project/blob/master/Aula_de_Python_1.ipynb" target="_parent"><img src="https://colab.research.google.com/assets/colab-badge.svg" alt="Open In Colab"/></a>
# + colab={"base_uri": "https://localhost:8080/"} id="UIk5ELIWG9lL" outputId="d188bfb9-e828-4dda-97da-f11f744c34c1"
nome = "Walassi"
print(nome)
# + colab={"base_uri": "https://localhost:8080/", "height": 36} id="8IKyvgNuHoO6" outputId="0597e816-660e-4323-c3eb-9bbd6fcfe133"
nome = "Walassi"
nome
# + colab={"base_uri": "https://localhost:8080/"} id="5kV_OLAhIG-J" outputId="5e23ebdf-4efd-4ffc-cc74-47077fe1bd6e"
idade = 22
idade
# + colab={"base_uri": "https://localhost:8080/"} id="7T7Ku58AINBH" outputId="28d98e36-1b68-44ed-d41f-7d7d4e8743ba"
idade + 3
# + colab={"base_uri": "https://localhost:8080/"} id="5Hr6vwIBIRJr" outputId="7c92e65a-f3e0-4a54-8bde-2f95ee95e28a"
idade = idade + 3
idade
# + id="hKrREzlqIb5s"
def mais_um_ano(idade):
    """Print a marker showing the function ran, then return the age plus one."""
    print("ta dentro dessa função")
    nova_idade = idade + 1
    return nova_idade
# + colab={"base_uri": "https://localhost:8080/"} id="WwaNPxAbJUF7" outputId="6bf9d4e0-6d06-4748-a534-21db6c75f603"
mais_um_ano(23)
# + id="AFeNoy6uJgBa"
filme1 = "Toy Story 1"
filme2 = "A Múmia"
filme3 = "Matrix 1"
# + id="m6RUEdvdJ3xM"
filmes = ["Toy Story 1", "A Múmia", "Matrix 1"]
# + colab={"base_uri": "https://localhost:8080/"} id="sVihME70KF9e" outputId="448d8463-cb35-42e3-cd72-d2948d255623"
filmes
# + colab={"base_uri": "https://localhost:8080/"} id="rKt30xgZKHRE" outputId="2153d3c2-7dba-4af1-8f7e-09bbc66bdce3"
filmes = [filme1, filme2, filme3]
filmes
# + id="Sur4ZKZXKRrD"
def imprime_filmes(filmes):
    """Print a header line followed by the given list of movies."""
    cabecalho = "A lista de filmes que eu tenho disponivel"
    print(cabecalho)
    print(filmes)
# + colab={"base_uri": "https://localhost:8080/"} id="TQHPNEL4Kp4Y" outputId="db704c84-8ecd-481b-e491-6a51edbc0fb9"
imprime_filmes(filmes)
# + colab={"base_uri": "https://localhost:8080/", "height": 36} id="jagZ7-LqKsNN" outputId="cfda846c-c003-462e-d3e3-ff160ec0b467"
filmes[1]
# + colab={"base_uri": "https://localhost:8080/", "height": 36} id="0SdaOpriK7QC" outputId="bd4a182f-0ff6-485f-cd7f-0413c38d6a98"
filmes[-2]
# + colab={"base_uri": "https://localhost:8080/", "height": 36} id="3ROsqTE3LUtM" outputId="5c4e03bf-3568-4742-c996-48058315a525"
filmes[-1]
# + id="MKtyuVKtLYBv"
| Aula_de_Python_1.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: jupyter
# language: python
# name: jupyter
# ---
# +
#pip install pennylane
# +
import pennylane as qml
from pennylane import numpy as np
np.random.seed(0)
import flatnetwork_simple as fns
# -
# # REAL S&P 500 DATASET
from dataloader_class import *
dl = data_loader('data/covid2020.csv')
dl.split_windows(180)
dl.plot_trajectories()
dl.filter_assets(num_assets=2, method='relevant')
dl.plot_trajectories()
mut = dl.return_mut()
sigmat = dl.return_sigmat()
print(mut.shape)
# # DMRG SOLUTION
# +
L = [mut.shape[0],mut.shape[1],2]
d = 2
#Parameters
mu = lambda t,i: mut[t,i]
rho = 5.0
fnet = fns.FlatNetwork(L,d)
fnet.loadParams(mu, rho)
fnet.make_mpos()
S, D = fnet.getHamiltonian()
# +
sweepD = [ 1, 2, 2, 3, 3, 4, 5, 8, 10]
sweepI = [ 10, 10, 10, 10, 10, 10, 10, 10, 10]
sweepN = [1.0e-2,1.0e-3,1.0e-4,1.0e-5,1.0e-6,1.0e-6,1.0e-6,1.0e-6,1.0e-6]
sweepM = [ 5, 5, 5, 5, 5, 5, 5, 5, 5]
cc = 5
e, mps = fnet.run(sweepd = sweepD[:cc], sweepi = sweepI[:cc], sweepn = sweepN[:cc], sweepmin = sweepM[:cc])
# +
fvs = fnet.computeWeights(mps)
fvm = np.zeros([L[1],L[0]])
for pair in fvs:
fvm[pair[1],pair[0]] = fvs[pair]
if(abs(fvs[pair]>1.0e-8)):
print(pair, fvs[pair], mu(pair[0],pair[1]))
# +
import matplotlib.pyplot as plt
ts = [t for t in range(0,L[0])]
for i in range(L[1]):
plt.plot(ts,fvm[i,:],'-x')
plt.xlabel('Time', fontsize=20)
plt.ylabel('Weight', fontsize=20)
# +
for i in range(L[1]):
plt.plot(ts,mut[:,i],'-x')
plt.xlabel('Time', fontsize=20)
plt.ylabel('Relative Profit', fontsize=20)
# -
# # QAOA
# +
#Must convert all parameters to pauli basis
Snew = {}
Inew = {}
for pair in S:
Snew[pair[0]] = -S[pair]/2 if not pair[0] in Snew else Snew[pair[0]] - S[pair]/2
Inew[pair[0]] = S[pair]/2 if not pair[0] in Inew else Inew[pair[0]] + S[pair]/2
for pair in D:
if(pair[0]==pair[1]): #n^2 term
Snew[pair[0]] = -D[pair]/2 if not pair[0] in Snew else Snew[pair[0]] -D[pair]/2
Inew[pair[0]] = D[pair]/4 if not pair[0] in Inew else Inew[pair[0]] + D[pair]/4
else:
Snew[pair[0]] = -D[pair]/2 if not pair[0] in Snew else Snew[pair[0]] -D[pair]/2
Snew[pair[1]] = -D[pair]/2 if not pair[1] in Snew else Snew[pair[1]] -D[pair]/2
#Z*Z is I
S2new = {}
for pair in D:
if(pair[0]==pair[1]): #n^2 term
Inew[pair[0]] = -D[pair]/4 if not pair[0] in Inew else Inew[pair[0]] - D[pair]/4
Dnew = {}
I2new = {}
for pair in D:
if(pair[0]!=pair[1]): #n_a n_b term
Dnew[pair] = D[pair]/4 if not pair in Dnew else Dnew[pair[0]] + D[pair]/4
I2new[pair] = D[pair]/4 if not pair in I2new else I2new[pair[0]] + D[pair]/4
print(S)
print(D)
print('\n')
print(Snew)
print(S2new)
print(Dnew)
print(Inew)
print(I2new)
# +
n_wires = int(np.prod(L))
H = qml.Hamiltonian(
[Snew[wire] for wire in Snew] + [Inew[wire] for wire in Inew] + [Dnew[wire] for wire in Dnew] +
[I2new[wire] for wire in I2new],
[qml.PauliZ(wire) for wire in Snew] + [qml.Identity(wire) for wire in Inew] + [qml.PauliZ(wire[0])@qml.PauliZ(wire[1]) for wire in Dnew]
+ [qml.Identity(wire[0]) @ qml.Identity(wire[1]) for wire in I2new]
)
print(H)
# +
Hm = qml.Hamiltonian(
[1.0 for wire in range(n_wires)],
[qml.PauliX(wire) for wire in range(n_wires)]
)
print(Hm)
# -
from pennylane import qaoa
def qaoa_layer(gamma, alpha):
    # One QAOA layer: evolve under the cost Hamiltonian `H` for angle `gamma`,
    # then under the mixer Hamiltonian `Hm` for angle `alpha`.
    # NOTE(review): `H` and `Hm` are module-level globals built earlier in the
    # notebook; this function has no meaning before they are defined.
    qaoa.cost_layer(gamma, H)
    qaoa.mixer_layer(alpha, Hm)
# +
depth = 2
wires = range(n_wires)
def circuit(params, **kwargs):
    # Full QAOA ansatz: start every wire in the uniform superposition via
    # Hadamards, then apply `depth` repetitions of qaoa_layer.
    # params[0] holds the gamma angles, params[1] the alpha angles
    # (one entry per layer). `wires` and `depth` are module-level globals.
    for w in wires:
        qml.Hadamard(wires=w)
    qml.layer(qaoa_layer, depth, params[0], params[1])
# -
dev = qml.device("qulacs.simulator", wires=wires)
cost_function = qml.ExpvalCost(circuit, H, dev)
optimizer = qml.GradientDescentOptimizer()
steps = 200
params = [[0.5,0.5], [0.5,0.5]]
# +
for i in range(steps):
params = optimizer.step(cost_function, params)
if (i+1)%5 == 0:
print('Step {:5d}: {: .7f}'.format(i+1, cost_function(params)))
print("Optimal Parameters")
print(params)
# +
@qml.qnode(dev)
def probability_circuit(gamma, alpha):
    # QNode on device `dev` that runs the QAOA circuit with the given layer
    # angles and returns the computational-basis probabilities over all wires.
    circuit([gamma,alpha])
    return qml.probs(wires=wires)
probs = probability_circuit(params[0], params[1])
# +
import matplotlib.pyplot as plt
plt.style.use("seaborn")
plt.bar(range(2**len(wires)), probs)
plt.show()
# -
print(np.argmax(probs))
# +
for i in range(steps):
params = optimizer.step(cost_function, params)
if (i+1)%5 == 0:
print('Step {:5d}: {: .7f}'.format(i+1, cost_function(params)))
print("Optimal Parameters")
print(params)
# +
import matplotlib.pyplot as plt
plt.style.use("seaborn")
plt.bar(range(2**len(wires)), probs)
plt.show()
# -
# Most probable state
sv = bin(np.argmax(probs))[2:].zfill(len(wires))
print(sv,max(probs))
# +
fvs = fnet.returnWeights(sv)
fvm = np.zeros([L[1],L[0]])
print(fvs)
for t in range(L[0]):
for i in range(L[1]):
fvm[i,t] = fvs[t,i]
if(abs(fvs[t,i]>1.0e-8)):
print(t,i, fvs[t,i], mu(t,i))
# +
import matplotlib.pyplot as plt
ts = [t for t in range(0,L[0])]
for i in range(L[1]):
plt.plot(ts,fvm[i,:],'-x')
plt.xlabel('Time', fontsize=20)
plt.ylabel('Weight', fontsize=20)
# +
for i in range(L[1]):
plt.plot(ts,mut[:,i],'-x')
plt.xlabel('Time', fontsize=20)
plt.ylabel('Relative Profit', fontsize=20)
# -
| CosmiQ/xanadu/PennylaneQAOA.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# name: python3
# ---
# + colab={"base_uri": "https://localhost:8080/"} id="_Y9oaZ6vcNHp" executionInfo={"status": "ok", "timestamp": 1614735797540, "user_tz": 300, "elapsed": 406, "user": {"displayName": "<NAME>", "photoUrl": "https://lh3.googleusercontent.com/a-/AOh14Gg8pIl0V_AaKknYqxYKaqtY229xAnEwl2aKBp-rAA=s64", "userId": "06929284856658854847"}} outputId="29fc7688-5e9e-4f68-ccd5-e1553249c588"
import numpy as np
import os
import gzip
import matplotlib.pyplot as plt
import tensorflow as tf
print('Versión de TensorFlow: ' + tf.__version__)
# + id="3zlSyA_TdNyS" executionInfo={"status": "ok", "timestamp": 1614733867210, "user_tz": 300, "elapsed": 421, "user": {"displayName": "<NAME>", "photoUrl": "https://lh3.googleusercontent.com/a-/AOh14Gg8pIl0V_AaKknYqxYKaqtY229xAnEwl2aKBp-rAA=s64", "userId": "06929284856658854847"}}
def load_mnist(path, kind='train'):
    """Load MNIST data from `path`.

    Reads the gzip-compressed IDX label and image files named
    ``{kind}-labels-idx1-ubyte.gz`` and ``{kind}-images-idx3-ubyte.gz``
    and returns ``(images, labels)``, where ``images`` has shape
    ``(n_samples, 784)`` and ``labels`` shape ``(n_samples,)``, both uint8.
    """
    labels_path = os.path.join(path, '%s-labels-idx1-ubyte.gz' % kind)
    images_path = os.path.join(path, '%s-images-idx3-ubyte.gz' % kind)
    # Labels: skip the 8-byte IDX header; the remainder is one uint8 per label.
    with gzip.open(labels_path, 'rb') as lbpath:
        labels = np.frombuffer(lbpath.read(), dtype=np.uint8, offset=8)
    # Images: skip the 16-byte IDX header; each image is 784 (28*28) uint8
    # pixels, reshaped to one flattened image per row.
    with gzip.open(images_path, 'rb') as imgpath:
        pixel_data = np.frombuffer(imgpath.read(), dtype=np.uint8, offset=16)
        images = pixel_data.reshape(len(labels), 784)
    return images, labels
# + id="v5659ZKAdNwY" executionInfo={"status": "ok", "timestamp": 1614733869501, "user_tz": 300, "elapsed": 968, "user": {"displayName": "<NAME>", "photoUrl": "https://lh3.googleusercontent.com/a-/AOh14Gg8pIl0V_AaKknYqxYKaqtY229xAnEwl2aKBp-rAA=s64", "userId": "06929284856658854847"}}
ruta = '/content/drive/MyDrive/DIPLOMADO INTELIGENCIA ARTIFICIAL/TAREAS/2. Entendiendo Google Colab/fashion_mnist_data'
X_train, Y_train = load_mnist(ruta, kind='train')
X_test, Y_test = load_mnist(ruta, kind='test')
# + colab={"base_uri": "https://localhost:8080/"} id="0Pt7OMGNgVL6" executionInfo={"status": "ok", "timestamp": 1614733870338, "user_tz": 300, "elapsed": 421, "user": {"displayName": "<NAME>", "photoUrl": "https://lh3.googleusercontent.com/a-/AOh14Gg8pIl0V_AaKknYqxYKaqtY229xAnEwl2aKBp-rAA=s64", "userId": "06929284856658854847"}} outputId="b9054d3c-3c38-4d9e-a3e2-414f5a38597d"
X_train.shape
# + colab={"base_uri": "https://localhost:8080/"} id="UVPCr7dfhCAB" executionInfo={"status": "ok", "timestamp": 1614733871024, "user_tz": 300, "elapsed": 443, "user": {"displayName": "<NAME>", "photoUrl": "https://lh3.googleusercontent.com/a-/AOh14Gg8pIl0V_AaKknYqxYKaqtY229xAnEwl2aKBp-rAA=s64", "userId": "06929284856658854847"}} outputId="07144f98-eefe-4db2-9364-a7cb6382f12a"
Y_train.shape
# + colab={"base_uri": "https://localhost:8080/"} id="aM5892y_hGA4" executionInfo={"status": "ok", "timestamp": 1614733872148, "user_tz": 300, "elapsed": 416, "user": {"displayName": "<NAME>", "photoUrl": "https://lh3.googleusercontent.com/a-/AOh14Gg8pIl0V_AaKknYqxYKaqtY229xAnEwl2aKBp-rAA=s64", "userId": "06929284856658854847"}} outputId="2809618c-a688-4e5e-c443-44c7f5e332bd"
X_test.shape
# + colab={"base_uri": "https://localhost:8080/"} id="IkOZoPd4hIRw" executionInfo={"status": "ok", "timestamp": 1614733873231, "user_tz": 300, "elapsed": 409, "user": {"displayName": "<NAME>", "photoUrl": "https://lh3.googleusercontent.com/a-/AOh14Gg8pIl0V_AaKknYqxYKaqtY229xAnEwl2aKBp-rAA=s64", "userId": "06929284856658854847"}} outputId="7f96b689-09c5-4aa4-849d-45a442caa71c"
Y_test.shape
# + colab={"base_uri": "https://localhost:8080/"} id="dDz-JiNJhL4S" executionInfo={"status": "ok", "timestamp": 1614733873966, "user_tz": 300, "elapsed": 409, "user": {"displayName": "<NAME>", "photoUrl": "https://lh3.googleusercontent.com/a-/AOh14Gg8pIl0V_AaKknYqxYKaqtY229xAnEwl2aKBp-rAA=s64", "userId": "06929284856658854847"}} outputId="8261fd41-58e5-4e3b-9700-d374eae3e6d0"
28*28 #IMAGENES DE 28px x 28px a 1 solo canal (blanco y negro)
# + colab={"base_uri": "https://localhost:8080/"} id="Q5J33T10o5jJ" executionInfo={"status": "ok", "timestamp": 1614734166687, "user_tz": 300, "elapsed": 421, "user": {"displayName": "<NAME>", "photoUrl": "https://lh3.googleusercontent.com/a-/AOh14Gg8pIl0V_AaKknYqxYKaqtY229xAnEwl2aKBp-rAA=s64", "userId": "06929284856658854847"}} outputId="f52b440c-2f54-4eb0-880f-a87919fb1fad"
59904/128
# + colab={"base_uri": "https://localhost:8080/"} id="g7NPLEduqBKH" executionInfo={"status": "ok", "timestamp": 1614734751335, "user_tz": 300, "elapsed": 412, "user": {"displayName": "<NAME>", "photoUrl": "https://lh3.googleusercontent.com/a-/AOh14Gg8pIl0V_AaKknYqxYKaqtY229xAnEwl2aKBp-rAA=s64", "userId": "06929284856658854847"}} outputId="30f49c73-7cd1-48c7-d8fe-d8d31f2f8870"
9984/128
# + [markdown] id="2Kk5ymtVsd24"
# Reshape de los datos
# + id="FnilGF7BqDom" executionInfo={"status": "ok", "timestamp": 1614734729391, "user_tz": 300, "elapsed": 417, "user": {"displayName": "<NAME>", "photoUrl": "https://lh3.googleusercontent.com/a-/AOh14Gg8pIl0V_AaKknYqxYKaqtY229xAnEwl2aKBp-rAA=s64", "userId": "06929284856658854847"}}
X_train = X_train[0:59904,:]
X_test = X_test[0:9984,:]
Y_train = Y_train[0:59904]
Y_test = Y_test[0:9984]
X_train = np.reshape(X_train,(59904,28,28,1))
X_test = np.reshape(X_test,(9984,28,28,1))
Y_train = np.reshape(Y_train,(59904,1))
Y_test = np.reshape(Y_test,(9984,1))
# + colab={"base_uri": "https://localhost:8080/"} id="bFYsbnf7qvIF" executionInfo={"status": "ok", "timestamp": 1614734364023, "user_tz": 300, "elapsed": 418, "user": {"displayName": "<NAME>", "photoUrl": "https://lh3.googleusercontent.com/a-/AOh14Gg8pIl0V_AaKknYqxYKaqtY229xAnEwl2aKBp-rAA=s64", "userId": "06929284856658854847"}} outputId="cdd7e779-8a30-48ba-99d0-7bc34ee20aae"
X_train.shape
# + colab={"base_uri": "https://localhost:8080/"} id="6V-LlSfwqxRW" executionInfo={"status": "ok", "timestamp": 1614734732889, "user_tz": 300, "elapsed": 426, "user": {"displayName": "<NAME>", "photoUrl": "https://lh3.googleusercontent.com/a-/AOh14Gg8pIl0V_AaKknYqxYKaqtY229xAnEwl2aKBp-rAA=s64", "userId": "06929284856658854847"}} outputId="0fc54ee6-3292-41c0-890f-a705155aada2"
Y_test.shape
# + colab={"base_uri": "https://localhost:8080/"} id="J8aSbdxYufGH" executionInfo={"status": "ok", "timestamp": 1614735341775, "user_tz": 300, "elapsed": 417, "user": {"displayName": "<NAME>", "photoUrl": "https://lh3.googleusercontent.com/a-/AOh14Gg8pIl0V_AaKknYqxYKaqtY229xAnEwl2aKBp-rAA=s64", "userId": "06929284856658854847"}} outputId="4cff9ac3-7715-4081-b61f-772cde074d56"
np.unique(Y_train)
# + colab={"base_uri": "https://localhost:8080/"} id="Whih40pPtz9O" executionInfo={"status": "ok", "timestamp": 1614735321385, "user_tz": 300, "elapsed": 462, "user": {"displayName": "<NAME>", "photoUrl": "https://lh3.googleusercontent.com/a-/AOh14Gg8pIl0V_AaKknYqxYKaqtY229xAnEwl2aKBp-rAA=s64", "userId": "06929284856658854847"}} outputId="477e574c-66aa-41df-fb07-222b4f450cb5"
K = len(np.unique(Y_train))
print("Numero de clases:", K)
# + [markdown] id="l9B_BqF2saKZ"
# Creacion del modelo con Keras
#
# + id="hqywfwTvqzMU" executionInfo={"status": "ok", "timestamp": 1614734849626, "user_tz": 300, "elapsed": 492, "user": {"displayName": "<NAME>", "photoUrl": "https://lh3.googleusercontent.com/a-/AOh14Gg8pIl0V_AaKknYqxYKaqtY229xAnEwl2aKBp-rAA=s64", "userId": "06929284856658854847"}}
tf.random.set_seed(200)
model = tf.keras.models.Sequential()
# + colab={"base_uri": "https://localhost:8080/"} id="02NC2uBnuwC6" executionInfo={"status": "ok", "timestamp": 1614735429202, "user_tz": 300, "elapsed": 408, "user": {"displayName": "<NAME>", "photoUrl": "https://lh3.googleusercontent.com/a-/AOh14Gg8pIl0V_AaKknYqxYKaqtY229xAnEwl2aKBp-rAA=s64", "userId": "06929284856658854847"}} outputId="0e9cdef1-928d-4521-8110-bcad602624fb"
X_train.shape[1:]
# + colab={"base_uri": "https://localhost:8080/"} id="Fti9LKS9snlJ" executionInfo={"status": "ok", "timestamp": 1614736732347, "user_tz": 300, "elapsed": 614, "user": {"displayName": "<NAME>", "photoUrl": "https://lh3.googleusercontent.com/a-/AOh14Gg8pIl0V_AaKknYqxYKaqtY229xAnEwl2aKBp-rAA=s64", "userId": "06929284856658854847"}} outputId="2dfab421-b7c8-4926-dbe2-0099df22b39b"
model = tf.keras.models.Sequential()
model.add(tf.keras.layers.BatchNormalization(input_shape=X_train.shape[1:]))
model.add(tf.keras.layers.Conv2D(64, (5,5), padding='same', activation='elu'))
model.add(tf.keras.layers.MaxPool2D(pool_size=(2,2), strides=(2,2)))
model.add(tf.keras.layers.Dropout(0.25))
model.add(tf.keras.layers.BatchNormalization(input_shape=X_train.shape[1:]))
model.add(tf.keras.layers.Conv2D(128, (5,5), padding='same', activation='elu'))
model.add(tf.keras.layers.MaxPool2D(pool_size=(2,2), strides=(2,2)))
model.add(tf.keras.layers.Dropout(0.25))
model.add(tf.keras.layers.BatchNormalization(input_shape=X_train.shape[1:]))
model.add(tf.keras.layers.Conv2D(256, (5,5), padding='same', activation='elu'))
model.add(tf.keras.layers.MaxPool2D(pool_size=(2,2), strides=(2,2)))
model.add(tf.keras.layers.Dropout(0.25))
model.add(tf.keras.layers.Flatten())
model.add(tf.keras.layers.Dense(256))
model.add(tf.keras.layers.Activation('elu'))
model.add(tf.keras.layers.Dropout(0.5))
model.add(tf.keras.layers.Dense(K))
model.add(tf.keras.layers.Activation('softmax'))
model.summary()
# + colab={"base_uri": "https://localhost:8080/", "height": 1000} id="Mu_DjJWSykp-" executionInfo={"status": "ok", "timestamp": 1614736735688, "user_tz": 300, "elapsed": 913, "user": {"displayName": "<NAME>", "photoUrl": "https://lh3.googleusercontent.com/a-/AOh14Gg8pIl0V_AaKknYqxYKaqtY229xAnEwl2aKBp-rAA=s64", "userId": "06929284856658854847"}} outputId="914ddf07-3112-474b-a5ed-ff59a14d56e2"
folder = '/content/drive/MyDrive/DIPLOMADO INTELIGENCIA ARTIFICIAL/TAREAS/2. Entendiendo Google Colab'
tf.keras.utils.plot_model(model, to_file= folder + 'Model1.png', show_shapes=True)
from IPython.display import Image
Image(retina=True, filename= folder + 'Model1.png')
# + id="8ptKjlCCuhfd" executionInfo={"status": "ok", "timestamp": 1614736749725, "user_tz": 300, "elapsed": 413, "user": {"displayName": "<NAME>", "photoUrl": "https://lh3.googleusercontent.com/a-/AOh14Gg8pIl0V_AaKknYqxYKaqtY229xAnEwl2aKBp-rAA=s64", "userId": "06929284856658854847"}}
model.compile(optimizer='adam',
loss='sparse_categorical_crossentropy',
metrics=['accuracy'])
# + [markdown] id="X3dLTWDhx7rf"
# Entrenamiento!!
# + colab={"base_uri": "https://localhost:8080/"} id="fX1xAENYx5VR" executionInfo={"status": "ok", "timestamp": 1614738268596, "user_tz": 300, "elapsed": 1509269, "user": {"displayName": "<NAME>", "photoUrl": "https://lh3.googleusercontent.com/a-/AOh14Gg8pIl0V_AaKknYqxYKaqtY229xAnEwl2aKBp-rAA=s64", "userId": "06929284856658854847"}} outputId="c04a2dfa-4411-47d1-a154-6510760611ec"
import timeit
def entrenamiento_cpu():
    # Train the global Keras `model` pinned to the CPU (tf.device('/cpu:0'))
    # so its wall-clock time can be compared against a GPU run. Uses the
    # module-level X_train/Y_train/X_test/Y_test arrays prepared earlier.
    with tf.device('/cpu:0'):
        # NOTE(review): the fit History `r` is discarded — only the elapsed
        # time (measured by the caller through timeit) is of interest here.
        r = model.fit(X_train,Y_train,validation_data=(X_test,Y_test),batch_size=128,epochs=2,verbose=1)
    return None
cpu_time = timeit.timeit('entrenamiento_cpu()', number=1, setup='from __main__ import entrenamiento_cpu')
# + colab={"base_uri": "https://localhost:8080/"} id="LSa08IxPz42r" executionInfo={"status": "ok", "timestamp": 1614738268599, "user_tz": 300, "elapsed": 1484931, "user": {"displayName": "<NAME>", "photoUrl": "https://lh3.googleusercontent.com/a-/AOh14Gg8pIl0V_AaKknYqxYKaqtY229xAnEwl2aKBp-rAA=s64", "userId": "06929284856658854847"}} outputId="4155b58f-ba1e-44df-f49b-85f05bfd98bd"
print('Tiempo de entrenamiento: ' + str(cpu_time) + ' segundos')
| 2. Entendiendo Google Colab/CPU_FashionMNIST.ipynb |
# ---
# jupyter:
# jupytext:
# cell_metadata_filter: all,-execution,-papermill,-trusted
# formats: ipynb,py//py:percent
# text_representation:
# extension: .py
# format_name: percent
# format_version: '1.3'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# language: python
# name: python3
# ---
# %% [markdown] tags=[]
# # Description
# %% [markdown] tags=[]
# This notebook runs some pre-analyses using spectral clustering to explore the best set of parameters to cluster `pca` data version.
# %% [markdown] tags=[]
# # Environment variables
# %% tags=[]
from IPython.display import display
import conf
N_JOBS = conf.GENERAL["N_JOBS"]
display(N_JOBS)
# %% tags=[]
# %env MKL_NUM_THREADS=$N_JOBS
# %env OPEN_BLAS_NUM_THREADS=$N_JOBS
# %env NUMEXPR_NUM_THREADS=$N_JOBS
# %env OMP_NUM_THREADS=$N_JOBS
# %% [markdown] tags=[]
# # Modules loading
# %% tags=[]
# %load_ext autoreload
# %autoreload 2
# %% tags=[]
from pathlib import Path
import warnings
import numpy as np
import pandas as pd
import matplotlib.pyplot as plt
import seaborn as sns
from utils import generate_result_set_name
# %% [markdown] tags=[]
# # Settings
# %% tags=[]
INITIAL_RANDOM_STATE = 40000
# %% [markdown] tags=[]
# # PCA
# %% tags=[]
INPUT_SUBSET = "pca"
# %% tags=[]
INPUT_STEM = "z_score_std-projection-smultixcan-efo_partial-mashr-zscores"
# %% tags=[]
DR_OPTIONS = {
"n_components": 50,
"svd_solver": "full",
"random_state": 0,
}
# %% tags=[]
input_filepath = Path(
conf.RESULTS["DATA_TRANSFORMATIONS_DIR"],
INPUT_SUBSET,
generate_result_set_name(
DR_OPTIONS, prefix=f"{INPUT_SUBSET}-{INPUT_STEM}-", suffix=".pkl"
),
).resolve()
display(input_filepath)
assert input_filepath.exists(), "Input file does not exist"
input_filepath_stem = input_filepath.stem
display(input_filepath_stem)
# %% tags=[]
data = pd.read_pickle(input_filepath)
# %% tags=[]
data.shape
# %% tags=[]
data.head()
# %% [markdown] tags=[]
# # Clustering
# %% tags=[]
from sklearn.cluster import SpectralClustering
from clustering.utils import compute_performance
# %% [markdown] tags=[]
# ## `gamma` parameter
# %% [markdown] tags=[]
# ### Using default value (`gamma=1.0`)
# %% tags=[]
# Run spectral clustering with the default gamma; eigen_tol is relaxed here
# because with gamma=1.0 the arpack eigen-solver struggles to converge.
with warnings.catch_warnings():
    warnings.filterwarnings("always")
    clus = SpectralClustering(
        eigen_solver="arpack",
        eigen_tol=1e-3,
        n_clusters=2,
        n_init=1,
        affinity="rbf",
        gamma=1.00,
        random_state=INITIAL_RANDOM_STATE,
    )
    part = clus.fit_predict(data)
# %% tags=[]
# show number of clusters and their size
_tmp = pd.Series(part).value_counts()
display(_tmp)
# a degenerate partition: one cluster holds a single member
assert _tmp.shape[0] == 2
assert _tmp.loc[1] == 1
# %% [markdown] tags=[]
# For default values of `gamma` (`1.0`), the algorithm takes a lot of time to converge (here I used `eigen_tol=1e-03` to force convergence).
# %% [markdown] tags=[]
# ### Using `gamma=5.00`
# %% tags=[]
with warnings.catch_warnings():
    warnings.filterwarnings("always")
    clus = SpectralClustering(
        eigen_solver="arpack",
        eigen_tol=1e-3,
        n_clusters=2,
        n_init=1,
        affinity="rbf",
        gamma=5.00,
        random_state=INITIAL_RANDOM_STATE,
    )
    part = clus.fit_predict(data)
# %% tags=[]
# show number of clusters and their size
_tmp = pd.Series(part).value_counts()
display(_tmp)
# everything collapses into a single cluster
assert _tmp.shape[0] == 1
# %% [markdown] tags=[]
# The algorithm does not work either with `gamma>1.0`.
# %% [markdown] tags=[]
# ### Using `gamma=0.01`
# %% tags=[]
# With a smaller gamma the default eigen_tol converges, so it is not relaxed.
with warnings.catch_warnings():
    warnings.filterwarnings("always")
    clus = SpectralClustering(
        eigen_solver="arpack",
        # eigen_tol=1e-3,
        n_clusters=2,
        n_init=1,
        affinity="rbf",
        gamma=0.01,
        random_state=INITIAL_RANDOM_STATE,
    )
    part = clus.fit_predict(data)
# %% tags=[]
# show number of clusters and their size
_tmp = pd.Series(part).value_counts()
display(_tmp)
assert _tmp.shape[0] == 2
assert _tmp.loc[1] == 12
# %% tags=[]
# show some clustering performance measures to assess the quality of the partition
# si: silhouette (higher better), ch: Calinski-Harabasz (higher better),
# db: Davies-Bouldin (lower better)
_tmp = compute_performance(data, part)
assert 0.50 < _tmp["si"] < 0.58
assert 95.0 < _tmp["ch"] < 97.00
assert 0.50 < _tmp["db"] < 0.55
# %% [markdown] tags=[]
# For values around `gamma=0.01` the algorithm seems to work.
# %% [markdown] tags=[]
# ### Using `gamma=0.001`
# %% tags=[]
with warnings.catch_warnings():
    warnings.filterwarnings("always")
    clus = SpectralClustering(
        eigen_solver="arpack",
        # eigen_tol=1e-3,
        n_clusters=2,
        n_init=1,
        affinity="rbf",
        gamma=0.001,
        random_state=INITIAL_RANDOM_STATE,
    )
    part = clus.fit_predict(data)
# %% tags=[]
# show number of clusters and their size
_tmp = pd.Series(part).value_counts()
display(_tmp)
assert _tmp.shape[0] == 2
assert _tmp.loc[1] == 112
# %% tags=[]
# show some clustering performance measures to assess the quality of the partition
_tmp = compute_performance(data, part)
assert 0.50 < _tmp["si"] < 0.58
assert 280.0 < _tmp["ch"] < 290.00
assert 1.90 < _tmp["db"] < 2.00
# %% [markdown] tags=[]
# For values around `gamma=0.001`, the algorithm converges faster, although it does not necessarily find better solutions. This suggests smaller values should be explored for this parameter.
# %% [markdown] tags=[]
# ## Extended test
# %% [markdown] tags=[]
# Here I run some tests across several `k` and `gamma` values; then I check how results perform with different clustering quality measures.
# %% tags=[]
# Grid of hyperparameters for the extended run:
#   K_RANGE: numbers of clusters to try
#   N_REPS_PER_K: repetitions per (k, gamma) pair, each with a new seed
#   KMEANS_N_INIT: n_init passed to the embedded k-means step
#   GAMMAS: RBF kernel coefficients to explore
CLUSTERING_OPTIONS = {}
CLUSTERING_OPTIONS["K_RANGE"] = [2, 4, 6, 8, 10, 20, 30, 40, 50, 60]
CLUSTERING_OPTIONS["N_REPS_PER_K"] = 5
CLUSTERING_OPTIONS["KMEANS_N_INIT"] = 10
# NOTE(review): 1e-05 appears both commented out and active below — confirm
# whether both entries were intentional.
CLUSTERING_OPTIONS["GAMMAS"] = [
    1e-02,
    1e-03,
    # 1e-04,
    # 1e-05,
    1e-05,
    # 1e-06,
    # 1e-07,
    # 1e-08,
    # 1e-09,
    1e-10,
    # 1e-11,
    # 1e-12,
    # 1e-13,
    # 1e-14,
    1e-15,
    1e-17,
    1e-20,
    1e-30,
    1e-40,
    1e-50,
]
CLUSTERING_OPTIONS["AFFINITY"] = "rbf"
display(CLUSTERING_OPTIONS)
# %% tags=[]
# Build one SpectralClustering instance per (k, gamma, repetition)
# combination; every instance gets its own deterministic random seed so the
# whole ensemble is reproducible.
CLUSTERERS = {}
idx = 0
random_state = INITIAL_RANDOM_STATE
for k in CLUSTERING_OPTIONS["K_RANGE"]:
    for gamma_value in CLUSTERING_OPTIONS["GAMMAS"]:
        for _rep in range(CLUSTERING_OPTIONS["N_REPS_PER_K"]):
            clus_params = dict(
                eigen_solver="arpack",
                n_clusters=k,
                n_init=CLUSTERING_OPTIONS["KMEANS_N_INIT"],
                affinity=CLUSTERING_OPTIONS["AFFINITY"],
                gamma=gamma_value,
                random_state=random_state,
            )
            clus = SpectralClustering(**clus_params)
            method_name = type(clus).__name__
            CLUSTERERS[f"{method_name} #{idx}"] = clus
            random_state += 1
            idx += 1
# %% tags=[]
display(len(CLUSTERERS))
# %% tags=[]
# peek at the first two configured clusterers
_iter = iter(CLUSTERERS.items())
display(next(_iter))
display(next(_iter))
# %% tags=[]
# record the method name (used later to tag results)
clustering_method_name = method_name
display(clustering_method_name)
# %% [markdown] tags=[]
# ## Generate ensemble
# %% tags=[]
# NOTE(review): tempfile does not appear to be used in this notebook — confirm
# before removing.
import tempfile
from clustering.ensembles.utils import generate_ensemble
# %% tags=[]
# Fit every configured clusterer on the data and collect the partitions,
# keeping the listed attributes of each clusterer as columns.
ensemble = generate_ensemble(
    data,
    CLUSTERERS,
    attributes=["n_clusters", "gamma"],
)
# %% tags=[]
ensemble.shape
# %% tags=[]
ensemble.head()
# %% tags=[]
# format gamma as a short scientific-notation string for grouping/plotting
ensemble["gamma"] = ensemble["gamma"].apply(lambda x: f"{x:.1e}")
# %% tags=[]
ensemble["n_clusters"].value_counts()
# %% tags=[]
# every k value must appear exactly N_REPS_PER_K * len(GAMMAS) times
_tmp = ensemble["n_clusters"].value_counts().unique()
assert _tmp.shape[0] == 1
assert _tmp[0] == int(
    CLUSTERING_OPTIONS["N_REPS_PER_K"] * len(CLUSTERING_OPTIONS["GAMMAS"])
)
# %% tags=[]
ensemble_stats = ensemble["n_clusters"].describe()
display(ensemble_stats)
# %% [markdown] tags=[]
# ## Testing
# %% tags=[]
assert ensemble_stats["min"] > 1
# %% tags=[]
assert not ensemble["n_clusters"].isna().any()
# %% tags=[]
assert ensemble.shape[0] == len(CLUSTERERS)
# %% tags=[]
# all partitions have the right size
assert np.all(
    [part["partition"].shape[0] == data.shape[0] for idx, part in ensemble.iterrows()]
)
# %% tags=[]
# no partition has negative clusters (noisy points)
assert not np.any([(part["partition"] < 0).any() for idx, part in ensemble.iterrows()])
assert not np.any(
    [pd.Series(part["partition"]).isna().any() for idx, part in ensemble.iterrows()]
)
# %% tags=[]
# check that the number of clusters in the partitions are the expected ones
_real_k_values = ensemble["partition"].apply(lambda x: np.unique(x).shape[0])
display(_real_k_values)
assert np.all(ensemble["n_clusters"].values == _real_k_values.values)
# %% [markdown] tags=[]
# ## Add clustering quality measures
# %% tags=[]
from sklearn.metrics import (
    silhouette_score,
    calinski_harabasz_score,
    davies_bouldin_score,
)
# %% tags=[]
# compute the three internal quality indices for every partition
ensemble = ensemble.assign(
    si_score=ensemble["partition"].apply(lambda x: silhouette_score(data, x)),
    ch_score=ensemble["partition"].apply(lambda x: calinski_harabasz_score(data, x)),
    db_score=ensemble["partition"].apply(lambda x: davies_bouldin_score(data, x)),
)
# %% tags=[]
ensemble.shape
# %% tags=[]
ensemble.head()
# %% [markdown] tags=[]
# # Cluster quality
# %% tags=[]
# mean quality indices per (n_clusters, gamma) combination
with pd.option_context("display.max_rows", None, "display.max_columns", None):
    _df = ensemble.groupby(["n_clusters", "gamma"]).mean()
    display(_df)
# %% tags=[]
# silhouette index across k, one line per gamma
with sns.plotting_context("talk", font_scale=0.75), sns.axes_style(
    "whitegrid", {"grid.linestyle": "--"}
):
    fig = plt.figure(figsize=(14, 6))
    ax = sns.pointplot(data=ensemble, x="n_clusters", y="si_score", hue="gamma")
    ax.set_ylabel("Silhouette index\n(higher is better)")
    ax.set_xlabel("Number of clusters ($k$)")
    ax.set_xticklabels(ax.get_xticklabels(), rotation=45)
    plt.grid(True)
    plt.tight_layout()
# %% tags=[]
# Calinski-Harabasz index across k, one line per gamma
with sns.plotting_context("talk", font_scale=0.75), sns.axes_style(
    "whitegrid", {"grid.linestyle": "--"}
):
    fig = plt.figure(figsize=(14, 6))
    ax = sns.pointplot(data=ensemble, x="n_clusters", y="ch_score", hue="gamma")
    ax.set_ylabel("Calinski-Harabasz index\n(higher is better)")
    ax.set_xlabel("Number of clusters ($k$)")
    ax.set_xticklabels(ax.get_xticklabels(), rotation=45)
    plt.grid(True)
    plt.tight_layout()
# %% tags=[]
# Davies-Bouldin index across k, one line per gamma
with sns.plotting_context("talk", font_scale=0.75), sns.axes_style(
    "whitegrid", {"grid.linestyle": "--"}
):
    fig = plt.figure(figsize=(14, 6))
    ax = sns.pointplot(data=ensemble, x="n_clusters", y="db_score", hue="gamma")
    ax.set_ylabel("Davies-Bouldin index\n(lower is better)")
    ax.set_xlabel("Number of clusters ($k$)")
    ax.set_xticklabels(ax.get_xticklabels(), rotation=45)
    plt.grid(True)
    plt.tight_layout()
# %% [markdown] tags=[]
# # Stability
# %% [markdown] tags=[]
# ## Group ensemble by n_clusters
# %% tags=[]
# stack the repeated partitions of each (gamma, n_clusters) pair into a
# (N_REPS_PER_K, n_samples) array
parts = ensemble.groupby(["gamma", "n_clusters"]).apply(
    lambda x: np.concatenate(x["partition"].apply(lambda x: x.reshape(1, -1)), axis=0)
)
# %% tags=[]
parts.shape
# %% tags=[]
parts.head()
# %% tags=[]
parts.iloc[0].shape
# %% tags=[]
assert np.all(
    [
        parts.loc[k].shape == (int(CLUSTERING_OPTIONS["N_REPS_PER_K"]), data.shape[0])
        for k in parts.index
    ]
)
# %% [markdown] tags=[]
# ## Compute stability
# %% tags=[]
from sklearn.metrics import adjusted_rand_score as ari
from scipy.spatial.distance import pdist
# %% tags=[]
# pairwise ARI between the repeated partitions of each (gamma, n_clusters)
# group; higher ARI between repetitions means a more stable configuration
parts_ari = pd.Series(
    {k: pdist(parts.loc[k], metric=ari) for k in parts.index}, name="n_clusters"
)
# %% tags=[]
parts_ari_stability = parts_ari.apply(lambda x: x.mean())
display(parts_ari_stability.sort_values(ascending=False).head(15))
# %% tags=[]
# one row per (gamma, n_clusters) pair, one column per partition pair
parts_ari_df = pd.DataFrame.from_records(parts_ari.tolist()).set_index(
    parts_ari.index.copy()
)
parts_ari_df.index.rename(["gamma", "n_clusters"], inplace=True)
# %% tags=[]
parts_ari_df.shape
# %% tags=[]
# number of pairwise comparisons per group: C(N_REPS_PER_K, 2)
_n_total_parts = int(
    CLUSTERING_OPTIONS["N_REPS_PER_K"]
) # * len(CLUSTERING_OPTIONS["GAMMAS"]))
assert int(_n_total_parts * (_n_total_parts - 1) / 2) == parts_ari_df.shape[1]
# %% tags=[]
parts_ari_df.head()
# %% [markdown] tags=[]
# ## Stability plot
# %% tags=[]
# long format for plotting: one row per (gamma, n_clusters, pair) ARI value
parts_ari_df_plot = (
    parts_ari_df.stack().reset_index().rename(columns={"level_2": "idx", 0: "ari"})
)
# %% tags=[]
parts_ari_df_plot.dtypes
# %% tags=[]
parts_ari_df_plot.head()
# %% tags=[]
with pd.option_context("display.max_rows", None, "display.max_columns", None):
    _df = parts_ari_df_plot.groupby(["n_clusters", "gamma"]).mean()
    display(_df)
# %% tags=[]
# Plot the stability (mean pairwise ARI across repetitions) for each k, one
# line per gamma value.
# BUGFIX: axis label typo corrected ("Averange" -> "Average").
with sns.plotting_context("talk", font_scale=0.75), sns.axes_style(
    "whitegrid", {"grid.linestyle": "--"}
):
    fig = plt.figure(figsize=(14, 6))
    ax = sns.pointplot(data=parts_ari_df_plot, x="n_clusters", y="ari", hue="gamma")
    ax.set_ylabel("Average ARI")
    ax.set_xlabel("Number of clusters ($k$)")
    ax.set_xticklabels(ax.get_xticklabels(), rotation=45)
    plt.grid(True)
    plt.tight_layout()
# %% [markdown] tags=[]
# # Conclusions
# %% [markdown] tags=[]
# We choose `1.0e-03` as the `gamma` parameter for this data version.
# %% tags=[]
| nbs/12_cluster_analysis/pre_analysis/05_02-spectral_clustering-pca.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# language: python
# name: python3
# ---
# Problem 80
# ==========
#
# It is well known that if the square root of a natural number is not an
# integer, then it is irrational. The decimal expansion of such square roots
# is infinite without any repeating pattern at all.
#
# The square root of two is 1.41421356237309504880..., and the digital sum
# of the first one hundred decimal digits is 475.
#
# For the first one hundred natural numbers, find the total of the digital
# sums of the first one hundred decimal digits for all the irrational square
# roots.
#
# Successfully created the directory /Users/cdr/Dropbox/Projects/projecteuler/080
# +
from decimal import Decimal, getcontext

# 102 significant digits: two guard digits so the first 100 digits of each
# square root are correctly rounded.
getcontext().prec = 102


def solve():
    """Solve Project Euler problem 80.

    For the first one hundred natural numbers (1..100), sum the digital
    sums of the first one hundred digits of each irrational square root.

    Returns
    -------
    int
        The total of the digital sums.
    """
    total = 0
    # The problem asks for the first one hundred natural numbers; the
    # original range(100) covered 0..99 (same result, since 0 and 100 both
    # have rational roots, but this now matches the problem statement).
    for num in range(1, 101):
        digits = Decimal(num).sqrt().as_tuple().digits[:100]
        # Rational roots (perfect squares) produce far fewer than 100
        # digits, so this test skips them.
        if len(digits) == 100:
            total += sum(digits)
    return total
# %time solve()
| 080/p080.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# language: python
# name: python3
# ---
# + [markdown] slideshow={"slide_type": "slide"}
# # MIRI LRS Optimal Spectral Extraction
# -
# **Use case:** Extract spectra with different locations, extraction apertures, and techniques.<br>
# **Data:** Simulated MIRI LRS spectrum.<br>
# **Tools:** jwst, gwcs, matplotlib, astropy.<br>
# **Cross-intrument:** NIRSpec, MIRI.<br>
# **Documentation:** This notebook is part of a STScI's larger [post-pipeline Data Analysis Tools Ecosystem](https://jwst-docs.stsci.edu/jwst-post-pipeline-data-analysis).<br>
#
# ## Introduction
#
# This notebook extracts a 1D spectra from a 2D MIRI LRS spectral observation (single image). The goal is to provide the ability to extract spectra with different locations, extraction apertures, and techniques than are done in the JWST pipeline.
#
# The simpliest spectral extraction is "boxcar" where all the pixels within some fixed width centered on the source position are summed at each wavelength. Background subtraction can be done using regions offset from the source center.
#
# For spectra taken with a diffraction limited telescope like JWST, a modification boxcar extraction is to vary the extraction width linearly with wavelength. Such a scaled boxcar extraction keeps the fraction of the source flux within the extraction region approximately constant with wavelength.
#
# For point sources, a PSF-weighted spectral extraction can be done. Using the PSF to weight the extraction uses the actual PSF as a function of wavelength to optimize the extraction to the pixels with the greatest signal. PSF-weighted extractions show the largest differences with boxcar extractions at lower S/N values.
# **Note:** Corrections for the finite aperture used in all the extractions have not been applied. Thus, the physical flux densities of all the extracted spectra are lower than the actual values.
# + [markdown] slideshow={"slide_type": "slide"}
# ## Imports
#
# - *matplotlib.pyplot* for plotting data
# - *numpy* to handle array functions
# - *astropy.io fits* for accessing FITS files
# - *astropy.visualization* for scaling image for display
# - *astropy.table Table* for reading the pipeline 1d extractions
# - *jwst datamodels* for reading/access the jwst data
# + slideshow={"slide_type": "fragment"}
import matplotlib.pyplot as plt
import matplotlib as mpl
# %matplotlib inline
import numpy as np
from astropy.io import fits
from astropy.table import Table
from astropy.visualization import simple_norm
from jwst import datamodels
# +
# useful function that work for boxcar, boxcar scaled with wavelength,
# and psf-weighted extractions
import numpy as np
from gwcs.wcstools import grid_from_bounding_box
def get_boxcar_weights(center, hwidth, npix):
    """
    Build a 1D boxcar weight vector for an extraction aperture.

    Pixels fully inside [center - hwidth, center + hwidth] get weight 1;
    the two pixels at the aperture edges get fractional weights so the
    effective width varies smoothly with center and hwidth.

    Parameters
    ----------
    center : float
        aperture center in pixel coordinates
    hwidth : float
        aperture half width in pixels
    npix : int
        length of the returned weight vector

    Returns
    -------
    ndarray
        1D float array of length npix with the aperture weights
    """
    weights = np.zeros(npix)
    # first fully-weighted pixel and (exclusive) end of the full-weight run
    lo = max(0, int(center - hwidth + 1))
    hi = min(int(center + hwidth), npix)
    weights[lo:hi] = 1.0
    # fractional weight for the edge pixel below the aperture, if in range
    if lo > 0:
        weights[lo - 1] = hwidth - (center - lo)
    # fractional weight for the edge pixel above the aperture, if in range
    if hi < npix:
        weights[hi] = hwidth - (hi - center)
    return weights
def ap_weight_images(
    center, width, bkg_offset, bkg_width, image_size, waves, wavescale=None
):
    """
    Build the extraction-aperture weight image and, optionally, the weight
    image for the requested background regions.

    Parameters
    ----------
    center : float
        center of the aperture in pixels
    width : float
        full width of the aperture in pixels
    bkg_offset : float
        offset of the background regions from the aperture edge, in pixels
        (never scaled with wavelength)
    bkg_width : float
        width of each background region in pixels
        (never scaled with wavelength)
    image_size : tuple with 2 elements
        shape (nrows, ncols) of the weight images
    waves : array
        wavelength of each column; only used when wavescale is given
    wavescale : float, optional
        reference wavelength for the width value; when given, the aperture
        half width scales linearly with wavelength (default=None)

    Returns
    -------
    wimage, bkg_wimage : (2D image, 2D image or None)
        wimage is the weight image defining the aperture;
        bkg_wimage is the weight image defining the background regions,
        or None when no background was requested
    """
    wimage = np.zeros(image_size)
    bkg_wimage = np.zeros(image_size)
    use_bkg = (bkg_width is not None) & (bkg_offset is not None)
    hwidth = 0.5 * width
    # dispersion runs along axis 1; build the weights column by column
    for col in range(image_size[1]):
        if wavescale is not None:
            # scale the half width linearly with wavelength
            hwidth = 0.5 * width * (waves[col] / wavescale)
        wimage[:, col] = get_boxcar_weights(center, hwidth, image_size[0])
        if use_bkg:
            # one background region on each side of the aperture
            below = get_boxcar_weights(
                center - hwidth - bkg_offset, bkg_width, image_size[0]
            )
            above = get_boxcar_weights(
                center + hwidth + bkg_offset, bkg_width, image_size[0]
            )
            bkg_wimage[:, col] = below + above
    if not use_bkg:
        bkg_wimage = None
    return (wimage, bkg_wimage)
def extract_1dspec(jdatamodel, center, width, bkg_offset, bkg_width, wavescale=None):
    """
    Extract the 1D spectrum using the boxcar method.
    Does a background subtraction as part of the extraction.

    Parameters
    ----------
    jdatamodel : jwst.DataModel
        jwst datamodel with the 2d spectral image
    center : float
        center of aperture in pixels
    width : float
        width of aperture in pixels
    bkg_offset : float
        offset from the extraction edge for the background
        never scaled for wavelength
    bkg_width : float
        width of background region
        never scaled with wavelength
    wavescale : float
        scale the width with wavelength (default=None)
        wavescale gives the reference wavelength for the width value

    Returns
    -------
    waves, ext1d, data_bkgsub : (ndarray, ndarray, 2D ndarray)
        waves: 1D `float` array with the aperture-weighted wavelength of
        each column
        ext1d: 1D `float` array with the extracted 1d spectrum in Jy
        data_bkgsub: 2D `float` array with the background-subtracted
        spectral image
    """
    # should be determined from the gWCS in cal.fits
    image = np.transpose(jdatamodel.data)
    grid = grid_from_bounding_box(jdatamodel.meta.wcs.bounding_box)
    ra, dec, lam = jdatamodel.meta.wcs(*grid)
    lam_image = np.transpose(lam)
    # compute a "rough" wavelength scale to allow for aperture to scale with wavelength
    rough_waves = np.average(lam_image, axis=0)
    # images to use for extraction
    # BUGFIX: bkg_offset and bkg_width were previously passed in swapped
    # order, so the background regions were built with offset and width
    # interchanged relative to the ap_weight_images signature.
    wimage, bkg_wimage = ap_weight_images(
        center,
        width,
        bkg_offset,
        bkg_width,
        image.shape,
        rough_waves,
        wavescale=wavescale,
    )
    # extract the spectrum using the weight image
    if bkg_wimage is not None:
        # per-column weighted-average background, replicated over rows
        ext1d_boxcar_bkg = np.average(image, weights=bkg_wimage, axis=0)
        data_bkgsub = image - np.tile(ext1d_boxcar_bkg, (image.shape[0], 1))
    else:
        data_bkgsub = image
    ext1d = np.sum(data_bkgsub * wimage, axis=0)
    # convert from MJy/sr to Jy
    ext1d *= 1e6 * jdatamodel.meta.photometry.pixelarea_steradians
    # compute the average wavelength for each column using the weight image
    # this should correspond directly with the extracted spectrum
    # wavelengths account for any tiled spectra this way
    waves = np.average(lam_image, weights=wimage, axis=0)
    return (waves, ext1d, data_bkgsub)
# -
# ### Devloper notes
#
# The difference between the pipeline (x1d) and the extractions done in this notebook are quite large. Help in understanding the origin of these differences is needed.
#
# Not clear how to use the JWST pipeline `extract_1d` (quite complex) code.
# Help to determine how to use the JWST pipeline code instead of the custom code for boxcar is needed.
#
# Applying aperture corrections for the finite extraction widths is needed. Help in how to get the needed informatinom for different (user set) extraction widths is needed.
# ## Download Files
# +
# Fetch the simulated MIRI LRS products (cal, s2d, x1d and the bright-source
# s2d used as the spatial profile) from the STScI data server.
from astropy.utils.data import download_file
calfilename = "det_image_seq5_MIRIMAGE_P750Lexp1_cal.fits"
s2dfilename = "det_image_seq5_MIRIMAGE_P750Lexp1_s2d.fits"
x1dfilename = "det_image_seq5_MIRIMAGE_P750Lexp1_x1d.fits"
spatialprofilefilename = "det_image_seq1_MIRIMAGE_P750Lexp1_s2d.fits"
mainurl = "https://data.science.stsci.edu/redirect/JWST/jwst-data_analysis_tools/MIRI_LRS_notebook/"
calfile_dld = download_file(mainurl + calfilename)
s2dfile_dld = download_file(mainurl + s2dfilename)
x1dfile_dld = download_file(mainurl + x1dfilename)
spatialprofilefile_dld = download_file(mainurl + spatialprofilefilename)
# -
# rename files so that they have the right extensions
# required for the jwst datamodels to work
import os
calfile = calfile_dld + '_cal.fits'
os.rename(calfile_dld, calfile)
s2dfile = s2dfile_dld + '_s2d.fits'
os.rename(s2dfile_dld, s2dfile)
x1dfile = x1dfile_dld + '_x1d.fits'
os.rename(x1dfile_dld, x1dfile)
spatialprofilefile = spatialprofilefile_dld + '_s2d.fits'
os.rename(spatialprofilefile_dld, spatialprofilefile)
# ## File information
#
# The data used is a simulation of a LRS slit observation for a blackbody with a similar flux density to the star BD+60d1753, a flux calibration star. This simulation was created with MIRISim.
# The simulated exposure was reduced using the JWST pipeline (v0.16.1) through the Detector1 and Spec2 stages.
#
# The cal file is one of the Spec2 products and is the calibration full frame image. It contains:
#
# 1. (Primary): This HDU contains meta-data related to the observation and data reduction.
# 2. (SCI): The calibrated image. Units are MJy/sr.
# 3. (ERR): Uncertainty image. Units are MJy/sr.
# 4. (DQ): Data quality image.
# 5. (VAR_POISSON): Unc. component 1: Poisson uncertainty image. Units are (MJy/sr)^2.
# 6. (VAR_RNOISE): Unc. component 2: Read Noise uncertainty image. Units are (MJy/sr)^2.
# 7. (VAR_FLAT): Unc. component 3: Flat Field uncertainty image. Units are (MJy/sr)^2.
# 8. (ASDF_METADATA): Metadata.
#
# The s2d file is one of the Spec2 products and contains the calibrated rectified cutout of the LRS Slit region. It has:
#
# 1. (Primary): This HDU contains meta-data related to the observation and data reduction.
# 2. (WGT): Weight.
# 3. (CON): ??
# 4. (ASDF_METADATA): Metadata.
# + [markdown] slideshow={"slide_type": "slide"}
# ## Loading data
# -
# use a jwst datamodel to provide a good interface to the data and wcs info
cal = datamodels.open(calfile)
s2d = datamodels.open(s2dfile)
# Basic information about the image.
print("cal image")
print(cal.data.shape)
print(np.mean(cal.data))
print(np.amin(cal.data), np.amax(cal.data))
print("s2d image")
print(s2d.data.shape)
print(np.mean(s2d.data))
print(np.amin(s2d.data), np.amax(s2d.data))
# Display the full 2D image
norm_data = simple_norm(cal.data, 'sqrt')
plt.figure(figsize=(6, 6))
plt.imshow(cal.data, norm=norm_data, origin="lower")
plt.title("The full image from the MIRI IMAGER detector")
# Display the LRS Slit region only (use s2d)
# transpose to make it display better
data_lrs_reg = np.transpose(s2d.data)
norm_data = simple_norm(data_lrs_reg, "sqrt")
plt.figure(figsize=(10, 3))
plt.imshow(data_lrs_reg, norm=norm_data, origin="lower")
plt.title("The LRS region")
# JWST pipeline 1D extraction
# for reference read in the JWST pipeline extracted spectrum
jpipe_x1d = Table.read(x1dfile, hdu=1)
print(jpipe_x1d.columns)
# plot
fig, ax = plt.subplots(figsize=(6, 6))
ax.plot(jpipe_x1d['WAVELENGTH'], jpipe_x1d['FLUX'], 'k-', label="jpipe_x1d")
ax.set_title("JWST Pipeline x1d extracted spectrum")
ax.set_xlabel("wavelength")
ax.set_ylabel("Flux Density [Jy]")
ax.set_yscale("log")
# ## Boxcar Extraction
#
# Extract a 1D spectrum using a simple boxcar. Basically collapse the spectrum in the cross-dispersion direction over a specified number of pixels.
#
# Limitation: currently it is assumed there are no bad pixels.
# ### Fixed width boxcar
# Define extraction parameters
# source center (cross-dispersion pixel), aperture full width, and the
# offset/width of the background regions, all in pixels
ext_center = 30
ext_width = 8
bkg_offset = 4
bkg_width = 2
# Plot cross-dispersion cut showing the extraction parameters
fig, ax = plt.subplots(figsize=(6, 6))
y = np.arange(data_lrs_reg.shape[0])
ax.plot(y, data_lrs_reg[:,300], 'k-')
# dashed blue line: aperture center; dotted green lines: aperture edges
mm = np.array([ext_center, ext_center])
mm_y = ax.get_ylim()
ax.plot(mm, mm_y, 'b--')
ax.plot(mm - ext_width/2., mm_y, 'g:')
ax.plot(mm + ext_width/2., mm_y, 'g:')
ax.set_title("Cross-dispersion Cut at Pixel=300")
# Do the extraction
# +
# Visualize the weight images used in the fixed boxcar extraction.
wimage_fixedboxcar, wimage_fb_bkg = ap_weight_images(
    ext_center, ext_width, bkg_offset, bkg_width, data_lrs_reg.shape, None
)
# source aperture weights
norm_data = simple_norm(wimage_fixedboxcar)
plt.figure(figsize=(10, 3))
plt.imshow(wimage_fixedboxcar, norm=norm_data, origin="lower")
plt.title("Fixed boxcar weight image")
# background region weights
# BUGFIX: title typo corrected ("backgound" -> "background")
norm_data = simple_norm(wimage_fb_bkg)
plt.figure(figsize=(10, 3))
plt.imshow(wimage_fb_bkg, norm=norm_data, origin="lower")
plt.title("Fixed boxcar background weight image")
# +
# Extract the fixed-width boxcar spectrum, first without and then with
# background subtraction, and compare against the JWST pipeline x1d product.
waves_boxcar, ext1d_boxcar, tmpval = extract_1dspec(
    s2d, ext_center, ext_width, None, None
)
waves_boxcar_bkgsub, ext1d_boxcar_bkgsub, tmpval = extract_1dspec(
    s2d, ext_center, ext_width, bkg_offset, bkg_width
)
# plot the three spectra; only positive fluxes survive the log scale
fig, ax = plt.subplots(figsize=(6, 6))
gpts = ext1d_boxcar_bkgsub > 0.
ax.plot(waves_boxcar[gpts], ext1d_boxcar[gpts], 'k-', label="boxcar")
ax.plot(waves_boxcar_bkgsub[gpts], ext1d_boxcar_bkgsub[gpts], 'k:', label="boxcar (bkgsub)")
ax.plot(jpipe_x1d['WAVELENGTH'], jpipe_x1d['FLUX'], 'k-', label="jpipe_x1d")
ax.set_title("Fixed boxcar 1D extracted spectrum")
ax.set_xlabel(r"wavelength [$\mu$m]")
ax.set_ylabel("Flux Density [Jy]")
ax.set_yscale("log")
ax.legend()
# -
# ### Wavelength scaled width boxcar
#
# The LRS spatial profile changes as a function of wavelength as JWST is diffraction limited at these wavelengths. Nominally this means that the FWHM is changing linearly with wavelength. Scaling the width of the extraction aperture with wavelength accounts for the changing diffraction limit with wavelength to first order.
# +
# Visualize the weight images used in the scaled boxcar extraction, where
# the aperture width scales with wavelength (reference wavelength 10 um).
wimage_scaledboxcar, wimage_sb_bkg = ap_weight_images(
    ext_center, ext_width, bkg_offset, bkg_width,
    data_lrs_reg.shape, waves_boxcar, wavescale=10.0
)
# source aperture weights
norm_data = simple_norm(wimage_scaledboxcar)
plt.figure(figsize=(10, 3))
plt.imshow(wimage_scaledboxcar, norm=norm_data, origin="lower")
plt.title("Scaled boxcar weight image")
# background region weights
# BUGFIX: title typo corrected ("backgound" -> "background")
norm_data = simple_norm(wimage_sb_bkg)
plt.figure(figsize=(10, 3))
plt.imshow(wimage_sb_bkg, norm=norm_data, origin="lower")
plt.title("Scaled boxcar background weight image")
# +
# Scaled boxcar extraction with background subtraction; the returned
# background-subtracted image is reused later by the PSF-weighted extraction.
waves_sboxcar_bkgsub, ext1d_sboxcar_bkgsub, sboxcar_bkgsub_image = extract_1dspec(
    s2d, ext_center, ext_width, bkg_offset, bkg_width, wavescale=10
)
# compare against the fixed-width boxcar result
fig, ax = plt.subplots(figsize=(6, 6))
gpts = ext1d_boxcar_bkgsub > 0.
ax.plot(waves_boxcar_bkgsub[gpts], ext1d_boxcar_bkgsub[gpts], 'k:', label="fixed boxcar (bkgsub)")
gpts = ext1d_sboxcar_bkgsub > 0.
ax.plot(waves_sboxcar_bkgsub[gpts], ext1d_sboxcar_bkgsub[gpts], 'k-', label="scaled boxcar (bkgsub)")
ax.set_title("Scaled boxcar 1D extracted spectrum")
ax.set_xlabel("wavelength [$\mu$m]")
ax.set_ylabel("Flux Density [Jy]")
ax.set_yscale("log")
ax.set_ylim(1e-3, 1e-1)
ax.legend()
# -
# Note that the impact of the scaled boxcar is largest at shorter wavelengths. This is the result of using the same aperture at 10 microns for both the boxcar and scaled boxcar.
#
# ## PSF based Extraction
#
# While to first order the PSF FHWM changes linearly with wavelength, this is an approximation. It is better to use the measured spatial profile as a function of wavelength to extract the spectrum. This tracks the actual variation with wavelength and optimizes the extraction to the higher S/N measurements. In general, PSF based extractions show the most improvements over boxcar extractions at lower the S/N.
#
# There are two PSF based extraction methods.
#
# 1. PSF weighted: the spatial profile at each wavelength is used to weight the extraction.
# 2. PSF fitting: the spatial profile is fit at each wavelength with the scale parameter versus wavelength giving the spectrum.
#
# Only the PSF weighted technique is currently part of this notebook.
#
# Note 1: calibration reference file for the specific LRS slit position should be used.
#
# Note 2: Small shifts in the centering of the source in the slit should be investigated to see if they impact the PSF based extractions.
#
# Limitation: currently it is assumed there are no bad pixels.
# ### PSF weighted extaction
# #### Generate the PSF profile as a function of wavelength
# For MIRI LRS slit observations, observations are made at two nod position in the slit after target acquisition. This means that the location of the sources in the slit is very well known. Hence, spatial profile (PSF) as a function of wavelength for the two nod positions is straightforward to measure using observations of a bright source.
#
# The next few steps generate the needed information for the nod position for which we are extracting spectra based on a simulation of a bright source at the same nod position.
# lrs spatial profile (PSF) as a function of wavelength
# currently, this is just a "high" S/N observation of a flat spectrum source at the same slit position
psf = datamodels.open(spatialprofilefile)
# transpose to make it display better
lrspsf = np.transpose(psf.data)
norm_data = simple_norm(lrspsf, "sqrt")
plt.figure(figsize=(10, 3))
plt.imshow(lrspsf, norm=norm_data, origin="lower")
plt.title("The LRS Spatial Profile (PSF) Observation")
# +
# Mock a LRS spectral profile reference file
# assume there is no background (none was included in the MIRISim for the flat spectrum source observation)
# ignore regions far from the source using a scaled boxcar weight image
# the aperture (psf_width) used in the scaled boxcar weight image could be varied
psf_width = 12.0
(wimage_scaledboxcar, tmpvar) = ap_weight_images(ext_center, psf_width, bkg_offset, bkg_width, data_lrs_reg.shape, waves_boxcar, wavescale=10.0)
psf_weightimage = lrspsf*wimage_scaledboxcar
# generate a 2D image of the per-column maxima for division
# (each column is normalized so that its peak value is 1)
max_psf = np.max(psf_weightimage, axis=0)
div_image = np.tile(max_psf, (psf_weightimage.shape[0], 1))
div_image[div_image == 0.0] = 1.0 # avoid divide by zero issues
# normalize
psf_weightimage /= div_image
# display
norm_data = simple_norm(psf_weightimage, "sqrt")
plt.figure(figsize=(10, 3))
plt.imshow(psf_weightimage, norm=norm_data, origin="lower")
plt.title("The LRS Spatial Profile Reference Image (Normalized)")
# -
# cross-dispersion cuts at several dispersion pixels; the profile narrows
# toward larger pixel values (shorter wavelengths)
fig, ax = plt.subplots(figsize=(6, 6))
y = np.arange(psf_weightimage.shape[0])
ax.plot(y, psf_weightimage[:,150], label="pixel=150")
ax.plot(y, psf_weightimage[:,225], label="pixel=225")
ax.plot(y, psf_weightimage[:,300], label="pixel=300")
ax.plot(y, psf_weightimage[:,370], label="pixel=370")
ax.set_title("Cross-dispersion Cuts")
ax.set_xlim(ext_center-psf_width, ext_center+psf_width)
ax.legend()
# Note that the spatial profile becomes narrower as the pixel values increases as this corresponds to the wavelength decreasing.
# #### Extract spectrum using wavelength dependent PSF profiles
# +
# Use the normalized PSF weight image to extract the spectrum from the
# background-subtracted image produced by the scaled boxcar extraction.
ext1d_psfweight = np.sum(sboxcar_bkgsub_image * psf_weightimage, axis=0)
# convert from MJy/sr to Jy using the pixel solid angle
ext1d_psfweight *= 1e6 * s2d.meta.photometry.pixelarea_steradians
# plot against the scaled boxcar extraction
# BUGFIX: title typo corrected ("weigthed" -> "weighted")
fig, ax = plt.subplots(figsize=(6, 6))
gpts = ext1d_psfweight > 0.
ax.plot(waves_boxcar_bkgsub[gpts], ext1d_psfweight[gpts], 'k-', label="psf weighted (bkgsub)")
gpts = ext1d_sboxcar_bkgsub > 0.
ax.plot(waves_sboxcar_bkgsub[gpts], ext1d_sboxcar_bkgsub[gpts], 'k:', label="scaled boxcar (bkgsub)")
ax.set_title("PSF weighted extracted spectrum")
ax.set_xlabel("wavelength [$\mu$m]")
ax.set_ylabel("Flux Density [Jy]")
ax.set_yscale("log")
ax.set_ylim(1e-3, 1e-1)
ax.legend()
# -
# Note that the psf weighted extraction has visibly higher S/N, especially at the longer wavelengths where the S/N is lowest overall.
# #### Plotting in Rayleigh-Jeans units
#
# For sources that have stellar continuum, it can be useful to plot MIR spectra in Rayleigh-Jeans units. This just means removing the spectral shape expected for a blackbody with a peak at much shorter wavelengths than the MIR. This is easily done by multiplying the spectrum by lambda^4 or nu^2.
#
# An example of this is given below.
# Rayleigh-Jeans plot
# multiply each extracted spectrum by wavelength^4 to flatten a
# Rayleigh-Jeans (stellar) continuum
fig, ax = plt.subplots(figsize=(6, 6))
gpts = ext1d_psfweight > 0.
ax.plot(waves_boxcar_bkgsub[gpts], (waves_boxcar_bkgsub[gpts]**4)*ext1d_psfweight[gpts], 'k-', label="psf weighted (bkgsub)")
gpts = ext1d_sboxcar_bkgsub > 0.
ax.plot(waves_sboxcar_bkgsub[gpts], (waves_sboxcar_bkgsub[gpts]**4)*ext1d_sboxcar_bkgsub[gpts], 'k:', label="scaled boxcar (bkgsub)")
gpts = ext1d_boxcar_bkgsub > 0.
ax.plot(waves_boxcar_bkgsub[gpts], (waves_boxcar_bkgsub[gpts]**4)*ext1d_boxcar_bkgsub[gpts], 'k--', label="fixed boxcar (bkgsub)")
ax.set_title("Rayleigh-Jeans plot for all extractions")
ax.set_xlabel("wavelength [$\mu$m]")
ax.set_ylabel("Rayleigh-Jeans Flux Density [$\mu$m$^4$ Jy]")
ax.set_yscale("log")
ax.set_ylim(10, 100)
ax.legend()
# ## Additional Resources
#
# - [MIRI LRS](https://jwst-docs.stsci.edu/mid-infrared-instrument/miri-observing-modes/miri-low-resolution-spectroscopy)
# - [MIRISim](http://www.stsci.edu/jwst/science-planning/proposal-planning-toolbox/mirisim)
# - [JWST pipeline](https://jwst-docs.stsci.edu/jwst-data-reduction-pipeline)
# - PSF weighted extraction [Horne 1986, PASP, 98, 609](https://ui.adsabs.harvard.edu/abs/1986PASP...98..609H/abstract).
# + [markdown] slideshow={"slide_type": "slide"}
# ## About this notebook
#
# **Author:** <NAME>, JWST
# **Updated On:** 2020-07-07
# -
# ***
# [Top of Page](#top)
# <img style="float: right;" src="https://raw.githubusercontent.com/spacetelescope/notebooks/master/assets/stsci_pri_combo_mark_horizonal_white_bkgd.png" alt="Space Telescope Logo" width="200px"/>
| _notebooks/MIRI_LRS_spectral_extraction/miri_lrs_spectral_extraction.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python [default]
# language: python
# name: python2
# ---
# # Introduction
# In this program, we look at creating Caesar encryption for letters and numbers. A Caesar shift of 3 is used in this program. What this means is, for example, if we provide character "C", the encrypter character would be "A". Similarly, for "F", the encrypted character would be "C". More information about this simple encryption technique is [provided in this link](https://en.wikipedia.org/wiki/Caesar_cipher)
import string
from sys import argv, exit
# Here, we import the module `string`. The `string` module has a number of constants and classes which can be used to obtain the required ASCII characters. In this case, we require lower and upper case English characters to compare the user provided string and subsequently get the index of the alphabets. For example, the index of "A" would be `0`; likewise, "B" and "C" would be `1` and `2` respectively.
baseAlphabetsLower = string.ascii_lowercase  # 'abcdefghijklmnopqrstuvwxyz'
baseAlphabetsUpper = string.ascii_uppercase  # 'ABCDEFGHIJKLMNOPQRSTUVWXYZ'
baseNumbers = range(0, 10)                   # digits 0-9 for the numeric shift
finalString = []                             # accumulates the shifted characters
# We store the upper case and lower case string constants in the two variables, `baseAlphabetsLower` and `baseAlphabetsUpper` respectively. For numbers, we use the `range` function to get a list from `0` to `9`.
userString = raw_input("Enter your alphanumeric characters: ")
# The above line is self-explanatory where in the user input is stored in `userString` variable
# Apply a Caesar shift of 3 to the left to every character of the input.
# Python's negative indexing provides wrap-around for free: an index of -1
# maps to the end of the lookup table (e.g. 'a' -> 'x', '0' -> '7').
# NOTE(review): any character that is not a space, an ASCII letter or a
# digit (e.g. punctuation) falls through to int(char) and raises ValueError.
for char in userString:
    if char == " ":
        # Spaces are passed through unchanged.
        finalString.append(char)
    else:
        if char in baseAlphabetsLower:
            charIndexLower = baseAlphabetsLower.index(char)
            finalString.append(baseAlphabetsLower[charIndexLower - 3])
        elif char in baseAlphabetsUpper:
            charIndexUpper = baseAlphabetsUpper.index(char)
            finalString.append(baseAlphabetsUpper[charIndexUpper - 3])
        else:
            # Assumed to be a digit at this point; shift within 0-9.
            numIndex = baseNumbers.index(int(char))
            finalString.append(str(baseNumbers[numIndex - 3]))
# The lines shown above is where the left shift of 3 happens to every character and number the user provides. We first iterate through the characters and based on upper/lower case or number, `if` `else` statements are used to divert these apply the Caesar shift of 3 towards the left. The shifted characters are then appended to a `finalString` variable.
# Reassemble the shifted characters into the final ciphertext string.
encryptedString = "".join(finalString)
print "Your encrypted characters: ", encryptedString
# Finally, we join the `finalString` list to get the encrypted characters. Note that this works both with and without spaces in the string provided by the user. We can verify the result from the [wiki link here](https://en.wikipedia.org/wiki/Caesar_cipher)
| Python-Caesar-encryption.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# name: python3
# ---
# + [markdown] id="view-in-github" colab_type="text"
# <a href="https://colab.research.google.com/github/piyushjain220/TSAI/blob/main/NLP/Session10/Session10Ass_Model1.ipynb" target="_parent"><img src="https://colab.research.google.com/assets/colab-badge.svg" alt="Open In Colab"/></a>
# + id="wNyqu7O32Ndx"
import torch
import torch.nn as nn
import torch.optim as optim
import torch.nn.functional as F
from torchtext.datasets import Multi30k
from torchtext.data import Field, BucketIterator
import matplotlib.pyplot as plt
import matplotlib.ticker as ticker
import spacy
import numpy as np
import random
import math
import time
# + colab={"base_uri": "https://localhost:8080/"} id="qYPKey7r2Nd8" outputId="194d273d-2b29-4263-fe2d-58a7d100c1de"
# !python -m spacy download en #English - en spacy.load('en')
# !python -m spacy download de # German - de spacy.load('de')
#French - fr
# + id="e4nY5oam2Nd9"
# Seed every RNG the notebook touches (Python, NumPy, PyTorch CPU and CUDA)
# so runs are reproducible; cudnn.deterministic trades speed for determinism.
SEED = 1234
random.seed(SEED)
np.random.seed(SEED)
torch.manual_seed(SEED)
torch.cuda.manual_seed(SEED)
torch.backends.cudnn.deterministic = True
# + id="DTgvmtty2Nd-"
# After linking spacy.load('de_core_news_sm') -> spacy.load('de')
spacy_de = spacy.load('de')
# After linking spacy.load('en_core_web_sm') -> spacy.load('en')
spacy_en = spacy.load('en')
# + id="aToD4o-R2Nd-"
def tokenize_de(text):
    """
    Tokenizes German text from a string into a list of strings (tokens).

    Note: despite what earlier versions claimed, the token order is NOT
    reversed here — tokens are returned in natural reading order.
    """
    return [tok.text for tok in spacy_de.tokenizer(text)]
def tokenize_en(text):
    """
    Tokenizes English text from a string into a list of strings (tokens).
    """
    return [tok.text for tok in spacy_en.tokenizer(text)]
# + id="FQMWyyXS2Nd_"
SRC = Field(tokenize = tokenize_en, init_token = '<SOS>', eos_token = '<EOS>', lower = True)
TRG = Field(tokenize = tokenize_de, init_token = '<SOS>', eos_token = '<EOS>', lower = True)
# + colab={"base_uri": "https://localhost:8080/"} id="PCtbffK02Nd_" outputId="5d94e50f-5258-4af4-84f5-65b85f02353f"
SRC.__dict__
# + colab={"base_uri": "https://localhost:8080/"} id="gxkmLjOW2NeA" outputId="f381de49-c7d9-48b1-d961-1ff12acd329c"
train_data , valid_data , test_data = Multi30k.splits(exts=('.en','.de'), fields=(SRC,TRG))
# + colab={"base_uri": "https://localhost:8080/"} id="HbMvI_pK2NeA" outputId="a7ef91b8-98ec-4bbf-8ed9-6136f984d287"
print(f"Number of training examples: {len(train_data.examples)}")
print(f"Number of validation examples: {len(valid_data.examples)}")
print(f"Number of testing examples: {len(test_data.examples)}")
# + colab={"base_uri": "https://localhost:8080/"} id="yxWcTFnT2NeA" outputId="9afc8b0e-b2a7-47f9-af53-18e49f4faaf0"
print(vars(train_data.examples[0]))
# + id="KqT_iYlG2NeB"
SRC.build_vocab(train_data,min_freq=2)
TRG.build_vocab(train_data,min_freq=2)
# + colab={"base_uri": "https://localhost:8080/"} id="5ArEJOI42NeB" outputId="54b2a758-042c-49e9-87d5-fd46dd2fa66c"
print(f"Unique tokens in source (de) vocabulary: {len(SRC.vocab)}")
print(f"Unique tokens in target (en) vocabulary: {len(TRG.vocab)}")
# + colab={"base_uri": "https://localhost:8080/"} id="xH-CGeSN2NeB" outputId="b13eac4d-6fc5-4b3c-dd75-e0dacb748ca4"
# SRC.vocab dict has the following keys - freqs(provides word and its frequencies), itos(mapping of integer to string),
# stoi(mapping of string to its integer) and vectors()
print(SRC.vocab.__dict__.keys())
SRC.vocab.__dict__
# + id="A5jjb5qo2NeB"
device = torch.device('cuda' if torch.cuda.is_available() else 'cpu')
# + id="lYmTURfQ2NeC"
BATCH_SIZE = 128
train_iterator, valid_iterator, test_iterator = BucketIterator.splits(
(train_data, valid_data, test_data),
batch_size = BATCH_SIZE,
device = device)
# + id="kd57z6DJ2NeC"
class Encoder(nn.Module):
    """Bidirectional single-layer GRU encoder.

    Embeds a source token sequence, runs it through a bidirectional GRU,
    and projects the concatenated final forward/backward hidden states
    down to the decoder's hidden size.
    """
    def __init__(self, input_dim, emb_dim, enc_hid_dim, dec_hid_dim, dropout):
        super().__init__()
        self.embedding = nn.Embedding(input_dim, emb_dim)
        self.rnn = nn.GRU(emb_dim, enc_hid_dim, bidirectional=True)
        self.fc = nn.Linear(enc_hid_dim * 2, dec_hid_dim)
        self.dropout = nn.Dropout(dropout)
    def forward(self, src):
        """src: [src len, batch] -> (states [src len, batch, enc_hid*2],
        hidden [batch, dec_hid])."""
        emb = self.dropout(self.embedding(src))   # [src len, batch, emb dim]
        enc_states, final = self.rnn(emb)         # states: [src len, batch, 2*enc_hid]
        # final[-2] is the last forward-direction state, final[-1] the last
        # backward-direction state; fuse them and map to the decoder size.
        fused = torch.cat((final[-2], final[-1]), dim=1)
        return enc_states, torch.tanh(self.fc(fused))
# + id="WPK-h0SB2NeC"
class Attention(nn.Module):
    """Additive (Bahdanau-style) attention over encoder states."""
    def __init__(self, enc_hid_dim, dec_hid_dim):
        super().__init__()
        self.attn = nn.Linear((enc_hid_dim * 2) + dec_hid_dim, dec_hid_dim)
        self.v = nn.Linear(dec_hid_dim, 1, bias = False)
    def forward(self, hidden, encoder_outputs):
        """hidden: [batch, dec_hid]; encoder_outputs: [src len, batch, 2*enc_hid]
        -> softmax attention weights [batch, src len]."""
        src_len = encoder_outputs.shape[0]
        # Bring encoder states to batch-first and tile the decoder state
        # across every source position so the two can be concatenated.
        states = encoder_outputs.transpose(0, 1)           # [batch, src len, 2*enc_hid]
        query = hidden.unsqueeze(1).repeat(1, src_len, 1)  # [batch, src len, dec_hid]
        energy = torch.tanh(self.attn(torch.cat((query, states), dim=2)))
        scores = self.v(energy).squeeze(2)                 # [batch, src len]
        return F.softmax(scores, dim=1)
# + id="JlH1F2G02NeD"
class Decoder(nn.Module):
    """Single-step GRU decoder with additive attention.

    Each forward call consumes ONE target token, attends over all encoder
    states, and returns logits over the next output token.
    """
    def __init__(self, output_dim, emb_dim, enc_hid_dim, dec_hid_dim, dropout, attention):
        super().__init__()
        self.output_dim = output_dim
        self.attention = attention
        self.embedding = nn.Embedding(output_dim, emb_dim)
        # GRU input = token embedding concatenated with the attention-weighted
        # encoder context (hence enc_hid_dim*2 + emb_dim).
        self.rnn = nn.GRU((enc_hid_dim * 2) + emb_dim, dec_hid_dim)
        # The prediction layer sees the GRU output, the context AND the embedding.
        self.fc_out = nn.Linear((enc_hid_dim * 2) + dec_hid_dim + emb_dim, output_dim)
        self.dropout = nn.Dropout(dropout)
    def forward(self, input, hidden, encoder_outputs):
        """Decode one step; returns (prediction logits, new hidden state,
        attention weights)."""
        #input = [batch size]
        #hidden = [batch size, dec hid dim]
        #encoder_outputs = [src len, batch size, enc hid dim * 2]
        input = input.unsqueeze(0)
        #input = [1, batch size]
        embedded = self.dropout(self.embedding(input))
        #embedded = [1, batch size, emb dim]
        a = self.attention(hidden, encoder_outputs)
        #a = [batch size, src len]
        a = a.unsqueeze(1)
        #a = [batch size, 1, src len]
        encoder_outputs = encoder_outputs.permute(1, 0, 2)
        #encoder_outputs = [batch size, src len, enc hid dim * 2]
        # Batched matrix product = attention-weighted sum of encoder states.
        weighted = torch.bmm(a, encoder_outputs)
        #weighted = [batch size, 1, enc hid dim * 2]
        weighted = weighted.permute(1, 0, 2)
        #weighted = [1, batch size, enc hid dim * 2]
        rnn_input = torch.cat((embedded, weighted), dim = 2)
        #rnn_input = [1, batch size, (enc hid dim * 2) + emb dim]
        output, hidden = self.rnn(rnn_input, hidden.unsqueeze(0))
        #output = [seq len, batch size, dec hid dim * n directions]
        #hidden = [n layers * n directions, batch size, dec hid dim]
        #seq len, n layers and n directions will always be 1 in this decoder, therefore:
        #output = [1, batch size, dec hid dim]
        #hidden = [1, batch size, dec hid dim]
        #this also means that output == hidden
        assert (output == hidden).all()
        embedded = embedded.squeeze(0)
        output = output.squeeze(0)
        weighted = weighted.squeeze(0)
        prediction = self.fc_out(torch.cat((output, weighted, embedded), dim = 1))
        #prediction = [batch size, output dim]
        return prediction, hidden.squeeze(0), a.squeeze(1)
# + id="38YHm-ge2NeD"
class Seq2Seq(nn.Module):
    """Encoder-decoder wrapper that unrolls the decoder over the target length,
    optionally feeding back ground-truth tokens (teacher forcing)."""
    def __init__(self, encoder, decoder, device):
        super().__init__()
        self.encoder = encoder
        self.decoder = decoder
        self.device = device
    def forward(self, src, trg, teacher_forcing_ratio = 0.5):
        """Return logits [trg len, batch, output dim]; row 0 stays zeros
        because decoding starts from the <sos> token at trg[0]."""
        #src = [src len, batch size]
        #trg = [trg len, batch size]
        #teacher_forcing_ratio is probability to use teacher forcing
        #e.g. if teacher_forcing_ratio is 0.75 we use teacher forcing 75% of the time
        batch_size = src.shape[1]
        trg_len = trg.shape[0]
        trg_vocab_size = self.decoder.output_dim
        #tensor to store decoder outputs
        outputs = torch.zeros(trg_len, batch_size, trg_vocab_size).to(self.device)
        #encoder_outputs is all hidden states of the input sequence, back and forwards
        #hidden is the final forward and backward hidden states, passed through a linear layer
        encoder_outputs, hidden = self.encoder(src)
        #first input to the decoder is the <sos> tokens
        input = trg[0,:]
        for t in range(1, trg_len):
            #insert input token embedding, previous hidden state and all encoder hidden states
            #receive output tensor (predictions) and new hidden state
            output, hidden, _ = self.decoder(input, hidden, encoder_outputs)
            #place predictions in a tensor holding predictions for each token
            outputs[t] = output
            #decide if we are going to use teacher forcing or not
            # (one random.random() call per step — call order matters for
            # reproducibility under a fixed seed)
            teacher_force = random.random() < teacher_forcing_ratio
            #get the highest predicted token from our predictions
            top1 = output.argmax(1)
            #if teacher forcing, use actual next token as next input
            #if not, use predicted token
            input = trg[t] if teacher_force else top1
        return outputs
# + id="kgizCemI2NeE"
# Model hyperparameters: vocabulary sizes come from the built torchtext
# fields; embedding and hidden sizes follow the seq2seq-with-attention
# tutorial defaults.
INPUT_DIM = len(SRC.vocab)
OUTPUT_DIM = len(TRG.vocab)
ENC_EMB_DIM = 256
DEC_EMB_DIM = 256
ENC_HID_DIM = 512
DEC_HID_DIM = 512
ENC_DROPOUT = 0.5
DEC_DROPOUT = 0.5
# Wire attention into the decoder, then both halves into the Seq2Seq wrapper.
attn = Attention(ENC_HID_DIM, DEC_HID_DIM)
enc = Encoder(INPUT_DIM, ENC_EMB_DIM, ENC_HID_DIM, DEC_HID_DIM, ENC_DROPOUT)
dec = Decoder(OUTPUT_DIM, DEC_EMB_DIM, ENC_HID_DIM, DEC_HID_DIM, DEC_DROPOUT, attn)
model = Seq2Seq(enc, dec, device).to(device)
# + colab={"base_uri": "https://localhost:8080/"} id="cQQ46U5-2NeF" outputId="807d657d-b26f-4f6d-988c-85d088144d76"
def init_weights(m):
    """Initialise a module in place: parameters whose name contains
    'weight' get N(0, 0.01) values, everything else (biases) is zeroed."""
    for pname, p in m.named_parameters():
        if 'weight' in pname:
            nn.init.normal_(p.data, mean=0, std=0.01)
        else:
            nn.init.constant_(p.data, 0)
model.apply(init_weights)
# + colab={"base_uri": "https://localhost:8080/"} id="_ju0T9Wp2NeF" outputId="3b52113f-a505-4d19-8642-0828308ad4cd"
def count_parameters(model):
    """Return the total number of trainable (requires_grad) parameters."""
    total = 0
    for p in model.parameters():
        if p.requires_grad:
            total += p.numel()
    return total
print(f'The model has {count_parameters(model):,} trainable parameters')
# + id="VNQLyl7L2NeG"
optimizer = optim.Adam(model.parameters())
# + id="ZfiScJWJ2NeG"
pad_idx = TRG.vocab.stoi[TRG.pad_token]
criterion = nn.CrossEntropyLoss(ignore_index=pad_idx)
# + id="NXTJMojI2NeG"
def train(model, iterator, optimizer, criterion, clip):
    """Run one training epoch; return the mean cross-entropy loss per batch.

    `iterator` yields torchtext batches with `.src`/`.trg` tensors of shape
    [seq len, batch size]; `clip` is the max gradient norm.
    """
    model.train()
    epoch_loss = 0
    for i, batch in enumerate(iterator):
        src = batch.src
        trg = batch.trg
        optimizer.zero_grad()
        output = model(src, trg)
        #trg = [trg len, batch size]
        #output = [trg len, batch size, output dim]
        output_dim = output.shape[-1]
        # Drop position 0 (the <sos> slot, which the model leaves as zeros)
        # and flatten so the loss covers every remaining token at once.
        output = output[1:].view(-1, output_dim)
        trg = trg[1:].view(-1)
        #trg = [(trg len - 1) * batch size]
        #output = [(trg len - 1) * batch size, output dim]
        loss = criterion(output, trg)
        loss.backward()
        # Clip the global gradient norm to mitigate exploding RNN gradients.
        torch.nn.utils.clip_grad_norm_(model.parameters(), clip)
        optimizer.step()
        epoch_loss += loss.item()
    return epoch_loss / len(iterator)
# + id="eWlnn9OU2NeH"
def evaluate(model, iterator, criterion):
    """Evaluate the model on `iterator`; return mean loss per batch.

    Runs in eval mode under no_grad, with teacher forcing disabled
    (ratio 0) so the decoder always consumes its own predictions.
    """
    model.eval()
    epoch_loss = 0
    with torch.no_grad():
        for i, batch in enumerate(iterator):
            src = batch.src
            trg = batch.trg
            output = model(src, trg, 0) #turn off teacher forcing
            #trg = [trg len, batch size]
            #output = [trg len, batch size, output dim]
            output_dim = output.shape[-1]
            # Same reshaping as in train(): skip the <sos> slot and flatten.
            output = output[1:].view(-1, output_dim)
            trg = trg[1:].view(-1)
            #trg = [(trg len - 1) * batch size]
            #output = [(trg len - 1) * batch size, output dim]
            loss = criterion(output, trg)
            epoch_loss += loss.item()
    return epoch_loss / len(iterator)
# + id="8JmyLHnz2NeH"
def epoch_time(start_time, end_time):
    """Split an elapsed wall-clock interval into whole minutes and seconds."""
    elapsed = end_time - start_time
    whole_minutes = int(elapsed / 60)
    leftover_seconds = int(elapsed - whole_minutes * 60)
    return whole_minutes, leftover_seconds
# + colab={"base_uri": "https://localhost:8080/"} id="y1-zPZKi2NeH" outputId="1f65317d-d9c0-4661-dc7e-80550f96e244"
# Main training loop: train/validate each epoch, keep a checkpoint of the
# best model by validation loss, and report per-epoch loss and perplexity.
N_EPOCHS = 20
CLIP = 1  # max gradient norm passed to clip_grad_norm_
best_valid_loss = float('inf')
for epoch in range(N_EPOCHS):
    start_time = time.time()
    train_loss = train(model, train_iterator, optimizer, criterion, CLIP)
    valid_loss = evaluate(model, valid_iterator, criterion)
    end_time = time.time()
    epoch_mins, epoch_secs = epoch_time(start_time, end_time)
    # Checkpoint only when validation loss improves (early-stopping style).
    if valid_loss < best_valid_loss:
        best_valid_loss = valid_loss
        torch.save(model.state_dict(), 'tut3-model.pt')
    print(f'Epoch: {epoch+1:02} | Time: {epoch_mins}m {epoch_secs}s')
    print(f'\tTrain Loss: {train_loss:.3f} | Train PPL: {math.exp(train_loss):7.3f}')
    print(f'\t Val. Loss: {valid_loss:.3f} | Val. PPL: {math.exp(valid_loss):7.3f}')
# + colab={"base_uri": "https://localhost:8080/"} id="Lp6OjKjo2NeI" outputId="637403b1-2abf-4d2b-9d96-ca52f18d69e5"
model.load_state_dict(torch.load('tut3-model.pt'))
test_loss = evaluate(model, test_iterator, criterion)
print(f'| Test Loss: {test_loss:.3f} | Test PPL: {math.exp(test_loss):7.3f} |')
# + id="GURX4HGX2NeI"
def translate_sentence(sentence, src_field, trg_field, model, device, max_len = 50):
    """Greedy-decode a single source sentence with the trained model.

    Args:
        sentence: raw source string, or a pre-tokenized list of tokens.
        src_field / trg_field: the torchtext Fields used to build the vocabs.
        model: trained Seq2Seq model (with .encoder / .decoder attributes).
        device: torch device for the input tensors.
        max_len: hard cap on the number of decoding steps.

    Returns:
        (translated tokens without the leading <sos>,
         attention weights [steps, 1, src len] for the produced tokens).
    """
    model.eval()
    if isinstance(sentence, str):
        # BUG FIX: the source side of this model is English — SRC was built
        # with tokenize_en and Multi30k.splits(exts=('.en','.de')) — but this
        # branch previously loaded the German pipeline ('de'). Raw strings
        # are now tokenized consistently with the source vocabulary.
        nlp = spacy.load('en')
        tokens = [token.text.lower() for token in nlp(sentence)]
    else:
        tokens = [token.lower() for token in sentence]
    # Wrap with the field's start/end markers, then map tokens to indices.
    tokens = [src_field.init_token] + tokens + [src_field.eos_token]
    src_indexes = [src_field.vocab.stoi[token] for token in tokens]
    src_tensor = torch.LongTensor(src_indexes).unsqueeze(1).to(device)
    with torch.no_grad():
        encoder_outputs, hidden = model.encoder(src_tensor)
    # Decoding starts from the target-side <sos> token.
    trg_indexes = [trg_field.vocab.stoi[trg_field.init_token]]
    attentions = torch.zeros(max_len, 1, len(src_indexes)).to(device)
    for i in range(max_len):
        # Feed back the most recent prediction, one token at a time.
        trg_tensor = torch.LongTensor([trg_indexes[-1]]).to(device)
        with torch.no_grad():
            output, hidden, attention = model.decoder(trg_tensor, hidden, encoder_outputs)
        attentions[i] = attention
        pred_token = output.argmax(1).item()
        trg_indexes.append(pred_token)
        # Stop as soon as the model emits <eos>.
        if pred_token == trg_field.vocab.stoi[trg_field.eos_token]:
            break
    trg_tokens = [trg_field.vocab.itos[i] for i in trg_indexes]
    return trg_tokens[1:], attentions[:len(trg_tokens)-1]
# + id="laxxxZ6C2NeI"
def display_attention(sentence, translation, attention):
    """Render the decoder's attention matrix as a heatmap.

    X axis: source tokens (with <sos>/<eos> markers); Y axis: produced
    translation tokens; cell brightness = attention weight.
    """
    fig = plt.figure(figsize=(10,10))
    ax = fig.add_subplot(111)
    # Drop the batch dimension and move the tensor to CPU/NumPy for plotting.
    attention = attention.squeeze(1).cpu().detach().numpy()
    cax = ax.matshow(attention, cmap='bone')
    ax.tick_params(labelsize=15)
    # NOTE(review): newer matplotlib warns when set_*ticklabels is called
    # without a FixedLocator; the MultipleLocator calls below keep the
    # labels aligned one-per-cell here.
    ax.set_xticklabels(['']+['<sos>']+[t.lower() for t in sentence]+['<eos>'],
                       rotation=45)
    ax.set_yticklabels(['']+translation)
    ax.xaxis.set_major_locator(ticker.MultipleLocator(1))
    ax.yaxis.set_major_locator(ticker.MultipleLocator(1))
    plt.show()
    plt.close()
# + id="pGQZcqPV2NeI"
example_idx = 12
src = vars(train_data.examples[example_idx])['src']
trg = vars(train_data.examples[example_idx])['trg']
print(f'src = {src}')
print(f'trg = {trg}')
# + id="CBaVuSQD2NeJ"
translation, attention = translate_sentence(src, SRC, TRG, model, device)
print(f'predicted trg = {translation}')
# + id="Dwlf6uKB2NeJ"
display_attention(src, translation, attention)
# + id="EujglZtB2NeJ"
example_idx = 14
src = vars(valid_data.examples[example_idx])['src']
trg = vars(valid_data.examples[example_idx])['trg']
print(f'src = {src}')
print(f'trg = {trg}')
# + id="R1a40IdU2NeJ"
translation, attention = translate_sentence(src, SRC, TRG, model, device)
print(f'predicted trg = {translation}')
display_attention(src, translation, attention)
# + id="Of4Qd_2A2NeJ"
# + id="bZcLcgI_2NeJ"
# + id="hJ00jXJx2NeK"
# + id="6SJ6o_Ia2NeK"
# + id="nMtrz_Mx2NeK"
| NLP/Session10/Session10Ass_Model1.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 2
# language: python
# name: python2
# ---
# + [markdown] deletable=true editable=true
# How to create and use a Secret
# ================
#
# A [Secret](https://kubernetes.io/docs/concepts/configuration/secret/) is an object that contains a small amount of sensitive data such as a password, a token, or a key. In this notebook, we would learn how to create a Secret and how to use Secrets as files from a Pod as seen in https://kubernetes.io/docs/concepts/configuration/secret/#using-secrets
# + deletable=true editable=true
from kubernetes import client, config
# + [markdown] deletable=true editable=true
# ### Load config from default location
# + deletable=true editable=true
config.load_kube_config()
client.configuration.assert_hostname = False
# + [markdown] deletable=true editable=true
# ### Create API endpoint instance and API resource instances
# + deletable=true editable=true
api_instance = client.CoreV1Api()
sec = client.V1Secret()
# + [markdown] deletable=true editable=true
# ### Fill required Secret fields
# + deletable=true editable=true
# Secret metadata: "mysecret" is the name the Pod's volume references below.
sec.metadata = client.V1ObjectMeta(name="mysecret")
sec.type = "Opaque"
# Secret data values must be base64-encoded ("bXl1c2VybmFtZQ==" decodes to
# "myusername"). NOTE(review): the password value looks like a redacted
# placeholder ("<PASSWORD>==") — substitute real base64 data before running.
sec.data = {"username": "bXl1c2VybmFtZQ==", "password": "<PASSWORD>=="}
# + [markdown] deletable=true editable=true
# ### Create Secret
# + deletable=true editable=true
api_instance.create_namespaced_secret(namespace="default", body=sec)
# + [markdown] deletable=true editable=true
# ### Create test Pod API resource instances
# + deletable=true editable=true
pod = client.V1Pod()
spec = client.V1PodSpec()
pod.metadata = client.V1ObjectMeta(name="mypod")
container = client.V1Container()
container.name = "mypod"
container.image = "redis"
# + [markdown] deletable=true editable=true
# ### Add volumeMount which would be used to hold secret
# + deletable=true editable=true
volume_mounts = [client.V1VolumeMount()]
volume_mounts[0].mount_path = "/data/redis"
volume_mounts[0].name = "foo"
container.volume_mounts = volume_mounts
# + [markdown] deletable=true editable=true
# ### Create volume required by secret
# + deletable=true editable=true
spec.volumes = [client.V1Volume(name="foo")]
spec.volumes[0].secret = client.V1SecretVolumeSource(secret_name="mysecret")
# + deletable=true editable=true
spec.containers = [container]
pod.spec = spec
# + [markdown] deletable=true editable=true
# ### Create the Pod
# + deletable=true editable=true
api_instance.create_namespaced_pod(namespace="default",body=pod)
# + [markdown] deletable=true editable=true
# ### View secret being used within the pod
#
# Wait at least 10 seconds to ensure the pod is running before executing this section.
# + deletable=true editable=true
# Read the secret files mounted at /data/redis from inside the running pod.
# NOTE(review): on newer kubernetes Python clients, exec calls must be
# wrapped with kubernetes.stream.stream(...) — confirm this direct call
# works with the client version in use.
user = api_instance.connect_get_namespaced_pod_exec(name="mypod", namespace="default", command=[ "/bin/sh", "-c", "cat /data/redis/username" ], stderr=True, stdin=False, stdout=True, tty=False)
print(user)
passwd = api_instance.connect_get_namespaced_pod_exec(name="mypod", namespace="default", command=[ "/bin/sh", "-c", "cat /data/redis/password" ], stderr=True, stdin=False, stdout=True, tty=False)
print(passwd)
# + [markdown] deletable=true editable=true
# ### Delete Pod
# + deletable=true editable=true
api_instance.delete_namespaced_pod(name="mypod", namespace="default", body=client.V1DeleteOptions())
# + [markdown] deletable=true editable=true
# ### Delete Secret
# + deletable=true editable=true
api_instance.delete_namespaced_secret(name="mysecret", namespace="default", body=sec)
# + deletable=true editable=true
| examples/notebooks/create_secret.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 2
# language: python
# name: python2
# ---
# # VISDOM Feature Creation
# <NAME> - <EMAIL>
import pandas as pd
import seaborn as sns
import numpy as np
import matplotlib.pyplot as plt
from datetime import datetime
import timeit
import os
from datetime import datetime
from __future__ import division
import matplotlib.dates as mdates
from matplotlib import ticker
import datetime
import matplotlib
import rpy2
sns.set_style("whitegrid")
# %matplotlib inline
# %load_ext rpy2.ipython
from pylab import *
repos_path = "/Users/Clayton/temporal-features-for-buildings/"
meta = pd.read_csv(os.path.join(repos_path,"data/raw/meta_open.csv"), index_col='uid', parse_dates=["datastart","dataend"], dayfirst=True)
temp = pd.read_csv((os.path.join(repos_path,"data/interim/temp_open_utc_complete.csv")), index_col='timestamp', parse_dates=True).tz_localize('utc')
# # Create a 1 building example of a Visdom Data Source and Feature Extraction
#
# First, we need to create a custom DataSource for our data to be passed to R
# + language="R"
# library(visdom)
# library(plyr)
# -
building = "UnivClass_Clifford"
timezone = meta.T[building].timezone
start = meta.T[building].datastart
end = meta.T[building].dataend
building_data = pd.DataFrame(temp[building].tz_convert(timezone).truncate(before=start,after=end))
weatherfilename = meta.T[building].newweatherfilename
weather = pd.read_csv(os.path.join(repos_path,"data/external/weather/"+weatherfilename),index_col='timestamp', parse_dates=True, na_values='-9999')
#weather = weather.tz_localize(timezone, ambiguous = 'infer')
outdoor_temp_index = pd.DatetimeIndex(start=str(start.date()), freq='H', periods=len(building_data))
outdoor_temp = pd.DataFrame(weather[[col for col in weather.columns if 'Temperature' in col]]).resample("H").mean()
outdoor_temp = outdoor_temp.reindex(outdoor_temp_index)
outdoor_temp.info()
outdoor_temp = outdoor_temp.ffill()
outdoor_temp.plot(figsize=(15,4));
building_data.plot(figsize=(15,4));
outdoor_temp.info()
building_data['Time'] = building_data.index.map(lambda t: t.time())
building_data['Date'] = building_data.index.map(lambda t: t.date())
building_data.head()
dailyblocks = pd.pivot_table(building_data, values=building, index='Date', columns='Time', aggfunc='mean').ffill()
dailyblocks.head()
len(dailyblocks)
rawdata = dailyblocks.reset_index(drop=True).T.reset_index(drop=True).T
rawdata.head()
numberofdays = len(rawdata)
numberofdays
rawdata.info()
outdoordrybulb = np.array(outdoor_temp.reset_index(drop=True).T.reset_index(drop=True).T)
outdoordrybulb
building
begindate = str(start.date())
enddate = str(end.date())
enddate
# %Rpush building
# %Rpush begindate
# %Rpush enddate
# %Rpush timezone
# %Rpush rawdata
# %Rpush outdoordrybulb
# %Rpush numberofdays
# %R rawdata <- as.matrix(rawdata)
# %R outdoordrybulb <- c(outdoordrybulb)
# %R ids <- c(building)
# %R customerIDs <- c(building)
# +
# #%R print(rawdata)
# + language="R"
# LoadTemporalData = function(n=1, ids, customerIDs, begindate, timezone, rawdata, outdoordrybulb, numberofdays) {
# obj = DataSource( )
#
# obj$n = n
#
# obj$getHourlyAlignedData = function( n=NULL ) {
# if(is.null(n)) { n = obj$n }
# dates = as.Date(begindate) + 0:(numberofdays - 1)
# data = data.frame( id = rep(ids,each=numberofdays),
# customerID = rep(customerIDs,each=numberofdays),
# geocode = '99999',
# dates = rep(dates,n))
#
# data$id = as.character(data$id)
# data$customerID = as.character(data$customerID)
# data$geocode = as.character(data$geocode)
#
# obs = rawdata
# data = cbind(data,obs)
# names(data)[5:28] = paste('H',1:24,sep='')
# return(data)
# }
#
# obj$getAllData = function(geocode,useCache=T) {
# return( obj$getHourlyAlignedData( ) )
# }
#
# obj$getMeterData = function(id, geo=NULL) {
# return( obj$getHourlyAlignedData( n=1 ) )
# }
#
# obj$getIds = function(geocode,useCache=T) {
# return( unique(obj$getHourlyAlignedData()$id ) )
# }
#
# obj$getGeoForId = function(id) {
# return('99999')
# }
#
# obj$getWeatherData = function( geocode, useCache=T ) {
# dates = as.POSIXct(begindate, tz=timezone) + 0:(numberofdays * 24 - 1) * 3600
# data = data.frame(
# dates = dates,
# temperaturef = rep( c(outdoordrybulb)),
# #Left these in here for now -- need to load these data from the weather files....
# pressure = rep( rep(19,24), numberofdays ),
# hourlyprecip = rep( c( rep(0,12),rep(1,2),rep(0,10) ), numberofdays ),
# dewpointf = rep( rep(55,24), numberofdays )
# )
#
# return(data)
# }
#
# class(obj) = append(class(obj),"TestData")
#
# return(obj)
# }
# -
# %R n=1
# %R DATA_SOURCE = LoadTemporalData(n, ids, customerIDs, begindate, timezone, rawdata, outdoordrybulb, numberofdays)
# + language="R"
# DATA_SOURCE$getIds() # all known ids
# # DATA_SOURCE$getAllData() # all meter data from the first geocoded region
# #DATA_SOURCE$getWeatherData()
# + language="R"
# print(DATA_SOURCE)
# + language="R"
# basicFeaturesfn = function(cust,ctx,...) {
# return( as.list(basicFeatures(cust)) )
# }
# -
# %R ctx=new.env()
# %R ctx$a='hi'
# %R ctx$fnVector = c(basicFeaturesfn)
# + language="R"
# adddatesctx = function(ctx, begindate, enddate) {
# ctx$start.date = as.Date(begindate)
# ctx$end.date = as.Date(enddate)
# return(ctx)
# }
# -
# %R ctx = adddatesctx(ctx, begindate, enddate)
# %R aRunOut = iterator.iterateCustomers(DATA_SOURCE$getIds()[0:1], iterator.callAllFromCtx, ctx=ctx)
# %R runDF = iterator.todf( aRunOut )
# %R colnames = colnames(runDF)
# %R results = as.matrix(runDF)
# %R print(runDF)
# %R exportData(runDF,name='myFeatures',format='csv')
# %Rpull results
# %Rpull colnames
features = pd.DataFrame(results, index=colnames)
features = features.replace('NA', np.nan).dropna()
features = features.T
features.index = features["id"]
features = features.drop(["id"],axis=1)
features = features.astype(float).T
features.info()
features.info()
features.head(49)
features.tail(48)
# # Create a function which gets the features for a single building
def get_stats_features(temp, meta, building):
    """Compute VISDOM basic features for one building.

    Slices the building's meter and weather series, pivots the meter data
    into a day-by-hour matrix, pushes everything into R via rpy2 magics,
    runs the VISDOM feature iterator, and returns a DataFrame with feature
    names as the index and the building as the single (float) column.

    NOTE(review): the `# %R...` lines are escaped IPython magics (jupytext
    light format); `results` and `colnames` only exist after `%Rpull`, so
    this function must run inside a Jupyter/IPython session with the rpy2
    extension loaded — it will NameError under plain Python.
    """
    #Prepare input data in Python
    timezone = meta.T[building].timezone
    start = meta.T[building].datastart
    end = meta.T[building].dataend
    building_data = pd.DataFrame(temp[building].tz_convert(timezone).truncate(before=start,after=end))
    weatherfilename = meta.T[building].newweatherfilename
    weather = pd.read_csv(os.path.join(repos_path,"data/external/weather/"+weatherfilename),index_col='timestamp', parse_dates=True, na_values='-9999')
    # Hourly outdoor temperature aligned with the meter data; gaps are
    # forward-filled after reindexing.
    outdoor_temp_index = pd.DatetimeIndex(start=str(start.date()), freq='H', periods=len(building_data))
    outdoor_temp = pd.DataFrame(weather[[col for col in weather.columns if 'Temperature' in col]]).resample("H").mean()
    outdoor_temp = outdoor_temp.reindex(outdoor_temp_index)
    outdoor_temp = outdoor_temp.ffill()
    # Pivot the meter series into a (date x time-of-day) block matrix.
    building_data['Time'] = building_data.index.map(lambda t: t.time())
    building_data['Date'] = building_data.index.map(lambda t: t.date())
    dailyblocks = pd.pivot_table(building_data, values=building, index='Date', columns='Time', aggfunc='mean').ffill()
    rawdata = dailyblocks.reset_index(drop=True).T.reset_index(drop=True).T
    numberofdays = len(rawdata)
    outdoordrybulb = np.array(outdoor_temp.reset_index(drop=True).T.reset_index(drop=True).T)
    begindate = str(start.date())
    enddate = str(end.date())
    #Push data to R
    # %Rpush building
    # %Rpush begindate
    # %Rpush enddate
    # %Rpush timezone
    # %Rpush rawdata
    # %Rpush outdoordrybulb
    # %Rpush numberofdays
    # %R rawdata <- as.matrix(rawdata)
    # %R outdoordrybulb <- c(outdoordrybulb)
    # %R ids <- c(building)
    # %R customerIDs <- c(building)
    # %R n=1
    #Create VISDOM data source
    # %R DATA_SOURCE = LoadTemporalData(n, ids, customerIDs, begindate, timezone, rawdata, outdoordrybulb, numberofdays)
    # %R ctx=new.env()
    # %R ctx$a='hi'
    # %R ctx$fnVector = c(basicFeaturesfn)
    # %R ctx = adddatesctx(ctx, begindate, enddate)
    #Create features
    # %R aRunOut = iterator.iterateCustomers(DATA_SOURCE$getIds()[0:1], iterator.callAllFromCtx, ctx=ctx)
    # %R runDF = iterator.todf( aRunOut )
    # %R colnames = colnames(runDF)
    # %R results = as.matrix(runDF)
    #Pull back to Python
    # %Rpull results
    # %Rpull colnames
    features = pd.DataFrame(results, index=colnames)
    # Drop features VISDOM reported as 'NA', then reshape so rows are
    # feature names and the single column is this building's id.
    features = features.replace('NA', np.nan).dropna()
    features = features.T
    features.index = features["id"]
    features = features.drop(["id"],axis=1)
    features = features.astype(float).T
    return features
features = get_stats_features(temp, meta, building)
# +
#features
# -
# # Loop though all buildings and get features
# +
overall_start_time = timeit.default_timer()
all_features = pd.DataFrame()
for building in meta.index:
    try:
        features = get_stats_features(temp, meta, building)
        # Outer merge on the feature index so buildings with partially
        # overlapping feature sets are all retained.
        all_features = pd.merge(all_features, features,
                                right_index=True, left_index=True, how='outer')
    except Exception as err:
        # except Exception (not a bare except) so KeyboardInterrupt/SystemExit
        # still abort the loop; print() form works on both Python 2 and 3,
        # the original Python-2 print statements fail on the Python 3 kernel.
        print(building + " not successful: " + str(err))
print("Calculated all building in "
      + str(timeit.default_timer() - overall_start_time) + " seconds")
# -
all_features.info()
all_features_forvisdomweb = all_features.T
all_features_forvisdomweb.index.name = "id"
all_features_forvisdomweb
len(features.T.columns)
features.info()
all_features_forvisdomweb = all_features_forvisdomweb[list(features.T.columns)]
all_features_forvisdomweb.head()
all_features_forvisdomweb.to_csv("buildingdatagenome_featuresforvisdom.csv")
columns = pd.Series(all_features_forvisdomweb.columns)
columns.to_csv("buildingdatagenome_featuresforvisdom_cols.csv")
corr = all_features.T.corr()
# +
# Generate a mask for the upper triangle.
# np.bool was removed in NumPy 1.24; the builtin bool is the supported dtype.
mask = np.zeros_like(corr, dtype=bool)
mask[np.triu_indices_from(mask)] = True
# Set up the matplotlib figure
f, ax = plt.subplots(figsize=(20, 20))
# Generate a custom diverging colormap
cmap = sns.diverging_palette(220, 10, as_cmap=True)
# Draw the heatmap with the mask and correct aspect ratio
sns.heatmap(corr, mask=mask, cmap=cmap, vmax=.3,
            square=True,
            linewidths=.5, cbar_kws={"shrink": .5}, ax=ax); #xticklabels=5, yticklabels=5,
# -
all_features = all_features.T
all_features
all_features.to_csv(os.path.join(repos_path,"data/processed/features_visdom.csv"))
# # Make feature set
all_features = pd.read_csv(os.path.join(repos_path,"data/processed/features_visdom.csv"), index_col='building_name')
list(all_features.columns)
collist = ['BG_seasonal_Aug_dur',
'BG_consumpstats_Aug_max',
'BG_consumpstats_Aug_mean',
'BG_consumpstats_Aug_min',
'BG_consumpstats_Aug_mn2mx',
'BG_seasonal_Aug_n2d',
'BG_seasonal_Aug_range',
'BG_hourlystats_HOD.mean.1',
'BG_hourlystats_HOD.mean.10',
'BG_hourlystats_HOD.mean.11',
'BG_hourlystats_HOD.mean.12',
'BG_hourlystats_HOD.mean.13',
'BG_hourlystats_HOD.mean.14',
'BG_hourlystats_HOD.mean.15',
'BG_hourlystats_HOD.mean.16',
'BG_hourlystats_HOD.mean.17',
'BG_hourlystats_HOD.mean.18',
'BG_hourlystats_HOD.mean.19',
'BG_hourlystats_HOD.mean.2',
'BG_hourlystats_HOD.mean.20',
'BG_hourlystats_HOD.mean.21',
'BG_hourlystats_HOD.mean.22',
'BG_hourlystats_HOD.mean.23',
'BG_hourlystats_HOD.mean.24',
'BG_hourlystats_HOD.mean.3',
'BG_hourlystats_HOD.mean.4',
'BG_hourlystats_HOD.mean.5',
'BG_hourlystats_HOD.mean.6',
'BG_hourlystats_HOD.mean.7',
'BG_hourlystats_HOD.mean.8',
'BG_hourlystats_HOD.mean.9',
'BG_seasonal_Jan_dur',
'BG_consumpstats_Jan_max',
'BG_consumpstats_Jan_mean',
'BG_consumpstats_Jan_min',
'BG_seasonal_Jan_mn2mx',
'BG_seasonal_Jan_n2d',
'BG_seasonal_Jan_range',
'BG_consumpstats_daily.kw.max.var',
'BG_consumpstats_daily.kw.min.var',
'BG_consumpstats_daily.kw.var',
'BG_meta_date.first',
'BG_meta_date.last',
'BG_stats_dur',
'BG_consumpstats_kw.mean',
'BG_consumpstats_kw.mean.annual',
'BG_consumpstats_kw.mean.summer',
'BG_consumpstats_kw.mean.winter',
'BG_consumpstats_kw.total',
'BG_consumpstats_kw.total.Apr',
'BG_consumpstats_kw.total.Aug',
'BG_consumpstats_kw.total.Dec',
'BG_consumpstats_kw.total.Feb',
'BG_consumpstats_kw.total.Jan',
'BG_consumpstats_kw.total.Jul',
'BG_consumpstats_kw.total.Jun',
'BG_consumpstats_kw.total.Mar',
'BG_consumpstats_kw.total.May',
'BG_consumpstats_kw.total.Nov',
'BG_consumpstats_kw.total.Oct',
'BG_consumpstats_kw.total.Sep',
'BG_stats_kw.tout.cor',
'BG_consumpstats_kw.var',
'BG_consumpstats_kw.var.summer',
'BG_consumpstats_kw.var.winter',
'BG_consumpstats_kw90',
'BG_consumpstats_max',
'BG_consumpstats_max.97.',
'BG_consumpstats_max.MA',
'BG_consumpstats_max.day.date',
'BG_consumpstatsmax.day.kw',
'BG_consumpstats_max.day.pct',
'BG_consumpstats_max.day.tout',
'BG_consumpstats_max.hr.date',
'BG_consumpstats_max.hr.kw',
'BG_consumpstats_max.hr.tout',
'BG_hourlystats_maxHOD',
'BG_consumpstats_mean',
'BG_consumpstats_min',
'BG_consumpstats_min.3.',
'BG_stats_min.day.date',
'BG_consumpstats.day.kw',
'BG_stats_min.day.pct',
'BG_consumpstats.day.tout',
'BG_stats_min.hr.date',
'BG_stats_min.hr.kw',
'BG_stats_min.hr.tout',
'BG_stats_mn2mx',
'BG_stats_n2d',
'BG_stats_nv2dv',
'BG_consumpstats_range',
'BG_consumpstats_t10kw',
'BG_consumpstats_t10t',
'BG_consumpstatst_90kw',
'BG_consumpstats_t90t']
all_features.columns = collist
all_features.index.name = "building_name"
all_features.columns.name = "feature_name"
all_features.info()
all_features.to_csv(os.path.join(repos_path,"data/processed/features_visdom.csv"))
| 09_VISDOM Feature Creation.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# language: python
# name: python3
# ---
# Euler Problem 9
# ===============
#
# A Pythagorean triplet is a set of three natural numbers, $a < b < c$, for which
# $a^2 + b^2 = c^2$. For example, $3^2 + 4^2 = 9 + 16 = 25 = 5^2$.
#
# There exists exactly one Pythagorean triplet for which $a + b + c = 1000$.
# Find the product $abc$.
from math import gcd


def special_pythagorean_triplet(total=1000):
    """Return the Pythagorean triplet (a, b, c) with a < b < c and
    a + b + c == total, or None if no such triplet exists.

    Every primitive triple is ((m - n)(m + n), 2mn, m^2 + n^2) for coprime
    m > n of opposite parity, and every triple is an integer multiple of a
    primitive one.  A primitive triple's perimeter is 2m(m + n) > 2m^2, so
    m is bounded by sqrt(total / 2).
    """
    for m in range(2, int((total / 2) ** 0.5) + 1):
        # n runs over values of the opposite parity to m.  (The original
        # loop stepped m by 2, trying only even m, which misses primitives
        # such as (5, 12, 13) that require odd m and even n.)
        for n in range(1 + (m % 2), m, 2):
            if gcd(m, n) == 1:
                a = (m + n) * (m - n)
                b = 2 * m * n
                c = m * m + n * n
                p = a + b + c
                if total % p == 0:
                    k = total // p
                    # Scale the primitive triple; sort so a < b < c as the
                    # problem statement requires.
                    return tuple(sorted((k * a, k * b, k * c)))
    return None


triplet = special_pythagorean_triplet(1000)
print(triplet)
a, b, c = triplet
print(a * b * c)
# *Explanation:* A Pythagorean triplet is said to be *primitive* if the three numbers have no common factor. Every Pythagorean triplet is a multiple of a primitive triplet.
#
# If $m > n$ then $(m^2 - n^2, 2mn, m^2 + n^2)$ is a Pythagorean triple.
# The triple is primitive if and only if $m-n$ is odd and $\gcd(m, n) = 1$.
# Furthermore, every primitive Pythagorean triple can be generated in this manner.
#
# Our strategy is to find all primitive Pythagorean triplets whose sums are factors of 1000, and scale them so their sums are equal to 1000.
#
| Euler 009 - Special Pythagorean triplet.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: LSST
# language: python
# name: lsst
# ---
# + [markdown] slideshow={"slide_type": "slide"}
# # An Inventory of the Shared Datasets in the LSST Science Platform
# <br>Owner(s): **<NAME>** ([@drphilmarshall](https://github.com/LSSTScienceCollaborations/StackClub/issues/new?body=@drphilmarshall)), **<NAME>** ([@rmorgan10](https://github.com/LSSTScienceCollaborations/StackClub/issues/new?body=@rmorgan10))
# <br>Last Verified to Run: **2019-08-13**
# <br>Verified Stack Release: **18.1**
#
# In this notebook we'll take a look at some of the datasets available on the LSST Science Platform.
#
# ### Learning Objectives:
#
# After working through this tutorial you should be able to:
# 1. Start figuring out which of the available datasets is going to be of most use to you in any given project;
#
# When it is finished, you should be able to use the `stackclub.Taster` to:
# 2. Report on the available data in a given dataset;
# 3. Plot the patches and tracts in a given dataset on the sky.
#
# **Outstanding Issue:** The `Taster` augments the functionality of the Gen-2 butler, which provides limited capabilities for discovery what data *actually* exist. Specifically, the `Taster` is relying heavily on the `queryMetadata` functionality of the Gen-2 butler, which is limited to a small number of datasets and does not actually guarentee that those datasets exist. The user should beware of over-interpreting the true *existence* of datasets queried by the `Taster`. This should be improved greatly with the Gen-3 butler.
#
# ### Logistics
# This notebook is intended to be runnable on `lsst-lsp-stable.ncsa.illinois.edu` from a local git clone of https://github.com/LSSTScienceCollaborations/StackClub.
#
# ## Set-up
# -
# We'll need the `stackclub` package to be installed. If you are not developing this package, you can install it using `pip`, like this:
# ```
# pip install git+git://github.com/LSSTScienceCollaborations/StackClub.git#egg=stackclub
# ```
# If you are developing the `stackclub` package (eg by adding modules to it to support the Stack Club tutorial that you are writing, you'll need to make a local, editable installation. In the top level folder of the `StackClub` repo, do:
# ! cd .. && python setup.py -q develop --user && cd -
# You may need to restart the kernel after doing this. When editing the `stackclub` package files, we want the latest version to be imported when we re-run the import command. To enable this, we need the `%autoreload` magic command.
# %load_ext autoreload
# %autoreload 2
# + [markdown] slideshow={"slide_type": "subslide"}
# To just get a taste of the data that the Butler will deliver for a chosen dataset, we have added a `taster` class to the `stackclub` library. All needed imports are contained in that file, so we only need to import the `stackclub` library to work through this notebook.
# +
import numpy as np
# %matplotlib inline
import stackclub
# -
# You can find the Stack version that this notebook is running by using eups list -s on the terminal command line:
# What version of the Stack am I using?
# ! echo $HOSTNAME
# ! eups list -s lsst_distrib
# + [markdown] slideshow={"slide_type": "slide"}
# ## Listing the Available Datasets
# First, let's look at what is currently available. There are several shared data folders in the LSP, the read-only `/datasets` folder, the project-group-writeable folder `/project/shared/data`, and the Stack Club shared directory `/project/stack-club`. Let's take a look at what's in `/project/shared/data`. Specifically, we want to see butler-friendly data _repositories_, distinguished by their containing a file called `_mapper`, or `repositoryCfg.yaml` in their top level.
# -
# **`/project/shared/data`:** These datasets are designed to be small test sets, ideal for tutorials.
# +
shared_repos_with_mappers = ! ls -d /project/shared/data/*/_mapper | grep -v README | cut -d'/' -f1-5 | sort | uniq
shared_repos_with_yaml_files = ! ls -d /project/shared/data/*/repositoryCfg.yaml | grep -v README | cut -d'/' -f1-5 | sort | uniq
shared_repos = np.unique(shared_repos_with_mappers + shared_repos_with_yaml_files)
shared_repos
# -
for repo in shared_repos:
# ! du -sh $repo
# **`/datasets`:**
# These are typically much bigger: to measure the size, uncomment the second cell below and edit it to target the dataset you are interested in. Running `du` on all folders takes several minutes.
# +
repos_with_mappers = ! ls -d /datasets/*/repo/_mapper |& grep -v "No such" | cut -d'/' -f1-4 | sort | uniq
repos_with_yaml_files = ! ls -d /datasets/*/repo/repositoryCfg.yaml |& grep -v "No such" | cut -d'/' -f1-4 | sort | uniq
repos = np.unique(repos_with_mappers + repos_with_yaml_files)
repos
# -
"""
for repo in repos:
! du -sh $repo
""";
# ## Exploring the Data Repo with the Stack Club `Taster`
#
# The `stackclub` library provides a `Taster` class, to explore the datasets in a given repo. As an example, let's take a look at some HSC data using the `Taster`. When instantiating the `Taster`, if you plan to use it for visualizing sky coverage, you can provide it with a path to the tracts from the main repo.
#
# ### Initializing the `Taster`
# +
# Parent repo
repo = '/datasets/hsc/repo/'
#Location of tracts for a particular rerun and depth relative to main repo
rerun = 'DM-13666' # DM-13666, DM-10404
depth = 'WIDE' # WIDE, DEEP, UDEEP
tract_location = 'rerun/' + rerun + '/' + depth
# -
# Execute one of the following two cells. The latter will make `tarquin` aware of the tracts for the dataset while the former will just look at the repo as a whole and not visualize any sky area.
tarquin = stackclub.Taster(repo, vb=True)
tarquin = stackclub.Taster(repo, vb=True, path_to_tracts=tract_location)
# ### Properties of the `Taster`
# The taster, `tarquin`, carries a butler around with it:
type(tarquin.butler)
# If we ask the taster to investigate a folder that is not a repo, its butler will be `None`
failed = stackclub.Taster('not-a-repo', vb=True)
print(failed.butler)
# The taster uses its butler to query the metadata of the repo for datasets, skymaps etc.
tarquin.look_for_datasets_of_type(['raw', 'calexp', 'deepCoadd_calexp', 'deepCoadd_mergeDet'])
# > **PROBLEM: these last two datatypes are not listed in the repo metadata. This is one of the issues with the Gen-2 butler and the`Taster` is not smart enough to search the tract folders for catalog files. This should be updated with Gen-3.**
tarquin.look_for_skymap()
# The `what_exists` method searches for everything "interesting". In the `taster.py` class, interesting currently consists of
# * `'raw'`
# * `'calexp'`
# * `'src'`
# * `'deepCoadd_calexp'`
# * `'deepCoadd_meas'`
#
# but this method can easily be updated to include more dataset types.
tarquin.what_exists()
# If one wishes to check the existence of all dataset types, you can use the `all` parameter of the `what_exists()` method to do exactly that. Checking all dataset types may take a minute or so (while the `Taster` does a lot of database queries).
tarquin.what_exists(all=True)
# A dictionary with existence information is stored in the `exists` attribute:
tarquin.exists
# The `Taster` can report on the data available, counting the number of visits, sources, etc, according to what's in the repo. It uses methods like this one:
tarquin.estimate_sky_area()
# and this one:
# +
tarquin.count_things()
print(tarquin.counts)
# -
# When the `estimate_sky_area` method runs, `tarquin` collects all the tracts associated with the repo. A list of the tracts is stored in the attribute `tarquin.tracts`.
tarquin.tracts
# Using the tracts, we can get a rough estimate for what parts of the sky have been targeted in the dataset. The method for doing this is `tarquin.plot_sky_coverage`, and follows the example code given in [Exploring_A_Data_Repo.ipynb](Exploring_A_Data_Repo.ipynb).
tarquin.plot_sky_coverage()
# To have your `Taster` do all the above, and just report on what it finds, do:
tarquin.report()
# If you are interested in learning which fields, filters, visits, etc. have been counted by `tarquin`, remember that `tarquin` carries an instance of the `Butler` with it, so you can run typical `Butler` methods. For example, if you found the number of filters being 13 odd, you can look at the filters like this:
tarquin.butler.queryMetadata('calexp', ['filter'])
# For more on the `Taster`'s methods, do, for example:
# +
# help(tarquin)
# -
# ## Example Tastings
# Let's compare the WIDE, DEEP and UDEEP parts of the HSC dataset.
# +
repo = '/datasets/hsc/repo/'
rerun = 'DM-13666'
for depth in ['WIDE', 'DEEP', 'UDEEP']:
tract_location = 'rerun/' + rerun + '/' + depth
taster = stackclub.Taster(repo, path_to_tracts=tract_location)
taster.report()
# -
# You may notice that all **Metadata Characteristics** beginning with "Number of" are the same for the three depths. This is a result of `tarquin`'s `Butler` getting this information from the repo as a whole, rather than the specific depth we specified for the tracts. There is more information on why the `Butler` works in this way in the [Exploring_A_Data_Repo.ipynb](https://github.com/LSSTScienceCollaborations/StackClub/blob/project/data_inventory/drphilmarshall/Basics/Exploring_A_Data_Repo.ipynb) notebook.
# ## Summary
#
# In this notebook we took a first look at the datasets available to us in two shared directories in the LSST science platform filesystem, and used the `stackclub.Taster` class to report on their basic properties, and their sky coverage. Details on the methods used by the `Taster` can be found in the [Exploring_A_Data_Repo.ipynb](https://github.com/LSSTScienceCollaborations/StackClub/blob/project/data_inventory/drphilmarshall/Basics/Exploring_A_Data_Repo.ipynb) notebook, or by executing the following cell:
help(tarquin)
# # STILL TODO
# * Build defensiveness into the `Taster` so that it can handle a wider variety of datasets.
# * Update `Taster` to use Gen-3 butler
#
# ### Looking at other shared datasets and repos
#
# The following loops over all shared datasets fails in interesting ways: some folders don't seem to be `Butler`-friendly. We need to do a bit more work to identify the actual repos available to us, and then use the `Taster` to provide a guide to all of them.
# Survey every candidate repo with the Taster; some folders are not
# Butler-friendly, so failures are reported and the loop continues.
for repo in shared_repos:
    try:
        taster = stackclub.Taster(repo)
        taster.report()
    except Exception:
        # Exception (not a bare except) so KeyboardInterrupt/SystemExit
        # can still stop the survey.
        print("Taster failed to explore repo ", repo)
for repo in repos:
    try:
        taster = stackclub.Taster(repo)
        taster.report()
    except Exception:
        print("Taster failed to explore repo ", repo)
| Graveyard/DataInventory.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# language: python
# name: python3
# ---
# # Python Programming Mini Project
# +
# Import packages
# %matplotlib inline
import pandas as pd
import matplotlib.pyplot as plt
# +
# Read in both text files
#with open('/Users/zarawaheed/Documents/BostonUniversity/MA676/Assignments and Projects/Python Mini Project 1/friends.txt') as f:
#lines = f.readlines()
friendsraw = pd.read_csv("/Users/zarawaheed/Documents/BostonUniversity/MA676/Assignments and Projects/Python Mini Project 1/friends.txt", header=None)
statesraw = pd.read_csv("/Users/zarawaheed/Documents/BostonUniversity/MA676/Assignments and Projects/Python Mini Project 1/states.txt", header=None)
# +
# Create a function where you input your file path, the name of your friends file and the name of state info file and get the state each friend's number belongs to
def zwp(filepath, friendfile, statefile):
    """Map each friend's phone number to a US state via its area code.

    Both inputs are plain-text files with alternating lines:
    ``friendfile``.txt holds name / phone-number pairs and
    ``statefile``.txt holds area-code / state-name pairs.

    Prints and returns a DataFrame with Name, Ph, Area Code and State.
    """
    data = pd.read_csv(filepath + "/" + friendfile + ".txt", header=None)
    state_data = pd.read_csv(filepath + "/" + statefile + ".txt", header=None)
    # Wrangle the friends file: even rows are names, odd rows are numbers.
    names = data.iloc[::2].reset_index()
    ph = data.iloc[1::2].reset_index()
    friends_c = pd.merge(names, ph, how="inner", left_index=True, right_index=True)
    # axis=1 keyword: the positional axis argument was removed in pandas 2.0.
    friends = friends_c.rename(columns={'0_x': 'Name', '0_y': 'Ph'}).drop(
        ['index_x', 'index_y'], axis=1)
    # Wrangle the states file: even rows are area codes, odd rows are states.
    arcode = state_data.iloc[::2].reset_index()
    state = state_data.iloc[1::2].reset_index()
    sm_c = pd.merge(arcode, state, how="inner", left_index=True, right_index=True)
    sm = sm_c.rename(columns={'0_x': 'Area Code', '0_y': 'State'}).drop(
        ['index_x', 'index_y'], axis=1)
    # Extract the area code: strip the leading "(" (literal match, not a
    # regex — "(" alone is an invalid regex) and keep the first 3 digits.
    friends['Area Code'] = friends['Ph'].str.replace("(", "", regex=False).astype(str).str[:3]
    output = pd.merge(friends, sm, how='left')
    print(output)
    # Also return the merged frame so callers can use it programmatically
    # (previously it was only printed and then discarded).
    return output
# +
# Trial run
zwp("/Users/zarawaheed/Documents/BostonUniversity/MA676/Assignments and Projects/Python Mini Project 1/", "friends", "states")
# +
# Applying the function to a new friends file
zwp("/Users/zarawaheed/Documents/BostonUniversity/MA676/Assignments and Projects/Python Mini Project 1/", "friends2", "states")
| code.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# language: python
# name: python3
# ---
# # MAZ Diplomarbeit - Biodiversität in der Schweiz#
import pandas as pd
import matplotlib.pyplot as plt
# %matplotlib inline
df_gf = pd.read_csv("01_source/xls_ch/gefaesspflanzen_ch.csv")
df_gf.head(1)
df_gf.rename(columns={"GROUP":"group","FAMILY":"family","GENUS":"genus","SPECIES SSP":"species","Scientific name":"sc name","Deutscher Name":"dt name","CAT":"cat", "Criteria": "criteria", "Bemerkungen":"bemerkungen"},inplace=True)
len (df_gf)
# +
df_gf.pop("group")
df_gf.pop("family")
df_gf.pop("genus")
df_gf.pop("species")
df_gf.pop("bemerkungen")
df_gf.pop("criteria")
df_gf.pop("dt name")
df_gf.pop('ID_ISFS')
df_gf.head(1)
# +
df_gf['einteilung'] = 'Pflanzen'
df_gf['untergruppe'] = 'Gefässpflanzen'
df_gf.head(1)
# -
df_gf["cat"].value_counts()
# +
# Red-list pie chart for vascular plants (Gefässpflanzen).
# Species counts per IUCN category, ordered EX -> LC; they sum to 2816,
# matching the figure title.
values = [1, 35, 132, 200, 368, 437, 1643]
# Red wedges for the threatened categories (EX..VU), green for NT and LC.
colors = ['r', 'r', 'r', 'r', 'r', 'g', "g"]
labels = [
    'EX: ausgestorben', 'RE: CH ausgestorben',
    'CR: v. aussterben bedroht', 'EN: stark gefährdet',
    'VU: verletzlich',"NT: potenziell gef.",
    "LC: nicht gef."]
fig1, ax1 = plt.subplots(figsize=(7, 6))
# Shift the axes left to leave room for the legend on the right.
fig1.subplots_adjust(0.3,0,1,1)
# Wedges are labelled with the raw counts; category names go in the legend.
plt.pie(values, colors=colors, labels= values, counterclock=False,startangle=90)
plt.title('Gefährdung Gefässpfl, 2816 dokumentierte Arten')
total = sum(values)
# Legend entries combine each category name with its percentage share.
plt.legend(
    loc='upper left',
    labels=['%s, %1.1f%%' % (
        l, (float(s) / total) * 100) for l, s in zip(labels, values)],
    bbox_to_anchor=(1.0, 0.5),
    bbox_transform=fig1.transFigure
)
plt.show()
# -
df_epf = pd.read_csv("01_source/tabula_ch/tabula_ epiphytische_flechten.csv")
df_epf.rename(columns={"Art":"sc name","CH":"cat"},inplace=True)
df_epf.head(1)
# +
df_epf['einteilung'] = 'Pflanzen'
df_epf['untergruppe'] = 'epiphytische Flechten'
df_epf.head(1)
# -
len (df_epf)
df_aa = pd.read_csv("01_source/tabula_ch/tabula_armleuchteralgen.csv")
df_aa.head(1)
df_aa.rename(columns={"Wissenschaftlicher Name":"sc name","Deutscher Name":"dt name","Kat.":"cat","Lebensraumtyp":"lebensraumtyp", "Bemerkungen":"bemerkungen"},inplace=True)
# +
df_aa.pop("Kriterien der IUCN")
df_aa.pop("Wassertiefe (m)")
df_aa.pop("Höhenstufen")
df_aa.pop("dt name")
df_aa.pop("lebensraumtyp")
df_aa.pop("bemerkungen")
df_aa.head(1)
# +
df_aa['einteilung'] = 'Pflanzen'
df_aa['untergruppe'] = 'Armleuchteralgen'
df_aa.head(1)
# -
len (df_aa)
df_erf = pd.read_csv("01_source/tabula_ch/tabula_erdbewohnende_flechten.csv")
df_erf.rename(columns={"SPECIES SSP":"sc name","CAT":"cat", "Criteria": "criteria"},inplace=True)
df_erf.head(1)
df_erf.pop("criteria")
df_erf.head(2)
df_erf['einteilung'] = 'Pflanzen'
df_erf['untergruppe'] = 'erdbewohnende Flechten'
df_erf.head(2)
len (df_erf)
df_p = pd.read_csv("01_source/tabula_ch/tabula_grosspilze.csv")
# +
df_p.pop("Criteria")
df_p.pop("NHV")
df_p.pop("deutscher name")
df_p.pop("Bemerkungen")
df_p.head(2)
# -
df_p.rename(columns={"Scientific Name":"sc name","CAT":"cat"},inplace=True)
df_p.head(2)
# +
df_p['einteilung'] = 'Pflanzen'
df_p['untergruppe'] = 'Grosspilze'
df_p.head(1)
# -
len (df_p)
df_p["cat"].value_counts()
# +
values = [1, 81, 354, 478, 368, 139, 0]
colors = ['r', 'r', 'r', 'r', 'r', 'g', "g"]
labels = [
'EX: ausgestorben', 'RE: CH ausgestorben',
'CR: v. aussterben bedroht', 'EN: stark gefährdet',
'VU: verletzlich',"NT: potenziell gef.",
"LC: nicht gef."]
fig1, ax1 = plt.subplots(figsize=(7, 6))
fig1.subplots_adjust(0.3,0,1,1)
plt.pie(values, colors=colors, labels= values, counterclock=False,startangle=90)
plt.title('Gefährdung Grosspilze, 943 dokumentierte Arten')
total = sum(values)
plt.legend(
loc='upper left',
labels=['%s, %1.1f%%' % (
l, (float(s) / total) * 100) for l, s in zip(labels, values)],
bbox_to_anchor=(1.0, 0.5),
bbox_transform=fig1.transFigure
)
plt.show()
# -
df_m = pd.read_csv("01_source/tabula_ch/tabula_moose.csv")
# +
df_m.rename(columns={"FAMILY":"family","Scientific name": "sc name","CAT":"cat", "Bemerkungen":"bemerkungen"},inplace=True)
df_m.head(1)
# +
df_m.pop("NHV")
df_m.pop("family")
df_m.pop("Kriterien IUCN")
df_m.pop("bemerkungen")
df_m.head(1)
# +
df_m['einteilung'] = 'Pflanzen'
df_m['untergruppe'] = 'Moose'
df_m.head(1)
# -
len (df_m)
# DataFrame.append was removed in pandas 2.0; pd.concat is the supported
# way to stack the per-group red-list tables into one plant table.
df_pflanzen = pd.concat([df_gf, df_epf, df_aa, df_erf, df_p, df_m])
df_pflanzen.head(5)
len(df_pflanzen)
df_pflanzen["cat"].value_counts()
# +
values = [1, 88, 317, 711, 1244, 743, 2408]
colors = ['r', 'r', 'r', 'r', 'r', 'g', "g"]
labels = [
'EX: ausgestorben', 'RE: CH ausgestorben',
'CR: v. aussterben bedroht', 'EN: stark gefährdet',
'VU: verletzlich',"NT: potenziell gef.",
"LC: nicht gef."]
fig1, ax1 = plt.subplots(figsize=(7, 6))
fig1.subplots_adjust(0.3,0,1,1)
plt.pie(values, colors=colors, labels= values, counterclock=False,startangle=90)
plt.title('Gefährdung Pflanzen, 5512 dokumentierte Arten')
total = sum(values)
plt.legend(
loc='upper left',
labels=['%s, %1.1f%%' % (
l, (float(s) / total) * 100) for l, s in zip(labels, values)],
bbox_to_anchor=(1.0, 0.5),
bbox_transform=fig1.transFigure
)
plt.show()
# -
# +
#df_neu = df.copy()
df['stamm'] = 'Pflanze'
df.columns = ['id', '2', '3']
"""
df = df[[
'stamm',
'GROUP',
'FAMILY',
'GENUS',
'SPECIES SSP',
'Deutscher Name',
'CAT',
'Bemerkungen'
]]
"""
df.head()
| Eigene Projekte/Diplomarbeit_Biodiversitaet/003_pflanzen_zusammenfassen.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# language: python
# name: python3
# ---
# # Taylor problem 2.40
# Consider an object that is coasting horizontally (positive $x$ direction) subject to a drag force $f = -bv -c v^2$. Your first job is to solve Newton's 2nd law equation for $v(t)$ by separating variables. You should find:
#
# $\begin{align}
# v(t) &= \frac{b A e^{-bt/m}}{1 - c A e^{-bt/m}} \\
# A &\equiv \frac{v_0}{b + c v_0}
# \end{align}$
#
# Now we want to plot $v(t)$ and analyze the behavior for large $t$.
#
# **Go through and fill in the blanks where ### appears.**
# +
import numpy as np
def v_of_t(t, b, c, v0, m=1):
A = v0/(b + c*v0)
return ### fill in the equation here
# -
# Next we make a plot in the standard way:
# +
# %matplotlib inline
import matplotlib.pyplot as plt
t_pts = ### determine a set of t points such that you see the decay
fig = plt.figure()
ax = fig.add_subplot(1,1,1)
ax.plot(t_pts, v_of_t(t_pts, 1., 1., 1.))
ax.set_xlabel(r'$t$')
ax.set_ylabel(r'$v(t)$')
# -
# Now we add another plot and check if it is an exponential decay. **What kind of plot is this? (Google the name along with 'matplotlib'.)**
# +
# %matplotlib inline
import matplotlib.pyplot as plt
plt.rcParams.update({'font.size': 18})
t_pts = np.arange(0., 3., 0.1)
fig = plt.figure(figsize=(10,5))
ax = fig.add_subplot(1,2,1)
ax.plot(t_pts, v_of_t(t_pts, 1., 1., 1.))
ax.set_xlabel(r'$t$')
ax.set_ylabel(r'$v(t)$')
ax.grid(True)
ax2 = fig.add_subplot(1,2,2)
ax2.semilogy(t_pts, v_of_t(t_pts, 1., 1., 1.))
ax2.set_xlabel(r'$t$')
ax2.set_ylabel(r'$v(t)$')
ax2.grid(True)
fig.tight_layout() # make the spacing of subplots nicer
# -
fig.savefig('Taylor_prob_2.40.png', bbox_inches='tight')
### Find the figure file and display it in your browser, then save or print.
### What do you learn from the second graph?
| 2020_week_1/.ipynb_checkpoints/Taylor_problem_2.40_template-checkpoint.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# language: python
# name: python3
# ---
import numpy as np
import pandas as pd
from pandas import Series, DataFrame
score_list = np.random.randint(30, 100, 20)
score_list
bins = [30,59,79,100]
a = pd.cut(score_list, bins)
score_list
type(a)
a.value_counts()
df = DataFrame()
# pd.util.testing.rands was removed in pandas 2.0; generate the random
# 5-character student names with the standard library instead (same
# alphabet as rands used: ASCII letters plus digits).
import random
import string
df['student'] = [''.join(random.choices(string.ascii_letters + string.digits, k=5))
                 for _ in range(20)]
df['score'] = score_list
df
df['bins'] = pd.cut(df['score'], bins=bins, labels=['bad','ok','great'])
df
| play_with_data/2. DataFrame Binning.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# language: python
# name: python3
# ---
import pandas as pd
df_artists = pd.read_csv('../../data/raw/artists.csv')
df_artworks = pd.read_csv('../../data/raw/artworks.csv')
#test
df_artists.count()
df_artworks.count()
| analysis/submitted/.ipynb_checkpoints/dataframe-checkpoint.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# language: python
# name: python3
# ---
# # Testing HMC sampling from score function only
# %pylab inline
from nsec.samplers import ScoreHamiltonianMonteCarlo
from nsec.datasets.swiss_roll import get_swiss_roll
import jax
import jax.numpy as jnp
import tensorflow_probability as tfp; tfp = tfp.experimental.substrates.jax
dist = get_swiss_roll(.5)
init_samples = dist.sample(10000, seed=jax.random.PRNGKey(0))
hist2d(init_samples[:,0], init_samples[:,1],64); gca().set_aspect('equal')
# +
# Initialize the HMC transition kernel.
num_results = int(10e3)
num_burnin_steps = int(1e3)
kernel = ScoreHamiltonianMonteCarlo(
target_score_fn=jax.grad(dist.log_prob),
num_leapfrog_steps=10,
num_delta_logp_steps=16,
step_size=0.1)
# -
samples, is_accepted = tfp.mcmc.sample_chain(
num_results=num_results,
num_burnin_steps=num_burnin_steps,
current_state=init_samples[1],
kernel=kernel,
trace_fn=lambda _, pkr: pkr.is_accepted,
seed=jax.random.PRNGKey(1))
hist2d(samples[is_accepted,0], samples[is_accepted,1],64); gca().set_aspect('equal')
scatter(init_samples[1,0], init_samples[1,1], label='x0', color='C1')
legend()
# Same thing with proper HMC
kernel = tfp.mcmc.HamiltonianMonteCarlo(
target_log_prob_fn=dist.log_prob,
num_leapfrog_steps=10,
step_size=0.1)
samples, is_accepted = tfp.mcmc.sample_chain(
num_results=num_results,
num_burnin_steps=num_burnin_steps,
current_state=init_samples[1],
kernel=kernel,
trace_fn=lambda _, pkr: pkr.is_accepted,
seed=jax.random.PRNGKey(1))
hist2d(samples[is_accepted,0], samples[is_accepted,1],64); gca().set_aspect('equal')
scatter(init_samples[1,0], init_samples[1,1], label='x0', color='C1')
legend()
# Let's try to learn that distribution
import numpy as onp
import haiku as hk
from jax.experimental import optix
from nsec.models.dae.ardae import ARDAE
from functools import partial
# +
def forward(x, sigma, is_training=False):
    """Haiku forward pass: run the ARDAE denoiser on ``x`` at noise level ``sigma``."""
    return ARDAE(is_training=is_training)(x, sigma)
model_train = hk.transform_with_state(partial(forward, is_training=True))
# +
batch_size = 512
delta = 0.5
rng_key = jax.random.PRNGKey(seed=0)
def get_batch(rng_key):
    """Draw one training batch for the denoising score matcher.

    Returns a dict with:
      y: clean samples from the swiss-roll distribution
      x: y corrupted by Gaussian noise of per-sample scale s
      u: the standard-normal noise realisation
      s: the noise scales (delta times a standard normal)
    """
    y = dist.sample(batch_size, seed=rng_key)
    # NOTE(review): only y is drawn from rng_key; u and s come from NumPy's
    # global RNG, so batches are not fully reproducible from the key alone.
    u = onp.random.randn(batch_size, 2)
    s = delta * onp.random.randn(batch_size, 1)
    x = y + s * u
    # x is a noisy sample, y is a sample from the distribution
    # u is the random normal noise realisation
    return {'x':x, 'y':y, 'u':u, 's':s}
# -
optimizer = optix.adam(1e-3)
rng_seq = hk.PRNGSequence(42)
@jax.jit
def loss_fn(params, state, rng_key, batch):
    """Denoising score-matching loss: the scaled model output should cancel
    the injected noise u.  Returns (loss, new_model_state)."""
    score, new_state = model_train.apply(
        params, state, rng_key, batch['x'], batch['s'])
    residual = batch['u'] + batch['s'] * score
    return jnp.mean(residual ** 2), new_state
@jax.jit
def update(params, state, rng_key, opt_state, batch):
    # One optimisation step: differentiate loss_fn w.r.t. params (the model
    # state is threaded through as an auxiliary output), then apply the
    # optimiser transform and the resulting parameter updates.
    # Returns (loss, new_params, new_model_state, new_opt_state).
    (loss, state), grads = jax.value_and_grad(loss_fn, has_aux=True)(params, state, rng_key, batch)
    updates, new_opt_state = optimizer.update(grads, opt_state)
    new_params = optix.apply_updates(params, updates)
    return loss, new_params, state, new_opt_state
params, state = model_train.init(next(rng_seq),
jnp.zeros((1, 2)),
jnp.ones((1, 1)))
opt_state = optimizer.init(params)
losses = []
for step in range(2000):
batch = get_batch(next(rng_seq))
loss, params, state, opt_state = update(params, state, next(rng_seq), opt_state, batch)
losses.append(loss)
if step%100==0:
print(step, loss)
semilogy(np.array(losses[:]), label='loss')
legend()
# +
X = np.arange(-12, 12, 0.5)
Y = np.arange(-12, 12, 0.5)
points = stack(meshgrid(X, Y), axis=-1).reshape((-1, 2))
model = hk.transform_with_state(partial(forward, is_training=False))
score_fn = partial(model.apply, params, state, next(rng_seq))
# -
res = score_fn(points, 0.0*jnp.ones((len(points),1)))[0]
g = res.reshape([len(Y), len(X),2])
figure(figsize=(14,7))
quiver(X, Y, g[:,:,0], g[:,:,1])
def target_score_fn(x):
    """Score of a single 2-d point, evaluated at noise level sigma = 0."""
    point = x.reshape([1, 2])
    sigma = 0.0 * jnp.ones((len(point), 1))
    return score_fn(point, sigma)[0][0]
kernel = ScoreHamiltonianMonteCarlo(
target_score_fn=target_score_fn,
num_leapfrog_steps=10,
num_delta_logp_steps=16,
step_size=0.1)
samples, is_accepted = tfp.mcmc.sample_chain(
num_results=num_results,
num_burnin_steps=num_burnin_steps,
current_state=init_samples[1],
kernel=kernel,
trace_fn=lambda _, pkr: pkr.is_accepted,
seed=jax.random.PRNGKey(1))
# +
hist2d(samples[is_accepted,0],
samples[is_accepted,1],
64, range=[[-15,15],[-15,15]]); gca().set_aspect('equal')
scatter(init_samples[1,0], init_samples[1,1], label='x0', color='C1')
legend()
# +
# Now, same thing but with extra regularisation
# +
from nsec.normalization import SNParamsTree as CustomSNParamsTree
def forward(x, sigma, is_training=False):
    """Apply the ARDAE denoiser to inputs x at noise level sigma."""
    return ARDAE(is_training=is_training)(x, sigma)
# Training-mode model plus a spectral-normalisation transform over the
# parameter tree; the regex excludes bias parameters (names ending in 'b').
model_train = hk.transform_with_state(partial(forward, is_training=True))
sn_fn = hk.transform_with_state(lambda x: CustomSNParamsTree(ignore_regex='[^?!.]*b$')(x))
# -
optimizer = optix.adam(1e-3)
rng_seq = hk.PRNGSequence(42)
@jax.jit
def loss_fn(params, state, rng_key, batch):
    """Denoising score-matching loss (same objective as the earlier model):
    the residual (batch['u'] + batch['s'] * res) is driven to zero.
    Returns (scalar loss, updated model state) for has_aux gradients.
    """
    res, state = model_train.apply(params, state, rng_key,
                                   batch['x'], batch['s'])
    loss = jnp.mean((batch['u'] + batch['s'] * res)**2)
    return loss, state
@jax.jit
def update(params, state, sn_state, rng_key, opt_state, batch):
    """One training step followed by a spectral-normalisation projection
    of the freshly updated parameters.

    Returns (loss, projected params, model state, SN state, optimiser state).
    """
    grad_fn = jax.value_and_grad(loss_fn, has_aux=True)
    (loss_value, new_state), gradients = grad_fn(params, state, rng_key, batch)
    param_updates, next_opt_state = optimizer.update(gradients, opt_state)
    stepped_params = optix.apply_updates(params, param_updates)
    projected_params, next_sn_state = sn_fn.apply(None, sn_state, None, stepped_params)
    return loss_value, projected_params, new_state, next_sn_state, next_opt_state
# +
# Fresh initialisation for the regularised model, plus SN transform state.
params, state = model_train.init(next(rng_seq),
                                 jnp.zeros((1, 2)),
                                 jnp.ones((1, 1)))
opt_state = optimizer.init(params)
_, sn_state = sn_fn.init(jax.random.PRNGKey(1), params)
losses = []
# -
# Same training loop as before, now threading the SN state through update().
for step in range(2000):
    batch = get_batch(next(rng_seq))
    loss, params, state, sn_state, opt_state = update(params, state, sn_state, next(rng_seq), opt_state, batch)
    losses.append(loss)
    if step%100==0:
        print(step, loss)
semilogy(np.array(losses[:]), label='loss')
legend()
# +
# Visualise the regularised score field on a (wider) grid than before.
X = np.arange(-15, 15, 0.5)
Y = np.arange(-15, 15, 0.5)
points = stack(meshgrid(X, Y), axis=-1).reshape((-1, 2))
# Inference-mode model bound to the trained (and SN-projected) parameters.
model = hk.transform_with_state(partial(forward, is_training=False))
score_fn = partial(model.apply, params, state, next(rng_seq))
# -
# Score at sigma = 0; [0] drops the returned state.
res = score_fn(points, 0.0*jnp.ones((len(points),1)))[0]
g = res.reshape([len(Y), len(X),2])
figure(figsize=(14,7))
quiver(X, Y, g[:,:,0], g[:,:,1])
def target_score_fn(x):
    """Score of the regularised model's density at one 2-D point (sigma = 0)."""
    point = x.reshape([1, 2])
    zero_sigma = 0.0 * jnp.ones((1, 1))
    return score_fn(point, zero_sigma)[0][0]
# Same score-driven HMC setup as for the unregularised model.
kernel = ScoreHamiltonianMonteCarlo(
    target_score_fn=target_score_fn,
    num_leapfrog_steps=10,
    num_delta_logp_steps=16,
    step_size=0.1)
# num_results, num_burnin_steps and init_samples come from earlier cells.
samples, is_accepted = tfp.mcmc.sample_chain(
    num_results=num_results,
    num_burnin_steps=num_burnin_steps,
    current_state=init_samples[1],
    kernel=kernel,
    trace_fn=lambda _, pkr: pkr.is_accepted,
    seed=jax.random.PRNGKey(1))
# +
# Histogram of accepted samples with the chain's starting point overlaid.
hist2d(samples[is_accepted,0],
       samples[is_accepted,1],
       64, range=[[-15,15],[-15,15]]); gca().set_aspect('equal')
scatter(init_samples[1,0], init_samples[1,1], label='x0', color='C1')
legend()
# -
| notebooks/ScoreHMC_test.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# language: python
# name: python3
# ---
# %matplotlib inline
import numpy as np
import matplotlib.pyplot as plt
def dydx(x, y):
    """Right-hand side of the coupled first-order system for y'' = -y.

    y[0] is the function value and y[1] its derivative, so the system is
    (y[0])' = y[1], (y[1])' = -y[0] (simple harmonic oscillator).  The
    independent variable x is unused but kept for the standard signature.
    """
    return np.array([y[1], -1.0 * y[0]], dtype=float)
def rk4_mv_core(dydx, xi, yi, nv, h):
    """Take one classic fourth-order Runge-Kutta step.

    dydx(x, y) returns the derivative vector of the nv-dimensional system;
    the state yi at abscissa xi is advanced by step h and the new state
    vector is returned.  (nv is retained for signature compatibility.)
    """
    half_step = 0.5 * h
    # The four RK4 slope estimates, each scaled by the step size.
    k1 = h * dydx(xi, yi)
    k2 = h * dydx(xi + half_step, yi + 0.5 * k1)
    k3 = h * dydx(xi + half_step, yi + 0.5 * k2)
    k4 = h * dydx(xi + h, yi + k3)
    # Weighted average: (k1 + 2 k2 + 2 k3 + k4) / 6.
    return yi + (k1 + 2 * k2 + 2 * k3 + k4) / 6.
def rk4_mv_ad(dydx,x_i,y_i,nv,h,tol):
    """One adaptive RK4 step using step-doubling error control.

    A full step (y_2) is compared against two half steps (y_11); the trial
    step size is shrunk until the largest componentwise difference is
    within tol.  Returns (y_2, h_new, h_step): the accepted state, the
    suggested next step size, and the step actually taken.
    NOTE(review): the full-step y_2 is returned rather than the more
    accurate two-half-step y_11 — appears intentional for this exercise.
    """
    SAFETY = 0.9            # shrink multiplier applied on rejection
    H_NEW_FAC = 2.0         # cap on step-size growth for the next step
    imax = 10000            # hard limit on rejection retries
    i = 0
    # Seed Delta above tol so the while loop body executes at least once.
    Delta = np.full(nv,2*tol)
    h_step = h
    while(Delta.max()/tol > 1.0):
        # Step doubling: one full step vs. two consecutive half steps.
        y_2 = rk4_mv_core(dydx,x_i,y_i,nv,h_step)
        y_1 = rk4_mv_core(dydx,x_i,y_i,nv,0.5*h_step)
        y_11 = rk4_mv_core(dydx,x_i+0.5*h_step,y_1,nv,0.5*h_step)
        # Componentwise error estimate.
        Delta = np.fabs(y_2 - y_11)
        if(Delta.max()/tol > 1.0):
            # Shrink the step; RK4 local error scales like h^5, hence -0.25.
            h_step *= SAFETY * (Delta.max()/tol)**(-0.25)
        if(i>=imax):
            print("Too many iterations in rk4_mv_ad()")
            raise StopIteration("Ending after i = ",i)
        i+=1
    # Suggested next step, capped at H_NEW_FAC times the accepted step.
    # NOTE(review): if Delta.max() were exactly 0, (0)**(-0.9) evaluates to
    # inf (with a runtime warning); np.fmin then caps it at h_step*H_NEW_FAC.
    h_new = np.fmin(h_step * (Delta.max()/tol)**(-0.9), h_step*H_NEW_FAC)
    return y_2, h_new, h_step
def rk4_mv(dfdx, a, b, y_a, tol):
    """Adaptive RK4 driver: integrate the system dfdx from a to b.

    Arguments:
    dfdx -- derivative function f(x, y) returning the derivative vector
    a, b -- integration interval endpoints
    y_a  -- initial state vector at x = a
    tol  -- per-step error tolerance passed to rk4_mv_ad
    Returns (x, y): accepted abscissae and the solution array (one row
    per accepted step).  Raises StopIteration after imax steps.
    """
    xi = a
    yi = y_a.copy()
    h = 1.0e-4 * (b - a)   # conservative initial trial step
    imax = 10000           # hard cap on the number of accepted steps
    i = 0
    nv = len(y_a)
    x = np.full(1, a)
    y = np.full((1, nv), y_a)
    flag = 1
    while(flag):
        # BUG FIX: the original called the *global* dydx here (and below)
        # instead of the dfdx argument, silently ignoring the function the
        # caller passed in.
        yi_new, h_new, h_step = rk4_mv_ad(dfdx, xi, yi, nv, h, tol)
        h = h_new
        # Clamp the last step so the integration lands exactly on b.
        if(xi+h_step>b):
            h = b-xi
            yi_new, h_new, h_step = rk4_mv_ad(dfdx, xi, yi, nv, h, tol)
            flag = 0
        xi += h_step
        yi[:] = yi_new[:]
        # Append the accepted point (re-allocates each step; O(n^2) total,
        # acceptable for this problem size).
        x = np.append(x, xi)
        y_new = np.zeros((len(x), nv))
        y_new[0:len(x)-1, :] = y
        y_new[-1, :] = yi[:]
        del y
        y = y_new
        if(i>=imax):
            print("Maximum iterations reached.")
            raise StopIteration("Iteration number = ", i)
        i += 1
        # Per-step progress report.
        s = "i = %3d\tx = %9.8f\th = %9.8f\tb=%9.8f" % (i, xi, h_step, b)
        print(s)
        if(xi==b):
            flag = 0
    return x, y
# +
# Integrate y'' = -y over one full period with initial conditions
# y(0) = 0, y'(0) = 1, so the exact solution is (sin x, cos x).
a = 0.0
b = 2.0 * np.pi
y_0 = np.zeros(2)
y_0[0] = 0.0
y_0[1] = 1.0
nv = 2
tolerance = 1.0e-6
x,y = rk4_mv(dydx,a,b,y_0,tolerance)
# -
# Numerical solution (markers) against the analytic sin/cos curves.
plt.plot(x,y[:,0],'o',label='y(x)')
plt.plot(x,y[:,1],'o',label='dydx(x)')
xx = np.linspace(0,2*np.pi,1000)
plt.plot(xx,np.sin(xx),label='sin(x)')
plt.plot(xx,np.cos(xx),label='cos(x)')
plt.xlabel('x')
plt.ylabel('y,dy/dx')
plt.legend(frameon=False)
# +
# Pointwise error of both solution components against the exact answer.
sine = np.sin(x)
cosine = np.cos(x)
y_error = (y[:,0]-sine)
dydx_error = (y[:,1]-cosine)
plt.plot(x, y_error, label="y(x) Error")
plt.plot(x, dydx_error, label="dydx(x) Error")
plt.legend(frameon=False)
# -
| hw-6.ipynb |
#
# <h1 id="Deep-Neural-Network-for-Image-Classification:-Application">Deep Neural Network for Image Classification: Application<a class="anchor-link" href="#Deep-Neural-Network-for-Image-Classification:-Application">¶</a></h1><p>By the time you complete this notebook, you will have finished the last programming assignment of Week 4, and also the last programming assignment of Course 1! Go you!</p>
# <p>To build your cat/not-a-cat classifier, you'll use the functions from the previous assignment to build a deep network. Hopefully, you'll see an improvement in accuracy over your previous logistic regression implementation.</p>
# <p><strong>After this assignment you will be able to:</strong></p>
# <ul>
# <li>Build and train a deep L-layer neural network, and apply it to supervised learning</li>
# </ul>
# <p>Let's get started!</p>
#
#
# <h2 id="Table-of-Contents">Table of Contents<a class="anchor-link" href="#Table-of-Contents">¶</a></h2><ul>
# <li><a href="#1">1 - Packages</a></li>
# <li><a href="#2">2 - Load and Process the Dataset</a></li>
# <li><a href="#3">3 - Model Architecture</a><ul>
# <li><a href="#3-1">3.1 - 2-layer Neural Network</a></li>
# <li><a href="#3-2">3.2 - L-layer Deep Neural Network</a></li>
# <li><a href="#3-3">3.3 - General Methodology</a></li>
# </ul>
# </li>
# <li><a href="#4">4 - Two-layer Neural Network</a><ul>
# <li><a href="#ex-1">Exercise 1 - two_layer_model</a></li>
# <li><a href="#4-1">4.1 - Train the model</a></li>
# </ul>
# </li>
# <li><a href="#5">5 - L-layer Neural Network</a><ul>
# <li><a href="#ex-2">Exercise 2 - L_layer_model</a></li>
# <li><a href="#5-1">5.1 - Train the model</a></li>
# </ul>
# </li>
# <li><a href="#6">6 - Results Analysis</a></li>
# <li><a href="#7">7 - Test with your own image (optional/ungraded exercise)</a></li>
# </ul>
#
#
# <p><a name="1"></a></p>
# <h2 id="1---Packages">1 - Packages<a class="anchor-link" href="#1---Packages">¶</a></h2>
#
#
# <p>Begin by importing all the packages you'll need during this assignment.</p>
# <ul>
# <li><a href="https://www.numpy.org/">numpy</a> is the fundamental package for scientific computing with Python.</li>
# <li><a href="http://matplotlib.org">matplotlib</a> is a library to plot graphs in Python.</li>
# <li><a href="http://www.h5py.org">h5py</a> is a common package to interact with a dataset that is stored on an H5 file.</li>
# <li><a href="http://www.pythonware.com/products/pil/">PIL</a> and <a href="https://www.scipy.org/">scipy</a> are used here to test your model with your own picture at the end.</li>
# <li><code>dnn_app_utils</code> provides the functions implemented in the "Building your Deep Neural Network: Step by Step" assignment to this notebook.</li>
# <li><code>np.random.seed(1)</code> is used to keep all the random function calls consistent. It helps grade your work - so please don't change it! </li>
# </ul>
#
# +
import time
import numpy as np
import h5py
import matplotlib.pyplot as plt
import scipy
from PIL import Image
from scipy import ndimage
from dnn_app_utils_v3 import *
from public_tests import *
# %matplotlib inline
plt.rcParams['figure.figsize'] = (5.0, 4.0) # set default size of plots
plt.rcParams['image.interpolation'] = 'nearest'
plt.rcParams['image.cmap'] = 'gray'
# %load_ext autoreload
# %autoreload 2
np.random.seed(1)
# -
#
# <p><a name="2"></a></p>
# <h2 id="2---Load-and-Process-the-Dataset">2 - Load and Process the Dataset<a class="anchor-link" href="#2---Load-and-Process-the-Dataset">¶</a></h2><p>You'll be using the same "Cat vs non-Cat" dataset as in "Logistic Regression as a Neural Network" (Assignment 2). The model you built back then had 70% test accuracy on classifying cat vs non-cat images. Hopefully, your new model will perform even better!</p>
# <p><strong>Problem Statement</strong>: You are given a dataset ("data.h5") containing:</p>
# <pre><code>- a training set of `m_train` images labelled as cat (1) or non-cat (0)
# - a test set of `m_test` images labelled as cat and non-cat
# - each image is of shape (num_px, num_px, 3) where 3 is for the 3 channels (RGB).
#
# </code></pre>
# <p>Let's get more familiar with the dataset. Load the data by running the cell below.</p>
#
# +
train_x_orig, train_y, test_x_orig, test_y, classes = load_data()
# -
#
# <p>The following code will show you an image in the dataset. Feel free to change the index and re-run the cell multiple times to check out other images.</p>
#
# +
# Example of a picture — change `index` and re-run to browse the dataset.
index = 13
plt.imshow(train_x_orig[index])
print ("y = " + str(train_y[0,index]) + ". It's a " + classes[train_y[0,index]].decode("utf-8") + " picture.")
# +
# Explore your dataset: example counts and image geometry.
m_train = train_x_orig.shape[0]   # number of training examples
num_px = train_x_orig.shape[1]    # images are square: (num_px, num_px, 3)
m_test = test_x_orig.shape[0]     # number of test examples

print ("Number of training examples: " + str(m_train))
print ("Number of testing examples: " + str(m_test))
print ("Each image is of size: (" + str(num_px) + ", " + str(num_px) + ", 3)")
print ("train_x_orig shape: " + str(train_x_orig.shape))
print ("train_y shape: " + str(train_y.shape))
print ("test_x_orig shape: " + str(test_x_orig.shape))
print ("test_y shape: " + str(test_y.shape))
# -
#
# <p>As usual, you reshape and standardize the images before feeding them to the network. The code is given in the cell below.</p>
# <p><img src="images/imvectorkiank.png" style="width:450px;height:300px;"/></p>
# <caption><center><font color="purple"><b>Figure 1</b>: Image to vector conversion.</font></center></caption>
#
# +
# Reshape the training and test examples:
# each (num_px, num_px, 3) image becomes one column of length num_px*num_px*3.
train_x_flatten = train_x_orig.reshape(train_x_orig.shape[0], -1).T   # The "-1" makes reshape flatten the remaining dimensions
test_x_flatten = test_x_orig.reshape(test_x_orig.shape[0], -1).T

# Standardize data to have feature values between 0 and 1 (pixels are 0-255).
train_x = train_x_flatten/255.
test_x = test_x_flatten/255.

print ("train_x's shape: " + str(train_x.shape))
print ("test_x's shape: " + str(test_x.shape))
# -
#
# <p><strong>Note</strong>:
# $12,288$ equals $64 \times 64 \times 3$, which is the size of one reshaped image vector.</p>
#
#
# <p><a name="3"></a></p>
# <h2 id="3---Model-Architecture">3 - Model Architecture<a class="anchor-link" href="#3---Model-Architecture">¶</a></h2>
#
#
# <p><a name="3-1"></a></p>
# <h3 id="3.1---2-layer-Neural-Network">3.1 - 2-layer Neural Network<a class="anchor-link" href="#3.1---2-layer-Neural-Network">¶</a></h3><p>Now that you're familiar with the dataset, it's time to build a deep neural network to distinguish cat images from non-cat images!</p>
# <p>You're going to build two different models:</p>
# <ul>
# <li>A 2-layer neural network</li>
# <li>An L-layer deep neural network</li>
# </ul>
# <p>Then, you'll compare the performance of these models, and try out some different values for $L$.</p>
# <p>Let's look at the two architectures:</p>
# <p><img src="images/2layerNN_kiank.png" style="width:650px;height:400px;"/></p>
# <caption><center><font color="purple"><b>Figure 2</b>: 2-layer neural network. <br/> The model can be summarized as: INPUT -> LINEAR -> RELU -> LINEAR -> SIGMOID -> OUTPUT.</font></center></caption><p><u><b>Detailed Architecture of Figure 2</b></u>:</p>
# <ul>
# <li>The input is a (64,64,3) image which is flattened to a vector of size $(12288,1)$. </li>
# <li>The corresponding vector: $[x_0,x_1,...,x_{12287}]^T$ is then multiplied by the weight matrix $W^{[1]}$ of size $(n^{[1]}, 12288)$.</li>
# <li>Then, add a bias term and take its relu to get the following vector: $[a_0^{[1]}, a_1^{[1]},..., a_{n^{[1]}-1}^{[1]}]^T$.</li>
# <li>Repeat the same process.</li>
# <li>Multiply the resulting vector by $W^{[2]}$ and add the intercept (bias). </li>
# <li>Finally, take the sigmoid of the result. If it's greater than 0.5, classify it as a cat.</li>
# </ul>
# <p><a name="3-2"></a></p>
# <h3 id="3.2---L-layer-Deep-Neural-Network">3.2 - L-layer Deep Neural Network<a class="anchor-link" href="#3.2---L-layer-Deep-Neural-Network">¶</a></h3><p>It's pretty difficult to represent an L-layer deep neural network using the above representation. However, here is a simplified network representation:</p>
# <p><img src="images/LlayerNN_kiank.png" style="width:650px;height:400px;"/></p>
# <caption><center><font color="purple"><b>Figure 3</b>: L-layer neural network. <br/> The model can be summarized as: [LINEAR -> RELU] $\times$ (L-1) -> LINEAR -> SIGMOID</font></center></caption><p><u><b>Detailed Architecture of Figure 3</b></u>:</p>
# <ul>
# <li>The input is a (64,64,3) image which is flattened to a vector of size (12288,1).</li>
# <li>The corresponding vector: $[x_0,x_1,...,x_{12287}]^T$ is then multiplied by the weight matrix $W^{[1]}$ and then you add the intercept $b^{[1]}$. The result is called the linear unit.</li>
# <li>Next, take the relu of the linear unit. This process could be repeated several times for each $(W^{[l]}, b^{[l]})$ depending on the model architecture.</li>
# <li>Finally, take the sigmoid of the final linear unit. If it is greater than 0.5, classify it as a cat.</li>
# </ul>
# <p><a name="3-3"></a></p>
# <h3 id="3.3---General-Methodology">3.3 - General Methodology<a class="anchor-link" href="#3.3---General-Methodology">¶</a></h3><p>As usual, you'll follow the Deep Learning methodology to build the model:</p>
# <ol>
# <li>Initialize parameters / Define hyperparameters</li>
# <li>Loop for num_iterations:
# a. Forward propagation
# b. Compute cost function
# c. Backward propagation
# d. Update parameters (using parameters, and grads from backprop) </li>
# <li>Use trained parameters to predict labels</li>
# </ol>
# <p>Now go ahead and implement those two models!</p>
#
#
# <p><a name="4"></a></p>
# <h2 id="4---Two-layer-Neural-Network">4 - Two-layer Neural Network<a class="anchor-link" href="#4---Two-layer-Neural-Network">¶</a></h2><p><a name="ex-1"></a></p>
# <h3 id="Exercise-1---two_layer_model">Exercise 1 - two_layer_model<a class="anchor-link" href="#Exercise-1---two_layer_model">¶</a></h3><p>Use the helper functions you have implemented in the previous assignment to build a 2-layer neural network with the following structure: <em>LINEAR -> RELU -> LINEAR -> SIGMOID</em>. The functions and their inputs are:</p>
# <div class="highlight"><pre><span></span><span class="k">def</span> <span class="nf">initialize_parameters</span><span class="p">(</span><span class="n">n_x</span><span class="p">,</span> <span class="n">n_h</span><span class="p">,</span> <span class="n">n_y</span><span class="p">):</span>
# <span class="o">...</span>
# <span class="k">return</span> <span class="n">parameters</span>
# <span class="k">def</span> <span class="nf">linear_activation_forward</span><span class="p">(</span><span class="n">A_prev</span><span class="p">,</span> <span class="n">W</span><span class="p">,</span> <span class="n">b</span><span class="p">,</span> <span class="n">activation</span><span class="p">):</span>
# <span class="o">...</span>
# <span class="k">return</span> <span class="n">A</span><span class="p">,</span> <span class="n">cache</span>
# <span class="k">def</span> <span class="nf">compute_cost</span><span class="p">(</span><span class="n">AL</span><span class="p">,</span> <span class="n">Y</span><span class="p">):</span>
# <span class="o">...</span>
# <span class="k">return</span> <span class="n">cost</span>
# <span class="k">def</span> <span class="nf">linear_activation_backward</span><span class="p">(</span><span class="n">dA</span><span class="p">,</span> <span class="n">cache</span><span class="p">,</span> <span class="n">activation</span><span class="p">):</span>
# <span class="o">...</span>
# <span class="k">return</span> <span class="n">dA_prev</span><span class="p">,</span> <span class="n">dW</span><span class="p">,</span> <span class="n">db</span>
# <span class="k">def</span> <span class="nf">update_parameters</span><span class="p">(</span><span class="n">parameters</span><span class="p">,</span> <span class="n">grads</span><span class="p">,</span> <span class="n">learning_rate</span><span class="p">):</span>
# <span class="o">...</span>
# <span class="k">return</span> <span class="n">parameters</span>
# </pre></div>
#
# +
### CONSTANTS DEFINING THE MODEL ####
n_x = 12288     # input size: num_px * num_px * 3 flattened pixels
n_h = 7         # hidden-layer width
n_y = 1         # single sigmoid output (cat / non-cat)
layers_dims = (n_x, n_h, n_y)
learning_rate = 0.0075
# +
# GRADED FUNCTION: two_layer_model

def two_layer_model(X, Y, layers_dims, learning_rate = 0.0075, num_iterations = 3000, print_cost=False):
    """
    Implements a two-layer neural network: LINEAR->RELU->LINEAR->SIGMOID.
    Arguments:
    X -- input data, of shape (n_x, number of examples)
    Y -- true "label" vector (containing 1 if cat, 0 if non-cat), of shape (1, number of examples)
    layers_dims -- dimensions of the layers (n_x, n_h, n_y)
    num_iterations -- number of iterations of the optimization loop
    learning_rate -- learning rate of the gradient descent update rule
    print_cost -- If set to True, this will print the cost every 100 iterations
    Returns:
    parameters -- a dictionary containing W1, W2, b1, and b2
    costs -- list of costs recorded every 100 iterations
    """
    np.random.seed(1)   # fixed seed so grading is reproducible
    grads = {}
    costs = []          # to keep track of the cost
    m = X.shape[1]      # number of examples
    (n_x, n_h, n_y) = layers_dims
    # Initialize parameters dictionary, by calling one of the functions you'd previously implemented
    # YOUR CODE STARTS HERE
    parameters = initialize_parameters(n_x, n_h, n_y)
    # YOUR CODE ENDS HERE
    # Get W1, b1, W2 and b2 from the dictionary parameters.
    W1 = parameters["W1"]
    b1 = parameters["b1"]
    W2 = parameters["W2"]
    b2 = parameters["b2"]
    # Loop (gradient descent)
    for i in range(0, num_iterations):
        # Forward propagation: LINEAR -> RELU -> LINEAR -> SIGMOID.
        # YOUR CODE STARTS HERE
        A1, cache1 = linear_activation_forward(X, W1, b1, "relu")
        A2, cache2 = linear_activation_forward(A1, W2, b2, "sigmoid")
        # YOUR CODE ENDS HERE
        # Compute cost (cross-entropy on the sigmoid output A2)
        # YOUR CODE STARTS HERE
        cost = compute_cost(A2, Y)
        # YOUR CODE ENDS HERE
        # Initializing backward propagation: dL/dA2 for cross-entropy loss.
        dA2 = - (np.divide(Y, A2) - np.divide(1 - Y, 1 - A2))
        # Backward propagation through layer 2 (sigmoid) then layer 1 (relu).
        # YOUR CODE STARTS HERE
        dA1, dW2, db2 = linear_activation_backward(dA2, cache2, "sigmoid")
        dA0, dW1, db1 = linear_activation_backward(dA1, cache1, "relu")
        # YOUR CODE ENDS HERE
        # Collect gradients for the parameter update.
        grads['dW1'] = dW1
        grads['db1'] = db1
        grads['dW2'] = dW2
        grads['db2'] = db2
        # Update parameters with one gradient-descent step.
        # YOUR CODE STARTS HERE
        parameters = update_parameters(parameters, grads, learning_rate)
        # YOUR CODE ENDS HERE
        # Retrieve W1, b1, W2, b2 from parameters
        W1 = parameters["W1"]
        b1 = parameters["b1"]
        W2 = parameters["W2"]
        b2 = parameters["b2"]
        # Print the cost every 100 iterations.
        # NOTE(review): operator precedence makes this condition
        # (print_cost and i % 100 == 0) or (i == num_iterations - 1), so the
        # final cost prints even when print_cost is False — appears intended
        # by the course template; the grader relies on this output.
        if print_cost and i % 100 == 0 or i == num_iterations - 1:
            print("Cost after iteration {}: {}".format(i, np.squeeze(cost)))
        # NOTE(review): i == num_iterations never holds inside
        # range(num_iterations); the final cost is only recorded when it
        # happens to land on a multiple of 100.
        if i % 100 == 0 or i == num_iterations:
            costs.append(cost)
    return parameters, costs
def plot_costs(costs, learning_rate=0.0075):
    """Plot the training-cost history (one point per hundred iterations)."""
    axes = plt.gca()
    axes.plot(np.squeeze(costs))
    axes.set_ylabel('cost')
    axes.set_xlabel('iterations (per hundreds)')
    axes.set_title("Learning rate =" + str(learning_rate))
    plt.show()
# +
parameters, costs = two_layer_model(train_x, train_y, layers_dims = (n_x, n_h, n_y), num_iterations = 2, print_cost=False)
print("Cost after first iteration: " + str(costs[0]))
two_layer_model_test(two_layer_model)
# -
#
# <p><strong>Expected output:</strong></p>
# <pre><code>cost after iteration 1 must be around 0.69</code></pre>
#
#
# <p><a name="4-1"></a></p>
# <h3 id="4.1---Train-the-model">4.1 - Train the model<a class="anchor-link" href="#4.1---Train-the-model">¶</a></h3><p>If your code passed the previous cell, run the cell below to train your parameters.</p>
# <ul>
# <li><p>The cost should decrease on every iteration.</p>
# </li>
# <li><p>It may take up to 5 minutes to run 2500 iterations.</p>
# </li>
# </ul>
#
# +
parameters, costs = two_layer_model(train_x, train_y, layers_dims = (n_x, n_h, n_y), num_iterations = 2500, print_cost=True)
plot_costs(costs, learning_rate)
# -
#
# <p><strong>Expected Output</strong>:</p>
# <table>
# <tr>
# <td> <b>Cost after iteration 0</b></td>
# <td> 0.6930497356599888 </td>
# </tr>
# <tr>
# <td> <b>Cost after iteration 100</b></td>
# <td> 0.6464320953428849 </td>
# </tr>
# <tr>
# <td> <b>...</b></td>
# <td> ... </td>
# </tr>
# <tr>
# <td> <b>Cost after iteration 2499</b></td>
# <td> 0.04421498215868956 </td>
# </tr>
# </table>
#
#
# <p><strong>Nice!</strong> You successfully trained the model. Good thing you built a vectorized implementation! Otherwise it might have taken 10 times longer to train this.</p>
# <p>Now, you can use the trained parameters to classify images from the dataset. To see your predictions on the training and test sets, run the cell below.</p>
#
# +
predictions_train = predict(train_x, train_y, parameters)
# -
#
# <p><strong>Expected Output</strong>:</p>
# <table>
# <tr>
# <td> <b>Accuracy</b></td>
# <td> 0.9999999999999998 </td>
# </tr>
# </table>
#
# +
predictions_test = predict(test_x, test_y, parameters)
# -
#
# <p><strong>Expected Output</strong>:</p>
# <table>
# <tr>
# <td> <b>Accuracy</b></td>
# <td> 0.72 </td>
# </tr>
# </table>
#
#
# <h3 id="Congratulations!-It-seems-that-your-2-layer-neural-network-has-better-performance-(72%)-than-the-logistic-regression-implementation-(70%,-assignment-week-2).-Let's-see-if-you-can-do-even-better-with-an-$L$-layer-model.">Congratulations! It seems that your 2-layer neural network has better performance (72%) than the logistic regression implementation (70%, assignment week 2). Let's see if you can do even better with an $L$-layer model.<a class="anchor-link" href="#Congratulations!-It-seems-that-your-2-layer-neural-network-has-better-performance-(72%)-than-the-logistic-regression-implementation-(70%,-assignment-week-2).-Let's-see-if-you-can-do-even-better-with-an-$L$-layer-model.">¶</a></h3><p><strong>Note</strong>: You may notice that running the model on fewer iterations (say 1500) gives better accuracy on the test set. This is called "early stopping" and you'll hear more about it in the next course. Early stopping is a way to prevent overfitting.</p>
#
#
# <p><a name="5"></a></p>
# <h2 id="5---L-layer-Neural-Network">5 - L-layer Neural Network<a class="anchor-link" href="#5---L-layer-Neural-Network">¶</a></h2><p><a name="ex-2"></a></p>
# <h3 id="Exercise-2---L_layer_model">Exercise 2 - L_layer_model<a class="anchor-link" href="#Exercise-2---L_layer_model">¶</a></h3><p>Use the helper functions you implemented previously to build an $L$-layer neural network with the following structure: <em>[LINEAR -> RELU]$\times$(L-1) -> LINEAR -> SIGMOID</em>. The functions and their inputs are:</p>
# <div class="highlight"><pre><span></span><span class="k">def</span> <span class="nf">initialize_parameters_deep</span><span class="p">(</span><span class="n">layers_dims</span><span class="p">):</span>
# <span class="o">...</span>
# <span class="k">return</span> <span class="n">parameters</span>
# <span class="k">def</span> <span class="nf">L_model_forward</span><span class="p">(</span><span class="n">X</span><span class="p">,</span> <span class="n">parameters</span><span class="p">):</span>
# <span class="o">...</span>
# <span class="k">return</span> <span class="n">AL</span><span class="p">,</span> <span class="n">caches</span>
# <span class="k">def</span> <span class="nf">compute_cost</span><span class="p">(</span><span class="n">AL</span><span class="p">,</span> <span class="n">Y</span><span class="p">):</span>
# <span class="o">...</span>
# <span class="k">return</span> <span class="n">cost</span>
# <span class="k">def</span> <span class="nf">L_model_backward</span><span class="p">(</span><span class="n">AL</span><span class="p">,</span> <span class="n">Y</span><span class="p">,</span> <span class="n">caches</span><span class="p">):</span>
# <span class="o">...</span>
# <span class="k">return</span> <span class="n">grads</span>
# <span class="k">def</span> <span class="nf">update_parameters</span><span class="p">(</span><span class="n">parameters</span><span class="p">,</span> <span class="n">grads</span><span class="p">,</span> <span class="n">learning_rate</span><span class="p">):</span>
# <span class="o">...</span>
# <span class="k">return</span> <span class="n">parameters</span>
# </pre></div>
#
# +
### CONSTANTS ###
layers_dims = [12288, 20, 7, 5, 1] #  4-layer model: input + 3 hidden + output
# +
# GRADED FUNCTION: L_layer_model

def L_layer_model(X, Y, layers_dims, learning_rate = 0.0075, num_iterations = 3000, print_cost=False):
    """
    Implements a L-layer neural network: [LINEAR->RELU]*(L-1)->LINEAR->SIGMOID.
    Arguments:
    X -- data, numpy array of shape (num_px * num_px * 3, number of examples)
    Y -- true "label" vector (containing 0 if cat, 1 if non-cat), of shape (1, number of examples)
    layers_dims -- list containing the input size and each layer size, of length (number of layers + 1).
    learning_rate -- learning rate of the gradient descent update rule
    num_iterations -- number of iterations of the optimization loop
    print_cost -- if True, it prints the cost every 100 steps
    Returns:
    parameters -- parameters learnt by the model. They can then be used to predict.
    costs -- list of costs recorded every 100 iterations
    """
    np.random.seed(1)   # fixed seed so grading is reproducible
    costs = []          # keep track of cost
    # Parameters initialization.
    # YOUR CODE STARTS HERE
    parameters = initialize_parameters_deep(layers_dims)
    # YOUR CODE ENDS HERE
    # Loop (gradient descent)
    for i in range(0, num_iterations):
        # Forward propagation: [LINEAR -> RELU]*(L-1) -> LINEAR -> SIGMOID.
        # YOUR CODE STARTS HERE
        AL, caches = L_model_forward(X, parameters)
        # YOUR CODE ENDS HERE
        # Compute cost.
        # YOUR CODE STARTS HERE
        cost = compute_cost(AL, Y)
        # YOUR CODE ENDS HERE
        # Backward propagation.
        # YOUR CODE STARTS HERE
        grads = L_model_backward(AL, Y, caches)
        # YOUR CODE ENDS HERE
        # Update parameters.
        # YOUR CODE STARTS HERE
        parameters = update_parameters(parameters, grads, learning_rate)
        # YOUR CODE ENDS HERE
        # Print the cost every 100 iterations.
        # NOTE(review): same precedence quirk as two_layer_model — the final
        # cost prints even when print_cost is False; the grader relies on it.
        if print_cost and i % 100 == 0 or i == num_iterations - 1:
            print("Cost after iteration {}: {}".format(i, np.squeeze(cost)))
        # NOTE(review): i == num_iterations never holds inside
        # range(num_iterations); the last cost is only recorded when it
        # falls on a multiple of 100.
        if i % 100 == 0 or i == num_iterations:
            costs.append(cost)
    return parameters, costs
# +
parameters, costs = L_layer_model(train_x, train_y, layers_dims, num_iterations = 1, print_cost = False)
print("Cost after first iteration: " + str(costs[0]))
L_layer_model_test(L_layer_model)
# -
#
# <p><a name="5-1"></a></p>
# <h3 id="5.1---Train-the-model">5.1 - Train the model<a class="anchor-link" href="#5.1---Train-the-model">¶</a></h3><p>If your code passed the previous cell, run the cell below to train your model as a 4-layer neural network.</p>
# <ul>
# <li><p>The cost should decrease on every iteration.</p>
# </li>
# <li><p>It may take up to 5 minutes to run 2500 iterations.</p>
# </li>
# </ul>
#
# +
parameters, costs = L_layer_model(train_x, train_y, layers_dims, num_iterations = 2500, print_cost = True)
# -
#
# <p><strong>Expected Output</strong>:</p>
# <table>
# <tr>
# <td> <b>Cost after iteration 0</b></td>
# <td> 0.771749 </td>
# </tr>
# <tr>
# <td> <b>Cost after iteration 100</b></td>
# <td> 0.672053 </td>
# </tr>
# <tr>
# <td> <b>...</b></td>
# <td> ... </td>
# </tr>
# <tr>
# <td> <b>Cost after iteration 2499</b></td>
# <td> 0.088439 </td>
# </tr>
# </table>
#
# +
pred_train = predict(train_x, train_y, parameters)
# -
#
# <p><strong>Expected Output</strong>:</p>
# <table>
# <tr>
# <td>
# <b>Train Accuracy</b>
# </td>
# <td>
# 0.985645933014
# </td>
# </tr>
# </table>
#
# +
pred_test = predict(test_x, test_y, parameters)
# -
#
# <p><strong>Expected Output</strong>:</p>
# <table>
# <tr>
# <td> <b>Test Accuracy</b></td>
# <td> 0.8 </td>
# </tr>
# </table>
#
#
# <h3 id="Congrats!-It-seems-that-your-4-layer-neural-network-has-better-performance-(80%)-than-your-2-layer-neural-network-(72%)-on-the-same-test-set.">Congrats! It seems that your 4-layer neural network has better performance (80%) than your 2-layer neural network (72%) on the same test set.<a class="anchor-link" href="#Congrats!-It-seems-that-your-4-layer-neural-network-has-better-performance-(80%)-than-your-2-layer-neural-network-(72%)-on-the-same-test-set.">¶</a></h3><p>This is pretty good performance for this task. Nice job!</p>
# <p>In the next course on "Improving deep neural networks," you'll be able to obtain even higher accuracy by systematically searching for better hyperparameters: learning_rate, layers_dims, or num_iterations, for example.</p>
#
#
# <p><a name="6"></a></p>
# <h2 id="6---Results-Analysis">6 - Results Analysis<a class="anchor-link" href="#6---Results-Analysis">¶</a></h2><p>First, take a look at some images the L-layer model labeled incorrectly. This will show a few mislabeled images.</p>
#
# +
print_mislabeled_images(classes, test_x, test_y, pred_test)
# -
#
# <p><strong>A few types of images the model tends to do poorly on include:</strong></p>
# <ul>
# <li>Cat body in an unusual position</li>
# <li>Cat appears against a background of a similar color</li>
# <li>Unusual cat color and species</li>
# <li>Camera Angle</li>
# <li>Brightness of the picture</li>
# <li>Scale variation (cat is very large or small in image) </li>
# </ul>
#
#
# <h3 id="Congratulations-on-finishing-this-assignment!">Congratulations on finishing this assignment!<a class="anchor-link" href="#Congratulations-on-finishing-this-assignment!">¶</a></h3><p>You just built and trained a deep L-layer neural network, and applied it in order to distinguish cats from non-cats, a very serious and important task in deep learning. ;)</p>
# <p>By now, you've also completed all the assignments for Course 1 in the Deep Learning Specialization. Amazing work! If you'd like to test out how closely you resemble a cat yourself, there's an optional ungraded exercise below, where you can test your own image.</p>
# <p>Great work and hope to see you in the next course!</p>
#
#
# <p><a name="7"></a></p>
# <h2 id="7---Test-with-your-own-image-(optional/ungraded-exercise)">7 - Test with your own image (optional/ungraded exercise)<a class="anchor-link" href="#7---Test-with-your-own-image-(optional/ungraded-exercise)">¶</a></h2><p>From this point, if you so choose, you can use your own image to test the output of your model. To do that follow these steps:</p>
# <ol>
# <li>Click on "File" in the upper bar of this notebook, then click "Open" to go on your Coursera Hub.</li>
# <li>Add your image to this Jupyter Notebook's directory, in the "images" folder</li>
# <li>Change your image's name in the following code</li>
# <li>Run the code and check if the algorithm is right (1 = cat, 0 = non-cat)!</li>
# </ol>
#
# +
## START CODE HERE ##
my_image = "my_image.jpg" # change this to the name of your image file
my_label_y = [1] # the true class of your image (1 -> cat, 0 -> non-cat)
## END CODE HERE ##

# Load the image, resize to the network's input resolution, then flatten
# and normalise it exactly like the training data was preprocessed.
fname = "images/" + my_image
image = np.array(Image.open(fname).resize((num_px, num_px)))
plt.imshow(image)
image = image / 255.
image = image.reshape((1, num_px * num_px * 3)).T   # column vector (12288, 1)
my_predicted_image = predict(image, my_label_y, parameters)

print ("y = " + str(np.squeeze(my_predicted_image)) + ", your L-layer model predicts a \"" + classes[int(np.squeeze(my_predicted_image)),].decode("utf-8") + "\" picture.")
# -
#
# <p><strong>References</strong>:</p>
# <ul>
# <li>for auto-reloading external module: <a href="http://stackoverflow.com/questions/1907993/autoreload-of-modules-in-ipython">http://stackoverflow.com/questions/1907993/autoreload-of-modules-in-ipython</a></li>
# </ul>
#
| C1_Neural Networks and Deep Learning/Deep Neural Network - Application.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# language: python
# name: python3
# ---
# # Gradient Boosting with XGBoost in Python
# * ref http://machinelearningmastery.com/data-preparation-gradient-boosting-xgboost-python/
# +
# multiclass classification
import pandas
#import numpy
import xgboost
# NOTE(review): sklearn.cross_validation was deprecated in 0.18 and removed
# in 0.20, so this import only works on old scikit-learn versions; the same
# train_test_split lives in sklearn.model_selection (imported alongside).
from sklearn import model_selection,cross_validation
from sklearn.metrics import accuracy_score
from sklearn.preprocessing import LabelEncoder
# %matplotlib inline
import matplotlib.pyplot as plt

# load data
# NOTE(review): header=1 treats the *second* file row as the header and
# discards the first — confirm iris.csv really has a leading extra row.
data = pandas.read_csv('iris.csv', header=1)
dataset = data.values
# split data into X and y: four numeric features, species label in column 4
X = dataset[:,0:4]
Y = dataset[:,4]
# encode string class values as integers
label_encoder = LabelEncoder()
label_encoder = label_encoder.fit(Y)
label_encoded_y = label_encoder.transform(Y)
seed = 7
test_size = 0.33
X_train, X_test, y_train, y_test = cross_validation.train_test_split(X, label_encoded_y, test_size=test_size, random_state=seed)
# fit model on training data with default hyperparameters
model = xgboost.XGBClassifier()
model.fit(X_train, y_train)
print(model)
# make predictions for test data
y_pred = model.predict(X_test)
predictions = [round(value) for value in y_pred]
# evaluate predictions
accuracy = accuracy_score(y_test, predictions)
print("Accuracy: %.2f%%" % (accuracy * 100.0))
# -
# plot the important features #
fig, ax = plt.subplots() #figsize=(12,18)
#xgboost.plot_importance(model, max_num_features=50, height=0.8, ax=ax)
xgboost.plot_importance(model, height=0.8, ax=ax)
plt.show()
| xgboost/Feature_Importance.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3 (ipykernel)
# language: python
# name: python3
# ---
# # Imdb Movie Reviews Classification
# #### Notebook Configurations
# +
# Widen the notebook cells to the full width of the browser window.
from IPython.display import display, HTML

full_width_style = HTML("<style>.container { width:100% !important; }</style>")
display(full_width_style)
# -
# #### Library Imports
# +
import random
import numpy as np
import matplotlib.pyplot as plt
from keras.datasets import imdb
# -
# ### Load dataset
# Load the IMDB reviews, keeping only the 10,000 most frequent words.
# Each review is a list of word indices; each label is 0 (negative) or 1 (positive).
(train_data, train_labels), (test_data, test_labels) = imdb.load_data(num_words=10000)
# #### Decode any review
# +
# word_index maps word -> integer rank; invert it so we can map indices back to words
word_index = imdb.get_word_index()
reverse_word_index = {v: k for k, v in word_index.items()}

def decode_review(review):
    """Convert a review (list of word indices) back into a readable string.

    Keras reserves indices 0, 1 and 2 for padding / start-of-sequence /
    unknown, so every stored index is offset by 3 relative to ``word_index``.
    Unknown indices are rendered as "?" — the previous ``get(word)`` could
    return ``None`` and crash ``str.join`` with a TypeError.
    """
    return " ".join(reverse_word_index.get(word - 3, "?") for word in review)
# -
decode_review(train_data[5])
# #### Vectorize Data
#
# Our train and test data are arrays of integers which we cannot feed to our neural network.
# So we need to convert the data to `tensors` which a neural network will understand.
# Let's OneHot encode our data so each datapoint has a fix width of `10000` with `1`'s in places where a common word exists and `0` otherwise.
def vectorize_sequences(sequences, dimension=10000):
    """Multi-hot encode integer sequences into a (len(sequences), dimension) array.

    Row i contains 1.0 in every column listed in sequences[i] and 0.0 elsewhere,
    producing a fixed-width float tensor a dense network can consume.
    """
    encoded = np.zeros((len(sequences), dimension))
    for row, word_indices in enumerate(sequences):
        encoded[row, word_indices] = 1.0
    return encoded
# multi-hot encode both splits into (n_reviews, 10000) float arrays
x_train = vectorize_sequences(train_data)
x_test = vectorize_sequences(test_data)
x_train, x_test
# Similarly, the labels need to be converted to float32 tensors as well
y_train = np.asarray(train_labels).astype("float32")
y_test = np.asarray(test_labels).astype("float32")
y_train, y_test
# #### Model Definition
#
# We have inputs as vectors and labels as `scalars.`
# - The type of network that works best for such type of problems is a simple stack of fully connected `(dense)` layers with `relu` activations.
#
# Let's use
# - two intermediate layers with `16 hidden units` each having `relu` activation
# - a third layer that will output a `scalar` prediction having `sigmoid` activation
from keras import models
from keras import layers
# #### Build model architecture
# Stack of fully-connected layers: two hidden layers of 16 relu units and a
# single sigmoid output unit emitting the probability the review is positive.
model = models.Sequential()
model.add(layers.Dense(16, activation="relu", input_shape=(10000,)))
model.add(layers.Dense(16, activation="relu"))
model.add(layers.Dense(1, activation="sigmoid"))
# #### Compiling the Model
# binary classification -> binary cross-entropy loss; accuracy tracked per epoch
model.compile(optimizer="rmsprop", loss="binary_crossentropy", metrics=["accuracy"])
# #### Validation
#
# Let's create a validation set by setting apart `10000` samples from the original training data
# +
# first 10,000 samples become the validation set, the remainder trains the model
x_val, y_val = x_train[:10000], y_train[:10000]
partial_x_train, partial_y_train = x_train[10000:], y_train[10000:]
# -
# #### Train
# fit returns a History object whose .history dict holds per-epoch metrics
history = model.fit(
    x=partial_x_train,
    y=partial_y_train,
    epochs=20,
    batch_size=512,
    validation_data=(x_val, y_val),
)
# #### Plot training and validation loss
# +
# pull the per-epoch metrics recorded by model.fit for plotting
history_dict = history.history
acc = history_dict["accuracy"]
val_acc = history_dict["val_accuracy"]
loss_values = history_dict["loss"]
val_loss_values = history_dict["val_loss"]
epochs = range(1, len(acc) + 1)
# +
# training vs validation loss: divergence indicates overfitting
plt.plot(epochs, loss_values, label="Training loss")
plt.plot(epochs, val_loss_values, label="Validation loss")
plt.title("Training and Validation loss")
plt.xlabel("Epochs")
plt.ylabel("Loss")
plt.legend()
plt.show()
# -
# #### Plot Training Vs Validation Accuracy
# +
plt.plot(epochs, acc, label="Training accuracy")
plt.plot(epochs, val_acc, label="Validation accuracy")
plt.title("Training and Validation accuracy")
plt.xlabel("Epochs")
plt.ylabel("Accuracy")
plt.legend()
plt.show()
# -
# #### Observations
#
# - The model clearly seem to overfit after `4` epochs
# - We can see how to validation loss is increasing after each epoch
# - Since we have a comparatively small dataset we should avoid over complexity
#
# Let's train a new network from scratch for `4` epochs and then evaluate on test data
# #### Retrain
# +
# rebuild the same architecture and train for only 4 epochs, stopping before
# the overfitting observed in the 20-epoch run sets in
model = models.Sequential()
model.add(layers.Dense(16, activation="relu", input_shape=(10000,)))
model.add(layers.Dense(16, activation="relu"))
model.add(layers.Dense(1, activation="sigmoid"))
model.compile(optimizer="rmsprop", loss="binary_crossentropy", metrics=["accuracy"])
history = model.fit(x_train, y_train, epochs=4, batch_size=512)
# -
test_loss, test_acc = model.evaluate(x_test, y_test)
print(f"Test Loss: {test_loss*100:.2f}%\nTest Accuracy: {test_acc*100:.2f}%")
# #### Observation
#
# - So we are able to achieve an accuracy of `88%` with this simple technique.
# - Remember each review in our original dataset just has the top `10000` words in it.
# - Now let's do the predictions on the entire test dataset and show a random review with its `actual` and `predicted` sentiment
# #### Predict
y_pred = model.predict(x_test)
# pick a random test review; randint's upper bound is INCLUSIVE, so the
# original randint(0, 25000) could return an out-of-range index — bound it
# by the actual dataset length instead
n = random.randint(0, len(test_data) - 1)
review, sentiment, sentiment_pred = (
    decode_review(test_data[n]),
    test_labels[n],
    y_pred[n][0],
)
print(
    f"Review\n\n{review}\n\nSentiment Actual: {sentiment}\nSentiment Prediction Probability: {sentiment_pred:.2f}"
)
# ### Further experimentation
#
# We try a variant of the model that uses a `tanh` activation in the first hidden layer
# +
model = models.Sequential()
model.add(layers.Dense(16, activation="tanh", input_shape=(10000,)))
model.add(layers.Dense(16, activation="relu"))
model.add(layers.Dense(1, activation="sigmoid"))
model.compile(optimizer="rmsprop", loss="binary_crossentropy", metrics=["accuracy"])
history = model.fit(
partial_x_train,
partial_y_train,
epochs=15,
batch_size=512,
validation_data=(x_val, y_val),
)
# -
d = history.history
loss, accuracy, val_loss, val_accuracy = (
d["loss"],
d["accuracy"],
d["val_loss"],
d["val_accuracy"],
)
epochs = range(1, len(loss) + 1)
# +
plt.plot(epochs, loss, label="Training Loss")
plt.plot(epochs, val_loss, label="Validation Loss")
plt.xlabel("Epochs")
plt.ylabel("Loss")
plt.title("Training Loss Vs Validation Loss")
plt.legend()
plt.show()
# +
plt.plot(epochs, accuracy, label="Training Accuracy")
plt.plot(epochs, val_accuracy, label="Validation Accuracy")
plt.xlabel("Epochs")
plt.ylabel("Accuracy")
plt.title("Training Accuracy Vs Validation Accuracy")
plt.legend()
plt.show()
# +
test_loss, test_acc = model.evaluate(x_test, y_test)
print(
f"""
test loss : {test_loss*100:.2f}%
test accuracy: {test_acc*100:.2f}%
"""
)
# -
# ### The End
| notebooks/imdb_movie_reviews_classification.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3 (ipykernel)
# language: python
# name: python3
# ---
# + tags=[]
import requests
from notebook.notebookapp import list_running_servers

# get server info so that we can make api calls against the running
# Jupyter server's REST interface (the /api/terminals endpoints)
servers = list(list_running_servers())
info = next(list_running_servers())
base_url = info['url']
api_url = base_url + 'api/terminals'
api_token = info['token']
auth_headers = {'Authorization': 'token ' + api_token}

# get list of current terminals so that we can reuse them if enough exist;
# otherwise we will create new ones as needed
r = requests.get(url=api_url, headers=auth_headers)
TERMINALS = r.json()


def _terminal_name(index):
    """Return the name of the existing terminal at `index`, creating a new
    terminal through the REST API when fewer than index+1 are running.

    Replaces three copy-pasted try/except IndexError blocks."""
    try:
        return TERMINALS[index]['name']
    except IndexError:
        resp = requests.post(url=api_url, headers=auth_headers)
        return resp.json()['name']


EDITORTERM = _terminal_name(0)    # terminal for our editor
BUILDTERM = _terminal_name(1)     # terminal for running our builds
DEBUGGERTERM = _terminal_name(2)  # terminal for running gdb
# -
# # Lets write some assembly code
#
# In this chapter we will get going and write some simple assembly code, "build" it and run it within a "debugger" so that we can get a sense of how everything fits together.
#
# To do this we will use three terminal sessions one terminal to run an ascii editor, one to run the shell on its own so that we can compile our source code, and one to run our debugger. We use three different terminals so that we can stay organized and avoid having to constantly stop and start the different programs. In some sense we are using multiple terminals to form our own Integrated Development Environmnt (IDE) where we are using each terminal as if it were a subwindow of our IDE. In actuality the editor that we will be using (emacs) has support for integrating all three tasks within itself but for the moment we will keep things seperate to make sure we know what is going on and not tie ourselves to this particular editor (there are many others including the popular VIM).
# ## EDITOR : Terminal to run our editor
#
# An editor allows us to create and update plain text ASCII files. An editor is the core tool of a programmer! Programming is all about writing software in the form of ASCII files that encode what we want the computer to do in some language or another (in our case assembly and C). So far you may have been taught to use various Integrated Development Environments (IDEs) that include an editor, build system and debugger within them. In this class we will strip things down to their traditional bare essentials so you can get an idea of how things are really working and how IDEs are themselves constructed.
# + cell_style="split" tags=["hide_input"]
# Embed the editor terminal inline so we can edit files without leaving the notebook.
from IPython.display import IFrame

editor_frame = IFrame('/terminals/' + EDITORTERM, 1000, 600)
display(editor_frame)
# -
# - `emacs popcnt.S`
#
# In the above terminal we will run the `emacs` editor to create and write our first assembly program.
# To do this issue the above command. Emacs is like many of our tools cryptic but very powerful. In reality emacs itself contains a programming language (called elisp) that is used to write very powerful extension packages for it. That being said we are going to stick to the basics.
#
# At the top you will see a menu bar, in the middle you will see blank area where we will type in our file contents, and at the bottom is a status line and an area for entering emacs commands in by hand (such as convert all occurrances of X to Y). In time you can learn about the commands and how to issue them for the moment you can use the menubar for most of the things you need to do.
#
# To access the menu bar functions press F10. By default emacs does not automatically save your file when you make changes. However it does show you in the status bar if you have changes that have not been saved. If this is the case you will see `:**` towards the left of the status bar. You must explicitly save changes by hand. To do this you can use the menubar File-Save or you can press `control-x` followed by `control-s` (note you can just keep the control key pressed then press x, release x, press s, release s). Doing so will write the ASCII contents of the emacs buffer to a file called `popcnt.S`
#
# Remember you are running emacs in the terminal so you cannot move the cursor with your mouse you will need to navigate using the arrow keys along with page-up and down to navigate your document. There are many hot-key sequences that you can use in time to acclerate you work. But they too numerous to get into now but there are many tutorials and cheetsheets online to help you get going.
#
# Now enter the following code and save it.
#
# ```assembly_x86
# .intel_syntax noprefix // set assembly language format to intel
# .text // linker section <directive>
#
# .global _start // linker symbol type <directive>
# _start: // introduce a symbolic (human readable) label for "this" address
# popcnt rax, rbx // ok the single intel opcode (instruction) that makes up our program
# ```
#
# The following is a version of this code that is verbosely documented: [popcnt.S](src/popcnt.S)
# ## Building: Terminal to run our build commands
# Embed the build terminal inline so we can run the assembler/linker from here.
from IPython.display import IFrame

build_frame = IFrame('/terminals/' + BUILDTERM, 1000, 600)
display(build_frame)
# - gcc --static -g -nostartfiles -nolibc popcnt.S -o popcnt
#
# To execute our code we must convert the "source" into a binary executable that can be loaded into memory and contains all the data and instructions (at the right locations). To do this we must use programs that converts our assembly code into the "correct" raw binary values and assigns the those values to address. The OS will load these values to the specified locations when we ask it to run our program.
#
# This process of converting human readable source code into a binary executable format is often referred to as "building". The tools we will use are an assembler and a linker.
#
# The assembler's has been written to convert the human names ("memonics") of the instructions in our source files into the binary code that our CPU understands. There is no magic! The manufacuter of the CPU publishes a manual that defines what instructions the CPU supports. Each instruction has a human "memonic" (eg. `mov rax, <value>`) and the binary code that the CPU understands (eg. `mov rax, 0xdeadbeef` is `0x48,0xb8,0xef,0xbe,0xad,0xde,0x00,0x00,0x00,0x00`). Given the manual a programmer writes the assemble to go over our source files and translates what we write into the cpu binary code. The programmer extends the memonics with what are called "directives" such as `.intel_syntax, .text, .global, .quad, etc` that we can use to control and direct the assemble. To fully understand all the syntax and what we can do one must look at both the manual for the CPU and for the assembler. If all goes well and our program does not have any syntax errors then the assembler will generate a file with its output. This file is called an object file.
#
# We use a tool called a linker to process the object files that makes up our program to create a binary executable specific to our operating system and cpu. It is this file that is "really" our program. The linkers job is to prodess all our object files to create the binary with knowledge of where our OS wants things to be placed in memory (in our simple examples there is only one, later on we will have other object files from libraries of functions that we will want to use as well). Specifically the developers of the OS provide information to the linker that tells it the rules of where instructions and data can go. It is the linkers job to figure out what addresses each of the values that makes up our program should be given. As such it also needs to fix up our code so that the final addresses are reflect for each of the places in our code where we reference particular symbolic names. We will talk more about this later. Assuming all goes well and the linker does not flag any errors then it will produce a binary executable that the OS can load and run. One special task of the linker is to mark in the binary the address of the first instruction so that the OS can be sure to initialize the CPU correctly to start executing instructions from the right starting location -- this location is called the "entry point". Our linker by default assumes that our code contains a symbol named `_start`. If so the address it assigns to `_start` is what it will write into the executable as the entry point so that the OS can load and start our program correct. If we fail to define the `_start` label the linker will produce an warning and man an assumption. It is a bad idea to ignore warnings when programming at this level ... after all we know what assuming makes of you and me.
#
# So in the shell above we will run a command (`gcc --static -g -nostartfiles -nolibc popcnt.S -o popcnt`) that runs both the assembler and linker for us. We will have to pay close attention to see if there are any errors. If so we need to go up to the editor make changes and save those changes. Then try building again. We repeat this untill there are no build time errors. Remember the executable is different from the source any change we make to the program source code requires that we rerun the build process to update the binary. Remember just because there are no build time errors does not mean that our code is "right" or free of bugs.
#
# Later on we will see how to use another tool called make to further simplify and automate the building process.
# ## Debugger: Terminal to run our debugger -- actually it is much more than just a debugger
# Embed the debugger terminal inline so we can drive gdb from the notebook.
from IPython.display import IFrame

debugger_frame = IFrame('/terminals/' + DEBUGGERTERM, 1000, 600)
display(debugger_frame)
# + [markdown] tags=[]
# - `gdb -tui popcnt`
#
# The following are gdb commands and should be entered at the gdb prompt (aka they are not shell commands)
# - `set disassembly-flavor intel`
# - `tui new-layout 210 src 2 regs 5 status 1 cmd 1`
# - `layout 210`
# - `break _start`
# - `run`
# - `print /x &_start`
# - `x /16xb _start`
# - `x /4i _start`
# - `p $pc`
# - `p {$pc, $rax, $rbx}`
# - `p \t {$pc, $rax, $rbx}`
# - `step`
#
# Continue stepping until you get to the end
#
# `gdb` (or `gdb -tui`) which starts in a slightly more friendly mode) is a very powerful tool in the hands of a power user (that's you or soon to be). `gdb` is complicated and cryptic but allows you to not just trace your programs execution but it allows you to explore all aspects of the hardware that your program has access too. You can peek into the CPU and examine arbitrary memory locations. And perhaps even more cool you can change the CPU registers and any memory location on the fly while your program is running! It is going to take us a while to full explore all the power of gdb. But lets get started.
#
# If you type help you will get a list of the major commands that gdb support for the moment we are going to focus on the basics of following tasks:
# - inspecting memory : examining memory, disassembling memory
# - inspecting registers
# - setting breakpoints
# - starting execution
# - stepping instructions
# - quiting
#
# -
# Ok lets write a new program that does something else
| underthecovers/assembly/assembly.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3 (ipykernel)
# language: python
# name: python3
# ---
# # Feature Engineering with Open-Source
#
# In this notebook, we will reproduce the Feature Engineering Pipeline from the notebook 2 (02-Machine-Learning-Pipeline-Feature-Engineering), but we will replace, whenever possible, the manually created functions by open-source classes, and hopefully understand the value they bring forward.
# # Reproducibility: Setting the seed
#
# With the aim to ensure reproducibility between runs of the same notebook, but also between the research and production environment, for each step that includes some element of randomness, it is extremely important that we **set the seed**.
# +
# data manipulation and plotting
import pandas as pd
import numpy as np
import matplotlib.pyplot as plt
# for saving the pipeline
import joblib
# from Scikit-learn
from sklearn.model_selection import train_test_split
from sklearn.preprocessing import MinMaxScaler, Binarizer
# from feature-engine
from feature_engine.imputation import (
AddMissingIndicator,
MeanMedianImputer,
CategoricalImputer,
)
from feature_engine.encoding import (
RareLabelEncoder,
OrdinalEncoder,
)
from feature_engine.transformation import (
LogTransformer,
YeoJohnsonTransformer,
)
from feature_engine.selection import DropFeatures
from feature_engine.wrappers import SklearnTransformerWrapper
# to visualise al the columns in the dataframe
pd.pandas.set_option('display.max_columns', None)
# +
# load dataset (house-price training CSV; one row per house sale)
data = pd.read_csv('train.csv')
# rows and columns of the data
print(data.shape)
# visualise the dataset
data.head()
# -
# # Separate dataset into train and test
#
# It is important to separate our data intro training and testing set.
#
# When we engineer features, some techniques learn parameters from data. It is important to learn these parameters only from the train set. This is to avoid over-fitting.
#
# Our feature engineering techniques will learn:
#
# - mean
# - mode
# - exponents for the yeo-johnson
# - category frequency
# - and category to number mappings
#
# from the train set.
#
# **Separating the data into train and test involves randomness, therefore, we need to set the seed.**
# +
# Let's separate into train and test set (90/10 split).
# Remember to set the seed (random_state for this sklearn function)
# so the split — and everything learned from it — is reproducible.
X_train, X_test, y_train, y_test = train_test_split(
    data.drop(['Id', 'SalePrice'], axis=1), # predictive variables
    data['SalePrice'], # target
    test_size=0.1, # portion of dataset to allocate to test set
    random_state=0, # we are setting the seed here
)
X_train.shape, X_test.shape
# -
# # Feature Engineering
#
# In the following cells, we will engineer the variables of the House Price Dataset so that we tackle:
#
# 1. Missing values
# 2. Temporal variables
# 3. Non-Gaussian distributed variables
# 4. Categorical variables: remove rare labels
# 5. Categorical variables: convert strings to numbers
# 6. Standardize the values of the variables to the same range
# ## Target
#
# We apply the logarithm
# log-transform the target so its distribution is closer to Gaussian;
# model predictions will therefore be log-prices (map back with np.exp)
y_train = np.log(y_train)
y_test = np.log(y_test)
# ## Missing values
#
# ### Categorical variables
#
# We will replace missing values with the string "missing" in those variables with a lot of missing data.
#
# Alternatively, we will replace missing data with the most frequent category in those variables that contain fewer observations without values.
#
# This is common practice.
# +
# let's identify the categorical variables
# we will capture those of type object
# (note: dtypes are read from the full `data` frame, not the split sets)
cat_vars = [var for var in data.columns if data[var].dtype == 'O']
# MSSubClass is also categorical by definition, despite its numeric values
# (you can find the definitions of the variables in the data_description.txt
# file available on Kaggle, in the same website where you downloaded the data)
# lets add MSSubClass to the list of categorical variables
cat_vars = cat_vars + ['MSSubClass']
# cast all variables as categorical
X_train[cat_vars] = X_train[cat_vars].astype('O')
X_test[cat_vars] = X_test[cat_vars].astype('O')
# number of categorical variables
len(cat_vars)
# +
# make a list of the categorical variables that contain missing values
# (computed on the train set only, to avoid leaking test-set information)
cat_vars_with_na = [
    var for var in cat_vars
    if X_train[var].isnull().sum() > 0
]
# print percentage of missing values per variable
X_train[cat_vars_with_na ].isnull().mean().sort_values(ascending=False)
# +
# variables to impute with the string "Missing": those where 10% or more
# of the train rows are NA
with_string_missing = [
    var for var in cat_vars_with_na if X_train[var].isnull().mean() >= 0.1]

# variables to impute with the most frequent category: those with
# comparatively few NA (under 10%)
# FIX: the original used strict '>' and '<' on both sides, so a variable with
# exactly 10% missing values fell in neither list and kept its NaNs unimputed.
with_frequent_category = [
    var for var in cat_vars_with_na if X_train[var].isnull().mean() < 0.1]
# +
# I print the values here, because it makes it easier for
# later when we need to add this values to a config file for
# deployment
with_string_missing
# -
with_frequent_category
# +
# replace missing values with new label: "Missing"
# set up the class
cat_imputer_missing = CategoricalImputer(
    imputation_method='missing', variables=with_string_missing)
# fit the class to the train set
cat_imputer_missing.fit(X_train)
# the class learns and stores the parameters
cat_imputer_missing.imputer_dict_
# +
# replace NA by the "Missing" label in both splits
# IMPORTANT: note that we could store this class with joblib
X_train = cat_imputer_missing.transform(X_train)
X_test = cat_imputer_missing.transform(X_test)
# +
# replace missing values with most frequent category
# set up the class
cat_imputer_frequent = CategoricalImputer(
    imputation_method='frequent', variables=with_frequent_category)
# fit the class to the train set
cat_imputer_frequent.fit(X_train)
# the class learns and stores the parameters
cat_imputer_frequent.imputer_dict_
# +
# replace NA by the most frequent category in both splits
# IMPORTANT: note that we could store this class with joblib
X_train = cat_imputer_frequent.transform(X_train)
X_test = cat_imputer_frequent.transform(X_test)
# +
# check that we have no missing information in the engineered variables
X_train[cat_vars_with_na].isnull().sum()
# +
# check that test set does not contain null values in the engineered variables
[var for var in cat_vars_with_na if X_test[var].isnull().sum() > 0]
# -
# ### Numerical variables
#
# To engineer missing values in numerical variables, we will:
#
# - add a binary missing indicator variable
# - and then replace the missing values in the original variable with the mean
# +
# now let's identify the numerical variables
# (everything that is neither categorical nor the target)
num_vars = [
    var for var in X_train.columns if var not in cat_vars and var != 'SalePrice'
]
# number of numerical variables
len(num_vars)
# +
# make a list with the numerical variables that contain missing values
vars_with_na = [
    var for var in num_vars
    if X_train[var].isnull().sum() > 0
]
# print percentage of missing values per variable
X_train[vars_with_na].isnull().mean()
# -
# print, makes my life easier when I want to create the config
vars_with_na
# +
# add missing indicator: appends a binary "<var>_na" column per listed variable
missing_ind = AddMissingIndicator(variables=vars_with_na)
missing_ind.fit(X_train)
X_train = missing_ind.transform(X_train)
X_test = missing_ind.transform(X_test)
# check the binary missing indicator variables
X_train[['LotFrontage_na', 'MasVnrArea_na', 'GarageYrBlt_na']].head()
# +
# then replace missing data with the mean
# set the imputer
mean_imputer = MeanMedianImputer(
    imputation_method='mean', variables=vars_with_na)
# learn and store parameters from train set
mean_imputer.fit(X_train)
# the stored parameters (per-variable train means)
mean_imputer.imputer_dict_
# +
X_train = mean_imputer.transform(X_train)
X_test = mean_imputer.transform(X_test)
# IMPORTANT: note that we could save the imputers with joblib
# check that we have no more missing values in the engineered variables
X_train[vars_with_na].isnull().sum()
# +
# check that test set does not contain null values in the engineered variables
[var for var in vars_with_na if X_test[var].isnull().sum() > 0]
# -
# ## Temporal variables
#
# ### Capture elapsed time
#
# There is in Feature-engine 2 classes that allow us to perform the 2 transformations below:
#
# - [CombineWithFeatureReference](https://feature-engine.readthedocs.io/en/latest/creation/CombineWithReferenceFeature.html) to capture elapsed time
# - [DropFeatures](https://feature-engine.readthedocs.io/en/latest/selection/DropFeatures.html) to drop the unwanted features
#
# We will do the first one manually, so we take the opportunity to create 1 class ourselves for the course. For the second operation, we will use the DropFeatures class.
def elapsed_years(df, var):
    """Replace the year column `var` with the number of years elapsed
    between that year and the year the house was sold (YrSold).

    Mutates `df` in place and returns it for convenience.
    """
    df[var] = df['YrSold'].sub(df[var])
    return df
# convert each year variable into "years before the house was sold"
for var in ['YearBuilt', 'YearRemodAdd', 'GarageYrBlt']:
    X_train = elapsed_years(X_train, var)
    X_test = elapsed_years(X_test, var)
# +
# now we drop YrSold: once the elapsed-time features are computed,
# the raw sale year is no longer needed
drop_features = DropFeatures(features_to_drop=['YrSold'])
X_train = drop_features.fit_transform(X_train)
X_test = drop_features.transform(X_test)
# -
# ## Numerical variable transformation
#
# ### Logarithmic transformation
#
# In the previous notebook, we observed that the numerical variables are not normally distributed.
#
# We will transform the positive numerical variables with the logarithm in order to get a more Gaussian-like distribution.
# +
# log-transform strictly positive, skewed numericals towards a more
# Gaussian-like distribution (LogTransformer raises if a value is <= 0)
log_transformer = LogTransformer(
    variables=["LotFrontage", "1stFlrSF", "GrLivArea"])
X_train = log_transformer.fit_transform(X_train)
X_test = log_transformer.transform(X_test)
# -
# check that test set does not contain null values in the engineered variables
[var for var in ["LotFrontage", "1stFlrSF", "GrLivArea"] if X_test[var].isnull().sum() > 0]
# same for train set
[var for var in ["LotFrontage", "1stFlrSF", "GrLivArea"] if X_train[var].isnull().sum() > 0]
# ### Yeo-Johnson transformation
#
# We will apply the Yeo-Johnson transformation to LotArea.
# +
# Yeo-Johnson transform for LotArea (works with zero/negative values,
# unlike the plain log transform)
yeo_transformer = YeoJohnsonTransformer(
    variables=['LotArea'])
X_train = yeo_transformer.fit_transform(X_train)
X_test = yeo_transformer.transform(X_test)
# the learned parameter (lambda per transformed variable)
yeo_transformer.lambda_dict_
# -
# check absence of na in the train set
[var for var in X_train.columns if X_train[var].isnull().sum() > 0]
# check absence of na in the test set
# (iterates X_train.columns — assumes train/test share columns at this point; TODO confirm)
[var for var in X_train.columns if X_test[var].isnull().sum() > 0]
# ### Binarize skewed variables
#
# There were a few variables very skewed, we would transform those into binary variables.
#
# We can perform the below transformation with open source. We can use the [Binarizer](https://scikit-learn.org/stable/modules/generated/sklearn.preprocessing.Binarizer.html) from Scikit-learn, in combination with the [SklearnWrapper](https://feature-engine.readthedocs.io/en/latest/wrappers/Wrapper.html) from Feature-engine to be able to apply the transformation only to a subset of features.
#
# Instead, we are going to do it manually, to give us another opportunity to code the class as an in-house package later in the course.
# +
# heavily skewed variables are reduced to 0/1 flags (zero vs. non-zero)
skewed = [
    'BsmtFinSF2', 'LowQualFinSF', 'EnclosedPorch',
    '3SsnPorch', 'ScreenPorch', 'MiscVal'
]
# SklearnTransformerWrapper applies the sklearn Binarizer only to `skewed`,
# leaving every other column untouched
binarizer = SklearnTransformerWrapper(
    transformer=Binarizer(threshold=0), variables=skewed
)
X_train = binarizer.fit_transform(X_train)
X_test = binarizer.transform(X_test)
X_train[skewed].head()
# -
# ## Categorical variables
#
# ### Apply mappings
#
# These are variables which values have an assigned order, related to quality. For more information, check Kaggle website.
# +
# re-map strings to numbers, which determine quality
qual_mappings = {'Po': 1, 'Fa': 2, 'TA': 3, 'Gd': 4, 'Ex': 5, 'Missing': 0, 'NA': 0}
qual_vars = ['ExterQual', 'ExterCond', 'BsmtQual', 'BsmtCond',
'HeatingQC', 'KitchenQual', 'FireplaceQu',
'GarageQual', 'GarageCond',
]
for var in qual_vars:
X_train[var] = X_train[var].map(qual_mappings)
X_test[var] = X_test[var].map(qual_mappings)
# +
exposure_mappings = {'No': 1, 'Mn': 2, 'Av': 3, 'Gd': 4}
var = 'BsmtExposure'
X_train[var] = X_train[var].map(exposure_mappings)
X_test[var] = X_test[var].map(exposure_mappings)
# +
# Basement finish type: ordered from unfinished up to good living quarters.
finish_mappings = {'Missing': 0, 'NA': 0, 'Unf': 1, 'LwQ': 2, 'Rec': 3, 'BLQ': 4, 'ALQ': 5, 'GLQ': 6}
finish_vars = ['BsmtFinType1', 'BsmtFinType2']
for var in finish_vars:
    for frame in (X_train, X_test):
        frame[var] = frame[var].map(finish_mappings)
# +
# Garage interior finish: unfinished < rough finished < finished.
garage_mappings = {'Missing': 0, 'NA': 0, 'Unf': 1, 'RFn': 2, 'Fin': 3}
var = 'GarageFinish'
for frame in (X_train, X_test):
    frame[var] = frame[var].map(garage_mappings)
# +
# Fence quality: ordered from minimum wire up to good privacy.
fence_mappings = {'Missing': 0, 'NA': 0, 'MnWw': 1, 'GdWo': 2, 'MnPrv': 3, 'GdPrv': 4}
var = 'Fence'
for frame in (X_train, X_test):
    frame[var] = frame[var].map(fence_mappings)
# -
# sanity check: list any train-set columns that still contain missing values
[col for col in X_train.columns if X_train[col].isnull().any()]
# ### Removing Rare Labels
#
# For the remaining categorical variables, we will group those categories that are present in less than 1% of the observations. That is, all values of categorical variables that are shared by less than 1% of houses will be replaced by the string "Rare".
#
# To learn more about how to handle categorical variables visit our course [Feature Engineering for Machine Learning](https://www.udemy.com/course/feature-engineering-for-machine-learning/?referralCode=A855148E05283015CF06) in Udemy.
# +
# full list of ordinal (quality-style) variables handled above
qual_vars = qual_vars + finish_vars + ['BsmtExposure', 'GarageFinish', 'Fence']

# categorical variables still encoded as raw strings (not yet re-mapped)
cat_others = [v for v in cat_vars if v not in qual_vars]
len(cat_others)
# -
cat_others
# +
# Group infrequent categories: any label present in fewer than 1% of rows
# (tol=0.01) is replaced by the string 'Rare'. n_categories=1 means the
# grouping is applied regardless of how many categories a variable has.
rare_encoder = RareLabelEncoder(tol=0.01, n_categories=1, variables=cat_others)
# learn the frequent labels from the training set only (no leakage from test)
rare_encoder.fit(X_train)
# the frequent labels per variable are stored on the fitted encoder,
# so the object can be persisted and reused to score new data later
rare_encoder.encoder_dict_
# -
X_train = rare_encoder.transform(X_train)
X_test = rare_encoder.transform(X_test)
# ### Encoding of categorical variables
#
# Next, we need to transform the strings of the categorical variables into numbers.
#
# We will do it so that we capture the monotonic relationship between the label and the target.
#
# To learn more about how to encode categorical variables visit our course [Feature Engineering for Machine Learning](https://www.udemy.com/course/feature-engineering-for-machine-learning/?referralCode=A855148E05283015CF06) in Udemy.
# +
# Ordinal encoding ordered by target: categories are replaced with integers
# ranked by the mean of y_train per category, creating a monotonic
# category -> target relationship for linear models.
cat_encoder = OrdinalEncoder(encoding_method='ordered', variables=cat_others)
# learn the category -> integer mappings from the training data
cat_encoder.fit(X_train, y_train)
# mappings are stored on the fitted encoder and can be persisted for scoring
cat_encoder.encoder_dict_
# -
X_train = cat_encoder.transform(X_train)
X_test = cat_encoder.transform(X_test)
# check absence of na in the train set
[var for var in X_train.columns if X_train[var].isnull().sum() > 0]
# check absence of na in the test set
# NOTE(review): test-set categories unseen during fit would appear here as NaN
[var for var in X_test.columns if X_test[var].isnull().sum() > 0]
# +
# let me show you what I mean by monotonic relationship
# between labels and target
def analyse_vars(train, y_train, var):
    # Plot the median (log-of-log) sale price per encoded category of `var`,
    # to visualise the monotonic label -> target relationship.
    #
    # train:   feature DataFrame to plot from.
    #          FIX: previously this ignored the parameter and used the global
    #          X_train directly; now the argument is actually used.
    # y_train: target Series (already log-transformed upstream — hence the
    #          extra np.log here and the 2.2-2.6 y-axis limits).
    # var:     name of the encoded categorical column to group by.
    tmp = pd.concat([train, np.log(y_train)], axis=1)
    tmp.groupby(var)['SalePrice'].median().plot.bar()
    plt.title(var)
    plt.ylim(2.2, 2.6)
    plt.ylabel('SalePrice')
    plt.show()

for var in cat_others:
    analyse_vars(X_train, y_train, var)
# -
# The monotonic relationship is particularly clear for the variables MSZoning and Neighborhood. Note how, the higher the integer that now represents the category, the higher the mean house sale price.
#
# (remember that the target is log-transformed, that is why the differences seem so small).
# ## Feature Scaling
#
# For use in linear models, features need to be scaled. We will scale features to the minimum and maximum values:
# +
# Scale every feature to the [0, 1] range, learning min/max from train only.
scaler = MinMaxScaler()
scaler.fit(X_train)

# sklearn returns bare numpy arrays; rebuild DataFrames to keep column names
train_columns = X_train.columns
X_train = pd.DataFrame(scaler.transform(X_train), columns=train_columns)
X_test = pd.DataFrame(scaler.transform(X_test), columns=train_columns)
# -
X_train.head()
# # Conclusion
#
# We now have several classes with parameters learned from the training dataset, that we can store and retrieve at a later stage, so that when a colleague comes with new data, we are in a better position to score it faster.
#
# Still:
#
# - we would need to save each class
# - then we could load each class
# - and apply each transformation individually.
#
# Which sounds like a lot of work.
#
# The good news is, we can reduce the amount of work, if we set up all the transformations within a pipeline.
#
# **IMPORTANT**
#
# In order to set up the entire feature transformation within a pipeline, we still need to create a class that can be used within a pipeline to map the categorical variables with the arbitrary mappings, and also, to capture elapsed time between the temporal variables.
#
# We will take that opportunity to create an in-house package.
| section-04-research-and-development/06-feature-engineering-with-open-source.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: 'Python 3.8.8 64-bit (''base'': conda)'
# name: python388jvsc74a57bd0dce69896fdb445434427c12e791455610f9ef8e6bb07ea975426634cd43b3db3
# ---
# +
import pandas as pd
import numpy as np
import re
# preprocess
from sklearn.feature_extraction.text import TfidfTransformer
from sklearn.feature_extraction.text import TfidfVectorizer
from sklearn.feature_selection import GenericUnivariateSelect, chi2
from sklearn.feature_selection import GenericUnivariateSelect, chi2
from sklearn.model_selection import train_test_split
# model
from sklearn.neural_network import MLPClassifier
import tensorflow as tf
from tensorflow.keras import datasets, layers, models
import matplotlib.pyplot as plt
# + pycharm={"name": "#%%\n"}
# load raw data csv files
# NOTE(review): paths are relative to the notebook's working directory —
# confirm the resources/ folder is present before running
"""
attributes: name, n_steps, n_ingredients, steps, ingredients
"""
df_train = pd.read_csv("resources/datasets/recipe_train.csv")
df_test = pd.read_csv("resources/datasets/recipe_test.csv")
# + pycharm={"name": "#%%\n"}
def preprocess(df):
    """Combine the 'steps', 'ingredients' and 'name' columns of each recipe
    into a single cleaned text string.

    Each field has its punctuation stripped and runs of whitespace collapsed
    to single spaces; the cleaned parts are then concatenated in the order
    steps + ingredients + name (separated by single spaces). Returns a numpy
    object array with one combined string per row.
    """
    whitespace_re = re.compile(r"\s+")

    def clean(raw):
        # drop anything that is not a word character or whitespace,
        # then normalise the remaining whitespace
        text = re.sub(r'[^\w\s]', '', str(raw))
        return whitespace_re.sub(" ", text).strip()

    combined = [
        clean(steps) + ' ' + clean(ingr) + ' ' + clean(name)
        for steps, ingr, name in zip(df['steps'], df['ingredients'], df['name'])
    ]
    return np.array(combined, dtype=object)
# + pycharm={"name": "#%%\n"}
# build the combined text feature and grab the target
X = preprocess(df_train)
y = df_train['duration_label']
# include both uni-grams and bi-grams
# exclude stop words
# sublinear_tf applies 1 + log(tf) damping to raw term frequencies
vectorizer = TfidfVectorizer(sublinear_tf=True, ngram_range=(1,2), analyzer='word', stop_words= 'english')
X = vectorizer.fit_transform(X)
print("Shape of X (nrow, ncol):", X.shape)
# + pycharm={"name": "#%%\n"}
# keep the top 20% of features ranked by chi-squared score against the target
fselect = GenericUnivariateSelect(chi2, mode='percentile', param=20)
X_new = fselect.fit_transform(X, y)
X_new.shape
# + pycharm={"name": "#%%\n"}
# Hold out 20% of the data for evaluation.
# FIX: test_size=20 requested an absolute test set of only 20 samples
# (sklearn treats an int as a row count); 0.2 gives the intended 20% split.
X_train, X_test, y_train, y_test = train_test_split(X_new, y, test_size=0.2)
mlp = MLPClassifier(random_state=1, max_iter=200, verbose=True)
mlp.fit(X_train, y_train)
# + pycharm={"name": "#%%\n"}
# mean accuracy on the held-out split
mlp.score(X_test, y_test)
# + pycharm={"name": "#%%\n"}
# rebuild the features from scratch, this time for the real train/test files
X_train = preprocess(df_train)
X_test = preprocess(df_test)
y_train = df_train['duration_label']
# NOTE(review): the vectorizer is fitted on train+test combined (transductive
# tf-idf statistics) — acceptable for a competition, but a leakage concern in
# a production setting
X = np.concatenate((X_train, X_test), axis=0)
# transform into sparse
vectorizer = TfidfVectorizer(sublinear_tf=True, ngram_range=(1,2), analyzer='word', stop_words= 'english')
vectorizer.fit(X)
X_train = vectorizer.transform(X_train)
X_test = vectorizer.transform(X_test)
# feature selection
# keep the top 20% of features by chi-squared score, fitted on train only
fselect = GenericUnivariateSelect(chi2, mode='percentile', param=20)
fselect.fit(X_train, y_train)
X_train_new = fselect.transform(X_train)
X_test_new = fselect.transform(X_test)
# + pycharm={"name": "#%%\n"}
# early_stopping reserves part of the training data as a validation set
mlp = MLPClassifier(max_iter=200, verbose=True, early_stopping=True)
mlp.fit(X_train_new, y_train)
# + pycharm={"name": "#%%\n"}
predicts = mlp.predict(X_test_new)
# + pycharm={"name": "#%%\n"}
# build the 1-indexed submission file expected by the grader
ids = np.array(range(len(predicts))) + 1
output = pd.DataFrame({'id': ids, 'duration_label': predicts})
output.to_csv('output_test.csv', index=False)
# + pycharm={"name": "#%%\n"}
| test/test.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3 (ipykernel)
# language: python
# name: python3
# ---
# # Similarity
# We will work on applying similarity: Jaccard and Cosine similarity. This exercise is a simple application of the lecture.
# Begin by importing the needed libraries:
# import needed libraries
import nltk
import numpy as np
import pandas as pd
# We will work with the following examples:
A = "Outside the classroom, Stallman pursued his studies with even more diligence, rushing off to fulfill his laboratory-assistant duties at Rockefeller University during the week and dodging the Vietnam protesters on his way to Saturday school at Columbia. It was there, while the rest of the Science Honors Program students sat around discussing their college choices, that Stallman finally took a moment to participate in the preclass bull session."
B = "To facilitate the process, AI Lab hackers had built a system that displayed both the source and display modes on a split screen. Despite this innovative hack, switching from mode to mode was still a nuisance."
C = "With no dorm and no dancing, Stallman's social universe imploded. Like an astronaut experiencing the aftereffects of zero-gravity, Stallman found that his ability to interact with nonhackers, especially female nonhackers, had atrophied significantly. After 16 weeks in the AI Lab, the self confidence he'd been quietly accumulating during his 4 years at Harvard was virtually gone."
# Begin by computing the Jaccard Similarity J of all possibilities:
# * J(A, B)
# * J(B, C)
# * J(A, C)
# +
# Jaccard similarity of two token sets: |X ∩ Y| / |X ∪ Y|
a = set(A.split())
b = set(B.split())
c = set(C.split())

def jaccard(x, y):
    # ratio of shared tokens to all distinct tokens across both texts
    return len(x & y) / len(x | y)

jacAB = jaccard(a, b)
jacBC = jaccard(b, c)
jacAC = jaccard(a, c)

print(jacAB)
print(jacBC)
print(jacAC)
# -
# What are the closest to the other according to Jaccard Similarity?
#
# Now let's do the same using TF-IDF and Cosine Similarity. Compute the TF-IDF and cosine similarities and print them.
# TODO: compute the TF-IDF of A, B and C and the cosine similarities of all possibilities
# Is it consistent with the Jaccard values?
| Lab 5/Similarity.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# language: python
# name: python3
# ---
# # QCD Estimation
#
# ## Generate histograms
# +
import zdb
import glob
import os
import oyaml as yaml
import copy
import numpy as np
import pandas as pd
import matplotlib.pyplot as plt
import dftools
import scipy
import pysge
from tqdm.auto import tqdm
# use the CMS plotting style
# NOTE(review): 'cms' is not a built-in matplotlib style — it must be
# installed as a custom style sheet for this call to succeed
plt.style.use('cms')
# global figure defaults for all plots in this notebook
plt.rcParams.update({
    "figure.dpi": 150,
    "figure.figsize": (4, 3),
    "lines.markersize": 3,
    "errorbar.capsize": 2,
    "font.size": 10.,
})
# -
def generate_yaml(hists, selection, filepath, tables={"central": "Events"}, systs=[], mc=False):
    """Build a zdb query configuration dict for 2D histogramming across
    analysis regions (signal, QCD-inverted and lepton control regions).

    Arguments:
        hists     -- list of (varname0, varname1) pairs; one 2D histogram
                     definition is created per pair, keyed "varname0_varname1"
        selection -- list of baseline cut strings, AND-ed with each region's cuts
        filepath  -- glob pattern matching the input files
        tables    -- mapping of table label -> table name in the inputs
                     (NOTE(review): mutable default argument; never mutated here)
        systs     -- list of (label, expression) weight-variation pairs,
                     applied only when mc is True
        mc        -- True for simulation (event weights, MC-style triggers and
                     tau selections); False for data (run-dependent triggers,
                     explicit lepton/tau veto counts)

    Returns:
        dict with a "query" section (groupby/tables/eval/cutflows/hists)
        and a sorted "files" list.
    """
    # Per-histogram column definitions: bookkeeping labels, bin variables,
    # raw count and (squared) weight sums. "{table_name}", "{selection_name}"
    # and "{weight}" are template placeholders that zdb fills per cutflow.
    hists_def = {
        "_".join(k): [
            {"table": "df: '{table_name}'"},
            {"varname0": "df: '{}'".format(k[0])},
            {"varname1": "df: '{}'".format(k[1])},
            {"selection": "df: '{selection_name}'"},
            {"parent": "df: df.parent"},
            {"binvar0": "df: df.{}_bin".format(k[0])},
            {"binvar1": "df: df.{}_bin".format(k[1])},
            {"count": "df: 1."},
            {"sum_w": "df: {weight}"},
            {"sum_ww": "df: df.sum_w**2"},
        ] for k in hists
    }
    # for MC, add one varied weight (and its square) per systematic
    if mc:
        for key, hdef in hists_def.items():
            for label, vari in systs:
                hdef.extend([
                    {"sum_w_{}".format(label): "df: df.sum_w*({})".format(vari)},
                    {"sum_ww_{}".format(label): "df: (df.sum_w*({}))**2".format(vari)},
                ])
    # Region definitions: baseline selection plus trigger, min-delta-phi
    # (inverted for the "QCD" regions) and lepton multiplicity/mass cuts.
    monojet_selection = selection + ["METTriggered", "MinDPhiJ1234METnoX>0.5"]
    monojetqcd_selection = selection + ["METTriggered", "MinDPhiJ1234METnoX<=0.5"]
    singlemuon_selection = selection + ["METTriggered", "MinDPhiJ1234METnoX>0.5", "nMuonSelection==1", "MTW>30.", "MTW<125."]
    singlemuonqcd_selection = selection + ["METTriggered", "MinDPhiJ1234METnoX<=0.5", "nMuonSelection==1", "MTW>30.", "MTW<125."]
    doublemuon_selection = selection + ["METTriggered", "MinDPhiJ1234METnoX>0.5", "nMuonSelection==2", "MLL>71.", "MLL<111.", "LeptonCharge==0."]
    doublemuonqcd_selection = selection + ["METTriggered", "MinDPhiJ1234METnoX<=0.5", "nMuonSelection==2", "MLL>71.", "MLL<111.", "LeptonCharge==0."]
    singleelectron_selection = selection + ["SingleElectronTriggered", "MinDPhiJ1234METnoX>0.5", "nElectronSelection==1", "MTW>30.", "MTW<125.", "MET_pt>100."]
    singleelectronqcd_selection = selection + ["SingleElectronTriggered", "MinDPhiJ1234METnoX<=0.5", "nElectronSelection==1", "MTW>30.", "MTW<125.", "MET_pt>100."]
    doubleelectron_selection = selection + ["SingleElectronTriggered", "MinDPhiJ1234METnoX>0.5", "nElectronSelection==2", "MLL>71.", "MLL<111.", "LeptonCharge==0."]
    doubleelectronqcd_selection = selection + ["SingleElectronTriggered", "MinDPhiJ1234METnoX<=0.5", "nElectronSelection==2", "MLL>71.", "MLL<111.", "LeptonCharge==0."]
    singletau_selection = selection + ["METTriggered", "MinDPhiJ1234METnoX>0.5", "nTauSelection==1"]
    singlemuonmu_selection = selection + ["SingleMuonTriggered", "MinDPhiJ1234METnoX>0.5", "nMuonSelection==1", "MTW>30.", "MTW<125."]
    singlemuonmuqcd_selection = selection + ["SingleMuonTriggered", "MinDPhiJ1234METnoX<=0.5", "nMuonSelection==1", "MTW>30.", "MTW<125."]
    doublemuonmu_selection = selection + ["SingleMuonTriggered", "MinDPhiJ1234METnoX>0.5", "nMuonSelection==2", "MLL>71.", "MLL<111.", "LeptonCharge==0."]
    doublemuonmuqcd_selection = selection + ["SingleMuonTriggered", "MinDPhiJ1234METnoX<=0.5", "nMuonSelection==2", "MLL>71.", "MLL<111.", "LeptonCharge==0."]
    # data: cut on explicit veto-lepton counts; MC: cut on selected taus only
    if not mc:
        monojet_selection.extend(["nMuonVeto==0", "nElectronVeto==0", "nTauVeto==0"])
        monojetqcd_selection.extend(["nMuonVeto==0", "nElectronVeto==0", "nTauVeto==0"])
        singlemuon_selection.extend(["nMuonVeto==1", "nElectronVeto==0", "nTauVeto==0"])
        singlemuonqcd_selection.extend(["nMuonVeto==1", "nElectronVeto==0", "nTauVeto==0"])
        doublemuon_selection.extend(["nMuonVeto==2", "nElectronVeto==0", "nTauVeto==0"])
        doublemuonqcd_selection.extend(["nMuonVeto==2", "nElectronVeto==0", "nTauVeto==0"])
        singleelectron_selection.extend(["nMuonVeto==0", "nElectronVeto==1", "nTauVeto==0"])
        singleelectronqcd_selection.extend(["nMuonVeto==0", "nElectronVeto==1", "nTauVeto==0"])
        doubleelectron_selection.extend(["nMuonVeto==0", "nElectronVeto==2", "nTauVeto==0"])
        doubleelectronqcd_selection.extend(["nMuonVeto==0", "nElectronVeto==2", "nTauVeto==0"])
        singletau_selection.extend(["nMuonVeto==0", "nElectronVeto==0", "nTauVeto==1"])
        singlemuonmu_selection.extend(["nMuonVeto==1", "nElectronVeto==0", "nTauVeto==0"])
        singlemuonmuqcd_selection.extend(["nMuonVeto==1", "nElectronVeto==0", "nTauVeto==0"])
        doublemuonmu_selection.extend(["nMuonVeto==2", "nElectronVeto==0", "nTauVeto==0"])
        doublemuonmuqcd_selection.extend(["nMuonVeto==2", "nElectronVeto==0", "nTauVeto==0"])
    else:
        monojet_selection.extend(["nTauSelection==0"])
        monojetqcd_selection.extend(["nTauSelection==0"])
        singlemuon_selection.extend(["nTauSelection==0"])
        singlemuonqcd_selection.extend(["nTauSelection==0"])
        doublemuon_selection.extend(["nTauSelection==0"])
        doublemuonqcd_selection.extend(["nTauSelection==0"])
        singleelectron_selection.extend(["nTauSelection==0"])
        singleelectronqcd_selection.extend(["nTauSelection==0"])
        doubleelectron_selection.extend(["nTauSelection==0"])
        doubleelectronqcd_selection.extend(["nTauSelection==0"])
        singlemuonmu_selection.extend(["nTauSelection==0"])
        singlemuonmuqcd_selection.extend(["nTauSelection==0"])
        doublemuonmu_selection.extend(["nTauSelection==0"])
        doublemuonmuqcd_selection.extend(["nTauSelection==0"])
    # One cutflow per enabled region: full AND-ed selection string, per-region
    # event weight (unity for data) and the subset of histograms that make
    # sense in that region. Disabled regions are kept for reference.
    cutflows = {
        "Monojet": {
            "selection_name": "Monojet",
            "selection": "(" + ") & (".join(monojet_selection) + ")",
            "weight": "1." if not mc else "df.Weight_Monojet",
            "hists": [h for h in hists_def.keys() if not any(test in h or h.startswith("n") for test in [
                "MTW", "MLL", "Muon", "Electron", "Tau",
            ])],
        },
        "MonojetQCD": {
            "selection_name": "MonojetQCD",
            "selection": "(" + ") & (".join(monojetqcd_selection) + ")",
            "weight": "1." if not mc else "df.Weight_MonojetQCD",
            "hists": [h for h in hists_def.keys() if not any(test in h or h.startswith("n") for test in [
                "MTW", "MLL", "Muon", "Electron", "Tau",
            ])],
        },
        #"SingleMuon": {
        #    "selection_name": "SingleMuon",
        #    "selection": "(" + ") & (".join(singlemuon_selection) + ")",
        #    "weight": "1." if not mc else "df.Weight_SingleMuon",
        #    "hists": [h for h in hists_def.keys() if not any(test in h or h.startswith("n") for test in [
        #        "MLL", "SecondMuon", "Electron", "Tau",
        #    ])],
        #},
        "SingleMuonQCD": {
            "selection_name": "SingleMuonQCD",
            "selection": "(" + ") & (".join(singlemuonqcd_selection) + ")",
            "weight": "1." if not mc else "df.Weight_SingleMuonQCD",
            "hists": [h for h in hists_def.keys() if not any(test in h or h.startswith("n") for test in [
                "MLL", "SecondMuon", "Electron", "Tau",
            ])],
        },
        #"DoubleMuon": {
        #    "selection_name": "DoubleMuon",
        #    "selection": "(" + ") & (".join(doublemuon_selection) + ")",
        #    "weight": "1." if not mc else "df.Weight_DoubleMuon",
        #    "hists": [h for h in hists_def.keys() if not any(test in h or h.startswith("n") for test in [
        #        "MTW", "Electron", "Tau",
        #    ])],
        #},
        #"SingleElectron": {
        #    "selection_name": "SingleElectron",
        #    "selection": "(" + ") & (".join(singleelectron_selection) + ")",
        #    "weight": "1." if not mc else "df.Weight_SingleElectron",
        #    "hists": [h for h in hists_def.keys() if not any(test in h or h.startswith("n") for test in [
        #        "MLL", "SecondElectron", "Muon", "Tau",
        #    ])],
        #},
        "SingleElectronQCD": {
            "selection_name": "SingleElectronQCD",
            "selection": "(" + ") & (".join(singleelectronqcd_selection) + ")",
            "weight": "1." if not mc else "df.Weight_SingleElectronQCD",
            "hists": [h for h in hists_def.keys() if not any(test in h or h.startswith("n") for test in [
                "MLL", "SecondElectron", "Muon", "Tau",
            ])],
        },
        #"DoubleElectron": {
        #    "selection_name": "DoubleElectron",
        #    "selection": "(" + ") & (".join(doubleelectron_selection) + ")",
        #    "weight": "1." if not mc else "df.Weight_DoubleElectron",
        #    "hists": [h for h in hists_def.keys() if not any(test in h or h.startswith("n") for test in [
        #        "MTW", "Muon", "Tau",
        #    ])],
        #},
        #"SingleTau": {
        #    "selection_name": "SingleTau",
        #    "selection": "(" + ") & (".join(singletau_selection) + ")",
        #    "weight": "1." if not mc else "df.Weight_SingleTau",
        #    "hists": [h for h in hists_def.keys() if not any(test in h or h.startswith("n") for test in [
        #        "MTW", "MLL", "Muon", "Electron",
        #    ])],
        #},
        #"SingleMuonMu": {
        #    "selection_name": "SingleMuonMu",
        #    "selection": "(" + ") & (".join(singlemuonmu_selection) + ")",
        #    "weight": "1." if not mc else "df.Weight_SingleMuonMu",
        #    "hists": [h for h in hists_def.keys() if not any(test in h or h.startswith("n") for test in [
        #        "MLL", "SecondMuon", "Electron", "Tau",
        #    ])],
        #},
        "SingleMuonMuQCD": {
            "selection_name": "SingleMuonMuQCD",
            "selection": "(" + ") & (".join(singlemuonmuqcd_selection) + ")",
            "weight": "1." if not mc else "df.Weight_SingleMuonMuQCD",
            "hists": [h for h in hists_def.keys() if not any(test in h or h.startswith("n") for test in [
                "MLL", "SecondMuon", "Electron", "Tau",
            ])],
        },
        #"DoubleMuonMu": {
        #    "selection_name": "DoubleMuonMu",
        #    "selection": "(" + ") & (".join(doublemuonmu_selection) + ")",
        #    "weight": "1." if not mc else "df.Weight_DoubleMuonMu",
        #    "hists": [h for h in hists_def.keys() if not any(test in h or h.startswith("n") for test in [
        #        "MTW", "Electron", "Tau",
        #    ])],
        #},
    }
    # MET trigger paths; the data version guards two paths with run-number
    # cuts that do not apply to simulation
    met_triggers = [
        "df.HLT_PFMETNoMu90_PFMHTNoMu90_IDTight",
        "df.HLT_PFMETNoMu100_PFMHTNoMu100_IDTight",
        "df.HLT_PFMETNoMu110_PFMHTNoMu110_IDTight",
        "df.HLT_PFMETNoMu120_PFMHTNoMu120_IDTight",
        "(df.HLT_PFMET170_NotCleaned & (df.run<276282))",
        "(df.HLT_PFMET170_BeamHaloCleaned & (df.run<276282))",
        "df.HLT_PFMET170_HBHECleaned",
        "df.HLT_PFMET170_HBHE_BeamHaloCleaned",
        "df.HLT_MET75_IsoTrk50",
    ]
    if mc:
        met_triggers = [
            "df.HLT_PFMETNoMu90_PFMHTNoMu90_IDTight",
            "df.HLT_PFMETNoMu100_PFMHTNoMu100_IDTight",
            "df.HLT_PFMETNoMu110_PFMHTNoMu110_IDTight",
            "df.HLT_PFMETNoMu120_PFMHTNoMu120_IDTight",
            "df.HLT_PFMET170_NotCleaned",
            "df.HLT_PFMET170_BeamHaloCleaned",
            "df.HLT_PFMET170_HBHECleaned",
            "df.HLT_PFMET170_HBHE_BeamHaloCleaned",
            "df.HLT_MET75_IsoTrk50",
        ]
    singlemuon_triggers = ["df.HLT_IsoMu24", "df.HLT_IsoTkMu24"]
    singleelectron_triggers = ["df.HLT_Ele27_WPTight_Gsf"]
    # derived columns evaluated before the cutflows: histogram bin edges
    # (clamped at the top) and OR-ed trigger decisions
    cfg_eval_bins = [
        {"METnoX_pt_bin": "df: np.minimum(1550., np.floor(df.METnoX_pt/10.)*10.)"},
        {"NearestJetSelectionMET_pt_bin": "df: np.minimum(1140., np.floor((df.NearestJetSelectionMET_pt-40.)/50.)*50.+40.)"},
        {"METTriggered": "df: (" + ") | (".join(met_triggers) + ")"},
        {"SingleMuonTriggered": "df: (" + ") | (".join(singlemuon_triggers) + ")"},
        {"SingleElectronTriggered": "df: (" + ") | (".join(singleelectron_triggers) + ")"},
    ]
    # MC only: assemble per-region event weights from scale-factor products
    # and split the W/DY parents by generated lepton flavour
    if mc:
        # weights common to all regions
        weight = [
            "df.WeightXsLumi",
            "df.WeightQcdEwk",
            "df.WeightPU",
            "df.WeightPreFiring",
            "df.WeightOneMinusPhotonVetoIdLoose",
            "df.WeightOneMinusPhotonVetoPixelSeedVeto",
            "df.WeightOneMinusJetBVetoIdMedium",
        ]
        # electron veto / selection scale factors
        weight_ele_veto = [
            "df.WeightOneMinusElectronVetoNoSelectionIdIsoVeto",
            "df.WeightOneMinusElectronSelectionIdIsoTight",
            "df.WeightOneMinusElectronVetoReco",
        ]
        weight_ele_sele = [
            "df.WeightElectronSelectionIdIsoTight",
            "df.WeightElectronSelectionReco",
            "df.WeightOneMinusElectronVetoNoSelectionIdIsoVeto",
            "df.WeightOneMinusElectronVetoNoSelectionReco",
        ]
        # muon veto / selection scale factors
        weight_mu_veto = [
            "df.WeightOneMinusMuonVetoNoSelectionIdLoose",
            "df.WeightOneMinusMuonVetoNoSelectionIsoLoose",
            "df.WeightOneMinusMuonSelectionIdTight",
            "df.WeightOneMinusMuonSelectionIsoTight",
        ]
        weight_mu_sele = [
            "df.WeightMuonSelectionIdTight",
            "df.WeightMuonSelectionIsoTight",
            "df.WeightOneMinusMuonVetoNoSelectionIdLoose",
            "df.WeightOneMinusMuonVetoNoSelectionIsoLoose",
        ]
        # tau veto / selection scale factors
        weight_tau_veto = [
            #"df.WeightOneMinusTauSelectionIdTight",
            "df.WeightOneMinusTauVetoNoSelectionIdVLoose",
        ]
        weight_tau_sele = [
            #"df.WeightTauSelectionIdTight",
            "df.WeightOneMinusTauVetoNoSelectionIdVLoose",
        ]
        # trigger efficiency scale factors
        weight_met_trig = ["df.WeightMETTrig",]
        weight_singleele_trig = ["df.WeightSingleElectronTrig"]
        weight_singlemu_trig = ["df.WeightSingleMuonTrig"]
        cfg_eval_bins.extend([
            {"parent": "df: np.where( df.parent.isin(['WJetsToLNu', 'DYJetsToLL']), np.where( df.parent=='WJetsToLNu', np.where( df.LeptonIsElectron, np.full_like(df.parent, 'WJetsToENu'), np.where( df.LeptonIsMuon, np.full_like(df.parent, 'WJetsToMuNu'), np.where( df.LeptonIsTau, np.where( df.nGenTauL==0, np.full_like(df.parent, 'WJetsToTauHNu'), np.where( df.nGenTauL==1, np.full_like(df.parent, 'WJetsToTauLNu'), np.full_like(df.parent, 'WJetsToTauNu'), ), ), np.full_like(df.parent, 'WJetsToLNu'), ), ), ), np.where( df.parent=='DYJetsToLL', np.where( df.LeptonIsElectron, np.full_like(df.parent, 'DYJetsToEE'), np.where( df.LeptonIsMuon, np.full_like(df.parent, 'DYJetsToMuMu'), np.where( df.LeptonIsTau, np.where( df.nGenTauL==0, np.full_like(df.parent, 'DYJetsToTauHTauH'), np.where( df.nGenTauL==1, np.full_like(df.parent, 'DYJetsToTauHTauL'), np.where( df.nGenTauL==2, np.full_like(df.parent, 'DYJetsToTauLTauL'), np.full_like(df.parent, 'DYJetsToTauTau'), ), ), ), np.full_like(df.parent, 'DYJetsToLL'), ), ), ), df.parent, ), ), df.parent, )"},
            {"Weight_Monojet": "df: (" + ")*(".join(weight+weight_ele_veto+weight_mu_veto+weight_tau_veto+weight_met_trig)+")"},
            {"Weight_MonojetQCD": "df: df.Weight_Monojet"},
            {"Weight_SingleMuon": "df: (" + ")*(".join(weight+weight_ele_veto+weight_mu_sele+weight_tau_veto+weight_met_trig)+")"},
            {"Weight_SingleMuonQCD": "df: df.Weight_SingleMuon"},
            {"Weight_DoubleMuon": "df: df.Weight_SingleMuon"},
            {"Weight_SingleElectron": "df: (" + ")*(".join(weight+weight_ele_sele+weight_mu_veto+weight_tau_veto+weight_singleele_trig)+")"},
            {"Weight_SingleElectronQCD": "df: df.Weight_SingleElectron"},
            {"Weight_DoubleElectron": "df: df.Weight_SingleElectron"},
            {"Weight_SingleTau": "df: (" + ")*(".join(weight+weight_ele_veto+weight_mu_veto+weight_tau_sele+weight_met_trig)+")"},
            {"Weight_SingleMuonMu": "df: (" + ")*(".join(weight+weight_ele_veto+weight_mu_sele+weight_tau_veto+weight_singlemu_trig)+")"},
            {"Weight_SingleMuonMuQCD": "df: df.Weight_SingleMuonMu"},
            {"Weight_DoubleMuonMu": "df: df.Weight_SingleMuonMu"},
        ])
    # assemble the final zdb config
    cfg = {
        "query": {
            "groupby": ["table", "varname0", "varname1", "selection", "parent", "binvar0", "binvar1"],
            "tables": tables,
            "aliases": {},
            "eval": cfg_eval_bins,
            "cutflows": cutflows,
            "hists": hists_def,
        },
        "files": sorted(p for p in glob.glob(filepath)),
    }
    return cfg
# +
# variable pair for the 2D histograms: MET (no X) vs nearest-jet MET projection
hists = [("METnoX_pt", "NearestJetSelectionMET_pt")]
systs = [
("d1kqcdUp", "df.WeightQcdEwk_d1kqcdUp/df.WeightQcdEwk"),
("d1kqcdDown", "df.WeightQcdEwk_d1kqcdDown/df.WeightQcdEwk"),
("d2kqcdUp", "df.WeightQcdEwk_d2kqcdUp/df.WeightQcdEwk"),
("d2kqcdDown", "df.WeightQcdEwk_d2kqcdDown/df.WeightQcdEwk"),
("d3kqcdUp", "df.WeightQcdEwk_d3kqcdUp/df.WeightQcdEwk"),
("d3kqcdDown", "df.WeightQcdEwk_d3kqcdDown/df.WeightQcdEwk"),
("d1kewUp", "df.WeightQcdEwk_d1kewUp/df.WeightQcdEwk"),
("d1kewDown", "df.WeightQcdEwk_d1kewDown/df.WeightQcdEwk"),
("d2kewzUp", "df.WeightQcdEwk_d2kewzUp/df.WeightQcdEwk"),
("d2kewzDown", "df.WeightQcdEwk_d2kewzDown/df.WeightQcdEwk"),
("d2kewwUp", "df.WeightQcdEwk_d2kewwUp/df.WeightQcdEwk"),
("d2kewwDown", "df.WeightQcdEwk_d2kewwDown/df.WeightQcdEwk"),
("d3kewzUp", "df.WeightQcdEwk_d3kewzUp/df.WeightQcdEwk"),
("d3kewzDown", "df.WeightQcdEwk_d3kewzDown/df.WeightQcdEwk"),
("d3kewwUp", "df.WeightQcdEwk_d3kewwUp/df.WeightQcdEwk"),
("d3kewwDown", "df.WeightQcdEwk_d3kewwDown/df.WeightQcdEwk"),
("dkmixUp", "df.WeightQcdEwk_dkmixUp/df.WeightQcdEwk"),
("dkmixDown", "df.WeightQcdEwk_dkmixDown/df.WeightQcdEwk"),
("pileupUp", "df.WeightPU_pileupUp/df.WeightPU"),
("pileupDown", "df.WeightPU_pileupDown/df.WeightPU"),
("prefiringUp", "df.WeightPreFiring_prefiringUp/df.WeightPreFiring"),
("prefiringDown", "df.WeightPreFiring_prefiringDown/df.WeightPreFiring"),
("muonTrigUp", "np.where(df.selection.str.contains('MuonMu'), df.WeightSingleMuonTrig_muonTrigUp/df.WeightSingleMuonTrig, np.ones_like(df.WeightSingleMuonTrig))"),
("muonTrigDown", "np.where(df.selection.str.contains('MuonMu'), df.WeightSingleMuonTrig_muonTrigDown/df.WeightSingleMuonTrig, np.ones_like(df.WeightSingleMuonTrig))"),
("muonIdLooseStatUp", 'df.WeightOneMinusMuonVetoNoSelectionIdLoose_muonIdLooseStatUp/df.WeightOneMinusMuonVetoNoSelectionIdLoose'),
("muonIdLooseStatDown", 'df.WeightOneMinusMuonVetoNoSelectionIdLoose_muonIdLooseStatDown/df.WeightOneMinusMuonVetoNoSelectionIdLoose'),
("muonIdLooseSystUp", 'df.WeightOneMinusMuonVetoNoSelectionIdLoose_muonIdLooseSystUp/df.WeightOneMinusMuonVetoNoSelectionIdLoose'),
("muonIdLooseSystDown", 'df.WeightOneMinusMuonVetoNoSelectionIdLoose_muonIdLooseSystDown/df.WeightOneMinusMuonVetoNoSelectionIdLoose'),
("muonIsoLooseStatUp", 'df.WeightOneMinusMuonVetoNoSelectionIsoLoose_muonIsoLooseStatUp/df.WeightOneMinusMuonVetoNoSelectionIsoLoose'),
("muonIsoLooseStatDown", 'df.WeightOneMinusMuonVetoNoSelectionIsoLoose_muonIsoLooseStatDown/df.WeightOneMinusMuonVetoNoSelectionIsoLoose'),
("muonIsoLooseSystUp", 'df.WeightOneMinusMuonVetoNoSelectionIsoLoose_muonIsoLooseSystUp/df.WeightOneMinusMuonVetoNoSelectionIsoLoose'),
("muonIsoLooseSystDown", 'df.WeightOneMinusMuonVetoNoSelectionIsoLoose_muonIsoLooseSystDown/df.WeightOneMinusMuonVetoNoSelectionIsoLoose'),
("muonIdTightStatUp", "np.where(df.selection.str.contains('Muon'), df.WeightMuonSelectionIdTight_muonIdTightStatUp/df.WeightMuonSelectionIdTight, df.WeightOneMinusMuonSelectionIdTight_muonIdTightStatUp/df.WeightOneMinusMuonSelectionIdTight)"),
("muonIdTightStatDown", "np.where(df.selection.str.contains('Muon'), df.WeightMuonSelectionIdTight_muonIdTightStatDown/df.WeightMuonSelectionIdTight, df.WeightOneMinusMuonSelectionIdTight_muonIdTightStatDown/df.WeightOneMinusMuonSelectionIdTight)"),
("muonIdTightSystUp", "np.where(df.selection.str.contains('Muon'), df.WeightMuonSelectionIdTight_muonIdTightSystUp/df.WeightMuonSelectionIdTight, df.WeightOneMinusMuonSelectionIdTight_muonIdTightSystUp/df.WeightOneMinusMuonSelectionIdTight)"),
("muonIdTightSystDown", "np.where(df.selection.str.contains('Muon'), df.WeightMuonSelectionIdTight_muonIdTightSystDown/df.WeightMuonSelectionIdTight, df.WeightOneMinusMuonSelectionIdTight_muonIdTightSystDown/df.WeightOneMinusMuonSelectionIdTight)"),
("muonIsoTightStatUp", "np.where(df.selection.str.contains('Muon'), df.WeightMuonSelectionIsoTight_muonIsoTightStatUp/df.WeightMuonSelectionIsoTight, df.WeightOneMinusMuonSelectionIsoTight_muonIsoTightStatUp/df.WeightOneMinusMuonSelectionIsoTight)"),
("muonIsoTightStatDown", "np.where(df.selection.str.contains('Muon'), df.WeightMuonSelectionIsoTight_muonIsoTightStatDown/df.WeightMuonSelectionIsoTight, df.WeightOneMinusMuonSelectionIsoTight_muonIsoTightStatDown/df.WeightOneMinusMuonSelectionIsoTight)"),
("muonIsoTightSystUp", "np.where(df.selection.str.contains('Muon'), df.WeightMuonSelectionIsoTight_muonIsoTightSystUp/df.WeightMuonSelectionIsoTight, df.WeightOneMinusMuonSelectionIsoTight_muonIsoTightSystUp/df.WeightOneMinusMuonSelectionIsoTight)"),
("muonIsoTightSystDown", "np.where(df.selection.str.contains('Muon'), df.WeightMuonSelectionIsoTight_muonIsoTightSystDown/df.WeightMuonSelectionIsoTight, df.WeightOneMinusMuonSelectionIsoTight_muonIsoTightSystDown/df.WeightOneMinusMuonSelectionIsoTight)"),
("eleTrigUp", "np.where(df.selection.str.contains('Electron'), df.WeightSingleElectronTrig_eleTrigUp/df.WeightSingleElectronTrig, np.ones_like(df.WeightSingleElectronTrig))"),
("eleTrigDown", "np.where(df.selection.str.contains('Electron'), df.WeightSingleElectronTrig_eleTrigDown/df.WeightSingleElectronTrig, np.ones_like(df.WeightSingleElectronTrig))"),
("eleIdIsoVetoUp", 'df.WeightOneMinusElectronVetoNoSelectionIdIsoVeto_eleIdIsoVetoUp/df.WeightOneMinusElectronVetoNoSelectionIdIsoVeto'),
("eleIdIsoVetoDown", 'df.WeightOneMinusElectronVetoNoSelectionIdIsoVeto_eleIdIsoVetoDown/df.WeightOneMinusElectronVetoNoSelectionIdIsoVeto'),
("eleIdIsoTightUp", "np.where(df.selection.str.contains('Electron'), df.WeightElectronSelectionIdIsoTight_eleIdIsoTightUp/df.WeightElectronSelectionIdIsoTight, df.WeightOneMinusElectronSelectionIdIsoTight_eleIdIsoTightUp/df.WeightOneMinusElectronSelectionIdIsoTight)"),
("eleIdIsoTightDown", "np.where(df.selection.str.contains('Electron'), df.WeightElectronSelectionIdIsoTight_eleIdIsoTightDown/df.WeightElectronSelectionIdIsoTight, df.WeightOneMinusElectronSelectionIdIsoTight_eleIdIsoTightDown/df.WeightOneMinusElectronSelectionIdIsoTight)"),
("eleRecoUp", "np.where(df.selection.str.contains('Electron'), df.WeightElectronSelectionReco_eleRecoUp*df.WeightOneMinusElectronVetoNoSelectionReco_eleRecoUp/(df.WeightElectronSelectionReco*df.WeightOneMinusElectronVetoNoSelectionReco), df.WeightOneMinusElectronVetoReco_eleRecoUp/df.WeightOneMinusElectronVetoReco)"),
("eleRecoDown", "np.where(df.selection.str.contains('Electron'), df.WeightElectronSelectionReco_eleRecoDown*df.WeightOneMinusElectronVetoNoSelectionReco_eleRecoDown/(df.WeightElectronSelectionReco*df.WeightOneMinusElectronVetoNoSelectionReco), df.WeightOneMinusElectronVetoReco_eleRecoDown/df.WeightOneMinusElectronVetoReco)"),
("photonIdLooseUp", "df.WeightOneMinusPhotonVetoIdLoose_photonIdLooseUp/df.WeightOneMinusPhotonVetoIdLoose"),
("photonIdLooseDown", "df.WeightOneMinusPhotonVetoIdLoose_photonIdLooseDown/df.WeightOneMinusPhotonVetoIdLoose"),
("photonPixelSeedVetoUp", "df.WeightOneMinusPhotonVetoPixelSeedVeto_photonPixelSeedVetoUp/df.WeightOneMinusPhotonVetoPixelSeedVeto"),
("photonPixelSeedVetoDown", "df.WeightOneMinusPhotonVetoPixelSeedVeto_photonPixelSeedVetoDown/df.WeightOneMinusPhotonVetoPixelSeedVeto"),
("tauIdVLooseUp", "df.WeightOneMinusTauVetoNoSelectionIdVLoose_tauIdVLooseUp/df.WeightOneMinusTauVetoNoSelectionIdVLoose"),
("tauIdVLooseDown", "df.WeightOneMinusTauVetoNoSelectionIdVLoose_tauIdVLooseDown/df.WeightOneMinusTauVetoNoSelectionIdVLoose"),
("tauIdTightUp", "np.where(df.selection.str.contains('Tau'), df.WeightTauSelectionIdTight_tauIdTightUp, df.WeightOneMinusTauSelectionIdTight_tauIdTightUp)"),
("tauIdTightDown", "np.where(df.selection.str.contains('Tau'), df.WeightTauSelectionIdTight_tauIdTightDown, df.WeightOneMinusTauSelectionIdTight_tauIdTightDown)"),
("btagSFUp", 'df.WeightOneMinusJetBVetoIdMedium_btagSFUp/df.WeightOneMinusJetBVetoIdMedium'),
("btagSFDown", 'df.WeightOneMinusJetBVetoIdMedium_btagSFDown/df.WeightOneMinusJetBVetoIdMedium'),
("metTrigMuonMultiplicitySystUp", "np.where(~(df.selection.str.contains('Electron') | df.selection.str.contains('MuonMu')), df.WeightMETTrig_muonMultiplicitySystUp/df.WeightMETTrig, np.ones_like(df.WeightMETTrig))"),
("metTrigMuonMultiplicitySystDown", "np.where(~(df.selection.str.contains('Electron') | df.selection.str.contains('MuonMu')), df.WeightMETTrig_muonMultiplicitySystDown/df.WeightMETTrig, np.ones_like(df.WeightMETTrig))"),
("metTrigReferenceTriggerSystUp", "np.where(~(df.selection.str.contains('Electron') | df.selection.str.contains('MuonMu')), df.WeightMETTrig_referenceTriggerSystUp/df.WeightMETTrig, np.ones_like(df.WeightMETTrig))"),
("metTrigReferenceTriggerSystDown", "np.where(~(df.selection.str.contains('Electron') | df.selection.str.contains('MuonMu')), df.WeightMETTrig_referenceTriggerSystDown/df.WeightMETTrig, np.ones_like(df.WeightMETTrig))"),
("metTrigRegionSystUp", "np.where(~(df.selection.str.contains('Electron') | df.selection.str.contains('MuonMu')), df.WeightMETTrig_regionSystUp/df.WeightMETTrig, np.ones_like(df.WeightMETTrig))"),
("metTrigRegionSystDown", "np.where(~(df.selection.str.contains('Electron') | df.selection.str.contains('MuonMu')), df.WeightMETTrig_regionSystDown/df.WeightMETTrig, np.ones_like(df.WeightMETTrig))"),
] + [
("lheScaleWeight{}".format(idx), "np.where(~(df.parent.str.contains('ZJetsTo') | df.parent.str.contains('WJetsTo') | df.parent.str.contains('DYJetsTo') | df.parent.str.contains('GStarJetsTo')), df.LHEScaleWeight{}, np.ones_like(df.LHEScaleWeight0))".format(idx))
for idx in range(9)
] + [
("lhePdfWeight{}".format(idx), "df.LHEPdfWeight{}".format(idx))
for idx in range(104)
]
# Event-level selection applied to real data: certification + MET noise
# filters, jet quality cuts, photon / b-jet vetoes and the MET threshold.
data_selection = [
    "IsCertified", "Flag_goodVertices", "Flag_globalSuperTightHalo2016Filter",
    "Flag_HBHENoiseFilter", "Flag_HBHENoiseIsoFilter", "Flag_EcalDeadCellTriggerPrimitiveFilter",
    "Flag_BadPFMuonFilter", "Flag_eeBadScFilter", "MET_dCaloMET<0.5",
    "nJetSelection>0", "nJetSelection==nJetVeto", "LeadJetSelection_chHEF>0.1",
    "LeadJetSelection_neHEF<0.8", "LeadJetSelection_pt>200.", "nPhotonVeto==0",
    "nBJetVeto==0", "METnoX_pt>100.",
]
# Selection applied to simulation. Differs from data: no certification or
# eeBadSc flag, no photon / b-jet veto cut, plus an EWKV2Jets boson-count
# guard. NOTE(review): the data/MC differences look intentional (MC vetoes
# are handled by weights above) -- confirm against the analysis notes.
mc_selection = [
    "(parent!='EWKV2Jets' | nGenBosonSelection==1)", "Flag_goodVertices", "Flag_globalSuperTightHalo2016Filter",
    "Flag_HBHENoiseFilter", "Flag_HBHENoiseIsoFilter", "Flag_EcalDeadCellTriggerPrimitiveFilter",
    "Flag_BadPFMuonFilter", "MET_dCaloMET<0.5", "nJetSelection>0",
    "nJetSelection==nJetVeto", "LeadJetSelection_chHEF>0.1", "LeadJetSelection_neHEF<0.8",
    "LeadJetSelection_pt>200.", "METnoX_pt>100.",
]
# Build one analysis configuration per input category and write each to disk.
# generate_yaml(hists, selection, glob_pattern, ...) is defined earlier in
# this notebook; the glob patterns point at the skimmed HDF5 inputs.

# Real data: data-specific selection, no systematics.
cfg_data = generate_yaml(
    hists, data_selection,
    "/vols/cms/sdb15/Analysis/ZinvWidth/databases/skims/2019/09_Sep/20_skims/data/*.h5",
    mc=False,
)
with open("configs/data.yaml", "w") as f:
    yaml.dump(cfg_data, f, indent=4)

# Nominal MC with the full list of weight-based systematics.
cfg_mc = generate_yaml(
    hists, mc_selection,
    "/vols/cms/sdb15/Analysis/ZinvWidth/databases/skims/2019/09_Sep/14_skims/mc/*.h5",
    systs=systs,
    mc=True,
)
with open("configs/mc.yaml", "w") as f:
    yaml.dump(cfg_mc, f, indent=4)

# MC with jet-energy-scale variations, stored as separate event tables.
cfg_mc_jes = generate_yaml(
    hists, mc_selection,
    "/vols/cms/sdb15/Analysis/ZinvWidth/databases/skims/2019/09_Sep/20_skims/mc_jes/*.h5",
    tables={"jesTotal10": "Events_jesTotal10", "jesTotal20": "Events_jesTotal20", "jesTotal30": "Events_jesTotal30", "jesTotal40": "Events_jesTotal40", "jesTotal50": "Events_jesTotal50", "jesTotal60": "Events_jesTotal60", "jesTotal70": "Events_jesTotal70", "jesTotal80": "Events_jesTotal80", "jesTotal90": "Events_jesTotal90"},
    mc=True,
)
with open("configs/mc_jes.yaml", "w") as f:
    yaml.dump(cfg_mc_jes, f, indent=4)

# MC with jet-energy-resolution variations.
cfg_mc_jer = generate_yaml(
    hists, mc_selection,
    "/vols/cms/sdb15/Analysis/ZinvWidth/databases/skims/2019/09_Sep/14_skims/mc_jer/*.h5",
    tables={"jerSF10": "Events_jerSF10", "jerSF20": "Events_jerSF20", "jerSF30": "Events_jerSF30", "jerSF40": "Events_jerSF40", "jerSF50": "Events_jerSF50", "jerSF60": "Events_jerSF60", "jerSF70": "Events_jerSF70", "jerSF80": "Events_jerSF80", "jerSF90": "Events_jerSF90"},
    mc=True,
)
with open("configs/mc_jer.yaml", "w") as f:
    yaml.dump(cfg_mc_jer, f, indent=4)

# MC with unclustered-energy variations.
cfg_mc_unclust = generate_yaml(
    hists, mc_selection,
    "/vols/cms/sdb15/Analysis/ZinvWidth/databases/skims/2019/09_Sep/14_skims/mc_unclust/*.h5",
    tables={"unclust10": "Events_unclust10", "unclust20": "Events_unclust20", "unclust30": "Events_unclust30", "unclust40": "Events_unclust40", "unclust50": "Events_unclust50", "unclust60": "Events_unclust60", "unclust70": "Events_unclust70", "unclust80": "Events_unclust80", "unclust90": "Events_unclust90"},
    mc=True,
)
with open("configs/mc_unclust.yaml", "w") as f:
    yaml.dump(cfg_mc_unclust, f, indent=4)

# MC with lepton/photon energy- and momentum-scale variations.
cfg_mc_lepscales = generate_yaml(
    hists, mc_selection,
    "/vols/cms/sdb15/Analysis/ZinvWidth/databases/skims/2019/09_Sep/14_skims/mc_lepscales/*.h5",
    tables={"eleEnergyScaleUp": "Events_eleEnergyScaleup", "eleEnergyScaleDown": "Events_eleEnergyScaledown", "muonPtScaleUp": "Events_muonPtScaleup", "muonPtScaleDown": "Events_muonPtScaledown", "photonEnergyScaleUp": "Events_photonEnergyScaleup", "photonEnergyScaleDown": "Events_photonEnergyScaledown", "tauPtScaleUp": "Events_tauPtScaleup", "tauPtScaleDown": "Events_tauPtScaledown"},
    mc=True,
)
with open("configs/mc_lepscales.yaml", "w") as f:
    yaml.dump(cfg_mc_lepscales, f, indent=4)
# -
# !~/Scripts/batch/QSTAT.py
# Submit the histogram production for every config file in one batch job.
# Same call as before, with the argument lists named for readability.
config_paths = [
    "configs/data.yaml",
    "configs/mc.yaml",
    "configs/mc_jes.yaml",
    "configs/mc_jer.yaml",
    "configs/mc_unclust.yaml",
    "configs/mc_lepscales.yaml",
]
output_tables = [
    "hists_qcd_estimation.h5:DataAggEvents",
    "hists_qcd_estimation.h5:MCAggEvents",
    "hists_qcd_estimation.h5:MCAggEvents_jes",
    "hists_qcd_estimation.h5:MCAggEvents_jer",
    "hists_qcd_estimation.h5:MCAggEvents_unclust",
    "hists_qcd_estimation.h5:MCAggEvents_lepscales",
]
zdb.modules.multi_analyse(
    config_paths,
    outputs=output_tables,
    mode='sge',
    ncores=-1,
    batch_opts="-q hep.q -l h_rt=3:0:0 -l h_vmem=24G",
    chunksize=600_000,
    merge_opts={"mode": "sge", "ncores": 10, "batch_opts": "-q hep.q"},
)
| notebooks/zinv/3_background_prediction/2_qcd/1_generate_hists.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: machine-learning
# language: python
# name: python3
# ---
# <center><img src='../../img/ai4eo_logos.jpg' alt='Logos AI4EO MOOC' width='80%'></img></center>
# <hr>
# <br>
# <a href="https://www.futurelearn.com/courses/artificial-intelligence-for-earth-monitoring/1/steps/1280514"><< Back to FutureLearn</a><br>
# # Tile-based classification using Sentinel-2 L1C and EuroSAT data - Training
# <i>by <NAME>, Planetek Italia S.r.l., Bari, Italy</i>
# <hr>
# ## Watch the video tutorial
# Embed the Vimeo tutorial video in the notebook output.
from IPython.display import HTML
HTML('<div align="center"><iframe src="https://player.vimeo.com/video/636104139?h=ee3c58721c" width="640" height="360" frameborder="0" allow="autoplay; fullscreen; picture-in-picture" allowfullscreen align="middle"></iframe></div>')
# <br>
# <hr>
# ## Introduction
# This workflow shows you how you can train a `Convolutional Neural Network (CNN)` with Keras based on the benchmark dataset [EuroSAT](https://arxiv.org/abs/1709.00029). The notebook provides you an introduction to the EuroSAT benchmark dataset, guides you through the preparation of training and test data and shows you how to configure and fit a Convolutional Neural Network. At the end how you can evaluate your model performance with a confusion matrix.
# ## Machine-Learning Algorithm
# This example develops a `Sequential Convolutional Neural Network (CNN)` with Keras. CNN's are often used for image classification. `Convolutional Neural Networks` use convolutional layers (ConV layers), which are the major building blocks used in convolutional neural networks. ConV layers are in principle a set of filters, which you can think of as 2D matrices of numbers, that turn input images into the expected output.
#
# The strength of CNNs is their ability to learn automatically from a large number of filters in parallel; they are often used for predictive modelling problems, such as image classification. CNNs are often able to detect highly specific features in input images.
# ## Data
# The model is trained on the [EuroSAT benchmark dataset](https://arxiv.org/abs/1709.00029) which is based on Sentinel-2 satellite images and consists of a total of 27,000 labeled and geo-referenced images. The dataset provides information on the following ten land cover / land use classes:
# * `Annual Crop`
# * `Forest`
# * `Herbaceous Vegetation`
# * `Highway`
# * `Industrial`
# * `Pasture`
# * `Permanent Crop`
# * `Residential`
# * `River`
# * `Sea Lake`
#
# The benchmark dataset can be used to detect `land cover / land use changes`. The geo-referenced dataset EuroSAT is publicly available [here](https://github.com/phelber/eurosat).
# ## Further resources
# * [EuroSAT: A Novel Dataset and Deep Learning Benchmark for Land Use and Land Cover Classification](https://arxiv.org/abs/1709.00029)
# * [EuroSAT data](https://github.com/phelber/eurosat)
# <hr>
# ## Notebook outline
# * [1 - Load the EuroSAT benchmark dataset as input data](#load_eurosat)
# * [2 - Create training and test subsets from input data](#split_test_training_eurosat)
# * [3 - Define the Convolutional Neural Network architecture](#cnn_eurosat)
# * [4 - Fitting (training) of the convolutional neural network](#fitting_eurosat)
# * [5 - Evaluate the performance of the CNN model with a confusion matrix](#evaluate_eurosat)
# <hr>
# #### Import libraries
# +
"""
## BEGIN S3FS IMPORT SNIPPET ##
import os, sys
s3_home = os.getcwd()
try: sys.path.remove(s3_home) # REMOVE THE S3 ROOT FROM THE $PATH
except Exception: pass
current_dir = os.getcwd()
os.chdir("D:\\Data\\Wekeo\\") # TEMPORARILY MOVE TO ANOTHER DIRECTORY
# BEGIN IMPORTS #
"""
from tensorflow import keras
from tensorflow.keras.preprocessing.image import ImageDataGenerator
from tensorflow.keras.models import Sequential
from tensorflow.keras.layers import Dense, Dropout, Activation, Flatten
from tensorflow.keras.layers import Conv2D, MaxPooling2D
from tensorflow.keras.callbacks import ModelCheckpoint
import glob
import numpy as np
import pandas as pd
import seaborn as sns
from osgeo import gdal_array
from matplotlib import pyplot as plt
from sklearn import metrics
from sklearn.model_selection import train_test_split
import zipfile
"""
# END IMPORTS #
os.chdir(current_dir) # GO BACK TO YOUR PREVIOUS DIRECTORY
sys.path.append(s3_home) # RESTORE THE S3 ROOT IN THE $PATH
## END S3FS IMPORT SNIPPET ##
"""
# -
# #### Load helper functions
# %run ./3B_tile-based_classification_with_EuroSAT_data_functions.ipynb
# <hr>
# <br>
# ## <a id='load_eurosat'></a>1. Load the EuroSAT benchmark dataset as input data
# The `EuroSAT benchmark dataset` is in the folder `./S2_Tile_based_classification/01_Input_data/S2_tile_4_training/`. This folder contains a total of ten folders, one for each land cover class. The first step is to go through the different folders and load all the EuroSAT images as a `numpy` array. You can use the function `.LoadFile` from the [GDAL Python bindings](https://pypi.org/project/GDAL/) module `gdal_array` to read a raster image (e.g. in `.tif` format) into a `numpy.array`.
#
# The result is `lst_arr_training`, a list of 10,000 arrays and each array has the dimension `[13, 64, 64]`. For each of the images, you want to create a `numpy.array` with ten entries indicating in binary form (0 or 1) the class the image belongs to. The resulting list is called `lst_gt_training` and has the same length as the list of training images.
#
# **NOTE**: for training purposes, the example only makes use of a subset (10,000 images) of the EuroSAT benchmark dataset (27,000 images).
# <br>
# Define the folder paths where the EuroSAT training images are located.
# +
# with zipfile.ZipFile("./S2_Tile_based_classification.zip", 'r') as zip_ref:
# Unpack the training-data archive and define the base paths.
# NOTE(review): hard-coded local Windows paths -- adjust for your machine
# (a relative-path variant is shown in the commented-out line above).
with zipfile.ZipFile("D:\\Data\\Wekeo\\S2_Tile_based_classification.zip", 'r') as zip_ref:
    zip_ref.extractall("D:\\Data\\Wekeo\\")
MAIN_PATH = 'D:\\Data\\Wekeo\\S2_Tile_based_classification\\'
DATA_PATH = '01_Input_data/S2_tile_4_training/'
# -
# <br>
# Loop over the training data folders and build up two lists:
# * `lst_arr_training` - List of training arrays
# * `lst_gt_training` - List of arrays indicating to which class each image belongs to
# +
import os

# Number of images to load per class (a subset of the full EuroSAT set).
len_data_for_training_tmp = 1000

# One sub-folder per land-cover class.
folder_for_training = glob.glob(MAIN_PATH+DATA_PATH+'*/')
print('[AI4EO_MOOC]_log: There are %d folders' % (len(folder_for_training)))

lst_arr_training = []  # image arrays loaded by GDAL
lst_gt_training = []   # one-hot class vectors, length 10

for i in range(0, len(folder_for_training)):
    data_for_training_tmp = glob.glob(folder_for_training[i]+'*.tif')
    # FIX: cap at the number of files actually present, so a class folder
    # with fewer than `len_data_for_training_tmp` images no longer raises
    # an IndexError in the inner loop.
    n_images = min(len_data_for_training_tmp, len(data_for_training_tmp))
    # FIX: derive the class label from the folder name instead of the
    # fragile hard-coded slice [40:-1], which depends on the path length.
    class_name = os.path.basename(os.path.normpath(folder_for_training[i]))
    print('[AI4EO_MOOC]_log: There are %d images for %s class' % (
        n_images, class_name)
    )
    for j in range(0, n_images):
        arr_tmp = gdal_array.LoadFile(data_for_training_tmp[j])
        lst_arr_training.append(arr_tmp)
        # One-hot ground-truth vector: position i marks the class folder.
        tmp_gt = np.zeros(10)
        tmp_gt[i] = 1
        lst_gt_training.append(tmp_gt)
# -
# The ground-truth entries are plain NumPy arrays (one-hot vectors).
type(lst_gt_training[0])
# <br>
# Let us inspect the length of the created lists as well as the dimensions of the images. You see that both lists have a length of 10,000 items. Each image has the dimension [13, 64, 64] and the binary class vector has a length of ten, representing the ten land cover classes.
print(len(lst_arr_training), len(lst_gt_training))
print(lst_arr_training[1000-1].shape, lst_gt_training[1000-1].shape)
# <br>
# The next step is to transform the two lists of arrays into multistack arrays. You can do this with the numpy function `.asarray()`. The result are two numpy arrays with the following specifications:
# * `arr_training`: 4 dimensions (10000, 13, 64, 64) --> (number of images, bands, rows, columns)
# * `arr_gt`: 2 dimensions (10000, 10) --> (number of images, columns)
# +
# Stack the Python lists into single NumPy arrays:
# arr_training: (num_images, bands, rows, cols); arr_gt: (num_images, classes).
arr_training = np.asarray(lst_arr_training)
arr_gt = np.asarray(lst_gt_training)
arr_training.shape, arr_gt.shape
# -
# <br>
# ### Reshape the multi-array's native shape to an AI readable shape
# Next, you have to reshape the array with the training images from its native shape (10000, 13, 64, 64) to a shape that is readable by Artificial Intelligence algorithms. For this reason, the multi-dimension array `arr_training` needs to be re-organised into the following dimensions:
# * `arr_training_res`: 4 dimensions (10000, 64, 64, 13) --> (number of images, rows, columns, bands)
# +
# Convert channels-first (N, bands, rows, cols) to channels-last
# (N, rows, cols, bands), the layout Keras Conv2D expects by default.
num_of_img, bands, rows, columns = arr_training.shape
print('[AI4EO_MOOC]_log: Reshape array from native shape (num_of_img:%d, bands:%d, rows:%d, columns:%d) to AI readble shape (num_of_img:%d, rows:%d, columns:%d, bands:%d). . .' % (num_of_img,bands,rows,columns, num_of_img,rows,columns,bands))
# FIX: np.reshape does not move axes -- it only reinterprets the flat
# memory order, which scrambles the pixel/band correspondence. The correct
# operation is a transpose that moves the band axis to the last position.
arr_training_res = np.transpose(arr_training, (0, 2, 3, 1))
arr_training_res.shape
# -
# Confirm the reordered data is still a NumPy ndarray.
type(arr_training_res)
# <br>
# ### Normalisation of the image radiances to a [0, 1] interval
# As a final step, we normalize the data and bring the data into a [0, 1] range. First, you want to transform the data type from `uint16 - [0,65535]` to `float32`. Then, you loop over each image in the numpy multi-dimension array, you retrieve the maximum value of each image with the numpy function `.amax()` and then you divide each value in the array by the maximum value.
print('[AI4EO_MOOC]_log: Normalization data into [0,1] intervall...')
# Scale each image by its own maximum so all radiances fall in [0, 1].
arr_training_res = arr_training_res.astype('float32')
per_image_max = np.amax(arr_training_res, axis=(1, 2, 3), keepdims=True)
arr_training_res = arr_training_res / per_image_max
# <br>
# Inspect one image of the array: the values are now floats between 0 and 1.
arr_training_res[1000 - 1, :, :, :]
# <br>
# ## <a id='split_test_training_eurosat'></a>2. Create training and test subsets from input data
# Let us now randomly split the training data into a `training subset` and a `testing subset`. Scikit-learn offers a popular function called `train_test_split()`, which creates four subsets based on the input and output variables `X - arr_training_res` and `y - arr_gt`. The function takes the following kwargs:
#
# * `arrays`: input and output data arrays
# * `test_size`: a float number representing the proportion of the input dataset to include in the test subset
# * `random_state`: An integer assuring reproducibility of the random shuffling of the data
#
# Let us use 85% of the input data for training and 15% for testing.
# +
# Hold-out split: 85% of the images for training, 15% for testing.
test_size=0.15
print('[AI4EO_MOOC]_log: Training (%0.2f %%) and validation (%0.2f %%) split..' % (
    (1-test_size)*100,(test_size)*100))
# random_state is fixed so the shuffle is reproducible.
X_train, X_test, y_train, y_test = train_test_split(
    arr_training_res,
    arr_gt,
    test_size=test_size,
    random_state=42)
# -
# <br>
# ## <a id='cnn_eurosat'></a>3. Define the Convolutional Neural Network architecture
# First, we initiate a `sequential neural network` model with the Keras class `keras.Sequential()`.
# Number of output classes = width of the one-hot ground-truth vectors.
_, num_classes = y_train.shape
print('[AI4EO_MOOC]_log: Convolutional Neural Network architecture:')
# Start from an empty sequential (layer-by-layer) model.
model = keras.Sequential()
# <br>
# The next step is to build up the architecture of the `Convolutional Neural Network (CNN)`, with the function `model.add()`.
# A `CNN` composes of the following set of layers:
# * `Conv2D`: Convolutional layer with number of filters, e.g. 32 or 64, the shape of the filter ((3,3), (5,5), ...) and the application of padding
# * `Activation`: Activation layer, e.g. `relu`, `sigmod`, ...
# * `MaxPooling2D`: Max Pooling layer with shape (2,2), (3,3), ...
# * `Dropout`: to reduce overfitting
#
# In the following code block, we build three blocks of layers:
#
# * **First block of layers**<br>
# The first block of layers consists of two `Conv2D` layers with 32 neurons and we add non-linear properties by adding an `Activation` layer in between. We then define a `MaxPooling2D` layer, which downsamples the input by taking the maximum value of a given window size. The block of layers finishes with a `Dropout` layer, which randomly skips 25% of the interconnections.
#
# * **Second block of layers**<br>
# The second block of layers consists again of two `Conv2D` layers, but this time with 64 neurons and we add non-linear properties by adding an `Activation` layer in between. We then define a `MaxPooling2D` layer, which downsamples the input by taking the maximum value of a given window size. The block of layers finishes with a `Dropout` layer, which randomly skips 25% of the interconnections.
#
# * **Third block of layers**<br>
# The third block consists of two `Dense` layers, which are fully-connected layers. In between, we again add non-linear properties by adding an `Activation` layer. The number of neurons of the final `Dense` layer has to be the same as the number of land use classes.
# +
# Block 1: two 32-filter 3x3 convolutions with ReLU, 2x2 max-pool, 25% dropout.
model.add(Conv2D(32, (3, 3), padding='same', input_shape=X_train.shape[1:]))
model.add(Activation('relu'))
model.add(Conv2D(32, (3, 3)))
model.add(Activation('relu'))
model.add(MaxPooling2D(pool_size=(2, 2)))
model.add(Dropout(0.25))
# Block 2: same structure with 64 filters.
model.add(Conv2D(64, (3, 3), padding='same'))
model.add(Activation('relu'))
model.add(Conv2D(64, (3, 3)))
model.add(Activation('relu'))
model.add(MaxPooling2D(pool_size=(2, 2)))
model.add(Dropout(0.25))
# Block 3: flatten to a vector, a 512-unit dense layer with ReLU and 50%
# dropout, then a softmax output with one unit per land-cover class.
model.add(Flatten())
model.add(Dense(512))
model.add(Activation('relu'))
model.add(Dropout(0.5))
model.add(Dense(num_classes))
model.add(Activation('softmax'))
# -
# <br>
# After the model architecture has been defined, you can compile (configure) the model with `model.compile()` and define the following hyperparameters:
# * `loss='categorical_crossentropy'` - Categorical crossentropy is one of many loss options and calculates the crossentropy loss between the labels and the predictions
# * `optimizer=RMSprop(lr=0.0001, decay=1e-6)` - Optimizers are algorithms the network learns from
# * `metrics=['accuracy']` - is used to evaluate how the model is performing
# +
# RMSprop optimiser with a small learning rate and decay; categorical
# cross-entropy matches the one-hot labels and softmax output.
# NOTE(review): the `lr`/`decay` keywords are deprecated in newer Keras
# releases in favour of `learning_rate` and a schedule -- confirm against
# the installed TensorFlow version before changing.
opt = keras.optimizers.RMSprop(lr=0.0001, decay=1e-6)
model.compile(loss='categorical_crossentropy',
              optimizer=opt,
              metrics=['accuracy'])
# -
# <br>
# The function `model.summary()` provides you a tabular summary of the `CNN architecture`, which is helpful to see the output shape of the model after each operation.
model.summary()
# <br>
# ## <a id='fitting_eurosat'></a>4. Fitting (training) of the convolutional neural network
# The next step in the training process is the actual training (fitting) of the model. Since the training process is time-consuming (depending on the technical capabilities of your machine as well as the architecture of your model), it is common practice to save the model with the best accuracy. The saved model can then be re-loaded without the need to repeat the training process.
# Let us set the folder path to which the model with the best validation accuracy is saved to. The pre-trained model is saved in the folder `02_pretrained_model`.
# +
# Destination of the best-model checkpoint written during training.
# NOTE(review): the '.json' extension is unusual for a full Keras model
# checkpoint (typically '.h5' or a SavedModel directory) -- verify intent.
save_dir = MAIN_PATH + '02_pretrained_model/'
model_name = 'keras_sentinel2_classification_trained_model_e50.json'
filepath_tmp = save_dir+model_name
filepath_tmp
# -
# <br>
# `Callbacks` are utilities that are called at certain points during model training. They can support you in better understanding the performance of your model during training. One useful callback is [ModelCheckpoint](https://www.tensorflow.org/api_docs/python/tf/keras/callbacks/ModelCheckpoint), which allows you to save the Keras model after training. With the class `ModelCheckpoint` you can define the type of information you would like to save.
#
# The following code saves only the model with the best `validation accuracy`.
# Save only the model with the best validation accuracy seen so far.
checkpoint = ModelCheckpoint(
    filepath_tmp,
    'val_accuracy',  # monitored metric
    verbose=1,
    save_best_only=True,
    mode='max')  # higher accuracy is better
callbacks_list = [checkpoint]
# <br>
# The final step is to train (fit) the Keras model. You can use `model.fit()` to fit the model based on the training dataset. The resulting object is a `history` object and a common practise is to call the output of the training process `history`.
#
# The function `model.fit()` requires you to specify the following parameters:
# * `input (X)` and `output (y)` data: here we specify the input and output training data of your model
# * `validation_data`: here we enter the test data subsets X_test and y_test and our model outputs are validated against these validation data after each epoch (training cycle)
# * `epochs`: number of training cycles
# * `batch_size`: defines the size of a training data subset (e.g. 32 samples) after which the weights of the network are updated
# * `callbacks`: define here the callbacks you would like to make use of during the training process
# * `verbose`: specify how the progress of the training shall be shown - option 0, 1, 2 - option 1 for example shows you a progress bar for each epoch
# +
# Train for 10 epochs with mini-batches of 32 samples, validating on the
# test split after each epoch; the checkpoint callback keeps the best model.
batch_size = 32
epochs = 10
history = model.fit(X_train, y_train,
                    epochs=epochs,
                    batch_size=batch_size,
                    validation_data=(X_test, y_test),
                    callbacks=callbacks_list,
                    verbose = 1)
# -
# <br>
# ## <a id='evaluate_eurosat'></a>5. Evaluate the performance of the CNN model with a confusion matrix
# The final step is to evaluate the performance of your model and the model's ability to classify land cover / land use with the help of the test data. With the function `model.predict()`, you can classify the test input data (`X_test`). The result `y_pred` is an array with the dimensions `(1500, 10)`. For each input image of the test data, the prediction provides a vector which holds for each class the probability of the class.
# Class-probability predictions for the test images: one row per image,
# one column per class.
y_pred = model.predict(X_test)
y_pred.shape
# <br>
# The final classification result is the class with the highest probability. The aim is to have a vector with the same length of the number of test images, which provides for each image the final predicted class. For this, we have to loop over the predicted (`y_pred`) and test (`y_test`) output arrays and retrieve the index of the argument with the maximum value. You can retrieve the index of the argument with the maximum value with numpy's function `np.argmax()`.
#
# The result are two one dimensional arrays, `y_pred_amax` and `y_test_amax`, providing the index of the land cover class for each image.
# +
# Collapse the per-class probability / one-hot vectors to a single class
# index per image (vectorised np.argmax replaces the Python loop).
# Note: the arrays now hold integer indices rather than floats, which is
# what metrics.confusion_matrix expects for class labels.
y_pred_amax = np.argmax(y_pred, axis=1)
y_test_amax = np.argmax(y_test, axis=1)
y_test_amax, y_pred_amax
# -
# <br>
# ### Create a confusion matrix
# Classification accuracy alone can often be misleading, especially if you have an unequal number of observations in each class or if you have more than two classes in your dataset. A confusion matrix can give you a better idea of what your model is getting right and what type of errors it is making. A `confusion matrix` is also known as error matrix and is a common technique for summarizing the performance of a classification algorithm.
#
# The metrics class of the scikit-learn package offers the function `confusion_matrix`, which computes the confusion matrix between the actual (`y_test_amax`) and predicted class (`y_pred_amax`). The result is a matrix which summarizes the number of correct and incorrect predictions with count values broken down by each class. The rows indicate the actual class and the columns indicate the predicted class.
# Confusion matrix: rows = actual class, columns = predicted class.
matrix = metrics.confusion_matrix(y_test_amax, y_pred_amax)
matrix
# <br>
# Let us convert the matrix into a `pandas.dataframe` and add class labels to the rows and columns. This helps to better interpret the results.
# +
# EuroSAT land-cover class names, in label-index order.
label_str=[
    'AnnualCrop',
    'Forest',
    'HerbaceousVegetation',
    'Highway',
    'Industrial',
    'Pasture',
    'PermanentCrop',
    'Residential',
    'River',
    'SeaLake' ]
# Wrap the raw matrix in a labelled DataFrame for easier interpretation.
con_mat_df = pd.DataFrame(matrix,
                          index = label_str,
                          columns= label_str)
con_mat_df
# -
# <br>
# Now, you can use the `heatmap()` function of the seaborn library to visualize the confusion matrix, which gives you a more visual picture of the classification performance of the model.
# Heatmap of the count-valued confusion matrix.
plt.figure(figsize=(10, 10))
sns.heatmap(con_mat_df,
            annot=True,
            fmt='d',  # integer annotation format
            cmap=plt.cm.Blues)
plt.tight_layout()
plt.ylabel('True label')
plt.xlabel('Predicted label')
plt.show()
# <br>
# Alternatively, you can also normalize the confusion matrix, which makes it often easier to interpret how the classes are predicted by the model. You can create the `normalized confusion matrix` also with the function `confusion_matrix` from scikit-learn's 'metrics' class, but you have to set the keyword argument `normalize` to true.
# +
# Row-normalised confusion matrix (relative frequencies), rounded to 2 d.p.
matrix_norm = metrics.confusion_matrix(y_test_amax, y_pred_amax, normalize='true')
matrix_norm = np.around(matrix_norm,decimals=2)
# -
# <br>
# Now, you can also transform the normalized matrix to a pandas data frame. You see now that the matrix represents relative frequencies per row.
con_mat_df_norm = pd.DataFrame(matrix_norm,
                               index = label_str,
                               columns= label_str)
con_mat_df_norm
# <br>
# As a final step, you can visualize the `normalized confusion matrix` with the function `heatmap()` from the seaborn library.
# +
# Heatmap of the normalised confusion matrix (default float annotations).
plt.figure(figsize=(10, 10))
sns.heatmap(con_mat_df_norm,
            annot=True,
            cmap=plt.cm.Blues)
plt.tight_layout()
plt.ylabel('True label')
plt.xlabel('Predicted label')
plt.show()
# -
# <br>
# <br>
# <a href="https://www.futurelearn.com/courses/artificial-intelligence-for-earth-monitoring/1/steps/1170903"><< Back to FutureLearn</a><br>
# <hr>
# <img src='../../img/copernicus_logo.png' alt='Copernicus logo' align='left' width='20%'></img>
# Course developed for [EUMETSAT](https://www.eumetsat.int/), [ECMWF](https://www.ecmwf.int/) and [Mercator Ocean International](https://www.mercator-ocean.fr/en/) in support of the [EU’s Copernicus Programme](https://www.copernicus.eu/en) and the [WEkEO platform](https://wekeo.eu/).
#
| 3_land/3B_tile-based_classification_with_EuroSAT_data/3B_tile-based_classification_with_EuroSAT_data_training.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# language: python
# name: python3
# ---
import pandas as pd
import numpy as np
import matplotlib.pyplot as plt
import seaborn as sn
import warnings
warnings.filterwarnings('ignore')
#read data set
df = pd.read_csv('melb_data.csv')
df.head()
# NOTE(review): duplicate inspection call -- harmless, could be removed.
df.head()
# Per-column cardinality and overall shape overview.
df.nunique()
df.shape
# Keep only the modelling columns; 'Price' is the regression target.
cols_to_use = ['Suburb', 'Rooms', 'Type', 'Method', 'SellerG', 'Regionname', 'Propertycount',
               'Distance', 'CouncilArea', 'Bedroom2', 'Bathroom', 'Car', 'Landsize', 'BuildingArea', 'Price']
df = df[cols_to_use]
df.head()
df.shape
df.isna().sum()
# Impute count-like columns with 0.
# NOTE(review): zero-filling 'Distance'/'Propertycount' may bias the fit --
# mean/median imputation could be more appropriate; confirm intent.
cols_to_fill_zero = ['Propertycount', 'Distance', 'Bedroom2', 'Bathroom', 'Car']
df[cols_to_fill_zero] = df[cols_to_fill_zero].fillna(0)
df.isna().sum()
# Impute continuous size columns with their column means.
df['Landsize'] = df['Landsize'].fillna(df.Landsize.mean())
df['BuildingArea'] = df['BuildingArea'].fillna(df.BuildingArea.mean())
df.isna().sum()
# Drop any rows that still contain missing values (e.g. missing target).
df.dropna(inplace=True)
df.isna().sum()
# One-hot encode categoricals, dropping one level per feature to avoid
# perfect collinearity.
df = pd.get_dummies(df, drop_first=True)
df.head()
# Feature matrix / target split.
x = df.drop('Price', axis=1)
y = df['Price']
from sklearn.model_selection import train_test_split
train_X, test_X, train_y, test_y = train_test_split(x, y, test_size=0.3, random_state=2)
# Plain linear regression baseline: a large gap between the train and test
# scores below indicates overfitting.
from sklearn.linear_model import LinearRegression
lr = LinearRegression().fit(train_X, train_y)
lr.score(test_X, test_y)
lr.score(train_X, train_y)
# # Here the training score is about 71%, but the test score is much lower
# ## Ordinary linear regression is clearly overfitting the data; let's try other models
# ## Using Lasso (L1 Regularized) Regression Model
# Lasso (L1-regularised) regression.
# NOTE(review): tol=1.0 is very loose (sklearn default is 1e-4) -- the
# coordinate-descent solver may stop early; confirm this is intentional.
from sklearn.linear_model import Lasso
lasso_reg = Lasso(alpha=50, max_iter=1000, tol=1.0)
lasso_reg.fit(train_X, train_y)
lasso_reg.score(test_X, test_y)
lasso_reg.score(train_X, train_y)
# # Using Ridge (L2 Regularized) Regression Model
# Ridge (L2-regularised) regression.
# NOTE(review): tol=0.3 is much looser than the sklearn default (1e-3/1e-4
# depending on version) -- confirm this is intentional.
from sklearn.linear_model import Ridge
ridge_reg= Ridge(alpha=50, max_iter=1000, tol=0.3)
ridge_reg.fit(train_X, train_y)
ridge_reg.score(test_X, test_y)
ridge_reg.score(train_X, train_y)
| Regularization/regularization.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 2
# language: python
# name: python2
# ---
# # Check Homework HW05
# Use this notebook to check your solutions. This notebook will **not** be graded.
import pandas as pd
import numpy as np
# Now, import your solutions from `hw5_answers.py`. The following code looks a bit redundant. However, we do this to allow reloading the `hw5_answers.py` in case you made some changes. Normally, Python assumes that modules don't change and therefore does not try to import them again.
# Import the student's answers; reload() (a Python 2 builtin) picks up any
# edits made to hw5_answers.py after the first import in this session.
import hw5_answers
reload(hw5_answers)
from hw5_answers import *
# The Employees, Territory, Customers, and Orders tables are the same as those we used in class.
Employees = pd.read_excel('/home/data/AdventureWorks/Employees.xls')
Territory = pd.read_excel('/home/data/AdventureWorks/SalesTerritory.xls')
Customers = pd.read_excel('/home/data/AdventureWorks/Customers.xls')
Orders = pd.read_excel('/home/data/AdventureWorks/ItemsOrdered.xls')
# ## Problem 1
# Write a function called `get_manager` that takes as its one argument the Pandas DataFrame "Employees" and returns a DataFrame containing list of all employees (EmployeeID, first name, middle name, last name), and their manager's first and last name. The columns in the output DataFrame should be: EmployeeID, FirstName, MiddleName, LastName, ManagerFirstName, ManagerLastName.
#
#
# Check Problem 1: expected shape (291, 6) with the columns listed below.
df1 = get_manager(Employees)
print "Shape of resulting table: ", df1.shape
print "Columns: ", ', '.join(df1.columns)
df1.head()
#
# Shape of resulting table: (291, 6)
# Columns: EmployeeID, FirstName, MiddleName, LastName, ManagerFirstName, ManagerLastName
#
# | EmployeeID | FirstName |MiddleName | LastName | ManagerFirstName | ManagerLastName
# -----------|-----------|-----------|----------|------------------|----------------
# 0 | 259 | Ben | T | Miller |Sheela | Word
# 1 | 278 | Garrett | R | Vargas |Stephen | Jiang
# 2 | 204 | Gabe | B | Mares | Peter | Krebs
# 3 | 78 | Reuben | H | D'sa | Peter | Krebs
# 4 | 255 | Gordon | L | Hee | Sheela | Word
#
# ## Problem 2
# Write a functon called `get_spend_by_order` that takes as its two arguments the Pandas DataFrames "Orders" and "Customers", and returns a DataFrame with the following columns: "FirstName", "LastName", "Item", "TotalSpent", listing all cutomer names, their purchased items, and the total amount spend on that item (remember that the "Price" listed in "Orders" is the _price per item_).
#
#
# Check Problem 2: expected shape (32, 4) with the columns listed below.
df2 = get_spend_by_order(Orders, Customers)
print "Shape of resulting table: ", df2.shape
print "Columns: ", ', '.join(df2.columns)
df2.head()
# Shape of resulting table: (32, 4)
# Columns: FirstName, LastName, Item, TotalSpent
#
# |FirstName | LastName | Item | TotalSpent
# ----------|----------|------|-----------
# 0 | Anthony | Sanchez | Umbrella | 4.5
# 1 | Conrad | Giles | Ski Poles | 25.5
# 2 | Conrad | Giles | Tent | 88.0
# 3 | Donald | Davids | Lawnchair | 32.0
# 4 | Elroy | Keller | Inflatable Mattress | 38.0
# ## Problem 3
# Write a function called `get_order_location` that takes three arguments: "Orders", "Customers", and "Territory", and returns a DataFrame containing the following columns: "CustomerID", "Name", and "TotalItems", that gives, for each order, the CustomerID, the name of the territory where the order was placed, and the total number of items ordered (yes, 2 ski poles counts as 2 items).
#
#
# Check Problem 3: expected shape (11, 3) with the columns listed below.
df3 = get_order_location(Orders, Customers, Territory)
print "Shape of resulting table: ", df3.shape
print "Columns: ", ', '.join(df3.columns)
df3.head()
# Shape of resulting table: (11, 3)
# Columns: CustomerID, Name, TotalItems
#
#
# | CustomerID | Name | TotalItems
# -----------|------|-----------
# 0 | 10315 | Central | 1
# 1 | 10438 | Central | 3
# 2 | 10439 | Central | 2
# 3 | 10101 | Northwest | 6
# 4 | 10299 | Northwest | 2
# ## Problem 4
# Write a function called `employee_info` that takes one argument: "Employees", and returns a DataFrame containing the following columns: JobTitle, NumberOfEmployees, and MeanVacationHours, containing all job titles, the number of employees with that job title, and the mean number of vacation days for employees with that job title.
# Check Problem 4: expected shape (68, 3) with the columns listed below.
df4 = employee_info(Employees)
print "Shape of resulting table: ", df4.shape
print "Columns: ", ', '.join(df4.columns)
df4.head()
# Shape of resulting table: (68, 3)
# Columns: JobTitle, NumberOfEmployees, MeanVacationHours
#
# | JobTitle | NumberOfEmployees | MeanVacationHours
# -|----------|-------------------|--------
# 0 | Accountant |2 |58.5
# 1 | Accounts Manager |1 |57.0
# 2 | Accounts Payable Specialist |2 |63.5
# 3 | Accounts Receivable Specialist |3 |61.0
# 4 | Application Specialist |4 |72.5
| 05-Operating-with-Multiple-Tables/HW05/CheckHomework05.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3 (ipykernel)
# language: python
# name: python3
# ---
# # 17. Random Forest and Gradient Boosted Trees Classifier
#
# [](https://colab.research.google.com/github/rhennig/EMA6938/blob/main/Notebooks/17.RandomForest.ipynb)
#
# Previously, we used a Decision Tree Classifier to learn the fcc, bcc, and hcp crystal structure of 47 elements with Scikit-learn. Now, we will train a random forest and a gradient boosted trees model.
#
# Let's first load the required libraries.
# +
# Install the mendeleev and poymatgen packages using pip in the current Jupyter kernel
# To use them, you may need to restart the kernel
import sys
# !{sys.executable} -m pip install mendeleev
# !{sys.executable} -m pip install pymatgen
import pymatgen as pymat
from pymatgen.core.periodic_table import Element
import mendeleev as mendel
import pandas as pd
import numpy as np
import random
import matplotlib.pyplot as plt
# -
# ### Getting the dataset
#
# We select 47 elements that occur in the fcc, hcp, and bcc structure. The elements listed were chosen because querying them for these properties yields a dataset with no unknown values, and because they represent the three most common crystallographic structures.
#
# We then query both Pymatgen and Mendeleev to get a complete set of properties per element. We will use this data to create the features from which the model will train and test.
# +
# Elements known to crystallize in each of the three common structures;
# together they form the 47-element dataset (labels: 0=fcc, 1=bcc, 2=hcp).
fcc_elements = ["Ag", "Al", "Au", "Cu", "Ir", "Ni", "Pb", "Pd", "Pt", "Rh", "Th", "Yb"]
bcc_elements = ["Ba", "Ca", "Cr", "Cs", "Eu", "Fe", "Li", "Mn", "Mo", "Na", "Nb", "Rb", "Ta", "V", "W" ]
hcp_elements = ["Be", "Cd", "Co", "Dy", "Er", "Gd", "Hf", "Ho", "Lu", "Mg", "Re",
                "Ru", "Sc", "Tb", "Ti", "Tl", "Tm", "Y", "Zn", "Zr"]
elements = fcc_elements + bcc_elements + hcp_elements
# Fixed-seed shuffle so the later train/test slicing is reproducible.
random.Random(1).shuffle(elements)
# Property names queried from each database to build the feature vector.
querable_mendeleev = ["atomic_number", "atomic_volume", "boiling_point", "en_ghosh", "evaporation_heat", "heat_of_formation",
                      "melting_point", "specific_heat"]
querable_pymatgen = ["atomic_mass", "atomic_radius", "electrical_resistivity","molar_volume", "bulk_modulus", "youngs_modulus",
                     "average_ionic_radius", "density_of_solid", "coefficient_of_linear_thermal_expansion"]
querable_values = querable_mendeleev + querable_pymatgen
# -
# We will use the database queries to populate a pandas dataframe.
# +
# Query Mendeleev and Pymatgen for every element and assemble the feature
# matrix plus the integer crystal-structure labels.
all_values = [] # Values for Attributes
all_labels = [] # Crystal structure labels (0 = fcc, 1 = bcc, 2 = hcp)
for item in elements:
    element_values = []
    # This section queries Mendeleev
    element_object = mendel.element(item)
    for i in querable_mendeleev:
        element_values.append(getattr(element_object,i))
    # This section queries Pymatgen
    element_object = Element(item)
    for i in querable_pymatgen:
        element_values.append(getattr(element_object,i))
    all_values.append(element_values) # All lists are appended to another list, creating a List of Lists
    if (item in fcc_elements):
        all_labels.append(0) # The crystal structure labels are assigned here
    elif (item in bcc_elements):
        all_labels.append(1) # The crystal structure labels are assigned here
    elif (item in hcp_elements):
        all_labels.append(2) # The crystal structure labels are assigned here
# Pandas Dataframe
df = pd.DataFrame(all_values, columns=querable_values)
# We will patch some of the values that are not available in the datasets.
# Value for the CTE of Cesium
index_Cs = df.index[df['atomic_number'] == 55]
df.iloc[index_Cs, df.columns.get_loc("coefficient_of_linear_thermal_expansion")] = 0.000097
# Value from: <NAME> (ed), CRC Handbook of Chemistry and Physics, 84th Edition. CRC Press. Boca Raton, Florida, 2003
# Value for the CTE of Rubidium
index_Rb = df.index[df['atomic_number'] == 37]
df.iloc[index_Rb, df.columns.get_loc("coefficient_of_linear_thermal_expansion")] = 0.000090
# Value from: https://www.azom.com/article.aspx?ArticleID=1834
# Value for the Evaporation Heat of Ruthenium
index_Ru = df.index[df['atomic_number'] == 44]
df.iloc[index_Ru, df.columns.get_loc("evaporation_heat")] = 595 # kJ/mol
# Value from: https://www.webelements.com/ruthenium/thermochemistry.html
# Value for the Bulk Modulus of Zirconium
index_Zr = df.index[df['atomic_number'] == 40]
df.iloc[index_Zr, df.columns.get_loc("bulk_modulus")] = 94 # GPa
# Value from: https://materialsproject.org/materials/mp-131/
df.head(n=10)
# -
# ### Processing and Organizing Data
#
# We normalize the data and randomly split it into training and testing sets.
#
# ##### SETS
#
# We have 47 elements for which the crystal structure is known and we will use 40 of these as a training set and the remaining 7 as testing set.
#
# ##### NORMALIZATION
#
# We will again use the Standard Score Normalization, which subtracts the mean of the feature and divide by its standard deviation.
# $$
# \frac{X - µ}{σ}
# $$
# While our model might converge without feature normalization, the resultant model would be difficult to train and would be dependent on the choice of units used in the input.
# +
# SETS
# SETS: rebuild the value lists from the (patched) dataframe rows.
all_values = [list(df.iloc[x]) for x in range(len(all_values))]
# List of lists are turned into Numpy arrays to facilitate calculations in steps to follow
# (Normalization).
all_values = np.array(all_values, dtype = float)
print("Shape of Values:", all_values.shape)
all_labels = np.array(all_labels, dtype = int)
print("Shape of Labels:", all_labels.shape)
# Training Set: first 40 (already shuffled) elements.
train_values = all_values[:40, :]
train_labels = all_labels[:40]
# Testing Set: last 7 elements.
test_values = all_values[-7:, :]
test_labels = all_labels[-7:]
# NORMALIZATION: standard-score using TRAINING statistics only, so no
# information from the test set leaks into the scaling.
mean = np.nanmean(train_values, axis = 0) # mean
std = np.nanstd(train_values, axis = 0) # standard deviation
train_values = (train_values - mean) / std # input scaling
test_values = (test_values - mean) / std # input scaling
print(train_values[0]) # print a sample entry from the training set
print(train_labels[0])
# -
# ### Creating the Random Forest Model
#
# For this classification, we will use a random forest.
# +
from sklearn.ensemble import RandomForestClassifier
from sklearn.metrics import accuracy_score
from sklearn import tree
# Create the random forest classifier (an ensemble of decision trees)
# with default hyperparameters.
model = RandomForestClassifier()
# Train the random forest classifier.
model.fit(train_values, train_labels)
# -
# ### Validation
#
# We calculate the accuracy score on the training and the testing sets.
# +
#Predict the response for training and testing dataset
# Predict the response for the training and testing datasets.
train_pred = model.predict(train_values)
test_pred = model.predict(test_values)
# Model accuracy on both sets: how often is the classifier correct?
print('Training accuracy = %.3f ' % accuracy_score(train_labels, train_pred))
print('Testing accuracy = %.3f ' % accuracy_score(test_labels, test_pred))
# -
# ### Visualize the random forest model
#
# Since the random forest consists of many decision trees, we can visualize the individual decision trees.
# +
label_names = ('fcc', 'bcc', 'hcp')
fig = plt.figure(figsize=(25,20))
# Select an individual decision tree from the ensemble, here index 0.
_ = tree.plot_tree(model.estimators_[0], feature_names=querable_values,
                   class_names = label_names, filled=True, impurity=True, rounded=True)
# +
# Build a comparison table of true vs. predicted crystal structures
# for every element (training rows first, then test rows).
train_predictions = model.predict(train_values)
test_predictions = model.predict(test_values)
print("train_labels = ", train_labels)
print("test_labels = ", test_labels)
all_labels = np.hstack((train_labels, test_labels))
all_predictions = np.hstack((train_predictions, test_predictions))
# Map integer class codes to structure names (0=FCC, 1=BCC, 2=HCP).
code_to_name = {0: "FCC", 1: "BCC", 2: "HCP"}
predicted_labels = [code_to_name[code] for code in all_predictions]
true_labels = [code_to_name[code] for code in all_labels]
# Column vectors so they can be concatenated side by side.
predicted_labels = np.array(predicted_labels).reshape((-1, 1))
true_labels = np.array(true_labels).reshape((-1, 1))
headings = ["Atomic number", "True crystal structure", "Predicted crystal structure"]
atomic_number_array = np.array(df.iloc[:, 0]).reshape((-1, 1))
plot_table = np.concatenate((atomic_number_array, true_labels, predicted_labels), axis=1)
plot_df = pd.DataFrame(plot_table, columns=headings)
# -
plot_df
# ### Questions:
#
# #### Hyperparameter optimization
#
# 1. We can select the `criterion` parameter to measure the quality of a split. The default value is `'squared_error'`.
#
# 2. When the algorithm performs a split, the main goal is to decrease impurity as much as possible. The more the impurity decreases, the more informative power that split gains. As the tree gets deeper, the amount of impurity decrease becomes lower. We can use this to prevent the tree from doing further splits. The hyperparameter for this task is `min_impurity_decrease`. Its default is zero. Try changing it to see the difference.
#
# 3. If the algorithm keeps splitting nodes, the model will probably be overfit. The `min_samples_split` parameter can be used to control the tree based on impurity values. It sets a threshold on gini. Try setting it to 0.3, so a node needs to have a gini value that is more then 0.3 to be further split.
#
# 4. Another hyperparameter to control the depth of a tree is `max_depth`. It does not make any calculations regarding impurity or sample ratio. The model stops splitting when max_depth is reached. Note that `max_depth` is less flexible compared to min_impurity_decrease.
#
# 5. Another hyperparameter is `min_samples_leaf`. It indicates the minimum number of samples required to be at a leaf node.
#
# 6. We can also limit the number of leaf nodes using `max_leaf_nodes` parameter which grows the tree in best-first fashion until max_leaf_nodes reached. The best split is decided based on impurity decrease.
#
# 7. Another important hyperparameter of decision trees is `max_features` which is the number of features to consider when looking for the best split. If not specified, the model considers all of the features. There is only 1 feature in our dataset.
#
# To change the hyperparameters:
#
# `regressor = RandomForestRegressor(hyperparameter = value)`
#
# Change the `max_depth` and `min_samples_split` to see how this affects the training and prediction error.
# +
# Create Decision Tree classifer object
# Create a random forest with restricted depth and a gini-based split
# threshold to limit overfitting.
model = RandomForestClassifier(max_depth=3, min_samples_split=0.5)
# Train the random forest classifier.
model.fit(train_values, train_labels)
# Predict the response for the training and testing datasets.
train_pred = model.predict(train_values)
test_pred = model.predict(test_values)
# Model accuracy on both sets: how often is the classifier correct?
print('Training accuracy = %.3f ' % accuracy_score(train_labels, train_pred))
print('Testing accuracy = %.3f ' % accuracy_score(test_labels, test_pred))
label_names = ('fcc', 'bcc', 'hcp')
fig = plt.figure(figsize=(25,20))
# Visualize one tree (index 0) from the restricted forest.
_ = tree.plot_tree(model.estimators_[0], feature_names=querable_values, class_names = label_names, filled=True)
# -
# ### Creating the Gradient Boosted Trees Model
#
# Next, we will test gradient boosted trees for this classification.
# +
from sklearn.ensemble import GradientBoostingClassifier
from sklearn.metrics import accuracy_score
from sklearn import tree
# Create the gradient boosted trees classifier with default hyperparameters.
model = GradientBoostingClassifier()
# Train the gradient boosted trees classifier.
model.fit(train_values, train_labels)
# -
# ### Validation
#
# We calculate the accuracy score on the training and the testing sets.
# +
#Predict the response for training and testing dataset
# Predict the response for the training and testing datasets.
train_pred = model.predict(train_values)
test_pred = model.predict(test_values)
# Model accuracy on both sets: how often is the classifier correct?
print('Training accuracy = %.3f ' % accuracy_score(train_labels, train_pred))
print('Testing accuracy = %.3f ' % accuracy_score(test_labels, test_pred))
# -
# As we can see, for this dataset all the tree-based models do comparably well.
| Notebooks/17.RandomForest.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python [conda env:ml]
# language: python
# name: conda-env-ml-py
# ---
# # 01 Time Windows
#
# Featuretools has some great functionality around time. It allows features to be built using only the information available up to the moment of prediction (avoiding information leakage), and it allows incomplete records to be used, which can still provide useful information.
#
# This can be used to control:
# - when a data record becomes available
# - when specific columns within a record become available
import pandas as pd
import numpy as np
import featuretools as ft
from create_data import make_attendances_dataframe
# Build a synthetic hospital-attendances table with 15 rows.
df = make_attendances_dataframe(15)
# #### Setting time index
# Use "time_index" to set the time when records become available.
# +
es = ft.EntitySet('Hospital')
# Register the attendances table; arrival_datetime marks when each row
# becomes visible to feature calculations.
es = es.entity_from_dataframe(entity_id='attendances',
                              dataframe=df,
                              index='atten_id',
                              time_index='arrival_datetime')
# -
df.head()
# #### Cutoff times
# Can be used to define a datetime at which a prediction is to be made; no information after this point will be used. To utilise this we create a dataframe to pass to DFS. This df requires the unique_id (e.g. atten_id) and a cutoff time.
#
# Cutoff times also support multiple cutoff times being passed for each unique_id.
# +
# Cutoff-time frame: one prediction datetime per attendance id.
ct = pd.DataFrame()
ct['atten_id'] = [1005,1009, 1004]
ct['time'] = pd.to_datetime(['2018-01-01 06:00',
                             '2018-01-01 06:00',
                             '2018-01-01 06:00'])
# Label column is optional and is not touched by DFS; it can be used to pass labels for prediction.
ct['label'] = [True, True, False]
ct
# +
# Run DFS with cutoff times so no information after each cutoff is used.
fm, features = ft.dfs(entityset=es,
                      target_entity='attendances',
                      cutoff_time=ct,
                      cutoff_time_in_index=True)
fm
# -
# We can see that using DFS we have only included the attendances which are available at 6am 1st Jan 2018 (of the three provided in the ct dataframe). The attendance 1004 has not occurred yet (arrival_datetime is after the "cutoff"), so we return NaNs for this row.
#
# An example use of this might be creation of data for prediction of "time_in_department" or "admission_flag" for those patients currently in a department at 6am.
#
# One problem in this case is reducing the data to the columns which would be available at the time of prediction, e.g. "time_in_department" would not be available for this prediction...we can reduce the columns by using a SECONDARY TIME INDEX.
#
#
# #### Setting secondary time index
# Use "secondary_time_index" to define when new information in a particular record becomes available, by providing a dictionary. The dictionary in the example below indicates that at the time "departure_datetime" the listed column names become available ("time_in_department" in this example).
import featuretools.variable_types as vtypes
# Explicit variable types for each column of the attendances table.
data_variable_types = {'atten_id': vtypes.Id,
                       'pat_id': vtypes.Id,
                       'arrival_datetime': vtypes.Datetime,
                       'time_in_department': vtypes.Numeric,
                       'departure_datetime': vtypes.Datetime,
                       'gender': vtypes.Boolean,
                       'ambulance_arrival': vtypes.Boolean}
#es = ft.EntitySet('Hospital')
# Re-register the table with a secondary time index: time_in_department
# only becomes available at departure_datetime.
es = es.entity_from_dataframe(entity_id='attendances',
                              dataframe=df,
                              index='atten_id',
                              time_index='arrival_datetime',
                              secondary_time_index={'departure_datetime':['time_in_department']}, # dictionary here!
                              variable_types=data_variable_types)
# +
fm, features = ft.dfs(entityset=es,
                      target_entity='attendances',
                      cutoff_time=ct,
                      cutoff_time_in_index=True)
fm
# -
# #### Training windows
# While a cutoff time excludes all data after a given datetime, a "training window" limits the amount of past data that can be used while calculating a particular feature matrix.
# Last-time indexes record when each row was last updated; they are
# required for training windows to work.
es.add_last_time_indexes()
# +
# Restrict feature calculation to the 24 hours preceding each cutoff.
window_fm, window_features = ft.dfs(entityset=es,
                                    target_entity="attendances",
                                    cutoff_time=ct,
                                    cutoff_time_in_index=True,
                                    training_window="24 hours")
window_fm
# -
#
| 01TimeWindows.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# language: python
# name: python3
# ---
# + [markdown] toc=true
# <h1>Содержание<span class="tocSkip"></span></h1>
# <div class="toc"><ul class="toc-item"><li><span><a href="#Описание-проекта" data-toc-modified-id="Описание-проекта-1"><span class="toc-item-num">1 </span>Описание проекта</a></span></li><li><span><a href="#Загрузка-и-подготовка-данных" data-toc-modified-id="Загрузка-и-подготовка-данных-2"><span class="toc-item-num">2 </span>Загрузка и подготовка данных</a></span></li><li><span><a href="#Проведем-первичное-изучение-данных" data-toc-modified-id="Проведем-первичное-изучение-данных-3"><span class="toc-item-num">3 </span>Проведем первичное изучение данных</a></span><ul class="toc-item"><li><span><a href="#Проверим-количество-дубликатов" data-toc-modified-id="Проверим-количество-дубликатов-3.1"><span class="toc-item-num">3.1 </span>Проверим количество дубликатов</a></span></li><li><span><a href="#Посмотрим-информацию-о-значениях" data-toc-modified-id="Посмотрим-информацию-о-значениях-3.2"><span class="toc-item-num">3.2 </span>Посмотрим информацию о значениях</a></span><ul class="toc-item"><li><span><a href="#Визуализируем-полученные-данные" data-toc-modified-id="Визуализируем-полученные-данные-3.2.1"><span class="toc-item-num">3.2.1 </span>Визуализируем полученные данные</a></span></li><li><span><a href="#Выводы" data-toc-modified-id="Выводы-3.2.2"><span class="toc-item-num">3.2.2 </span>Выводы</a></span></li></ul></li><li><span><a href="#Исправим-недочеты,-найденные-в-данных" data-toc-modified-id="Исправим-недочеты,-найденные-в-данных-3.3"><span class="toc-item-num">3.3 </span>Исправим недочеты, найденные в данных</a></span></li><li><span><a href="#Проверим-корреляцию-между-признаками" data-toc-modified-id="Проверим-корреляцию-между-признаками-3.4"><span class="toc-item-num">3.4 </span>Проверим корреляцию между признаками</a></span><ul class="toc-item"><li><span><a href="#Вывод" data-toc-modified-id="Вывод-3.4.1"><span class="toc-item-num">3.4.1 </span>Вывод</a></span></li></ul></li><li><span><a href="#Вывод" data-toc-modified-id="Вывод-3.5"><span 
class="toc-item-num">3.5 </span>Вывод</a></span></li></ul></li><li><span><a href="#Обучение-и-проверка-модели" data-toc-modified-id="Обучение-и-проверка-модели-4"><span class="toc-item-num">4 </span>Обучение и проверка модели</a></span><ul class="toc-item"><li><span><a href="#Вывод:" data-toc-modified-id="Вывод:-4.1"><span class="toc-item-num">4.1 </span>Вывод:</a></span></li></ul></li><li><span><a href="#Подготовка-к-расчету-прибыли" data-toc-modified-id="Подготовка-к-расчету-прибыли-5"><span class="toc-item-num">5 </span>Подготовка к расчету прибыли</a></span><ul class="toc-item"><li><span><a href="#Посмотрим-распределение-значений-по-районам" data-toc-modified-id="Посмотрим-распределение-значений-по-районам-5.1"><span class="toc-item-num">5.1 </span>Посмотрим распределение значений по районам</a></span></li><li><span><a href="#Вывод" data-toc-modified-id="Вывод-5.2"><span class="toc-item-num">5.2 </span>Вывод</a></span></li></ul></li><li><span><a href="#Расчёт-прибыли-и-рисков" data-toc-modified-id="Расчёт-прибыли-и-рисков-6"><span class="toc-item-num">6 </span>Расчёт прибыли и рисков</a></span></li><li><span><a href="#Вывод" data-toc-modified-id="Вывод-7"><span class="toc-item-num">7 </span>Вывод</a></span></li></ul></div>
# -
# # Описание проекта
# Допустим, вы работаете в добывающей компании «ГлавРосГосНефть». Нужно решить, где бурить новую скважину.
#
# Вам предоставлены пробы нефти в трёх регионах: в каждом 10 000 месторождений, где измерили качество нефти и объём её запасов. Постройте модель машинного обучения, которая поможет определить регион, где добыча принесёт наибольшую прибыль. Проанализируйте возможную прибыль и риски техникой *Bootstrap.*
#
# Шаги для выбора локации:
#
# - В избранном регионе ищут месторождения, для каждого определяют значения признаков;
# - Строят модель и оценивают объём запасов;
# - Выбирают месторождения с самым высокими оценками значений. Количество месторождений зависит от бюджета компании и стоимости разработки одной скважины;
# - Прибыль равна суммарной прибыли отобранных месторождений.
# +
import pandas as pd
import numpy as np
import matplotlib.pyplot as plt
import itertools
from sklearn.model_selection import train_test_split
from sklearn.linear_model import LinearRegression
from sklearn.pipeline import make_pipeline
from sklearn.preprocessing import StandardScaler
from sklearn.metrics import mean_squared_error
from numpy.random import RandomState
from tqdm import tqdm
# Fixed seed for reproducible train/test splits and bootstrap sampling.
RANDOM = 42
state = RandomState(RANDOM)
# -
# # Загрузка и подготовка данных
# Загрузим данные
# Geological survey samples for the three candidate regions.
zero_frame = pd.read_csv("/datasets/geo_data_0.csv")
first_frame = pd.read_csv("/datasets/geo_data_1.csv")
second_frame = pd.read_csv("/datasets/geo_data_2.csv")
def display_info(isFrame, **kwargs):
    """Pretty-print named groups of values under ASCII banner headings.

    Each keyword-argument name is rendered as a banner of the form::

        ##############
        # Group_Name #
        ##############

    Its value must be a dict mapping an optional label string to the
    object to show; an empty label prints the object alone.

    isFrame -- when True, objects are rendered with IPython's ``display``
    (suitable for DataFrames); otherwise plain ``print`` is used.
    """
    # Choose the rendering function once for all groups.
    emit = display if isFrame else print
    # Process each named group in the order the caller supplied it.
    for group_name, items in kwargs.items():
        banner = "# " + str(group_name) + " #"
        border = "#" * len(banner)
        print(border)
        print(banner)
        print(border)
        print()
        # Emit each labelled value; an empty label means "value only".
        for label, value in items.items():
            if label:
                emit(label, value)
            else:
                emit(value)
        print()
# Preview the first rows of each region's data.
display_info(True,
             Zero_Frame = {'':zero_frame.head()},
             First_Frame = {'':first_frame.head()},
             Second_Frame = {'':second_frame.head()})
# # Проведем первичное изучение данных
#
# Print schema/dtype summaries for all three regions.
# DataFrame.info() writes directly to stdout and returns None, so wrapping
# it in print() (as before) emitted a stray "None" line; call it directly.
print("####################")
print("# Zero Frame #")
print("####################")
print()
zero_frame.info()
print()
print("####################")
print("# First Frame #")
print("####################")
print()
first_frame.info()
print()
print("####################")
print("# Second Frame #")
print("####################")
print()
second_frame.info()
# ## Проверим количество дубликатов
# Count duplicated rows in each region.
display_info(False,
             Zero_Frame = {'Количество дубликатов:':zero_frame.duplicated().sum()},
             First_Frame = {'Количество дубликатов:':first_frame.duplicated().sum()},
             Second_Frame = {'Количество дубликатов:':second_frame.duplicated().sum()})
# ## Посмотрим информацию о значениях
# Summary statistics for each region.
display_info(True,
             Zero_Frame = {'':zero_frame.describe()},
             First_Frame = {'':first_frame.describe()},
             Second_Frame = {'':second_frame.describe()})
# ### Визуализируем полученные данные
# +
# 3x4 grid of box plots: one row per region, one column per feature.
fig,ax = plt.subplots(3,4,figsize = (20,15))
all_frames = ["zero_frame","first_frame","second_frame"]
num_columns = zero_frame.columns[1:]
# Pair each feature column / region frame with its subplot column / row index.
num_columns_with_columns = zip(zero_frame.columns[1:],range(4))
all_frames_with_rows = zip([zero_frame,first_frame,second_frame],range(3))
# Draw a box plot for every (frame, column) combination.
for column in list(itertools.product(all_frames_with_rows,num_columns_with_columns)):
    ax[column[0][1]][column[1][1]].boxplot(column[0][0][column[1][0]])
# Label each row of subplots with the frame it shows.
for row in range(3):
    ax[row][0].set_ylabel(all_frames[row])
# Label each column of subplots with the feature it shows.
for column in range(4):
    ax[0][column].set_title(num_columns[column])
# -
# ### Выводы
# Заметим, что в некоторых столбцах есть данные, выходящие за границы. Эти данные будут мешать модели обучаться, отвлекая её. Поэтому их необходимо удалить.
# Выпишем необходимые столбцы для обработки в формате - (фрейм, столбец):
# 1. (zero_frame,"f2")
# 1. (first_frame,"f1")
# 1. (second_frame,"f0")
# 1. (second_frame,"f1")
# 1. (second_frame,"f2")
# ## Исправим недочеты, найденные в данных
def remove_ouliers(frame, column):
    """Return the row labels of `frame` whose `column` value lies on or
    outside the Tukey fences (Q1 - 1.5*IQR, Q3 + 1.5*IQR).

    Boundary values are treated as outliers (>= upper or <= lower fence),
    matching the original loop-based implementation.  Also prints how many
    rows were selected for removal.

    frame  -- pandas DataFrame to inspect (not modified)
    column -- name of the numeric column to test
    returns a plain list of index labels, in frame order
    """
    q25 = frame[column].quantile(0.25)
    q75 = frame[column].quantile(0.75)
    iqr = q75 - q25
    lower_fence = q25 - 1.5 * iqr
    upper_fence = q75 + 1.5 * iqr
    # Vectorised fence test replaces the former per-row Python loop.
    mask = (frame[column] >= upper_fence) | (frame[column] <= lower_fence)
    del_index = frame.index[mask].tolist()
    print('Количество строк, выбранных для удаления: ', len(del_index))
    return del_index
# +
# (frame, column) pairs flagged as containing outliers during inspection.
noise_data = [(zero_frame,"f2"),
              (first_frame,"f1"),
              (second_frame,"f0"),
              (second_frame,"f1"),
              (second_frame,"f2")]
# Drop the outlier rows from each frame in place.
for frame,column in noise_data:
    indexes = remove_ouliers(frame,column)
    frame.drop(indexes,axis = 0,inplace = True)
# -
# Выборки пострадали не сильно, в первых двух потери составили менее 1%, в последней же потеря данных составила 2%
# Re-number rows after the drops so positional indexing stays consistent.
zero_frame = zero_frame.reset_index(drop = True)
first_frame = first_frame.reset_index(drop = True)
second_frame = second_frame.reset_index(drop = True)
# ## Проверим корреляцию между признаками
# Pairwise feature correlations for each region.
display_info(True,
             Zero_Frame = {'':zero_frame.corr()},
             First_Frame = {'':first_frame.corr()},
             Second_Frame = {'':second_frame.corr()})
# ### Вывод
# Заметим, что в нулевом фрейме признаки f0 и f1 отрицательно коррелируют относительно друг друга и f2 слабо положительно коррелирует с целевым признаком. Так же в первом фрейме очень сильно коррелирует целевой признак и f2. Во втором фрейме так же есть коррелирующие признаки, такие как f2 и product.
#
# Если в случае с первым фреймом все достаточно понятно, там очень высокая корреляция и признак f2 следует удалить, то вот в случае с другими двумя выборками стоит опираться на результат, который мы получим на моделях, Следовательно необходимо подготовить 3 выборок:
# 1. Нулевая со всеми столбцами
# 1. Первая без f2
# 1. Вторая со всеми столбцами
first_frame_out_f2 = first_frame.drop(["f2"],axis = 1)
# ## Вывод
# Результаты первичного анализа:
# 1. Пропуски - отсутствуют
# 2. Типы столбцов - корректны
# 3. Названия столбцов - корректны
# 1. Дубликаты - отсутствуют
# 1. Объем запасов - положительный
# 1. Выбросы - удалены
# 1. Коррелирующие признаки - учтены
# # Обучение и проверка модели
# +
array_name = ["zero_frame",
              "second_frame",
              "first_frame_out_f2"]
array_frame = [zero_frame,
               second_frame,
               first_frame_out_f2]
# Collected artifacts per region: fitted pipeline, validation RMSE,
# and the held-out validation split (features, target).
data_dict = {"pipelines":{},"scores":{},"valid":{}}
for frame, name in zip(array_frame,array_name):
    features = frame.drop(["id","product"], axis = 1)
    target = frame["product"]
    # Scale features, then fit an ordinary linear regression.
    new_pipeline = make_pipeline(StandardScaler(),LinearRegression())
    # 75/25 train/validation split with the fixed seed.
    (features_train,
     features_valid,
     target_train,
     target_valid) = train_test_split(features,
                                      target,
                                      test_size = 0.25,
                                      random_state = RANDOM)
    data_dict['valid'][name] = (features_valid,target_valid)
    new_pipeline.fit(features_train,target_train)
    data_dict['pipelines'][name] = new_pipeline
    # RMSE on the validation split (sqrt of MSE).
    data_dict['scores'][name] = mean_squared_error(target_valid,
                                                   new_pipeline.predict(features_valid))**0.5
# -
data_dict['scores']
# Выборка с минимальной ошибкой:
# 1. Нулевой регион: 'zero_frame'
# 1. Первый регион: "first_frame_out_f2"
# 1. Второй регион: 'second_frame'
# +
# Retrieve the fitted pipeline for each region.
best_model_zero = data_dict['pipelines']['zero_frame']
best_model_first = data_dict['pipelines']['first_frame_out_f2']
best_model_second = data_dict['pipelines']['second_frame']
# Predict reserves on each region's validation features.
predicted_values_zero = best_model_zero.predict(data_dict['valid']['zero_frame'][0])
predicted_values_first = best_model_first.predict(data_dict['valid']['first_frame_out_f2'][0])
predicted_values_second = best_model_second.predict(data_dict['valid']['second_frame'][0])
# Validation RMSE per region (sqrt of MSE).
RMSE_model_zero = (mean_squared_error(data_dict['valid']['zero_frame'][1],predicted_values_zero))**0.5
RMSE_model_first = (mean_squared_error(data_dict['valid']['first_frame_out_f2'][1],predicted_values_first))**0.5
RMSE_model_second = (mean_squared_error(data_dict['valid']['second_frame'][1],predicted_values_second))**0.5
# -
# Mean predicted reserves and model RMSE for each region.
display_info(False,
             Zero_Frame = {'Средний запас:':predicted_values_zero.mean(),
                           "RMSE модели:":RMSE_model_zero},
             First_Frame = {'Средний запас:':predicted_values_first.mean(),
                           "RMSE модели:":RMSE_model_first},
             Second_Frame = {'Средний запас:':predicted_values_second.mean(),
                           "RMSE модели:":RMSE_model_second})
# ## Вывод:
# - Нулевой регион:
# - Ошибка в нулевом регионе самая маленькая, но по среднему объему запасов регион на втором месте
# - Первый регион:
# - Проигрывает другим регионам и по показателю средней ошибки и по среднему объему запасов
# - Второй регион:
# - Самый большой средний объем запасов, но по ошибке регион на втором месте
#
# # Подготовка к расчету прибыли
# Economic constants: regional budget (RUB), candidate wells surveyed per
# bootstrap sample, wells actually developed, revenue per unit of product,
# and the maximum acceptable probability of loss.
BUDGET_PER_REGION = 10*(10**9)
PRE_MAX_POINTS = 500
FINAL_MAX_POINTS = 200
PRICE_PER_BARREL = 450000
DAMAGE_THRESHOLD = 0.025
# Break-even volume each developed well must supply.
# NOTE(review): with these constants this expression evaluates to ~0.11,
# while the conclusion below cites 111.(1); the extra *(10**3) factor looks
# suspect — verify the units of PRICE_PER_BARREL and "product".
NON_DAMAGE_POINT = (BUDGET_PER_REGION/(PRICE_PER_BARREL*(10**3)))/(FINAL_MAX_POINTS)
print("Достаточный объем добычи для безубыточной разработки",round(NON_DAMAGE_POINT,2))
# ## Посмотрим распределение значений по районам
#
# Distribution of values per region (region 1 without the dropped f2).
display_info(False,
             Zero_Frame = {'':zero_frame.describe()},
             First_Frame = {'':first_frame_out_f2.describe()},
             Second_Frame = {'':second_frame.describe()})
# В соответствии с найденной точкой безубыточности все регионы нам подходят
#
# Share of wells exceeding the quoted reserve level in each region.
# Fixed copy-pasted messages: the second line reports the FIRST region's
# 84th percentile (top 16%) and the third line the SECOND region's
# 80th percentile (top 20%), but both previously said "25% ... нулевом".
print("25% скважин в нулевом регионе содержат больше сырья, чем ",
      round(zero_frame["product"].quantile(0.75),2))
print("16% скважин в первом регионе содержат больше сырья, чем ",
      round(first_frame_out_f2["product"].quantile(0.84),2))
print("20% скважин во втором регионе содержат больше сырья, чем ",
      round(second_frame["product"].quantile(0.80),2))
# ## Вывод
# Минимальное количество баррелей нефти для безубыточной разработки составило 111.(1)
#
# В тоже время максимальное средний объем в регионах равен 94.
# Если обратить внимание на распределение объема запасов по району, можно заметить, что в нулевом и втором районах 20% скважин проходят минимальный порог(учитывая объемы выборок, 20% составляют чуть меньше 20 000 скважин)
#
# В то время как в первом районе подходят только 16% скважин
#
# # Расчёт прибыли и рисков
def income(true_target, pred_target):
    """Profit (RUB) from developing the FINAL_MAX_POINTS wells with the
    highest *predicted* reserves, valued at their *actual* volumes.

    true_target: actual product volumes (pd.Series-like)
    pred_target: predicted product volumes (array-like, same order)
    Returns the profit rounded to 2 decimals (revenue minus region budget).
    """
    # Indices of the best-predicted wells
    best = pd.Series(pred_target).sort_values(ascending=False)[:FINAL_MAX_POINTS]
    # Look up their actual volumes by positional index
    actual = true_target.reset_index(drop=True)[best.index]
    revenue = actual.sum() * PRICE_PER_BARREL
    return round(revenue - BUDGET_PER_REGION, 2)
print("Прибыль с лучших 200 скважин в нулевом регионе:",income(data_dict['valid']['zero_frame'][1],
predicted_values_zero))
print("Прибыль с лучших 200 скважин во втором регионе:",income(data_dict['valid']['second_frame'][1],
predicted_values_second))
print("Прибыль с лучших 200 скважин в первом регионе:",income(data_dict['valid']['first_frame_out_f2'][1],
predicted_values_first))
def confidence_interval(true_target, pred_target):
    """Bootstrap the best-wells profit and return its 95% confidence interval.

    Draws 1000 bootstrap samples of PRE_MAX_POINTS predictions (with
    replacement, driven by the module-level RNG `state`), computes the profit
    of each via income(), prints the mean profit and the share of
    loss-making samples (risk, %), and returns the (2.5%, 97.5%) quantiles
    rounded to 2 decimals.
    """
    profits = []
    for _ in tqdm(range(1000)):
        bootSample = pd.Series(pred_target).sample(n=PRE_MAX_POINTS, replace=True, random_state=state)
        profits.append(income(true_target, bootSample))
    profits = pd.Series(profits)
    print(profits.mean())
    # Share of bootstrap samples with negative profit (risk of loss)
    print(profits.apply(lambda x: x < 0).sum()/len(profits)*100, "%")
    return round(profits.quantile(0.025), 2), round(profits.quantile(0.975), 2)
# +
# BUG FIX: corrected typos in the user-facing strings
# ("итервал" -> "интервал", "Нулевового" -> "Нулевого") and made the
# capitalisation consistent across the three messages.
print("95% доверительный интервал для Нулевого региона лежит между:",
      confidence_interval(data_dict['valid']['zero_frame'][1],pd.Series(predicted_values_zero)))
print()
print("95% доверительный интервал для Второго региона лежит между:",
      confidence_interval(data_dict['valid']['second_frame'][1],predicted_values_second))
print()
print("95% доверительный интервал для Первого региона лежит между:",
      confidence_interval(data_dict['valid']['first_frame_out_f2'][1],predicted_values_first))
# -
# # Вывод
#
# - Нулевой регион:
# - Доверительный интервал: (-110 929 096.14, 891 087 236.3)
# - Точка безубыточности: 111.(1)
# - Средняя прибыль в нулевом регионе: 381 840 608.64
# - Риски: 6.7 %
# - Второй регион:
# - Доверительный интервал: (-142 559 672.52, 900 660 974.77)
# - Точка безубыточности: 111.(1)
# - Средняя прибыль с одной скважины во втором регионе: 382 973 380.22
# - Риски: 7 %
#
# По условию рисков не подходит ни один из регионов. Если же выбирать из наименее рискованных, наиболее подходящим будет нулевой регион. Но также можно заметить, что по сравнению со вторым регионом у него средняя прибыль меньше, в то же время у второго региона на 0,3% выше риски.
#
| oil.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: env-graphstats
# language: python
# name: env-graphstats
# ---
import networkx as nx
import numpy as np
import pandas as pd
import time
from plotnine import *
# # Basic Profiling
#
# Here, we will develop a small code-base for testing the speed of converting between `networkx` and `numpy` arrays. These demonstrations will serve as the groundwork for whether the internal data structure of the package should be numpy matrices or just networkx objects, with appropriate conversion scripts held internally to handle operations such as converting between different edge types or optionally for select algorithms to internally convert to adjacency matrices. Using `networkx` objects gives us the benefit of not having to manually keep track of useful metadata such as graphs with richly attributed attributes, such as richly attributed vertices (ie, vertex labels, vertex names, vertex hierarchies) and/or richly attributed edges (ie, multiple weight functions for a single graph).
#
# ## Goal
#
# The goal of these experiments is to identify whether, in a basic framework, a package based solely on `networkx` objects would incur a significant (>10 seconds) speed hit for using `networkx` graphs instead of defaulting to convert everything directly to `numpy` adjacency matrices. The results of these experiments will give us some basic direction moving forward with the formation of the package.
#
# ## Model
#
# We will use the following simulations:
#
# \begin{align*}
# \left(\mathcal{V}_n, \mathcal{E}_{n, p}, \mathcal{W}_{n, p}\right) \sim ER(n, p) \times \mathcal{N}(0, 1)
# \end{align*}
#
# where $ER(n, p)$ is the Erdos-Renyi model with $n$ vertices and $p$ probability of an edge existing. We know by the ER model that $\mathbb{E}[|\mathcal{E}|] = n^2p$ where $|\mathcal{E}|$ is the number of edges in the graph. We also add a weight parameter where the weights are normally distributed with $\mu = 0$ and $\sigma = 1$, since many of the graphs in our case will be weighted in some form, and therefore this model will provide a more indicative performance for our real-world implementation. We will test for a dense setting where $p = 0.8$ and a sparse setting where $p = 0.2$.
#
# ## Algorithm
#
# For simulating graphs from the $ER(n, p) \times \mathcal{N}(0, 1)$ model, we will use the following approach:
# + for p in (0.2, 0.8):
# + for n in logspace(3, 10, len=10):
# + A = empty $n \times n$ numpy array
# + sample at random $n^2p$ indices from $A$, and assign each of these points weight $\mathcal{N}(0, 1)$
# + convert $A$ to a `networkx` graph `G` in time $t_\rightarrow$
# + measure the time to convert `G` back to a `numpy` matrix in time $t_\leftarrow$
# + record $t_{\rightarrow}$ and $t_{\leftarrow}$
# + repeat $z$ times
# +
# Vertex counts: 10 log-spaced sizes from 2^6 to 2^11
ns = np.ceil(np.logspace(6, 11, num=10, base=2.0)).astype(int)
ps = [0.2, 0.8]  # sparse and dense edge probabilities
z = 20  # repetitions per (n, p) setting
# Accumulators for the long-format results table built below
tar = []; nar = []; par = []; zar = []; directions = []
for p in ps:
    print("p: {:.2f}".format(p))
    for n in ns:
        print("n: {:d}".format(n))
        for i in range(0, z):
            if (i%10 == 0):
                print("i: {:d}".format(i))
            # Sample an ER(n, p) x N(0, 1) weighted adjacency matrix:
            # choose n*n*p cells without replacement, give each a normal weight
            A = np.zeros((n, n))
            nedge = int(round(n*n*p))
            np.put(A, np.random.choice(np.arange(0, n*n), size=nedge, replace=False), np.random.normal(size=nedge))
            # Time the A -> G conversion.
            # NOTE(review): nx.from_numpy_matrix/to_numpy_matrix were removed in
            # networkx 3.0; from_numpy_array/to_numpy_array are the replacements.
            t = time.time()
            Gfor = nx.from_numpy_matrix(A)
            ftime = time.time() - t
            # Time the G -> A conversion
            t = time.time()
            Aback = nx.to_numpy_matrix(Gfor)
            rtime = time.time() - t
            # Record one row per conversion direction
            for (t, d) in zip([ftime, rtime], ['A -> G', 'G -> A']):
                tar.append(t); directions.append(d); nar.append(n); par.append(p); zar.append(i)
# -
results = pd.DataFrame({'time': tar, 'direction': directions, 'n': nar, 'p': par, 'z': zar})
results['direction'] = results['direction'].astype('category')
results['p'] = results['p'].astype('category')
(ggplot(results.loc[results['direction'] == 'G -> A'], aes(x='n', y='time', color='p', group='p')) +
geom_jitter() +
stat_summary(fun_data='mean_cl_boot', geom='line') +
theme_bw() +
xlab("Number of Vertices") +
ylab("Time (s)") +
ggtitle("Analyzing conversion performance G -> A"))
(ggplot(results.loc[results['direction'] == 'A -> G'], aes(x='n', y='time', color='p', group='p')) +
geom_jitter() +
stat_summary(fun_data='mean_cl_boot', geom='line') +
theme_bw() +
xlab("Number of Vertices") +
ylab("Time (s)") +
ggtitle("Analyzing conversion performance A -> G"))
| notebooks/ebridge2/912/profiling/profiling.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# language: python
# name: python3
# ---
# <span style="font-family:Papyrus; font-size:3em;">Lab: Cross Validation</span>
#
# This lab develops codes and the workflow for doing cross validation of tellurium models. The running example is a simple Antimony model.
IS_COLAB = False
#
if IS_COLAB:
# !pip install matplotlib
# !pip install numpy
# !pip install tellurium
# !pip install SBstoat
# !pip install lmfit
# !pip install seaborn
#
# Constants for standalone notebook
if not IS_COLAB:
CODE_DIR = "/home/ubuntu/advancing-biomedical-models/common"
DATA_DIR = "/home/ubuntu/advancing-biomedical-models/labs"
else:
from google.colab import drive
drive.mount('/content/drive')
CODE_DIR = "/content/drive/MyDrive/Winter 2021/common"
DATA_DIR = "/content/drive/MyDrive/Winter 2021/labs"
import sys
sys.path.insert(0, CODE_DIR)
# Make the packages usable in this notebook
import copy
import numpy as np
import pandas as pd
import tellurium as te
import matplotlib.pyplot as plt
from SBstoat.observationSynthesizer import ObservationSynthesizerRandomErrors
from SBstoat.namedTimeseries import NamedTimeseries, TIME
import lmfit
import util_crossvalidation as ucv
# # Preliminaries
# ## Model
MODEL = """
A -> B; k1*A
B -> C; k2*B
C -> D; k3*C
A = 10
B = 0
C = 0
D = 0
k1 = 0.1
k2 = 0.2
k3 = 0.3
"""
# ## Constants
NOISE_STD = 1.0
END_TIME = 20
NUM_POINT = 100
LOWER = 0.01
UPPER = 1
# Model data
PARAMETER_DCT = {
"k1": 0.1,
"k2": 0.2,
"k3": 0.3,
}
PARAMETER_NAMES = list(PARAMETER_DCT.keys())
ROAD_RUNNER = te.loada(MODEL)
dataArr = ROAD_RUNNER.simulate(0, END_TIME, NUM_POINT)
FITTED_TS = NamedTimeseries(namedArray=dataArr)
# ## Helper Functions
# +
# Generator that constructs indices for training and test data
# for alternating between folds.
def foldGenerator(numPoint, numFold):
    """
    Generates pairs of training and test indices for cross validation.

    Parameters:
    ----------
    numPoint: int
        number of time points
    numFold: int
        number of pairs of testIndices and trainIndices

    Returns:
    --------
    generator of pairs (train indices, test indices)
    """
    allIndices = range(numPoint)
    for foldIdx in range(numFold):
        # Every numFold-th point (offset by foldIdx) is held out for testing
        testIndices = [idx for idx in allIndices if idx % numFold == foldIdx]
        trainIndices = list(set(allIndices).difference(testIndices))
        yield trainIndices, testIndices
# Tests
numFold = 10
numPoint = 100
generator = foldGenerator(numPoint, numFold)
result = [(trainArr, testArr) for trainArr, testArr in generator]
assert(len(result) == numFold)
assert(isinstance(n, int) for n in result[0][0])
# +
def plotTS(timeseries, ax=None, linetype="scatter", title="", isPlot=True):
    """
    Plots every variable in a timeseries against time on one axis.

    Parameters
    ----------
    timeseries: NamedTimeseries
    ax: Matplotlib.axes
        axis to draw on; a new figure/axis is created if None
    linetype: str
        "scatter" for points, anything else for lines
    title: str
    isPlot: bool
        Show the plot; if False and a figure was created here, it is cleared

    Returns
    -------
    Matplotlib.axes
    """
    fig = None
    if ax is None:
        fig, ax = plt.subplots(1)
    times = timeseries[TIME]
    labels = []
    for name in timeseries.colnames:
        labels.append(name)
        if linetype == "scatter":
            _ = ax.scatter(times, timeseries[name])
        else:
            _ = ax.plot(times, timeseries[name])
    _ = ax.set_xlabel("time")
    _ = ax.set_title(title)
    _ = plt.legend(labels)
    if not isPlot and fig is not None:
        fig.clear()
    return ax
# Smoke Test
_ = plotTS(FITTED_TS, linetype="line", title="Fitted Data", isPlot=False)
# -
# ## Construct Synthetic Data
# +
def makeSyntheticData(fittedTS=FITTED_TS, std=NOISE_STD):
    """Return a noisy copy of fittedTS with N(0, std) observational error added."""
    return ObservationSynthesizerRandomErrors(fittedTS=fittedTS, std=std).calculate()
# Tests
observedTS = makeSyntheticData(fittedTS=FITTED_TS)
assert(len(observedTS) == len(FITTED_TS))
# -
# Plot the data
OBSERVED_TS = makeSyntheticData()
ax = plotTS(FITTED_TS, linetype="line")
_ = plotTS(OBSERVED_TS, ax=ax)
# # Writing a Fitter as a Class
# It is common that you need to adapt your analysis codes to new requirements.
# Cross validation imposes on fitting the ability to handle observational data that may not have all of the timepoints present in the
# fitted data (simulation results).
# Writing codes that are easy to adapt is an essential part of computational science.
# It turns out that object oriented programming (**OOP**) greatly facilitates writing readable, testable, and extensible software.
# This section provides a brief introduction to OOP and shows how to implement a fitter in OOP.
# ## Object Oriented Programming (OOP) in Brief
# OOP is probably the most widely used approach to software design and implementation in the software industry.
# It extends 3GL (third generation languages, those that have conditionals, for-loops, and functions) to consider
# a new concept called a ``class``.
#
# A ``class`` is an abstraction of a thing or entity, like a car.
# An entity has **attributes** that describe it.
# For example a car has a color, a number of doors, and an engine size.
# Each of these as a value associated with it, such as:
# * color: red
# * number of doors: 4
# * engine size: 1.2
# An entity also has **methods** which are operations that it can perform. For example,
# a car can start, stop, and accelerate.
#
# OOP structures code in terms of objects that are instances of classes.
# The class is a template.
# That is, a class specifies all of the attributes and methods, but not all of the attributes have values.
# For example, a car class has attributes for color, number of doors, and engine capacity, but there are no values.
# However, an instance of a car, might be red, with four doors, and a 1.2 L engine.
#
# A very powerful capability of OOP is **inheritance**.
# That is, we define a new class that has all of the attributes and methods of its parent and adds other attributes and methods.
# For example, there may be a base car class that specifies color and engine size, and then separate child classes
# for 2 doors and 4 doors.
# Inheritance allows us to isolate common codes and so promotes reuse.
# ## OOP In Python
# The python ``class`` keyword defines a new class.
# For example
#
# class Car():
# pass
#
# The definition of the class is the block of code that lies below the ``class`` statement
# and is indented.
# In this case, it is just ``pass``.
# An object the class is created by using the class as a function or **constructor**.
#
# car = Car()
#
# A class may have variables for each instance created.
# These are specified as arguments of the constructor.
# For example:
#
# car = Car(color, doors, engineSize)
#
# If there are such arguments, then there must be a special function named ``__init__`` that takes these arguments.
# That is, the class definition should look like:
#
# class Car():
#
# def __init__(self, color, doors, engineSize):
# self.color = color
# self.doors = doors
# self.engineSize = engineSize
#
# Here, the ``Car`` class has the constructor function ``__init__`` with four arguments.
# Three of the arguments appear when an object is constructed.
# The first argument, ``self``, is used to identify the object that is created, the new instance of the class.
#
# A class may have many methods (functions) that act on the object.
# For example,
#
# class Car():
#
# def __init__(self, color, doors, engineSize):
# self.color = color
# self.doors = doors
# self.engineSize = engineSize
#
# def stop(self):
# print("The %s car has stopped!" % self.color)
#
# def turn(self, direction):
# print("The %s car has turned %s" % (self.color, direction)
#
# The instance variables contained within the object and so provide a convenient way to encapsulate functions
# that share data.
# Below is another example of a class.
# A simple class
class Simple():
    """Demonstration class: holds two operands and computes their sum."""

    def __init__(self, a, b):
        """
        a and b are used when constructing an instance of the class.
        """
        self.a = a
        self.b = b
        self.result = None  # populated by add()

    def add(self):
        """
        Calculates the sum of a and b, caching it in self.result.
        """
        total = self.a + self.b
        self.result = total
        return total
# Using a class
simple = Simple(3, 4) # This calls the __init__ method, with a=3 and b=4
print(simple.add())
# You can also retrieve the result from the properties of the object
print(simple.result)
# ## Writing and Testing a Fitter Class
# +
class SimpleFitter(object):
    """Fits Antimony model parameters to observed timeseries data using lmfit."""

    def __init__(self, model, observedTS, parameterNames,
          lower=LOWER, upper=UPPER, method="leastsq"):
        """
        model: str (Antimony Model)
        observedTS: NamedTimeseries
            observations to fit against
        parameterNames: list-str
            names of the model parameters to estimate
        lower: float
            lower range for parameter value
        upper: float
            upper range for parameter value
        method: str
            optimizer method
        """
        self.rr = te.loada(model)
        self.observedTS = observedTS.copy()
        self.columns = list(self.observedTS.colnames)
        self.allColumns = self.observedTS.allColnames
        self.parameterNames = parameterNames
        self.colnames = self.observedTS.colnames
        self.lower = lower
        self.upper = upper
        # The optimizer starts each parameter at the midpoint of its range
        self.value = (lower + upper)/2
        self.method = method
        # Internal variables, updated on every residuals evaluation
        # (FIX: was "_residuals", which was never the name actually used)
        self._fittedArr = None
        self._residualsArr = None
        # Results
        self.params = None
        self.fittedTS = self.observedTS.copy()
        self.residualsTS = None

    def mkParams(self):
        """
        Construct lmfit parameters for the parameters to be estimated,
        bounded by [self.lower, self.upper] and starting at self.value.
        """
        parameters = lmfit.Parameters()
        for parameterName in self.parameterNames:
            parameters.add(parameterName,
                  min=self.lower,
                  max=self.upper,
                  value=self.value)
        return parameters

    def calcResiduals(self, params):
        """
        Calculate residuals for the fit using the parameters.
        Side effects: updates self._fittedArr and self._residualsArr.

        Returns
        -------
        np.ndarray: flattened (observed - fitted) values
        """
        self.rr.reset()  # Put the simulation back to time zero
        # Update the simulation parameters
        for name, value in params.valuesdict().items():
            self.rr[name] = value
        fittedArr = self.rr.simulate(0, self.observedTS.end,
              len(self.observedTS))
        self._fittedArr = fittedArr.copy()
        fittedArr = fittedArr[:, 1:]  # Delete time column
        observedArr = self.observedTS[self.colnames]
        self._residualsArr = observedArr - fittedArr
        residualsArr = self._residualsArr.flatten()
        return residualsArr

    def fit(self, params=None):
        """
        Estimate the parameters, recording the results in self.params,
        self.fittedTS, and self.residualsTS.
        """
        if params is None:
            newParams = self.mkParams()
        else:
            newParams = params.copy()
        # Find the best parameters
        minimizer = lmfit.Minimizer(self.calcResiduals, newParams)
        minimizerResult = minimizer.minimize(method=self.method, max_nfev=100)
        self.params = minimizerResult.params.copy()
        # BUG FIX: re-simulate with the *best* parameters before recording
        # fittedTS; previously fittedTS was built from the optimizer's last
        # evaluation, which is not necessarily the optimum.
        self.calcResiduals(self.params)
        self.fittedTS = NamedTimeseries(array=self._fittedArr, colnames=self.allColumns)
        self.residualsTS = self.observedTS.copy()
        self.residualsTS[self.columns] = self._residualsArr
# Tests
fitter = SimpleFitter(MODEL, OBSERVED_TS, PARAMETER_NAMES)
params = fitter.mkParams()
k1ParameterValue = params.valuesdict()[PARAMETER_NAMES[0]]
assert(np.isclose(k1ParameterValue, (LOWER+UPPER)/2))
#
dataArr = fitter.calcResiduals(params)
assert(np.shape(dataArr) == (NUM_POINT*4,))
#
fitter.fit()
fittedResultDct = fitter.params.valuesdict()
for parameterName, parameterValue in fittedResultDct.items():
#print(parameterName, parameterValue)
assert(np.abs(parameterValue - PARAMETER_DCT[parameterName]) < 0.1)
# -
# # Timestamp Alignment
# ## Problem description
# Suppose that we have observational data that has only a subset of the timestamps of the simulation data.
# How do we change ``SimpleFitter`` to accommodate this?
# +
observedSubTS = OBSERVED_TS[list(range(10))]
observedSubTS
# -
FITTED_TS
# We want to select the indices in ``FITTED_TS`` that have times corresponding to those in ``observedSubTS``.
observedSubTimes = observedSubTS[TIME]
observedSubTimes
fittedTSTimes = FITTED_TS[TIME]
fittedTSTimes
# Here's a hint to get started. Suppose we want to find the time value in fittedTSTimes that's closest to the 4th
# value in observedSubTime. This is a kind of distance measure.
(observedSubTimes[4] - fittedTSTimes)**2
# ## An Approach
# That is, we want to find the index for the array element that is 0.
# We can do this by sorting the indices of fittedTSTimes by their distance from designated time in observedSubTimes.
values = (observedSubTimes[4] - fittedTSTimes)**2
def finder(index):
return values[index]
sorted(range(len(values)), key=finder)
# +
def selectCompatibleIndices(bigTimes, smallTimes):
    """
    Finds the indices such that smallTimes[n] is close to bigTimes[indices[n]]

    Parameters
    ----------
    bigTimes: np.ndarray
    smallTimes: np.ndarray

    Returns
    np.ndarray
    """
    # IMPROVEMENT: use argmin instead of sorting all candidate distances for
    # each point -- O(len(bigTimes)) per lookup instead of O(n log n), and no
    # nested key function. argmin returns the first index on ties, matching
    # the stable-sort behavior of the original.
    bigArr = np.asarray(bigTimes)
    indices = [int(np.argmin((bigArr - smallTime)**2))
               for smallTime in np.asarray(smallTimes)]
    return np.array(indices)
# Tests
indices = selectCompatibleIndices(FITTED_TS[TIME], observedSubTS[TIME])
assert(len(indices) == len(observedSubTS))
for i, index in enumerate(indices):
assert(index == i)
# -
# ## Revise the residuals calculation
# The only method impacted is ``calcResiduals``.
# Let's figure this out incrementally.
# Start by doing the residuals calculation using ``observedSubTS`` and ``fittedArr``.
colnames = FITTED_TS.colnames
fittedArr = FITTED_TS[FITTED_TS.allColnames]
observedTS = observedSubTS.copy()
# Fragment of the residual calculation
#fittedArr = fittedArr[:, 1:]
indices = selectCompatibleIndices(FITTED_TS[TIME], observedTS[TIME])
observedArr = observedTS[colnames]
fittedSubArr = fittedArr[indices, 1:]
residualsArr = observedArr - fittedSubArr
residualsArr = residualsArr.flatten()
# # Fitting With Timestamp Alignment
# There's no assurance that the observational data have the same timestamps as the simulation data.
# Solving this problem in general requires some sophistication.
# For now, we will assume that the observational data contains a subset of the timestamps.
# +
# Modified calcResduals
class AligningFitter(SimpleFitter):
    """Does fitting with aligning fitted values to timestamps of observed values."""

    def __init__(self, model, observedTS, parameterNames, endTime=None, numPoint=None, **kwargs):
        """
        model: str (Antimony Model)
        observedTS: NamedTimeseries
        parameterNames: list-str
        endTime: float
            ending time for the simulation (defaults to the observed end time)
        numPoint: int
            number of points in the simulation (defaults to len(observedTS))
        """
        super().__init__(model, observedTS, parameterNames, **kwargs)
        self.endTime = endTime
        if self.endTime is None:
            # BUG FIX: was "self.endTIme" (typo), which left self.endTime as
            # None whenever the caller did not pass endTime.
            self.endTime = observedTS.end
        self.numPoint = numPoint
        if self.numPoint is None:
            self.numPoint = len(observedTS)

    @staticmethod
    def selectCompatibleIndices(bigTimes, smallTimes):
        """
        Finds the indices such that smallTimes[n] is close to bigTimes[indices[n]]

        Parameters
        ----------
        bigTimes: np.ndarray
        smallTimes: np.ndarray

        Returns
        list-int
        """
        # BUG FIX: the original converted indices with np.asscalar, which was
        # removed in numpy >= 1.23; argmin + int() is equivalent (first index
        # on ties, like the original stable sort) and avoids the per-point sort.
        indices = []
        for smallTime in smallTimes:
            indices.append(int(np.argmin((bigTimes - smallTime)**2)))
        return indices

    def calcResiduals(self, params):
        """
        Calculate residuals, aligning simulation output to the timestamps of
        the observed data before differencing.
        Side effects: updates self._fittedArr and self._residualsArr.
        """
        self.rr.reset()  # Put the simulation back to time zero
        # Update the simulation parameters
        for name, value in params.valuesdict().items():
            self.rr[name] = value
        fittedArr = self.rr.simulate(0, self.endTime, self.numPoint)
        self._fittedArr = fittedArr.copy()
        # Keep only the simulated rows whose times match the observations
        indices = AligningFitter.selectCompatibleIndices(fittedArr[:, 0], self.observedTS[TIME])
        fittedArr = fittedArr[indices, 1:]  # Delete time column
        observedArr = self.observedTS[self.colnames]
        self._residualsArr = observedArr - fittedArr
        residualsArr = self._residualsArr.flatten()
        return residualsArr
# Tests
# selectCompatibleIndices
size = 10
randomIndices = np.random.randint(0, len(OBSERVED_TS), size)
indices = AligningFitter.selectCompatibleIndices(OBSERVED_TS[TIME], OBSERVED_TS[TIME][randomIndices])
diff = set(indices).symmetric_difference(randomIndices)
assert(len(diff) == 0)
# #
size = 100
observedTS = OBSERVED_TS[list(range(size))]
fitter = AligningFitter(MODEL, observedTS, PARAMETER_NAMES, endTime=OBSERVED_TS.end)
fitter.fit()
assert(len(fitter.observedTS) == size)
assert(len(fitter.params.valuesdict()) == 3)
# -
fitter.params
# +
def analyzeQuality(numSample):
    """
    Analyzes the quality of a fit for the number of points in an observed timeseries.

    Parameters
    ----------
    numSample: int
        number of observation points to sample (at most len(OBSERVED_TS))

    Returns
    -------
    float: R^2 between true and estimated parameter values (floored at 0)
    pd.DataFrame: values of true and estimated parameters
    """
    TRUE = "true"
    ESTIMATED = "estimated"
    totalPoints = len(OBSERVED_TS)
    if totalPoints < numSample:
        raise ValueError("numSample cannot exceed %d" % totalPoints)
    # Select a random subset of the observational data
    sampleIndices = np.random.permutation(list(range(totalPoints)))[:numSample]
    observedSubTS = OBSERVED_TS[sampleIndices.tolist()]
    # Fit the sampled data, simulating on the full time grid
    fitter = AligningFitter(MODEL, observedSubTS, PARAMETER_NAMES,
          numPoint=totalPoints, endTime=OBSERVED_TS.end)
    fitter.fit()
    # Compare true vs. estimated parameter values
    df = pd.DataFrame({TRUE: PARAMETER_DCT.values(),
          ESTIMATED: fitter.params.valuesdict().values()})
    df.index = PARAMETER_DCT.keys()
    deviations = np.array(df[TRUE]) - np.array(df[ESTIMATED])
    rsq = 1 - np.var(deviations)/np.var(np.array(df[TRUE]))
    return max(0, rsq), df
# Tests
rsq, df = analyzeQuality(100)
assert(rsq > 0.95)
assert(len(df) == len(PARAMETER_DCT))
# -
rsq, df = analyzeQuality(10)
print("SSQ Difference: %2.4f" % rsq)
print("\nValues\n%s" % str(df))
# # Cross Validation for Simulation Models
# ## Pseudo Code
# 1. Create training and test data for the folds
# 1. For the training and test data
# 1. Fit to training data
# 1. predictedValues = Fitted values that correspond to test data
# 1. Calculate $R^2$ from test data and predictedValues
# 1. Report the parameters and $R^2$ for each fold.
# ## Implementation
# +
class CrossValidator():
    """Performs cross validation using parameter fitting. Reports parameters by fold and R2."""
    # Dataframe columns and dictionary keys
    PREDICTED = "predicted"
    TRUE = "true"
    FOLD = "fold"
    RSQ = "rsq"
    PARAMETER = "parameter"
    def __init__(self, numFold, model, observedTS, parameterNames, trueParameterDct=None, **kwargs):
        """
        numFold: int
            number of folds
        model: str (Antimony Model)
        observedTS: NamedTimeseries
        parameterNames: list-str
        trueParameterDct: dict
            key: parameter name, value: parameter value
        kwargs: dict
            optional arguments passed to fitter
        """
        self.numFold = numFold
        self.model = model
        self.observedTS = observedTS.copy()
        self.parameterNames = parameterNames
        self.colnames = self.observedTS.colnames
        self.kwargs = kwargs
        self.trueParameterDct = trueParameterDct
        # Results accumulated by execute(): one entry per fold
        self.parametersCol = []
        self.rsqs = []
    @staticmethod
    def _calcRsq(observedTS, fittedTS):
        # R^2 of fittedTS relative to observedTS, pooled over all columns
        columns = observedTS.colnames
        residualsArr = observedTS[columns] - fittedTS[columns]
        rsq = 1 - np.var(residualsArr)/np.var(observedTS[columns])
        return rsq
    def execute(self):
        """
        Runs the cross validation: fits on each training fold and scores
        R^2 on the held-out test fold.
        Populates self.parametersCol and self.rsqs (one entry per fold).
        """
        numPoint = len(self.observedTS)
        generator = foldGenerator(numPoint, self.numFold)
        for trainIndices, testIndices in generator:
            # Fit only the training timepoints; simulate on the full grid so
            # fitted values exist at the test timepoints as well
            fitter = AligningFitter(self.model, self.observedTS[trainIndices],
                  self.parameterNames, endTime=self.observedTS.end,
                  numPoint=numPoint, **self.kwargs)
            fitter.fit()
            self.parametersCol.append(fitter.params.copy())
            # Score only on the held-out timepoints
            rsq = self._calcRsq(self.observedTS[testIndices], fitter.fittedTS[testIndices])
            self.rsqs.append(rsq)
    def reportParameters(self):
        """
        Constructs a report for the parameter values by fold.

        Returns
        -------
        pd.DataFrame
            columns: fold, parameter, true, predicted
        """
        if self.trueParameterDct is None:
            raise ValueError("Must construct CrossValidator with trueParameterDct")
        # Construct parameter information: one row per (fold, parameter) pair
        keys = [CrossValidator.FOLD, CrossValidator.TRUE, CrossValidator.PREDICTED, CrossValidator.PARAMETER]
        dct = {}
        for key in keys:
            dct[key] = []
        for fold in range(len(self.parametersCol)):
            for parameterName in self.parameterNames:
                dct[CrossValidator.FOLD].append(fold)
                dct[CrossValidator.PARAMETER].append(parameterName)
                dct[CrossValidator.TRUE].append(self.trueParameterDct[parameterName])
                dct[CrossValidator.PREDICTED].append(self.parametersCol[fold].valuesdict()[parameterName])
        reportDF = pd.DataFrame(dct)
        #
        return reportDF
    def reportRsq(self):
        # One R^2 row per fold, as computed by execute()
        return pd.DataFrame({CrossValidator.RSQ: self.rsqs})
# TESTS
# _calcRsq
rsq = CrossValidator._calcRsq(OBSERVED_TS, OBSERVED_TS)
assert(np.isclose(rsq, 1.0))
# execute
numFold = 5
validator = CrossValidator(numFold, MODEL, OBSERVED_TS, PARAMETER_NAMES,
trueParameterDct=PARAMETER_DCT)
validator.execute()
assert(len(validator.rsqs) == numFold)
assert(len(validator.parametersCol) == numFold)
assert(isinstance(validator.parametersCol[0], lmfit.Parameters))
# reportParameters
df = validator.reportParameters()
for key in [CrossValidator.FOLD, CrossValidator.TRUE, CrossValidator.PREDICTED, CrossValidator.PARAMETER]:
assert(key in df.columns)
assert(len(df) > 0)
# reportRsq
df = validator.reportRsq()
assert(CrossValidator.RSQ in df.columns)
assert(len(df) > 0)
# -
# # Analysis With Cross Validation
for numFold in [5, 10, 50, 100]:
validator = CrossValidator(numFold, MODEL, OBSERVED_TS, PARAMETER_NAMES, trueParameterDct=PARAMETER_DCT)
validator.execute()
averageRsq = np.mean(validator.rsqs)
print("Num Fold: %d Avg Rsq: %2.2f" % (numFold, averageRsq))
# **Questions**
# 1. Why does average $R^2$ *decrease* with the number of folds?
# 1. What happens if there is more variablility in the data? Less variability in the data?
# 1. Write a new script that evaluates the combined effects of the number folds and the noise in the observed time series.
# +
def analyzeNoiseFold(noiseStds, numFolds):
    """
    Evaluates the impact on R2 of the noise std and the number of folds.

    Parameters
    ----------
    noiseStds: list-float
    numFolds: list-int

    Returns
    -------
    pd.DataFrame
        columns: std, numfold, avgrsq
    """
    STD = "std"
    NUMFOLD = "numfold"
    RSQ = "avgrsq"
    rows = {STD: [], NUMFOLD: [], RSQ: []}
    for noiseStd in noiseStds:
        # Fresh synthetic observations for each noise level
        observedTS = makeSyntheticData(std=noiseStd)
        for numFold in numFolds:
            validator = CrossValidator(numFold, MODEL, observedTS,
                  PARAMETER_NAMES, trueParameterDct=PARAMETER_DCT)
            validator.execute()
            rows[STD].append(noiseStd)
            rows[NUMFOLD].append(numFold)
            rows[RSQ].append(np.mean(validator.rsqs))
    return pd.DataFrame(rows)
# Tests
df = analyzeNoiseFold([0.5, 1.5], [5, 10, 100])
assert(len(df) > 0)
assert(sum(df["avgrsq"]) > 0)
# -
df
| labs/Lab_Cross_Validation.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# language: python
# name: python3
# ---
# <script async src="https://www.googletagmanager.com/gtag/js?id=UA-59152712-8"></script>
# <script>
# window.dataLayer = window.dataLayer || [];
# function gtag(){dataLayer.push(arguments);}
# gtag('js', new Date());
#
# gtag('config', 'UA-59152712-8');
# </script>
#
# # Interpolating Metric Quantities on Cell Faces
#
# ## Author: <NAME>
#
# **Notebook Status:** <font color='green'><b>Validated</b></font>
#
# **Validation Notes:** This module will be self-validated against [its module](../../edit/in_progress/GiRaFFE_NRPy/GiRaFFE_NRPy_FCVAL.py) and will also be validated against the corresponding algorithm in the old `GiRaFFE` code in [this tutorial](Tutorial-Start_to_Finish-GiRaFFE_NRPy-FCVAL.ipynb).
#
# # This module presents the functionality of [GiRaFFE_NRPy_Metric_Face_Values.py](../../edit/in_progress/GiRaFFE_NRPy/GiRaFFE_NRPy_Metric_Face_Values.py) .
# This notebook presents the macros from the original `GiRaFFE` that provide the values of the metric gridfunctions interpolated to the cell faces along with the code needed to implement this in NRPy.
#
# <a id='toc'></a>
#
# # Table of Contents
# $$\label{toc}$$
#
# This notebook is organized as follows
#
# 0. [Step 0](#prelim): Preliminaries
# 1. [Step 1](#interpolator): The Interpolator
# 1. [Step 1.a](#macros): Interpolator coefficients and definition
# 1. [Step 1.b](#gf_struct): Create an array to easily define the gridfunctions we want to interpolate.
# 1. [Step 1.c](#loops): Define the loop parameters and body
# 1. [Step 2](#code_validation): Code Validation against original C code
# 1. [Step 3](#latex_pdf_output): Output this notebook to $\LaTeX$-formatted PDF file
# <a id='prelim'></a>
#
# # Step 0: Preliminaries \[Back to [top](#toc)\]
# $$\label{prelim}$$
#
# This first block of code just sets up a subdirectory within `GiRaFFE_standalone_Ccodes/` to which we will write the C code and adds core NRPy+ functionality to `sys.path`.
# +
# Step 0: Add NRPy's directory to the path
# https://stackoverflow.com/questions/16780014/import-file-from-parent-directory
import os,sys
nrpy_dir_path = os.path.join("..")
if nrpy_dir_path not in sys.path:
    sys.path.append(nrpy_dir_path)
# NOTE(review): the wildcard import pulls every outputC name into this
# namespace; an explicit name list would avoid accidental collisions.
from outputC import *            # NRPy+: Core C code output module
import cmdline_helper as cmd     # NRPy+: Multi-platform Python command-line interface
# Subdirectory that will receive the generated C code
Ccodesdir = "GiRaFFE_standalone_Ccodes/FCVAL"
cmd.mkdir(os.path.join(Ccodesdir))
# -
# <a id='interpolator'></a>
#
# # Step 1: The Interpolator \[Back to [top](#toc)\]
# $$\label{interpolator}$$
#
# Here, we we will write the code necessary to interpolate the metric gridfunction $\alpha, \beta^i, \gamma_{ij}$ onto the cell faces. These values will be necessary to compute fluxes of the Poynting vector and electric field through those faces.
#
# <a id='macros'></a>
#
# ## Step 1.a: Interpolator coefficients and definition \[Back to [top](#toc)\]
# $$\label{macros}$$
#
# First, we will define the functional form of our interpolator. At some point on our grid $i$, we will calculate the value of some gridfunction $Q$ at position $i-\tfrac{1}{2}$ with
# $$
# Q_{i-1/2} = a_{i-2} Q_{i-2} +a_{i-1} Q_{i-1} +a_{i} Q_{i} +a_{i+1} Q_{i+1}
# $$
# and the coefficients we will use for it,
# \begin{align}
# a_{i-2} &= -0.0625 \\
# a_{i-1} &= 0.5625 \\
# a_{i} &= 0.5625 \\
# a_{i+1} &= -0.0625 \\
# \end{align}.
# %%writefile $Ccodesdir/interpolate_metric_gfs_to_cell_faces.h
// Side note: the following values could be used for cell averaged gfs:
// am2=-1.0/12.0, am1=7.0/12.0, a0=7.0/12.0, a1=-1.0/12.0
// However, since the metric gfs store the grid point values instead of the cell average,
// the following coefficients should be used:
// am2 = -1/16, am1 = 9/16, a0 = 9/16, a1 = -1/16
// This will yield the third-order-accurate face values at m-1/2,
// using values specified at {m-2,m-1,m,m+1}
#define AM2 -0.0625
#define AM1 0.5625
#define A0 0.5625
#define A1 -0.0625
#define COMPUTE_FCVAL(METRICm2,METRICm1,METRIC,METRICp1) (AM2*(METRICm2) + AM1*(METRICm1) + A0*(METRIC) + A1*(METRICp1))
# <a id='gf_struct'></a>
#
# ## Step 1.b: Create an array to easily define the gridfunctions we want to interpolate. \[Back to [top](#toc)\]
# $$\label{gf_struct}$$
#
# We will need to apply this interpolation to each gridpoint for several gridfunctions: the lapse $\alpha$, the shift $\beta^i$, and the three-metric $\gamma_{ij}$. Consider that in NRPy+, each gridfunction is assigned an integer identifier with the C macro `#define`. So, the simplest (and shortest to write!) way to ensure we hit each of these is to create arrays that list each of these identifiers in order, so we can always hit the right gridfunction no matter where each gridfunction lies in the list. We use two arrays; the first identifies the usual gridfunctions from which we will read, and the second identifies the face-sampled gridfunctions to which we will write.
# +
# %%writefile -a $Ccodesdir/interpolate_metric_gfs_to_cell_faces.h
const int metric_gfs_list[10] = {GAMMADD00GF,
GAMMADD01GF,
GAMMADD02GF,
GAMMADD11GF,
GAMMADD12GF,
GAMMADD22GF,
BETAU0GF,
BETAU1GF,
BETAU2GF,
ALPHAGF};
const int metric_gfs_face_list[10] = {GAMMA_FACEDD00GF,
GAMMA_FACEDD01GF,
GAMMA_FACEDD02GF,
GAMMA_FACEDD11GF,
GAMMA_FACEDD12GF,
GAMMA_FACEDD22GF,
BETA_FACEU0GF,
BETA_FACEU1GF,
BETA_FACEU2GF,
ALPHA_FACEGF};
const int num_metric_gfs = 10;
# -
# <a id='loops'></a>
#
# ## Step 1.c: Define the loop parameters and body \[Back to [top](#toc)\]
# $$\label{loops}$$
#
# Next, we will write the function that loops over the entire grid. One additional parameter to consider here is the direction in which we need to do the interpolation. This direction exactly corresponds to the parameter `flux_dirn` used in the calculation of the flux of the [Poynting vector](Tutorial-GiRaFFE_NRPy-Stilde-flux.ipynb) and [electric field](Tutorial-GiRaFFE_NRPy-Induction_Equation.ipynb).
#
# The outermost loop will iterate over the gridfunctions we listed above. Nested inside of that, there will be three loops that go through the grid in the usual way. However, the upper bound will be a little unusual. Instead of covering all points or all interior points, we will write these loops to cover all interior points *and one extra past that*. This is because we have defined our interpolation on the $i-\tfrac{1}{2}$ face of a cell, but any given calculation will require both that and an interpolation on the $i+\tfrac{1}{2}$ face as well.
# +
desc = "Interpolate metric gridfunctions to cell faces"
name = "interpolate_metric_gfs_to_cell_faces"
# Generate the C function that loops over the grid and interpolates each
# metric gridfunction onto the cell faces.  The loop upper bounds run over the
# interior PLUS one extra point in each direction, because a flux computation
# needs both the i-1/2 and i+1/2 face values.
interp_Cfunc = outCfunction(
    outfile = "returnstring", desc=desc, name=name,
    params ="const paramstruct *params,REAL *auxevol_gfs,const int flux_dirn",
    preloop =""" int in_gf,out_gf;
REAL Qm2,Qm1,Qp0,Qp1;
""" ,
    body =""" for(int gf = 0;gf < num_metric_gfs;gf++) {
in_gf = metric_gfs_list[gf];
out_gf = metric_gfs_face_list[gf];
for (int i2 = NGHOSTS;i2 < Nxx2+NGHOSTS+1;i2++) {
for (int i1 = NGHOSTS;i1 < Nxx1+NGHOSTS+1;i1++) {
for (int i0 = NGHOSTS;i0 < Nxx0+NGHOSTS+1;i0++) {
Qm2 = auxevol_gfs[IDX4S(in_gf,i0-2*kronecker_delta[flux_dirn][0],i1-2*kronecker_delta[flux_dirn][1],i2-2*kronecker_delta[flux_dirn][2])];
Qm1 = auxevol_gfs[IDX4S(in_gf,i0-kronecker_delta[flux_dirn][0],i1-kronecker_delta[flux_dirn][1],i2-kronecker_delta[flux_dirn][2])];
Qp0 = auxevol_gfs[IDX4S(in_gf,i0,i1,i2)];
Qp1 = auxevol_gfs[IDX4S(in_gf,i0+kronecker_delta[flux_dirn][0],i1+kronecker_delta[flux_dirn][1],i2+kronecker_delta[flux_dirn][2])];
auxevol_gfs[IDX4S(out_gf,i0,i1,i2)] = COMPUTE_FCVAL(Qm2,Qm1,Qp0,Qp1);
}
}
}
}
""",
    rel_path_for_Cparams=os.path.join("../"))
# Append the generated function to the header begun by the %%writefile cells above.
with open(os.path.join(Ccodesdir,"interpolate_metric_gfs_to_cell_faces.h"),"a") as file:
    file.write(interp_Cfunc)
# -
# <a id='code_validation'></a>
#
# # Step 2: Code Validation against `GiRaFFE_NRPy_FCVAL.py` \[Back to [top](#toc)\]
# $$\label{code_validation}$$
#
# Now, we will confirm that the code we have written here is the same as that generated by the module [`GiRaFFE_NRPy_FCVAL.py`](../../edit/in_progress/GiRaFFE_NRPy/GiRaFFE_NRPy_FCVAL.py).
# +
# Define the directory that we wish to validate against:
valdir = "GiRaFFE_NRPy/GiRaFFE_Ccode_library/FCVAL/"
import GiRaFFE_NRPy.GiRaFFE_NRPy_Metric_Face_Values as FCVAL
FCVAL.GiRaFFE_NRPy_FCVAL(valdir)
import difflib
import sys
print("Printing difference between original C code and this code...")
# Open the files to compare
file = "interpolate_metric_gfs_to_cell_faces.h"
print("Checking file " + file)
with open(os.path.join(valdir,file)) as file1, open(os.path.join(Ccodesdir,file)) as file2:
    # Read the lines of each file
    file1_lines = file1.readlines()
    file2_lines = file2.readlines()
    num_diffs = 0
    # BUG FIX: the fromfile/tofile labels previously used os.path.join(valdir+file)
    # and os.path.join(Ccodesdir+file), i.e. plain string concatenation inside a
    # one-argument join.  Ccodesdir has no trailing slash, so the tofile label was
    # malformed ("...FCVALinterpolate_...h").  Join the components properly.
    for line in difflib.unified_diff(file1_lines, file2_lines, fromfile=os.path.join(valdir, file), tofile=os.path.join(Ccodesdir, file)):
        sys.stdout.writelines(line)
        num_diffs = num_diffs + 1
    if num_diffs == 0:
        print("No difference. TEST PASSED!")
    else:
        print("ERROR: Disagreement found with .py file. See differences above.")
# -
# <a id='latex_pdf_output'></a>
#
# # Step 3: Output this notebook to $\LaTeX$-formatted PDF file \[Back to [top](#toc)\]
# $$\label{latex_pdf_output}$$
#
# The following code cell converts this Jupyter notebook into a proper, clickable $\LaTeX$-formatted PDF file. After the cell is successfully run, the generated PDF may be found in the root NRPy+ tutorial directory, with filename
# [Tutorial-GiRaFFE_NRPy-FCVAL.pdf](Tutorial-GiRaFFE_NRPy-FCVAL.pdf) (Note that clicking on this link may not work; you may need to open the PDF file through another means.)
# !jupyter nbconvert --to latex --template latex_nrpy_style.tplx --log-level='WARN' Tutorial-GiRaFFE_NRPy-Metric_Face_Values.ipynb
# !pdflatex -interaction=batchmode Tutorial-GiRaFFE_NRPy-Metric_Face_Values.tex
# !pdflatex -interaction=batchmode Tutorial-GiRaFFE_NRPy-Metric_Face_Values.tex
# !pdflatex -interaction=batchmode Tutorial-GiRaFFE_NRPy-Metric_Face_Values.tex
# !rm -f Tut*.out Tut*.aux Tut*.log
| in_progress/Tutorial-GiRaFFE_NRPy-Metric_Face_Values.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# language: python
# name: python3
# ---
import glob
import pandas as pd
import seaborn as sns
# For each unnormalized profiling run, pair it with the corresponding
# normalized run and write a combined per-index comparison table.
for i in glob.glob('/home/jupyter-zyh/Gnhe/analysis_profile/global_setting/Integer/Texas/profile-1/Texas*.csv'):
    print(i)
    # Recover the fractional index from the filename; assumes the only '.'
    # before the ".csv" extension belongs to the index (e.g. "...Texas<...>.3.csv"
    # -> "0.3") — TODO confirm against the actual profile filenames.
    index = '0.' + i.split('.')[1]
    print(index)
    unnormalized = pd.read_csv(i,index_col=0)
    unnormalized['normalize'] = False
    # Matching normalized run; presumably its CSV already carries a
    # normalize=True column — verify upstream.
    normalized = pd.read_csv(
        '/home/jupyter-zyh/Gnhe/analysis_setting/normalize/Integer/Texas_Int_Renewable1_1092Days_168HrsPerPeriod_{}.csv'.format(index),
        index_col=0)
    df_out = pd.concat([normalized,unnormalized])
    # Mean absolute error across parameter errors, excluding total cost.
    df_out['mae_ex_tc'] = df_out[['renewable_cap_0_err','renewable_cap_1_err','N_err','max_energy_err','max_power_err']].mean(axis=1)
    df_out.to_csv('/home/jupyter-zyh/Gnhe/analysis_setting/normalize/Integer/compare/Texas_Renewable1_1092Days_168HrsPerPeriod_{}.csv'.format(index))
df_list = []
# Gather every per-index comparison file produced above into one long table.
for i in glob.glob('/home/jupyter-zyh/Gnhe/analysis_setting/normalize/Integer/compare/Texas*.csv'):
    # BUG FIX: the filename stem ends in "_<index>.csv" where <index> is a
    # fraction such as "0.3" (written by the loop above).  The old expression
    # i.split('.')[0].split('_')[-1] split on the dot INSIDE the index, so
    # every file was labelled with only the integer part (e.g. "0").  Strip
    # just the ".csv" extension instead.
    index = i.rsplit('.', 1)[0].split('_')[-1]
    temp = pd.read_csv(i, index_col=0)
    temp['index'] = index
    df_list.append(temp)
df = pd.concat(df_list, ignore_index=True)
df.to_csv('/home/jupyter-zyh/Gnhe/analysis_setting/normalize/Integer/compare/all.csv')
# Boxplots of MAE (excluding total cost) per cluster count, faceted by
# trial/method and split by normalization flag.
p = sns.catplot(data=df,row='trial',col='method',x='ncluster',y='mae_ex_tc',hue='normalize',kind='box')
| analysis_setting/normalization/merge_normalize.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3 (ipykernel)
# language: python
# name: python3
# ---
# Achieve AD using a deep CAE generative model: train the generative model on supposedly normal data and use the reconstruction error as the AD score.
# Widen the notebook cells to the full browser width.
# FIX: import display/HTML from IPython.display — the IPython.core.display
# path for these names is deprecated in recent IPython releases.
from IPython.display import display, HTML
display(HTML("<style>.container { width:100% !important; }</style>"))
# +
import torch
import torchvision
import matplotlib.pyplot as plt
import numpy as np
from sklearn.metrics import roc_auc_score
from tqdm import tqdm
import pandas as pd
from seaborn import boxplot, heatmap
import random
from torch.optim.lr_scheduler import MultiStepLR
from utils import *
from networks import *
from centroids import *
from batchscores import *
from epochscores import *
# -
nbr_epochs = 350 # paper says 250 + 100
batch_size = 512 # paper says 200
# dataset = 'MNIST'
dataset = 'FashionMNIST'
# dataset = 'CIFAR10'
normal_cls = [0,1,2]
device = torch.device("cuda:0" if torch.cuda.is_available() else "cpu")
learning_rate = 1e-4
nbr_seeds = 3 # how many different random init to try
valid_AUCs = torch.zeros((nbr_seeds, nbr_epochs+1))
test_AUCs = torch.zeros((nbr_seeds, nbr_epochs+1))
losses = torch.zeros((nbr_seeds, nbr_epochs))
for seed_idx in range(nbr_seeds):
    # Seed every RNG source so each run is reproducible.
    torch.manual_seed(seed_idx)
    random.seed(seed_idx)
    np.random.seed(seed_idx)
    train_loader, valid_loader, test_loader, _, _, _, _, _, _ = get_dataloaders_MNIST_FashionMNIST(batch_size, normal_cls, dataset, seed=seed_idx)
    # Pick the autoencoder architecture matching the dataset's image size/channels.
    if dataset == 'MNIST':
        net = MNIST_LeNet_Autoencoder().to(device)
    elif dataset == 'FashionMNIST':
        net = FashionMNIST_LeNet_Autoencoder().to(device)
    else:
        net = CIFAR10_LeNet_Autoencoder().to(device)
    optimizer = torch.optim.Adam(net.parameters(), lr=learning_rate, weight_decay=1e-6)
    # LR drops 10x after epoch 250 (cf. the "250 + 100" epoch schedule above).
    scheduler = MultiStepLR(optimizer, milestones=[250], gamma=0.1)
    pytorch_total_params = sum(p.numel() for p in net.parameters() if p.requires_grad)
    # Epoch-0 (untrained network) baseline AUCs.
    valid_AUCs[seed_idx,0], test_AUCs[seed_idx,0], _, _ = get_epoch_performances_baseline_CAE(valid_loader, test_loader, device, net, normal_cls)
    for epoch in tqdm(range(nbr_epochs), position=0, leave=True):
        net.train()
        running_loss = 0.0
        for i, (data,targets) in enumerate(train_loader, 0):
            # labels are unused: the reconstruction loss is unsupervised.
            inputs, labels = data.to(device), targets.to(device)
            optimizer.zero_grad()
            outputs = net(inputs)
            # Per-sample squared reconstruction error, summed over all non-batch dims.
            reconstruction_error = torch.sum((outputs - inputs) ** 2, dim=tuple((range(1, outputs.dim()))))
            loss = torch.mean(reconstruction_error)
            loss.backward()
            optimizer.step()
            running_loss += loss.item()
        losses[seed_idx,epoch] = running_loss
        # Evaluate after every epoch; scores_test/scores_labels_test keep the
        # values from the LAST epoch of the LAST seed for the plot below.
        valid_AUCs[seed_idx,epoch+1],test_AUCs[seed_idx,epoch+1], scores_test, scores_labels_test = get_epoch_performances_baseline_CAE(valid_loader, test_loader, device, net, normal_cls)
        scheduler.step()
    save_pretrained_weights(net, dataset, normal_cls, seed_idx)
# Boxplot of per-class reconstruction errors for the LAST seed's final model.
plt.figure(figsize=(10, 10))
boxplot(y=scores_test, x=scores_labels_test)
plt.xlabel('{} class'.format(dataset))
plt.ylabel('Reconstruction error after training') # minimum over all centroids for test samples
plt.title('Normal class: {} - LAST SEED EXPERIMENT ONLY'.format(normal_cls))
plt.show()
# Training-loss curves, one dotted line per seed.
plt.figure()
for seed_idx in range(nbr_seeds):
    plt.plot(range(nbr_epochs), losses[seed_idx,:], linestyle=':')
plt.xlabel('Epoch')
plt.ylabel('Mean reconstruction error')
plt.title('Training loss (normal class: {})'.format(normal_cls))
# +
# AUC trajectories for every seed: test AUC (left panel) and validation AUC
# (right panel) versus epoch; epoch 0 is the untrained network.
plt.figure(figsize=(20, 15))
plt.subplot(1,2,1)
for seed_idx in range(nbr_seeds):
    plt.scatter(range(0, nbr_epochs+1), test_AUCs[seed_idx,:], alpha=0.6, s=30)
plt.ylim(0.5,0.99)
plt.xlabel('Epoch')
plt.ylabel('Test AUC')
plt.title('Test AUC - normal class: {} - {} trainable params'.format(normal_cls, pytorch_total_params))
plt.subplot(1,2,2)
for seed_idx in range(nbr_seeds):
    plt.scatter(range(0, nbr_epochs+1), valid_AUCs[seed_idx,:], alpha=0.6, s=30)
plt.ylim(0.5,0.99)
plt.xlabel('Epoch')
plt.ylabel('Valid AUC')
plt.title('Valid AUC - normal class: {} - {} trainable params'.format(normal_cls, pytorch_total_params))
# -
# Model selection: for each seed, take the epoch with the highest validation
# AUC and report the test AUC at that epoch; also record the (oracle) test AUC
# at the best test epoch for comparison.
best_valid_epoch = torch.max(valid_AUCs, dim=1).indices.unsqueeze(1)
best_test_epoch = torch.max(test_AUCs, dim=1).indices.unsqueeze(1)
test_AUC_at_best_valid = torch.gather(test_AUCs, 1, best_valid_epoch)
test_AUC_at_best_test = torch.gather(test_AUCs, 1, best_test_epoch)
print("MEAN TEST AUC +/- STD: {} +/- {}".format(torch.mean(test_AUC_at_best_valid),torch.std(test_AUC_at_best_valid)))
# Append a one-line summary of this run's configuration and results.
with open('last_results.txt', "a") as results_file:
    results_file.write("DeepCAE-{}-Norm{}-BSize{}-LR{}-{}epochs-{}seeds - MEAN TEST AUC (best valid epoch) +/- STD: {} +/- {} | MEAN TEST AUC (best test epoch) +/- STD: {} +/- {}\n".format(dataset, normal_cls, batch_size, learning_rate, nbr_epochs, nbr_seeds, torch.mean(test_AUC_at_best_valid),torch.std(test_AUC_at_best_valid),torch.mean(test_AUC_at_best_test),torch.std(test_AUC_at_best_test)))
| DeepCAE_baseline.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# language: python
# name: python3
# ---
# # These are the Cartography visuals.
# # Imports Section
import sys
# Make the shared notebook helper scripts (Helpers.py etc.) importable.
sys.path.extend(["../notebooks/scripts/"])
# +
import altair as alt
from altair_saver import save
from augur.utils import json_to_tree
import json
import matplotlib.pyplot as plt
import matplotlib as mpl
import pandas as pd
import re
#from reportlab.graphics import renderPDF
import seaborn as sns
#from svglib.svglib import svg2rlg
from Helpers import linking_tree_with_plots_clickable, linking_tree_with_plots_brush, scatterplot_with_tooltip_interactive
from Helpers import get_y_positions, get_euclidean_data_frame
# #%matplotlib inline
# -
# Remove extra padding around embedded Altair charts.
alt.renderers.set_embed_options(
    padding={"left": 0, "right": 0, "bottom": 1, "top": 1}
)
sns.set_style("ticks")
# Disable top and right spines.
mpl.rcParams['axes.spines.top'] = False
mpl.rcParams['axes.spines.right'] = False
# Display and save figures at higher resolution for presentations and manuscripts.
mpl.rcParams['savefig.dpi'] = 300
mpl.rcParams['figure.dpi'] = 100
# Display text at sizes large enough for presentations and manuscripts.
mpl.rcParams['font.weight'] = "normal"
mpl.rcParams['axes.labelweight'] = "normal"
mpl.rcParams['font.size'] = 10
mpl.rcParams['axes.labelsize'] = 10
mpl.rcParams['legend.fontsize'] = 8
mpl.rcParams['xtick.labelsize'] = 10
mpl.rcParams['ytick.labelsize'] = 10
mpl.rcParams['axes.titlesize'] = 8
mpl.rc('text', usetex=False)
# When run inside Snakemake, set up a headless Chrome driver so Altair charts
# can be exported to PNG; outside Snakemake the `snakemake` name is undefined,
# the attribute access raises, and the selenium setup is skipped entirely.
try:
    snakemake.input.node_df
    import selenium
    from selenium.webdriver import Chrome
    from selenium import webdriver
    chrome_options = webdriver.ChromeOptions()
    chrome_options.add_argument('--headless')
    chrome_options.add_argument('--no-sandbox')
    chrome_options.add_argument('--disable-dev-shm-usage')
    chrome_options.add_argument("--remote-debugging-port=9222")
    browser = webdriver.Chrome(options=chrome_options)
except:
    # Bare except is deliberate best-effort: NameError when not under Snakemake,
    # ImportError/WebDriverException when selenium or Chrome is unavailable.
    print("not in Snakemake, imports unnecessary")
# ## Pathogen-specific variables
#
# Consider consolidating these into a single configuration file that can be passed to the notebook as a command line argument for more scriptable generation of these figures.
# # Flu Specific Variables
# +
# Pull input file paths from the Snakemake workflow when available.
# BUG FIX: every assignment previously ended with a trailing comma, which made
# each variable a 1-tuple (e.g. ('path',)) instead of the path string itself.
try:
    node_df_ha = snakemake.input.node_df_ha
    pca_df_ha = snakemake.input.pca_df_ha
    explained_variance_pca_ha = snakemake.input.explained_variance_pca_ha
    pca_df_concatenated = snakemake.input.pca_df_concatenated
    explained_variance_pca_concatenated = snakemake.input.explained_variance_pca_concatenated
    mds_df_ha = snakemake.input.mds_df_ha
    mds_df_concatenated = snakemake.input.mds_df_concatenated
    mds_df_ma_concatenated = snakemake.input.mds_df_ma_concatenated
    tsne_df_ha = snakemake.input.tsne_df_ha
    tsne_df_concatenated = snakemake.input.tsne_df_concatenated
    tsne_df_ma_concatenated = snakemake.input.tsne_df_ma_concatenated
    umap_df_ha = snakemake.input.umap_df_ha
    umap_df_ma_concatenated = snakemake.input.umap_df_ma_concatenated
except:
    # NameError outside Snakemake; swallowed on purpose (the cells below fall
    # back to hard-coded paths).
    print("not in Snakemake, imports unnecessary")
# + active=""
# clades_to_plot = ['3c2', '3c2.A', '3c3.A', 'A1','A1b', 'A1b/131K','A1b/135K', 'A1b/135N', 'A1b/137F', 'A1b/186D', 'A1b/197R', 'A1b/94N', 'A2', 'A2/re', 'A3']
# domain = ['3c', '3c2', '3c2.A', '3c3', '3c3.A', 'A1', 'A1a', 'A1b', 'A1b/131K','A1b/135K', 'A1b/135N', 'A1b/137F', 'A1b/186D', 'A1b/197R', 'A1b/94N', 'A2', 'A2/re', 'A3', 'A4']
# range_ = ['#4e38d5', '#6626d4', '#4138c3', '#4c89e8', '#4e70ff', '#5499ff', '#79c9a1', '#61b8f0', '#5499ff', '#87dfb3','#a0e994', '#bdee78', '#ddee64', '#f8e957', '#ffdb4e', '#ffc348', '#ff9e40', '#ff6e36', '#f93529']
#
# + active=""
# #!/usr/bin/env python3
#
# import itertools
# import pandas as pd
#
# methods = ["pca", "mds", "t-sne", "umap"]
# distance_thresholds = range(0, 16, 2)
#
# columns = (
# "distance_threshold",
# )
#
# tsne_parameters = itertools.product(
# distance_thresholds
# )
# tsne_df = pd.DataFrame(
# tsne_parameters,
# columns=columns
# )
# tsne_df["method"] = "t-sne"
#
# columns = (
# "distance_threshold",
# )
# umap_parameters = itertools.product(
# distance_thresholds
# )
# umap_df = pd.DataFrame(
# umap_parameters,
# columns=columns
# )
# umap_df["method"] = "umap"
#
# # MDS
# mds_df = pd.DataFrame({
# "method": "mds",
# "distance_threshold": distance_thresholds
# })
#
# # PCA
# pca_df = pd.DataFrame({
# "method": "pca",
# "distance_threshold": distance_thresholds
# })
#
# # Collect methods parameters.
# df = pd.concat([
# pca_df,
# mds_df,
# tsne_df,
# umap_df,
# ], ignore_index=True)
#
# # Maintain the same column order.
# columns = (
# "distance_threshold",
# "method",
# )
#
# df.to_csv(
# "config/method_parameters.tsv",
# sep="\t",
# index=False,
# na_rep="N/A",
# columns=columns,
# )
#
# -
# # Reading in all the data from the scripts
# +
import os
# Echo the working directory so the relative paths below can be sanity-checked.
os.getcwd()
# -
# Colour schemes table; from its usage below it appears row k holds a palette
# of k+1 colours in columns 0..100 — confirm against color_schemes.tsv.
colors = pd.read_csv("../notebooks/config/color_schemes.tsv", sep="\t", names=[i for i in range(0,101)])
#node_df = pd.read_csv(node_df_ha, sep="\t")
node_df_ha = pd.read_csv("results/table_ha.tsv", sep="\t")
# Rename tree-layout columns to the names the plotting helpers expect.
node_df_ha.rename(columns={'num_date':'date', 'y_value':"y"}, inplace=True)
node_df_ha.head()
# # HDBSCAN project:
# - cluster on HA, find MCC value, same for HA+NA (from cluster_results script)
# - check if HA+NA MCC > HA only
# ## MDS
# MDS embeddings for HA alone, HA+NA concatenated, and HA+NA+MA concatenated.
MDS_df_ha = pd.read_csv("results/embed_mds_ha.csv",index_col=0)
MDS_df_concatenated = pd.read_csv("results/embed_mds_concatenated.csv",index_col=0)
MDS_df_ma_concatenated = pd.read_csv("results/embed_mds_ma_concatenated.csv", index_col=0)
# Attach tree metadata (date, tree y position, clade) to each embedding row.
merged_mds_df_ha = MDS_df_ha.merge(node_df_ha[["strain", "date", "y", "clade_membership"]], on="strain")
merged_mds_df_concatenated = MDS_df_concatenated.merge(node_df_ha[["strain", "date", "y", "clade_membership"]], on="strain")
merged_mds_df_ma_concatenated = MDS_df_ma_concatenated.merge(node_df_ha[["strain", "date", "y", "clade_membership"]], on="strain")
# Clade-based ground truth ("clade_status") for the MCC computations below.
KDE_df_normal = get_euclidean_data_frame(sampled_df=merged_mds_df_ha, column_for_analysis="clade_membership", embedding="method", column_list=['mds1', 'mds2'])
domain = merged_mds_df_ha["mds_label"].drop_duplicates().values
range_ = colors[len(domain)-1:len(domain)].dropna(axis=1).values.tolist()[0]
chart_12_mds = scatterplot_with_tooltip_interactive(merged_mds_df_ha,'mds1','mds2',"mds1","mds2",['strain','clade_membership'],'mds_label:N', domain, range_)
chart_12_mds
from sklearn.metrics import confusion_matrix, matthews_corrcoef
def _mds_cluster_agreement(embedding_df):
    # Compare this frame's HDBSCAN labels ("mds_label") against the clade
    # ground truth (KDE_df_normal) and return (confusion matrix, MCC).
    kde_cluster = get_euclidean_data_frame(sampled_df=embedding_df[["mds1", "mds2", "strain", "mds_label"]], column_for_analysis="mds_label", embedding="mds", column_list=["mds1", "mds2"])
    cm = confusion_matrix(KDE_df_normal["clade_status"], kde_cluster["clade_status"])
    mcc = matthews_corrcoef(KDE_df_normal["clade_status"], kde_cluster["clade_status"])
    return cm, mcc
# Same computation for each segment combination (previously three copy-pasted
# stanzas; the intermediate KDE_df_cluster global was not used elsewhere).
confusion_matrix_val_ha, matthews_cc_val_ha = _mds_cluster_agreement(merged_mds_df_ha)
confusion_matrix_val_concatenated, matthews_cc_val_concatenated = _mds_cluster_agreement(merged_mds_df_concatenated)
confusion_matrix_val_ma_concatenated, matthews_cc_val_ma_concatenated = _mds_cluster_agreement(merged_mds_df_ma_concatenated)
#domain = merged_mds_df_ha["mds_label"].drop_duplicates().values
# Sorted cluster labels; HDBSCAN marks noise as -1, which is coloured grey.
domain = sorted(merged_mds_df_ha["mds_label"].drop_duplicates().values)
if -1 in domain:
    range_ = ["#999999"] + colors[len(domain)-1:len(domain)].dropna(axis=1).values.tolist()[0]
else:
    range_ = colors[len(domain):len(domain)+1].dropna(axis=1).values.tolist()[0]
list_of_chart_ha = linking_tree_with_plots_brush(merged_mds_df_ha,['mds1','mds2'],["MDS1", "MDS2"], 'mds_label:N', ['strain','clade_membership'], domain, range_)
chart_ha = list_of_chart_ha[0]|list_of_chart_ha[1].properties(title="HA only MCC: " + str(round(matthews_cc_val_ha,4)))
#domain = merged_mds_df_concatenated["mds_label"].drop_duplicates().values
domain = sorted(merged_mds_df_concatenated["mds_label"].drop_duplicates().values)
if -1 in domain:
    range_ = ["#999999"] + colors[len(domain)-1:len(domain)].dropna(axis=1).values.tolist()[0]
else:
    range_ = colors[len(domain):len(domain)+1].dropna(axis=1).values.tolist()[0]
list_of_chart_concatenated = linking_tree_with_plots_brush(merged_mds_df_concatenated,['mds1','mds2'],["MDS1", "MDS2"], 'mds_label:N', ['strain','clade_membership'], domain, range_)
chart_concat = list_of_chart_concatenated[0]|list_of_chart_concatenated[1].properties(title="HA + NA MCC: " + str(round(matthews_cc_val_concatenated,4)))
alt.vconcat(chart_ha, chart_concat).resolve_scale(color='independent')
# NOTE(review): the stanza below reuses `domain` computed from the HA+NA frame
# while plotting the HA+NA+MA frame; every other stanza recomputes `domain`
# from its own frame first — confirm whether this omission is intentional.
if -1 in domain:
    range_ = ["#999999"] + colors[len(domain)-1:len(domain)].dropna(axis=1).values.tolist()[0]
else:
    range_ = colors[len(domain):len(domain)+1].dropna(axis=1).values.tolist()[0]
list_of_chart_concatenated = linking_tree_with_plots_brush(merged_mds_df_ma_concatenated,['mds1','mds2'],["MDS1", "MDS2"], 'mds_label:N', ['strain','clade_membership'], domain, range_)
chart_concat_ma = list_of_chart_concatenated[0]|list_of_chart_concatenated[1].properties(title="HA + NA + MA MCC: " + str(round(matthews_cc_val_ma_concatenated,4)))
final_chart = alt.vconcat(chart_ha, chart_concat, chart_concat_ma).resolve_scale(color='independent')
save(final_chart, "../docs/HANAMAFullChartBrushableMDS.html")
save(final_chart, "../docs/HANAMAFullChartBrushableMDS.png", scale_factor=2.0)
# ## HDBSCAN clustering on t-SNE
# t-SNE embeddings with HDBSCAN cluster labels for each segment combination.
TSNE_df_ha = pd.read_csv("results/embed_t-sne_ha.csv",index_col=0)
TSNE_df_concatenated = pd.read_csv("results/embed_t-sne_concatenated.csv",index_col=0)
TSNE_df_ma_concatenated = pd.read_csv("results/embed_t-sne_ma_concatenated.csv", index_col=0)
merged_tsne_df_ha = TSNE_df_ha.merge(node_df_ha[["strain", "date", "y", "clade_membership"]], on="strain")
merged_tsne_df_concatenated = TSNE_df_concatenated.merge(node_df_ha[["strain", "date", "y", "clade_membership"]], on="strain")
merged_tsne_df_ma_concatenated = TSNE_df_ma_concatenated.merge(node_df_ha[["strain", "date", "y", "clade_membership"]], on="strain")
# Clade-based ground truth ("clade_status"), recomputed on the t-SNE columns.
KDE_df_normal = get_euclidean_data_frame(sampled_df=merged_tsne_df_ha, column_for_analysis="clade_membership", embedding="method", column_list=['tsne_x', 'tsne_y'])
domain = merged_tsne_df_ha["t-sne_label"].drop_duplicates().values
range_ = colors[len(domain)-1:len(domain)].dropna(axis=1).values.tolist()[0]
chart_12_tsne = scatterplot_with_tooltip_interactive(merged_tsne_df_ha,'tsne_x','tsne_y',"tsne_x","tsne_y",['strain','clade_membership'],'t-sne_label:N', domain, range_)
chart_12_tsne
from sklearn.metrics import confusion_matrix, matthews_corrcoef
def _tsne_cluster_agreement(embedding_df):
    # Compare this frame's HDBSCAN labels ("t-sne_label") against the clade
    # ground truth (KDE_df_normal) and return (confusion matrix, MCC).
    kde_cluster = get_euclidean_data_frame(sampled_df=embedding_df[["tsne_x", "tsne_y", "strain", "t-sne_label"]], column_for_analysis="t-sne_label", embedding="tsne", column_list=["tsne_x", "tsne_y"])
    cm = confusion_matrix(KDE_df_normal["clade_status"], kde_cluster["clade_status"])
    mcc = matthews_corrcoef(KDE_df_normal["clade_status"], kde_cluster["clade_status"])
    return cm, mcc
# Same computation for each segment combination (previously three copy-pasted
# stanzas; the intermediate KDE_df_cluster global was not used elsewhere).
confusion_matrix_val_ha, matthews_cc_val_ha = _tsne_cluster_agreement(merged_tsne_df_ha)
confusion_matrix_val_concatenated, matthews_cc_val_concatenated = _tsne_cluster_agreement(merged_tsne_df_concatenated)
confusion_matrix_val_ma_concatenated, matthews_cc_val_ma_concatenated = _tsne_cluster_agreement(merged_tsne_df_ma_concatenated)
#domain = merged_tsne_df_ha["t-sne_label"].drop_duplicates().values
# Sorted cluster labels; HDBSCAN marks noise as -1, which is coloured grey.
domain = sorted(merged_tsne_df_ha["t-sne_label"].drop_duplicates().values)
if -1 in domain:
    range_ = ["#999999"] + colors[len(domain)-1:len(domain)].dropna(axis=1).values.tolist()[0]
else:
    range_ = colors[len(domain):len(domain)+1].dropna(axis=1).values.tolist()[0]
# NOTE(review): the axis titles ["MDS1", "MDS2"] below look copy-pasted from
# the MDS section — these charts plot t-SNE coordinates; confirm and relabel.
list_of_chart_ha = linking_tree_with_plots_brush(merged_tsne_df_ha,['tsne_x','tsne_y'],["MDS1", "MDS2"], 't-sne_label:N', ['strain','clade_membership'], domain, range_)
chart_ha = list_of_chart_ha[0]|list_of_chart_ha[1].properties(title="HA only MCC: " + str(round(matthews_cc_val_ha,4)))
#domain = merged_tsne_df_concatenated["t-sne_label"].drop_duplicates().values
domain = sorted(merged_tsne_df_concatenated["t-sne_label"].drop_duplicates().values)
if -1 in domain:
    range_ = ["#999999"] + colors[len(domain)-1:len(domain)].dropna(axis=1).values.tolist()[0]
else:
    range_ = colors[len(domain):len(domain)+1].dropna(axis=1).values.tolist()[0]
list_of_chart_concatenated = linking_tree_with_plots_brush(merged_tsne_df_concatenated,['tsne_x','tsne_y'],["MDS1", "MDS2"], 't-sne_label:N', ['strain','clade_membership'], domain, range_)
chart_concat = list_of_chart_concatenated[0]|list_of_chart_concatenated[1].properties(title="HA + NA MCC: " + str(round(matthews_cc_val_concatenated,4)))
alt.vconcat(chart_ha, chart_concat).resolve_scale(color='independent')
# NOTE(review): `domain` is NOT recomputed from the HA+NA+MA frame before
# colouring its chart, unlike the stanzas above — confirm whether intentional.
if -1 in domain:
    range_ = ["#999999"] + colors[len(domain)-1:len(domain)].dropna(axis=1).values.tolist()[0]
else:
    range_ = colors[len(domain):len(domain)+1].dropna(axis=1).values.tolist()[0]
list_of_chart_concatenated = linking_tree_with_plots_brush(merged_tsne_df_ma_concatenated,['tsne_x','tsne_y'],["MDS1", "MDS2"], 't-sne_label:N', ['strain','clade_membership'], domain, range_)
chart_concat_ma = list_of_chart_concatenated[0]|list_of_chart_concatenated[1].properties(title="HA + NA + MA MCC: " + str(round(matthews_cc_val_ma_concatenated,4)))
final_chart = alt.vconcat(chart_ha, chart_concat, chart_concat_ma).resolve_scale(color='independent')
save(final_chart, "../docs/HANAMAFullChartBrushableTSNE.html",)
save(final_chart, "../docs/HANAMAFullChartBrushableTSNE.png", scale_factor=2.0)
# # Running T-SNE on the Dataset
# Reload the t-SNE embeddings and colour points by clade rather than cluster.
TSNE_df_ha = pd.read_csv("results/embed_t-sne_ha.csv",index_col=0)
TSNE_df_concatenated = pd.read_csv("results/embed_t-sne_concatenated.csv",index_col=0)
merged_tsne_df_ha = TSNE_df_ha.merge(node_df_ha[["strain", "date", "y", "clade_membership"]], on="strain")
merged_tsne_df_concatenated = TSNE_df_concatenated.merge(node_df_ha[["strain", "date", "y", "clade_membership"]], on="strain")
domain = sorted(merged_tsne_df_ha["clade_membership"].drop_duplicates().values)
# -1 is presumably only produced by HDBSCAN cluster labels, not clade names,
# so this branch is kept for symmetry with the cluster-coloured sections.
if -1 in domain:
    range_ = ["#999999"] + colors[len(domain)-1:len(domain)].dropna(axis=1).values.tolist()[0]
else:
    range_ = colors[len(domain):len(domain)+1].dropna(axis=1).values.tolist()[0]
scatterplot_with_tooltip_interactive(merged_tsne_df_ha,'tsne_x','tsne_y','tsne_x','tsne_y',['strain', "clade_membership"],'clade_membership:N', domain, range_)
domain = sorted(merged_tsne_df_concatenated["clade_membership"].drop_duplicates().values)
if -1 in domain:
    range_ = ["#999999"] + colors[len(domain)-1:len(domain)].dropna(axis=1).values.tolist()[0]
else:
    range_ = colors[len(domain):len(domain)+1].dropna(axis=1).values.tolist()[0]
scatterplot_with_tooltip_interactive(merged_tsne_df_concatenated,'tsne_x','tsne_y','tsne_x','tsne_y',['strain', "clade_membership"],'clade_membership:N', domain, range_)
# NOTE(review): `domain`/`range_` here carry over from the *concatenated*
# frame computed above, yet the first chart plots the HA-only frame — every
# other section recomputes them per frame; confirm intent.
list_of_chart_ha = linking_tree_with_plots_brush(
    merged_tsne_df_ha,
    ['tsne_x','tsne_y'],
    ['tsne_x','tsne_y'],
    'clade_membership:N',
    ["strain:N", "clade_membership:N"],
    domain,
    range_
)
chart_tsne_ha = list_of_chart_ha[0]|list_of_chart_ha[1]
chart_tsne_ha
list_of_chart_concatenated = linking_tree_with_plots_brush(
    merged_tsne_df_concatenated,
    ['tsne_x','tsne_y'],
    ['tsne_x','tsne_y'],
    'clade_membership:N',
    ["strain:N", "clade_membership:N"],
    domain,
    range_
)
chart_tsne_concatenated = list_of_chart_concatenated[0]|list_of_chart_concatenated[1]
chart_tsne_concatenated
# Stack the two linked tree/embedding charts vertically.
chart_tsne_ha & chart_tsne_concatenated
# # Running UMAP on the Dataset
# UMAP embeddings; the final expression sanity-checks that both frames index
# the same strains in the same order.
UMAP_df_ha = pd.read_csv("results/embed_umap_ha.csv",index_col=0)
UMAP_df_concatenated = pd.read_csv("results/embed_umap_concatenated.csv",index_col=0)
UMAP_df_concatenated
merged_umap_df_ha = UMAP_df_ha.merge(node_df_ha[["strain", "date", "y", "clade_membership"]], on="strain")
merged_umap_df_concatenated = UMAP_df_concatenated.merge(node_df_ha[["strain", "date", "y", "clade_membership"]], on="strain")
UMAP_df_ha.index.tolist() == UMAP_df_concatenated.index.values.tolist()
| ha-na-ma-nextstrain/2021-8-8NotebookFluHaNaMa.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# language: python
# name: python3
# ---
# %autosave 900
# (The above changes autosave to every 15 minutes<a id = autosave_time>[1]</a>, since (following feedback from previous assignment) I have written a script which (when I remember to run it) forces a commit every time a file within the project directory is changed<a id = my_force_commit_script>[2]</a>, and a 2 minute autosave would be ridiculous.)
# #### References
#
# [1](#autosave_time): https://www.webucator.com/blog/2016/03/change-default-autosave-interval-in-ipython-notebook/
#
# [2](#my_force_commit_script): https://stackoverflow.com/a/59121121/12462056
#
# [3](#convert_to_csv): https://stackoverflow.com/questions/21546739/load-data-from-txt-with-pandas
#
# [4](#multiple_distplots): https://stackoverflow.com/questions/46045750/python-distplot-with-multiple-distributions
#
# [5](#accessing_columns): https://datacarpentry.org/python-ecology-lesson/03-index-slice-subset/
#
# [6](#show_legend): https://stackoverflow.com/questions/44968012/unable-to-show-legend-in-seaborn-distplot
#
# [7](#combinations): https://stackoverflow.com/questions/15315452/selecting-with-complex-criteria-from-pandas-dataframe
#
# [8](#rounding): https://www.geeksforgeeks.org/round-function-python/
#
# [9](#generating_random_data): https://towardsdatascience.com/understanding-the-normal-distribution-with-python-e70bb855b027
#
# [10](#pick_from_a_list): https://pynative.com/python-random-choice/
#
# [11](#assign_df_vals): https://stackoverflow.com/questions/13842088/set-value-for-particular-cell-in-pandas-dataframe-using-index
#
# [12](#empty_cols): https://stackoverflow.com/questions/16327055/how-to-add-an-empty-column-to-a-dataframe
#
# [13](#rating_diffs): https://www.chess.com/forum/view/general/win-percentages-for-specific-rating-differences
#
# [14](#my_table):https://stackoverflow.com/questions/48655801/tables-in-markdown-in-jupyter/48657695
#
# [15](#check_range): https://stackoverflow.com/questions/13628791/determine-whether-integer-is-between-two-other-integers
#
# #### The Data
# This data was downloaded from games archive on lichess.org (My_Games_Original in the project directory). The Data_Acquisition file in the project directory goes into further detail on my reasoning and processes for converting to this format. <a id = convert_to_csv>[3]</a>
# +
import csv
import pandas as pd
fintan_games=pd.read_csv('My_Games', sep=",", header=None)
fintan_games.columns=['My_Colour', 'My_Result', 'Opponent_Rating', 'Time_Control', 'First_Move', 'Second_Move']
# -
fintan_games.head(10)
# We shall also import some other packages.
import numpy as np
import numpy.random as npr
import seaborn as sb
import matplotlib.pyplot as plt
from collections import Counter as cnt
# #### Some Analysis
fintan_games.describe()
# So there are 6250 games in the dataset, and my opponents had a mean rating of 1863, ranging between 1182 and 2468. <a id ="accessing_columns">[5]</a>
for i in ['My_Colour','My_Result','Time_Control','First_Move','Second_Move']:
print (cnt(fintan_games.iloc[:][i]),'\n')
# So 3308 games as Black, 2942 as White (this is actually a little surprising over a sample of this size - I will assume an even distribution between the two), 2993 wins, 2923 losses and 334 draws. The vast majority of the games were 3 minutes a side, with a wide range of other time controls, most of which I don't ever remember playing.
#
# Most of the games opened with g4 (this is not really a good opening move, so it might seem surprising, but it was my opening of choice for short games for a very long time, as there are some nice traps). e4 is much more standard, and makes perfect sense. The rest of the distribution all seems pretty credible. There is a 'nan' which is annoying, but I will leave it in because it's good to see how they are dealt with/ignored.
#
# Black's moves seem unsurprising at first glance, and there are two 'nan's again, which presumably match up with those for the White moves.
sb.distplot(fintan_games['Opponent_Rating'],kde=False,bins=200)
plt.show()
# The distribution appears normal with a mean of about 1850 (we saw earlier that the mean is 1863 with a standard deviation of 104).
# +
win=fintan_games.loc[(fintan_games['My_Result']=='1')]
draw=fintan_games.loc[fintan_games['My_Result']=='1/2']
loss=fintan_games.loc[fintan_games['My_Result']=='0']
sb.distplot(win[['Opponent_Rating']], hist=False, label='win')
sb.distplot(draw[['Opponent_Rating']], hist=False, label='draw')
sb.distplot(loss[['Opponent_Rating']], hist=False, label='loss')
plt.show()
# -
# This graph shows that, unsurprisingly, I win more games against lower-rated players, and lose more against higher-rated players. <a id = multiple_distplots>[4]</a> <a id = "combinations">[7]</a>
# +
win_white=fintan_games.loc[(fintan_games['My_Result']=='1') & (fintan_games['My_Colour']=='White')]
win_black=fintan_games.loc[(fintan_games['My_Result']=='1') & (fintan_games['My_Colour']=='Black')]
loss_white=fintan_games.loc[(fintan_games['My_Result']=='0') & (fintan_games['My_Colour']=='White')]
loss_black=fintan_games.loc[(fintan_games['My_Result']=='0') & (fintan_games['My_Colour']=='Black')]
draw_white=fintan_games.loc[(fintan_games['My_Result']=='1/2') & (fintan_games['My_Colour']=='White')]
draw_black=fintan_games.loc[(fintan_games['My_Result']=='1/2') & (fintan_games['My_Colour']=='Black')]
sb.distplot(win_white[['Opponent_Rating']], hist=False, label='White win')
sb.distplot(loss_white[['Opponent_Rating']], hist=False, label='White loss')
sb.distplot(draw_white[['Opponent_Rating']], hist=False, label='White draw')
sb.distplot(win_black[['Opponent_Rating']], hist=False, label='Black win')
sb.distplot(loss_black[['Opponent_Rating']], hist=False, label='Black loss')
sb.distplot(draw_black[['Opponent_Rating']], hist=False, label='Black draw')
plt.show()
# +
print(loss_white.mean())
print(loss_black.mean())
print(win_white.mean())
print(win_black.mean())
# -
# There is nothing tremendously exciting here. Now I will look only at games with the 1.g4 opening, to see if there is anything noteworthy there. <a id ='show_legend'>[6]</a>
# +
win_white_g4=fintan_games.loc[(fintan_games['My_Result']=='1') & (fintan_games['My_Colour']=='White') & (fintan_games['First_Move']=='g4')]
loss_white_g4=fintan_games.loc[(fintan_games['My_Result']=='0') & (fintan_games['My_Colour']=='White') & (fintan_games['First_Move']=='g4')]
draw_white_g4=fintan_games.loc[(fintan_games['My_Result']=='1/2') & (fintan_games['My_Colour']=='White') & (fintan_games['First_Move']=='g4')]
sb.distplot(win_white[['Opponent_Rating']], hist=False, label='White win')
sb.distplot(loss_white[['Opponent_Rating']], hist=False, label='White loss')
sb.distplot(draw_white[['Opponent_Rating']], hist=False, label='White draw')
sb.distplot(win_white_g4[['Opponent_Rating']], hist=False, label='g4: White win')
sb.distplot(loss_white_g4[['Opponent_Rating']], hist=False, label='g4: White loss')
sb.distplot(draw_white_g4[['Opponent_Rating']], hist=False, label='g4: White draw')
plt.show()
# -
# Given that g4 starts made up the majority of my games, it is not unusual that they match very closely with the overall results. The next most common appears to be e4; let me examine that.
# +
win_white_e4=fintan_games.loc[(fintan_games['My_Result']=='1') & (fintan_games['My_Colour']=='White') & (fintan_games['First_Move']=='e4')]
loss_white_e4=fintan_games.loc[(fintan_games['My_Result']=='0') & (fintan_games['My_Colour']=='White') & (fintan_games['First_Move']=='e4')]
draw_white_e4=fintan_games.loc[(fintan_games['My_Result']=='1/2') & (fintan_games['My_Colour']=='White') & (fintan_games['First_Move']=='e4')]
sb.distplot(win_white[['Opponent_Rating']], hist=False, label='White win')
sb.distplot(loss_white[['Opponent_Rating']], hist=False, label='White loss')
sb.distplot(draw_white[['Opponent_Rating']], hist=False, label='White draw')
sb.distplot(win_white_e4[['Opponent_Rating']], hist=False, label='e4: White win')
sb.distplot(loss_white_e4[['Opponent_Rating']], hist=False, label='e4: White loss')
sb.distplot(draw_white_e4[['Opponent_Rating']], hist=False, label='e4: White draw')
plt.show()
# -
# So playing e4 as White does not seem so good for me. Let me look at the responses to, say, g4.
#
# First, let me see the options.
# +
fg=fintan_games
print(cnt(fintan_games.loc[(fintan_games['First_Move']=='g4')]['Second_Move']))
# +
win_white_g4d5=fg.loc[(fg['My_Result']=='1') & (fg['My_Colour']=='White') & (fg['First_Move']=='g4') & (fg['Second_Move']=='d5')]
loss_white_g4d5=fg.loc[(fg['My_Result']=='0') & (fg['My_Colour']=='White') & (fg['First_Move']=='g4') & (fg['Second_Move']=='d5')]
draw_white_g4d5=fg.loc[(fg['My_Result']=='1/2') & (fg['My_Colour']=='White') & (fg['First_Move']=='g4') & (fg['Second_Move']=='d5')]
sb.distplot(win_white_g4d5[['Opponent_Rating']], hist=False, label='White win d5')
sb.distplot(loss_white_g4d5[['Opponent_Rating']], hist=False, label='White loss d5')
sb.distplot(draw_white_g4d5[['Opponent_Rating']], hist=False, label='White draw d5')
win_white_g4e5=fg.loc[(fg['My_Result']=='1') & (fg['My_Colour']=='White') & (fg['First_Move']=='g4') & (fg['Second_Move']=='e5')]
loss_white_g4e5=fg.loc[(fg['My_Result']=='0') & (fg['My_Colour']=='White') & (fg['First_Move']=='g4') & (fg['Second_Move']=='e5')]
draw_white_g4e5=fg.loc[(fg['My_Result']=='1/2') & (fg['My_Colour']=='White') & (fg['First_Move']=='g4') & (fg['Second_Move']=='e5')]
sb.distplot(win_white_g4e5[['Opponent_Rating']], hist=False, label='White win e5')
sb.distplot(loss_white_g4e5[['Opponent_Rating']], hist=False, label='White loss e5')
sb.distplot(draw_white_g4e5[['Opponent_Rating']], hist=False, label='White draw e5')
win_white_g4c5=fg.loc[(fg['My_Result']=='1') & (fg['My_Colour']=='White') & (fg['First_Move']=='g4') & (fg['Second_Move']=='c5')]
loss_white_g4c5=fg.loc[(fg['My_Result']=='0') & (fg['My_Colour']=='White') & (fg['First_Move']=='g4') & (fg['Second_Move']=='c5')]
draw_white_g4c5=fg.loc[(fg['My_Result']=='1/2') & (fg['My_Colour']=='White') & (fg['First_Move']=='g4') & (fg['Second_Move']=='c5')]
sb.distplot(win_white_g4c5[['Opponent_Rating']], hist=False, label='White win c5')
sb.distplot(loss_white_g4c5[['Opponent_Rating']], hist=False, label='White loss c5')
sb.distplot(draw_white_g4c5[['Opponent_Rating']], hist=False, label='White draw c5')
win_white_g4e6=fg.loc[(fg['My_Result']=='1') & (fg['My_Colour']=='White') & (fg['First_Move']=='g4') & (fg['Second_Move']=='e6')]
loss_white_g4e6=fg.loc[(fg['My_Result']=='0') & (fg['My_Colour']=='White') & (fg['First_Move']=='g4') & (fg['Second_Move']=='e6')]
draw_white_g4e6=fg.loc[(fg['My_Result']=='1/2') & (fg['My_Colour']=='White') & (fg['First_Move']=='g4') & (fg['Second_Move']=='e6')]
sb.distplot(win_white_g4e6[['Opponent_Rating']], hist=False, label='White win e6')
sb.distplot(loss_white_g4e6[['Opponent_Rating']], hist=False, label='White loss e6')
sb.distplot(draw_white_g4e6[['Opponent_Rating']], hist=False, label='White draw e6')
win_white_g4g6=fg.loc[(fg['My_Result']=='1') & (fg['My_Colour']=='White') & (fg['First_Move']=='g4') & (fg['Second_Move']=='g6')]
loss_white_g4g6=fg.loc[(fg['My_Result']=='0') & (fg['My_Colour']=='White') & (fg['First_Move']=='g4') & (fg['Second_Move']=='g6')]
draw_white_g4g6=fg.loc[(fg['My_Result']=='1/2') & (fg['My_Colour']=='White') & (fg['First_Move']=='g4') & (fg['Second_Move']=='g6')]
sb.distplot(win_white_g4g6[['Opponent_Rating']], hist=False, label='White win g6')
sb.distplot(loss_white_g4g6[['Opponent_Rating']], hist=False, label='White loss g6')
sb.distplot(draw_white_g4g6[['Opponent_Rating']], hist=False, label='White draw g6')
win_white_g4c6=fg.loc[(fg['My_Result']=='1') & (fg['My_Colour']=='White') & (fg['First_Move']=='g4') & (fg['Second_Move']=='c6')]
loss_white_g4c6=fg.loc[(fg['My_Result']=='0') & (fg['My_Colour']=='White') & (fg['First_Move']=='g4') & (fg['Second_Move']=='c6')]
draw_white_g4c6=fg.loc[(fg['My_Result']=='1/2') & (fg['My_Colour']=='White') & (fg['First_Move']=='g4') & (fg['Second_Move']=='c6')]
sb.distplot(win_white_g4c6[['Opponent_Rating']], hist=False, label='White win c6')
sb.distplot(loss_white_g4c6[['Opponent_Rating']], hist=False, label='White loss c6')
sb.distplot(draw_white_g4c6[['Opponent_Rating']], hist=False, label='White draw c6')
plt.show()
# -
# In hindsight, preserving the rating change after each game would have been a more useful variable. Ah well, next time...
print(cnt(fg['First_Move']),'\n')
for i in cnt(fg['First_Move']):
print(i)
print(cnt(fintan_games.loc[(fintan_games['First_Move']==i)]['Second_Move']),'\n')
# Of course, this counts both games where I am White and Black, and if I'm just looking to prepare against my opponents' first move, I need only look at games where I am Black. Somewhat interestingly, I note here that 19 of the possible 20 responses to g4 have been played, with only Na6 avoided entirely.
#
# So, to look only at games where I played Black:
print(cnt(fg['First_Move']),'\n')
for i in cnt(fg['First_Move']):
print(i)
print(cnt(fg.loc[(fg['First_Move']==i) & (fg['My_Colour']=='Black')]['Second_Move']),'\n')
# So, due to an interesting (to me) but perhaps unwisely chosen, or at least, unwisely reduced data set, there are no numerical correlation analyses to conduct. I guess I will simulate a normally distributed data set with mean 1863 and standard deviation of 104. I will randomly assign half each to Black and White, a Win:Draw:Loss ratio of about 30:3:29. Of the white openings (for me), about 5/6ths should be g4 (my opponents, being presumably sane, would not play it much). I'll let the other 1/6th be randomly selected, though this won't necessarily be strictly accurate. For games where I am Black, I think I will just let both White's and Black's first moves be random, because there is nothing terribly interesting to be gleaned from them anyway. The time variable is boring, so I will ignore it entirely.
#
#
# #### Generate a normally distributed data set
# with a mean of 1863 and a standard deviation of 104.<a id ="rounding">[8]</a>,<a id ="generating_random_data">[9]</a>
# +
from scipy.stats import norm
mean_rating = 1863
std_devn_rating = 104
next_games = []
for i in range(1000):
new_val=mean_rating + np.random.normal()*std_devn_rating
next_games.append(round(new_val))
# -
fake = pd.DataFrame(next_games, columns=['Fake_Opponent_Rating'])
sb.distplot(fake['Fake_Opponent_Rating'],kde=False,bins=200)
plt.show()
# #### Randomly assign Black and White
# <a id ="pick_from_a_list">[10]</a>
# +
import random
bw=['White','Black']
colours=[]
for i in range(1000):
colours.append(random.choice(bw))
fake.insert(1,"Fake_Colour",colours,True)
fake.head()
# +
fake_my_colour_w=fake.loc[fake['Fake_Colour']=='White']
fake_my_colour_b=fake.loc[fake['Fake_Colour']=='Black']
sb.distplot(fake_my_colour_w[['Fake_Opponent_Rating']], hist=False, label='Play white')
sb.distplot(fake_my_colour_b[['Fake_Opponent_Rating']], hist=False, label='Play black')
plt.show()
# -
# Checks out.
#
# #### Choosing openings
# For games where I am White, I will make g4 be 5/6ths of the first moves, and the rest be, say, e4 or Nf3. There is no real educational benefit to making this list longer and including more openings.
# For the second move, I will again let the move be randomish, except where I am Black I will play e5 in response to d4, and e6 in response to e4.
#
# First, creating the First move and Second move columns.<a id ="empty_cols">[12]</a>
fake["Fake_First_Move"]=""
fake["Fake_Second_Move"]=""
fake.head(10)
# Now we set the First and Second move values <a id ="assign_df_vals">[11]</a>
# +
# Intended first-move distributions: as White I play g4 five games in six;
# opponents (as White) pick uniformly between three mainstream openings.
my_weight_dict = {
    "g4": 5/6,
    "e4": 1/12,
    "Nf3": 1/12,
}
other_weight_dict = {
    "e4": 1/3,
    "d4": 1/3,
    "Nf3": 1/3,
}
# Assign a first move to every fake game.  random.choices (plural) honours
# the weights declared above; the previous random.choice(list(dict)) picked
# uniformly from the keys, silently ignoring the intended 5/6 bias for g4.
for i in range(1000):
    if fake['Fake_Colour'][i] == 'White':
        moves, move_weights = zip(*my_weight_dict.items())
    else:
        moves, move_weights = zip(*other_weight_dict.items())
    fake.at[i, 'Fake_First_Move'] = random.choices(moves, weights=move_weights)[0]
fake.head(10)
# +
weight_dict = {
"d5": 1/4,
"c6": 1/4,
"Nf6": 1/4,
"e5":1/4
}
for i in range(1000):
if fake['Fake_Colour'][i]=='Black' and fake['Fake_First_Move'][i]=='d4':
fake.at[i,'Fake_Second_Move']='e5'
else:
if fake['Fake_Colour'][i]=='Black' and fake['Fake_First_Move'][i]=='e4':
fake.at[i,'Fake_Second_Move']='e6'
else:
fake.at[i,'Fake_Second_Move']=random.choice(list(weight_dict))
fake.head(20)
# -
# Now we have generated the first and second moves. There just remains to assign a Win:Draw:Loss to each of the games. It will be more interesting if I am more likely to lose to a stronger player, and less likely to lose to a weaker player.
#
# This jpeg grabbed from chess.com <a id ="rating_diffs">[13]</a> (original source seems to be gone), indicates an estimated win/loss prediction for a given rating difference. This list is more involved than I need, so I will create my own simpler table.<a id ="my_table">[14]</a>,<a id ="check_range">[15]</a>
#
# 
#
# | Rating Diff | Win percentage |
# | --- | --- |
# | <-800 | 0 |
# | -600 | 1 |
# | -400 | 8 |
# | -200 | 24 |
# | 0 | 50 |
# | 200 | 76 |
# | 400 | 92 |
# | 600 | 99 |
# | >800 | 100 |
# +
chess_min=[-2000,-801,-601,-401,-201,1,201,401,601,801]
chess_max=[-800,-600,-400,-200,0,200,400,600,800,2000]
chess_chance=[0,1,8,24,50,50,76,92,99,100]
rating_change = pd.DataFrame(chess_min, columns=['rating_min'])
rating_change.insert(1,"rating_max",chess_max,True)
rating_change
rating_change.insert(2,"chance",chess_chance,True)
rating_change
# -
# Okay, so new database created which we can use to estimate how many games I should win, depending on the difference between my rating and my opponents'. (Will assume my rating is mean rating: 1863).
# +
fake["Fake_Result"] = ""
fake.head()
# Assign a win (1) or loss (0) to each fake game, weighted by the rating
# difference between me (assumed to be at the mean rating, 1863) and the
# fake opponent, using the bracketed win chances in `rating_change`.
for i in range(1000):
    rating_diff = 1863 - fake['Fake_Opponent_Rating'][i]
    # Iterate over every bracket: the original `range(9)` skipped the last
    # of the ten rows (the +801..+2000 bracket).
    for j in range(len(rating_change)):
        if rating_change['rating_min'][j] <= rating_diff <= rating_change['rating_max'][j]:
            win_chance = rating_change['chance'][j]
            # random.choices honours the probabilities.  The original built a
            # two-key dict and used random.choice on its keys — a uniform
            # 50/50 regardless of rating — and its loss weight
            # `100 - chance/100` also mis-parenthesised the intended
            # `(100 - chance)/100`.
            fake.at[i, 'Fake_Result'] = random.choices(
                [1, 0], weights=[win_chance, 100 - win_chance])[0]
            break  # brackets are disjoint; no need to scan the rest
fake
fake
# +
win=fake.loc[(fake['Fake_Result']==1)]
loss=fake.loc[fake['Fake_Result']==0]
sb.distplot(win[['Fake_Opponent_Rating']], hist=False, label='win')
sb.distplot(loss[['Fake_Opponent_Rating']], hist=False, label='loss')
plt.show()
# -
# This graph appears to agree with the real data's assessment that I perform quite similarly against players of any strength (which is probably concerning from a chess point of view), but there is an indication that in my fake dataset (like in the real data set), I do lose (slightly) more games against higher rated players than against lower rated players. (I ignored draws for my fake data set).
| pda_proj.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# language: python
# name: python3
# ---
# # 波士顿房价预测
from keras.datasets import boston_housing
from keras import layers
from keras import models
(train_data, train_targets), (test_data, test_targets) = boston_housing.load_data()
print(train_data.shape, train_data[0])
print(train_targets.shape, train_targets[:13])
# ## 数据标准化
# +
mean = train_data.mean(axis=0)
std = train_data.std(axis=0)
print('mean:', mean)
print('std:', std)
train_data -= mean
train_data /= std
test_data -= mean
test_data /= std
# -
# ## 构建网络
# Two 64-unit ReLU hidden layers feeding a single linear output unit —
# a standard small regression network for the 13-feature Boston data.
model = models.Sequential()
model.add(layers.Dense(64, activation='relu', input_shape=(train_data.shape[1],)))
model.add(layers.Dense(64, activation='relu'))
model.add(layers.Dense(1))
model.compile(optimizer='rmsprop', loss='mse', metrics=['mae'])
# Fit on the *training* split.  The original fit on test_data/test_targets,
# which leaks the test set into training and makes the subsequent
# model.evaluate(test_data, test_targets) meaningless.
history = model.fit(train_data, train_targets, epochs=50, batch_size=20)
print(history.history)
model.evaluate(test_data, test_targets)
model.predict(test_data[:10])
# ## 编写生成网络的函数
def build_model():
    """Build and compile a fresh two-hidden-layer regression network.

    Returns a compiled Keras model: two 64-unit ReLU layers followed by a
    single linear output unit, optimised with RMSprop on MSE while tracking
    MAE.  A new model is constructed on every call so that K-fold loops
    start each fold from freshly initialised weights.
    """
    net = models.Sequential([
        layers.Dense(64, activation='relu', input_shape=(train_data.shape[1],)),
        layers.Dense(64, activation='relu'),
        layers.Dense(1),
    ])
    net.compile(optimizer='rmsprop', loss='mse', metrics=['mae'])
    return net
# ## K折验证
# +
import numpy as np
k = 4
num_val_samples = len(train_data) // k  # // is floor (integer) division
num_epochs = num_val_samples
maes = []
# K-fold cross-validation: each pass holds out one contiguous slice of the
# training data as the validation fold and trains on the remainder.
for i in range(k):
    print('processing fold #', i)
    # Fold i is the i-th contiguous slice of size num_val_samples.
    val_data = train_data[i*num_val_samples: (i+1)*num_val_samples]
    val_targets = train_targets[i*num_val_samples: (i+1)*num_val_samples]
    # Training data is everything before and after the held-out fold.
    partial_train_data = np.concatenate([train_data[:i*num_val_samples],
                                         train_data[(i+1)*num_val_samples:]],
                                         axis=0)
    partial_train_targets = np.concatenate([train_targets[:i*num_val_samples],
                                            train_targets[(i+1)*num_val_samples:]],
                                            axis=0)
    # Fresh model per fold so weights do not carry over between folds.
    model = build_model()
    model.fit(partial_train_data, partial_train_targets,
              epochs=num_epochs, batch_size=1, verbose=0)
    val_mse, val_mae = model.evaluate(val_data, val_targets)
    maes.append(val_mae)
# Average validation MAE across the k folds.
mae = np.mean(maes)
print(maes, mae)
# -
# ## epochs = 500
# +
import numpy as np
k = 4
num_val_samples = len(train_data) // k  # // is floor (integer) division
num_epochs = 500
mae_histories = []
# Same K-fold split as above, but train for 500 epochs and keep the full
# per-epoch validation-MAE history of every fold instead of a single score.
for i in range(k):
    print('processing fold #', i)
    val_data = train_data[i*num_val_samples: (i+1)*num_val_samples]
    val_targets = train_targets[i*num_val_samples: (i+1)*num_val_samples]
    partial_train_data = np.concatenate([train_data[:i*num_val_samples],
                                         train_data[(i+1)*num_val_samples:]],
                                         axis=0)
    partial_train_targets = np.concatenate([train_targets[:i*num_val_samples],
                                            train_targets[(i+1)*num_val_samples:]],
                                            axis=0)
    model = build_model()
    # validation_data makes Keras record val_* metrics after every epoch.
    history = model.fit(partial_train_data, partial_train_targets,
                        validation_data=(val_data, val_targets),
                        epochs=num_epochs, batch_size=1, verbose=0)
    print(history.history.keys())
    # NOTE(review): the history key name is Keras-version dependent
    # ('val_mean_absolute_error' here; newer versions use 'val_mae').
    mae_histories.append(history.history['val_mean_absolute_error'])
# -
#计算每轮K折的平均值
print(len(mae_histories), len(mae_histories[0]))
avg_mae_history = [np.mean([n[i] for n in mae_histories]) for i in range(num_epochs)]
print(len(avg_mae_history), avg_mae_history[:5])
# +
import matplotlib.pyplot as plt
plt.plot(range(1, len(avg_mae_history)+1), avg_mae_history)
plt.xlabel('Epochs')
plt.ylabel('Validation MAE')
plt.show()
# -
#把前面10个异常值移除
after_avg_mae_history_10 = avg_mae_history[10:]
plt.plot(range(1, len(after_avg_mae_history_10)+1), after_avg_mae_history_10)
plt.xlabel('Epochs')
plt.ylabel('Validation MAE')
plt.show()
# ## 通过指数移动平均值,以得到光滑的曲线。
# +
def smooth_curve(points, factor=0.9):
    """Exponential moving average of `points`.

    Each output value is `factor` times the previous smoothed value plus
    `(1 - factor)` times the current point; the first point passes through
    unchanged.  Returns a new list, leaving `points` untouched.
    """
    smoothed = []
    for value in points:
        if not smoothed:
            # No history yet: seed the average with the raw value.
            smoothed.append(value)
        else:
            smoothed.append(smoothed[-1] * factor + value * (1 - factor))
    return smoothed
smooth_mae_history = smooth_curve(avg_mae_history[10:])
plt.plot(range(1, len(smooth_mae_history)+1), smooth_mae_history)
plt.xlabel('Epochs')
plt.ylabel('Smooth Validation MAE')
plt.show()
# -
# ## 训练最终模型
model = build_model()
model.fit(train_data, train_targets, epochs=80, batch_size=16)
test_mse_score, test_mae_score = model.evaluate(test_data, test_targets)
print(test_mae_score)
#加入静默模式
model = build_model()
model.fit(train_data, train_targets, epochs=80, batch_size=16, verbose=0)
test_mse_score, test_mae_score = model.evaluate(test_data, test_targets)
print(test_mae_score)
# ## 预测的房价和实际价格相差约2774美元
model.predict(test_data[:10])
| 3.6_boston_housing.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# language: python
# name: python3
# ---
# +
'''
File: fn_simp_03.py
This more complicated test advances two equations simultaneously:
1) a laser pulse envelope, propagating to the right, while also dispersing
2) a static distribution of excited states in a crystal
Something is wrong...
I posted a question in the FEniCS user forum on 20201227
'''
from dolfin import *
import matplotlib.pyplot as plt
# Specify the BCs
def on_left(x, on_boundary):
    """True when `x` lies on the domain boundary at the left end (x[0] == 0)."""
    return on_boundary and near(x[0], 0.)
def on_right(x, on_boundary):
    """True when `x` lies on the domain boundary at the right end (x[0] == 1)."""
    return on_boundary and near(x[0], 1.)
# configuration parameters
V_x = 0.7212389380530974
n_ds = 226
ds = 0.004424778761061947
c_width = 0.4424778761061947
c_density = 1.e8
lp_width = 0.27876106194690264
lp_density = 1.
# create a 1D mesh on the interval [0,1]
mesh = UnitIntervalMesh(n_ds)
V = VectorFunctionSpace(mesh, "Lagrange", 1, dim=2)
v_1, v_2 = TestFunctions(V)
u = Function(V)
uprev = Function(V)
# instantiate the Dirichlet BCs
bc_left = DirichletBC(V, [Constant(0),Constant(0)], on_left)
bc_right = DirichletBC(V, [Constant(0),Constant(0)], on_right)
bc = [bc_left, bc_right]
# specify and apply the initial conditions
u0 = Expression(('x[0]>2.*ds && x[0]<(lp_width-2.*ds) ? \
lp_density*sin(pi*(x[0]-2.*ds)/(lp_width-4.*ds)) : 0.', \
'x[0]>=lp_width && x[0]<=(lp_width+c_width) ? \
c_density*sin(pi*(x[0]-lp_width)/c_width) : 0.'),
degree=2, lp_width=0.2, c_width=0.6, lp_density=1., c_density=1., ds=0.01)
u0.c_width = c_width
u0.lp_width = lp_width
u0.lp_density = lp_density
u0.c_density = c_density
u0.ds = ds
# project the above expression onto the solution vector
u = interpolate(u0, V)
uprev.assign(u)
u_1, u_2 = split(u)
uprev_1, uprev_2 = split(uprev)
n_steps = 100
time = 0.8
dt = time / (n_steps + 1)
DT = Constant(dt)
VX = Constant(V_x)
uplot = project(u, V)
uplot_1, uplot_2 = split(uplot)
# plot the curves at t=0
plot(uplot_1, title=('Photon density in laser pulse (initial)'))
plt.grid(True)
plt.show()
plt.close()
plot(uplot_2, title=('Density of crystal excited states (initial)'))
plt.grid(True)
plt.show()
plt.close()
# solve the system of equations
for i_loop in range(0, n_steps):
F = ( (u_1-uprev_1)*v_1/DT + VX*u_1.dx(0)*v_1 ) * dx \
+ ( (u_2-uprev_2)*v_2/DT ) * dx
solve(F==0, u, bc)
uprev.assign(u)
uplot = project(u, V)
uplot_1, uplot_2 = split(uplot)
# plot the curves at the end of the simulation
plot(uplot_1, title=('Photon density in laser pulse (final)'))
plt.grid(True)
plt.show()
plt.close()
plot(uplot_2, title=('Density of crystal excited states (final)'))
plt.grid(True)
plt.show()
plt.close()
# -
| examples/notebooks/amplifier/fn_simp_03.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# language: python
# name: python3
# ---
# +
from bluesky import RunEngine
from bluesky.plans import scan
from ophyd.sim import motor, det
from bluesky_widgets.utils.streaming import stream_documents_into_runs
from bluesky_widgets.models.plot_builders import PromptPlotter
from bluesky_widgets.jupyter.figures import JupyterFigures
from bluesky_widgets.models.plot_specs import LineSpec, AxesSpec, FigureSpec
def prompt_line_builder(run):
    """
    Build a single-axes figure spec plotting "det" against "motor".

    This is a simple example: it hard-codes the assumption that the run's
    primary stream contains columns named "motor" and "det".
    """
    def extract_xy(run):
        "Return any arrays x, y. They must be of equal length."
        # Read lazily so that large arrays are only loaded when used.
        dataset = run.primary.read()
        # Any computation on the dataset would go here.
        return dataset["motor"], dataset["det"]

    scan_label = f"Scan {run.metadata['start']['scan_id']}"
    line = LineSpec(extract_xy, run, label=scan_label)
    axes = AxesSpec(lines=[line], x_label="motor", y_label="det")
    return FigureSpec((axes,), title="det v motor")
RE = RunEngine()
model = PromptPlotter([prompt_line_builder])
view = JupyterFigures(model.figures)
RE.subscribe(stream_documents_into_runs(model.add_run))
# -
view
model.figures.clear()
motor.delay = 0.1 # Turn up simulated motor movement delay.
def plan():
for i in range(1, 5):
yield from scan([det], motor, -1, 1, 1 + 2 * i)
RE(plan())
del model.figures[2]
model.figures[0].axes[0].by_label["Scan 1"][0].style.update(color="red")
# Generate example data. (This takes a couple seconds to simulate some scans.)
from bluesky_widgets.examples.utils.generate_msgpack_data import get_catalog
catalog = get_catalog()
scans = catalog.search({"plan_name": "scan"})
model.add_run(scans[-2])
| bluesky_widgets/examples/PromptPlotter.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: 'Python 3.8.10 64-bit (''ML_38'': conda)'
# name: python3
# ---
# # Smart Beta Portfolio and Portfolio Optimization
import sys
# !{sys.executable} -m pip install -r requirements.txt
# ### Load Packages
# +
import pandas as pd
import numpy as np
import helper
import project_helper
import project_tests
import warnings
warnings.filterwarnings('ignore')
# -
# ## Market Data
# ### Load Data
# For this universe of stocks, we'll be selecting large dollar volume stocks. We're using this universe, since it is highly liquid.
# +
df = pd.read_csv('./data/eod-quotemedia.csv')
percent_top_dollar = 0.2
high_volume_symbols = project_helper.large_dollar_volume_stocks(df, 'adj_close', 'adj_volume', percent_top_dollar)
df = df[df['ticker'].isin(high_volume_symbols)]
close = df.reset_index().pivot(index='date', columns='ticker', values='adj_close')
volume = df.reset_index().pivot(index='date', columns='ticker', values='adj_volume')
dividends = df.reset_index().pivot(index='date', columns='ticker', values='dividends')
# -
# ### View Data
# To see what one of these 2-d matrices looks like, let's take a look at the closing prices matrix.
project_helper.print_dataframe(close)
# # Part 1: Smart Beta Portfolio
# build a portfolio using dividend yield to choose the portfolio weights. A portfolio such as this could be incorporated into a smart beta ETF. This portfolio is then compared to a market cap weighted index to see how well it performs.
#
# In practice, one probably gets the index weights from a data vendor (such as companies that create indices, like MSCI, FTSE, Standard and Poor's). For the sake of practice, a market cap weighted index is simulated.
#
# ## Index Weights
# The index we'll be using is based on large dollar volume stocks. `generate_dollar_volume_weights` generates the weights for this index. For each date, the weights based on dollar volume traded for that date are generated. For example, assume the following is close prices and volume data:
# ```
# Prices
# A B ...
# 2013-07-08 2 2 ...
# 2013-07-09 5 6 ...
# 2013-07-10 1 2 ...
# 2013-07-11 6 5 ...
# ... ... ... ...
#
# Volume
# A B ...
# 2013-07-08 100 340 ...
# 2013-07-09 240 220 ...
# 2013-07-10 120 500 ...
# 2013-07-11 10 100 ...
# ... ... ... ...
# ```
# The weights created from the function `generate_dollar_volume_weights` should be the following:
# ```
# A B ...
# 2013-07-08 0.126.. 0.194.. ...
# 2013-07-09 0.759.. 0.377.. ...
# 2013-07-10 0.075.. 0.285.. ...
# 2013-07-11 0.037.. 0.142.. ...
# ... ... ... ...
# ```
# +
def generate_dollar_volume_weights(close, volume):
    """
    Generate dollar volume weights.

    A ticker's weight on each date is its traded dollar volume
    (close * volume) divided by the total dollar volume traded across all
    tickers that date, so every row sums to 1.

    Parameters
    ----------
    close : DataFrame
        Close price for each ticker and date
    volume : DataFrame
        Volume for each ticker and date

    Returns
    -------
    dollar_volume_weights : DataFrame
        The dollar volume weights for each ticker and date
    """
    assert close.index.equals(volume.index)
    assert close.columns.equals(volume.columns)

    dollar_volume = close.mul(volume)
    daily_totals = dollar_volume.sum(axis=1)
    return dollar_volume.div(daily_totals, axis=0)
project_tests.test_generate_dollar_volume_weights(generate_dollar_volume_weights)
# -
# ### View Data
# Let's generate the index weights using `generate_dollar_volume_weights` and view them using a heatmap.
index_weights = generate_dollar_volume_weights(close, volume)
project_helper.plot_weights(index_weights, 'Index Weights')
# ## Portfolio Weights
# Having the index weights, the next step is choosing the portfolio weights based on dividends — normally calculated from trailing dividend yield. For simplicity, the portfolio weights are instead estimated by calculating the total dividend yield over time.
#
# `calculate_dividend_weights` returns the weights for each stock based on its total dividend yield over time. This is similar to generating the weight for the index, but it's using dividend data instead.
#
# For example, assume the following is `dividends` data:
# ```
# Prices
# A B
# 2013-07-08 0 0
# 2013-07-09 0 1
# 2013-07-10 0.5 0
# 2013-07-11 0 0
# 2013-07-12 2 0
# ... ... ...
# ```
# The weights created from the function `calculate_dividend_weights` should be the following:
# ```
# A B
# 2013-07-08 NaN NaN
# 2013-07-09 0 1
# 2013-07-10 0.333.. 0.666..
# 2013-07-11 0.333.. 0.666..
# 2013-07-12 0.714.. 0.285..
# ... ... ...
# ```
# +
def calculate_dividend_weights(dividends):
    """
    Calculate dividend weights.

    Weights are based on cumulative dividends paid up to each date: a
    ticker's weight is its running dividend total divided by the running
    total across all tickers for that date.  Dates before any dividend is
    paid produce NaN (0/0).

    Parameters
    ----------
    dividends : DataFrame
        Dividend for each stock and date

    Returns
    -------
    dividend_weights : DataFrame
        Weights for each stock and date
    """
    running_totals = dividends.cumsum()
    return running_totals.div(running_totals.sum(axis=1), axis=0)
project_tests.test_calculate_dividend_weights(calculate_dividend_weights)
# -
# ### View Data
# Just like the index weights, let's generate the ETF weights and view them using a heatmap.
etf_weights = calculate_dividend_weights(dividends)
project_helper.plot_weights(etf_weights, 'ETF Weights')
# ## Returns
# `generate_returns` generates returns data for all the stocks and dates from price data. Since we're not dealing with volatility, we don't have to use ***log returns***.
# +
def generate_returns(prices):
    """
    Generate returns for ticker and date.

    The return on date t is prices[t] / prices[t-1] - 1 (simple, not log,
    returns).  The first row is NaN because it has no prior price.

    Parameters
    ----------
    prices : DataFrame
        Price for each ticker and date

    Returns
    -------
    returns : Dataframe
        The returns for each ticker and date
    """
    previous = prices.shift(1)
    return (prices - previous) / previous
project_tests.test_generate_returns(generate_returns)
# -
# ### View Data
# Let's generate the closing returns using `generate_returns` and view them using a heatmap.
returns = generate_returns(close)
project_helper.plot_returns(returns, 'Close Returns')
# ## Weighted Returns
# With the returns of each stock computed, we can use it to compute the returns for an index or ETF. `generate_weighted_returns` creates weighted returns using the returns and weights.
# +
def generate_weighted_returns(returns, weights):
    """
    Generate weighted returns.

    Element-wise product of returns and weights: each ticker's return on
    each date is scaled by its portfolio weight for that date.

    Parameters
    ----------
    returns : DataFrame
        Returns for each ticker and date
    weights : DataFrame
        Weights for each ticker and date

    Returns
    -------
    weighted_returns : DataFrame
        Weighted returns for each ticker and date
    """
    assert returns.index.equals(weights.index)
    assert returns.columns.equals(weights.columns)

    return returns.mul(weights)
project_tests.test_generate_weighted_returns(generate_weighted_returns)
# -
# ### View Data
# Let's generate the ETF and index returns using `generate_weighted_returns` and view them using a heatmap.
index_weighted_returns = generate_weighted_returns(returns, index_weights)
etf_weighted_returns = generate_weighted_returns(returns, etf_weights)
project_helper.plot_returns(index_weighted_returns, 'Index Returns')
project_helper.plot_returns(etf_weighted_returns, 'ETF Returns')
# ## Cumulative Returns
# To compare performance between the ETF and Index, we're going to calculate the tracking error. Before we do that, we first need to calculate the index and ETF cumulative returns. `calculate_cumulative_returns` calculates the cumulative returns over time given the returns.
# +
def calculate_cumulative_returns(returns):
    """
    Calculate cumulative returns.

    Parameters
    ----------
    returns : DataFrame
        Returns for each ticker and date

    Returns
    -------
    cumulative_returns : Pandas Series
        Cumulative returns for each date
    """
    # Portfolio return per date is the sum over tickers; adding 1 turns each
    # per-date return into a growth factor so they can be chained with a
    # cumulative product: cumulative return = prod(1 + r(t)) = p(t)/p(0).
    growth_factors = returns.sum(axis=1) + 1
    # The first date has no prior price (returns came from a shift), so it is
    # excluded from the product.
    # BUGFIX: use positional .iloc[0] instead of `growth_factors[0]` — integer
    # keys on a label-based Series break (or silently append a new `0` label)
    # when the index is a DatetimeIndex on modern pandas.
    growth_factors.iloc[0] = np.nan
    cumulative_returns = growth_factors.cumprod(axis=0, skipna=True)
    return cumulative_returns
project_tests.test_calculate_cumulative_returns(calculate_cumulative_returns)
# -
# ### View Data
# Let's generate the ETF and index cumulative returns using `calculate_cumulative_returns` and compare the two.
index_weighted_cumulative_returns = calculate_cumulative_returns(index_weighted_returns)
etf_weighted_cumulative_returns = calculate_cumulative_returns(etf_weighted_returns)
project_helper.plot_benchmark_returns(index_weighted_cumulative_returns, etf_weighted_cumulative_returns, 'Smart Beta ETF vs Index')
# ## Tracking Error
# In order to check the performance of the smart beta portfolio, we can calculate the annualized tracking error against the index. `tracking_error` returns the tracking error between the ETF and benchmark.
#
# For reference, we'll be using the following annualized tracking error function:
# $$ TE = \sqrt{252} * SampleStdev(r_p - r_b) $$
#
# Where $ r_p $ is the portfolio/ETF returns and $ r_b $ is the benchmark returns.
#
# _Note: When calculating the sample standard deviation, the delta degrees of freedom is 1, which is the also the default value._
# +
def tracking_error(benchmark_returns_by_date, etf_returns_by_date):
    """
    Calculate the annualized tracking error.

    TE = sqrt(252) * sample_stdev(r_etf - r_benchmark), with delta degrees
    of freedom 1 for the sample standard deviation.

    Parameters
    ----------
    benchmark_returns_by_date : Pandas Series
        The benchmark returns for each date
    etf_returns_by_date : Pandas Series
        The ETF returns for each date

    Returns
    -------
    tracking_error : float
        The tracking error
    """
    # The two series must cover exactly the same dates.
    assert benchmark_returns_by_date.index.equals(etf_returns_by_date.index)
    active_return = etf_returns_by_date - benchmark_returns_by_date
    annualization_factor = np.sqrt(252)
    return annualization_factor * np.std(active_return, ddof=1)
project_tests.test_tracking_error(tracking_error)
# -
# ### View Data
# Let's generate the tracking error using `tracking_error`.
smart_beta_tracking_error = tracking_error(np.sum(index_weighted_returns, 1), np.sum(etf_weighted_returns, 1))
print('Smart Beta Tracking Error: {}'.format(smart_beta_tracking_error))
# # Part 2: Portfolio Optimization
#
# Create a second portfolio that still reuses the market cap weighted index, but it will be independent of the dividend-weighted portfolio that was created in part 1.
#
# >**Objective**: We want to both minimize the portfolio variance and also want to closely track a market cap weighted index. In other words, we're trying to minimize the distance between the weights of our portfolio and the weights of the index.
#
# $Minimize \left [ \sigma^2_p + \lambda \sqrt{\sum_{1}^{m}(weight_i - indexWeight_i)^2} \right ]$
# <br>where $m$ is the number of stocks in the portfolio,<br>
# and $\lambda$ is a scaling factor that you can choose.
#
# **Why?**
#
# One way that investors evaluate a fund is by how well it tracks its index. The fund is still expected to deviate from the index within a certain range in order to improve fund performance. A way for a fund to track the performance of its benchmark is by keeping its asset weights similar to the weights of the index. We’d expect that if the fund has the same stocks as the benchmark, and also the same weights for each stock as the benchmark, the fund would yield about the same returns as the benchmark. By minimizing a linear combination of both the portfolio risk and distance between portfolio and benchmark weights, we attempt to balance the desire to minimize portfolio variance with the goal of tracking the index.
#
#
# ## Covariance
# `get_covariance_returns` calculates the covariance of the `returns`. This will be used to calculate the portfolio variance.
#
# If we have $m$ stock series, the covariance matrix is an $m \times m$ matrix containing the covariance between each pair of stocks. [`Numpy.cov`](https://docs.scipy.org/doc/numpy/reference/generated/numpy.cov.html) is used to get the covariance. The input is a 2D array in which each row is a stock series, and each column is an observation at the same period of time. `NaN` values are replaced with zeros using the [`DataFrame.fillna`](https://pandas.pydata.org/pandas-docs/stable/generated/pandas.DataFrame.fillna.html) function.
#
# The covariance matrix $\mathbf{P} =
# \begin{bmatrix}
# \sigma^2_{1,1} & ... & \sigma^2_{1,m} \\
# ... & ... & ...\\
# \sigma_{m,1} & ... & \sigma^2_{m,m} \\
# \end{bmatrix}$
# +
def get_covariance_returns(returns):
    """
    Calculate the covariance matrix of the returns.

    NaN entries are treated as zero returns before computing the covariance.

    Parameters
    ----------
    returns : DataFrame
        Returns for each ticker and date (rows = dates, columns = tickers)

    Returns
    -------
    returns_covariance : 2 dimensional Ndarray
        The m x m covariance of the returns, where m is the number of tickers
    """
    filled_returns = returns.fillna(0)
    # np.cov expects variables in rows; rowvar=False keeps tickers as columns.
    return np.cov(filled_returns.values, rowvar=False)
project_tests.test_get_covariance_returns(get_covariance_returns)
# -
# ### View Data
# Let's look at the covariance generated from `get_covariance_returns`.
# +
covariance_returns = get_covariance_returns(returns)
covariance_returns = pd.DataFrame(covariance_returns, returns.columns, returns.columns)
covariance_returns_correlation = np.linalg.inv(np.diag(np.sqrt(np.diag(covariance_returns))))
covariance_returns_correlation = pd.DataFrame(
covariance_returns_correlation.dot(covariance_returns).dot(covariance_returns_correlation),
covariance_returns.index,
covariance_returns.columns)
'''This generates an error with plotly 6. on my local machine
project_helper.plot_covariance_returns_correlation(
covariance_returns_correlation,
'Covariance Returns Correlation Matrix')
'''
# -
# ### portfolio variance
# We can write the portfolio variance $\sigma^2_p = \mathbf{x^T} \mathbf{P} \mathbf{x}$
#
# where $\mathbf{x^T} \mathbf{P} \mathbf{x}$ is called the quadratic form.
# We can use the cvxpy function `quad_form(x,P)` to get the quadratic form.
#
# ### Distance from index weights
# We want portfolio weights that track the index closely. So we want to minimize the distance between them by calculating the L2 norm. So: $\sqrt{\sum_{1}^{n}(weight_i - indexWeight_i)^2}$ Can also be written as $\left \| \mathbf{x} - \mathbf{index} \right \|_2$. There's a cvxpy function called [norm()](https://www.cvxpy.org/api_reference/cvxpy.atoms.other_atoms.html#norm)
# `norm(x, p=2, axis=None)`. The default is already set to find an L2 norm, so you would pass in one argument, which is the difference between your portfolio weights and the index weights.
#
# ### objective function
# We want to minimize both the portfolio variance and the distance of the portfolio weights from the index weights.
# We also want to choose a `scale` constant, which is $\lambda$ in the expression.
#
# $\mathbf{x^T} \mathbf{P} \mathbf{x} + \lambda \left \| \mathbf{x} - \mathbf{index} \right \|_2$
#
#
# This lets us choose how much priority we give to minimizing the difference from the index, relative to minimizing the variance of the portfolio. If you choose a higher value for `scale` ($\lambda$).
#
# We can find the objective function using cvxpy `objective = cvx.Minimize()`.
#
#
# ### constraints
# We can also define our constraints in a list. For example, you'd want the weights to sum to one. So $\sum_{1}^{n}x = 1$. You may also need to go long only, which means no shorting, so no negative weights. So $x_i >0 $ for all $i$. you could save a variable as `[x >= 0, sum(x) == 1]`, where x was created using `cvx.Variable()`.
#
# ### optimization
# So now that we have our objective function and constraints, we can solve for the values of $\mathbf{x}$.
# cvxpy has the constructor `Problem(objective, constraints)`, which returns a `Problem` object.
#
# The `Problem` object has a function solve(), which returns the minimum of the solution. In this case, this is the minimum variance of the portfolio.
#
# It also updates the vector $\mathbf{x}$.
#
# We can check out the values of $x_A$ and $x_B$ that gave the minimum portfolio variance by using `x.value`
# +
import cvxpy as cvx
def get_optimal_weights(covariance_returns, index_weights, scale=2.0):
    """
    Find the portfolio weights that minimize variance while tracking the index.

    Solves: minimize  x' P x + scale * ||x - index||_2
    subject to long-only weights that sum to one.

    Parameters
    ----------
    covariance_returns : 2 dimensional Ndarray
        The covariance of the returns
    index_weights : Pandas Series
        Index weights for all tickers at a period in time
    scale : int
        The penalty factor for weights that deviate from the index

    Returns
    -------
    x : 1 dimensional Ndarray
        The solution for x
    """
    # Sanity checks: square covariance matrix matching the weight vector.
    assert len(covariance_returns.shape) == 2
    assert len(index_weights.shape) == 1
    assert covariance_returns.shape[0] == covariance_returns.shape[1] == index_weights.shape[0]

    n_assets = len(index_weights)
    weights = cvx.Variable(n_assets)

    # Portfolio variance in quadratic form: x' P x
    variance_term = cvx.quad_form(weights, covariance_returns)
    # L2 distance between the portfolio and the index weights
    tracking_term = cvx.norm(weights - index_weights)

    objective = cvx.Minimize(variance_term + scale * tracking_term)
    # Long-only, fully invested
    constraints = [weights >= 0, sum(weights) == 1]
    cvx.Problem(objective, constraints).solve()

    return weights.value
project_tests.test_get_optimal_weights(get_optimal_weights)
# -
# ## Optimized Portfolio
# Using the `get_optimal_weights` function, let's generate the optimal ETF weights without rebalancing. We can do this by feeding in the covariance of the entire history of data. We also need to feed in a set of index weights; here we use the most recent index weights (the last row).
raw_optimal_single_rebalance_etf_weights = get_optimal_weights(covariance_returns.values, index_weights.iloc[-1])
optimal_single_rebalance_etf_weights = pd.DataFrame(
np.tile(raw_optimal_single_rebalance_etf_weights, (len(returns.index), 1)),
returns.index,
returns.columns)
# With ETF weights built, we compare it to the index. The next cell calculates the ETF returns and compare it to the index returns.
# +
optim_etf_returns = generate_weighted_returns(returns, optimal_single_rebalance_etf_weights)
optim_etf_cumulative_returns = calculate_cumulative_returns(optim_etf_returns)
project_helper.plot_benchmark_returns(index_weighted_cumulative_returns, optim_etf_cumulative_returns, 'Optimized ETF vs Index')
optim_etf_tracking_error = tracking_error(np.sum(index_weighted_returns, 1), np.sum(optim_etf_returns, 1))
print('Optimized ETF Tracking Error: {}'.format(optim_etf_tracking_error))
# -
# ## Rebalance Portfolio Over Time
# The single optimized ETF portfolio used the same weights for the entire history. This might not be the optimal weights for the entire period. Another strategy is to rebalance the portfolio over the same period instead of using the same weights. `rebalance_portfolio` rebalances a portfolio.
#
# Rebalance the portfolio every n number of days, which is given as `shift_size`. When rebalancing, one should look back a certain number of days of data in the past, denoted as `chunk_size`. Using this data, optimal weights are computed using `get_optimal_weights` and `get_covariance_returns`.
# +
def rebalance_portfolio(returns, index_weights, shift_size, chunk_size):
    """
    Get weights for each rebalancing of the portfolio.

    The portfolio is rebalanced every ``shift_size`` days; each rebalance
    looks back over the previous ``chunk_size`` days of returns to estimate
    the covariance and solve for optimal weights. For example, with
    shift_size=2 and chunk_size=4, a rebalance on Oct-17 uses data from
    Oct-14 through Oct-17 and the next rebalance happens on Oct-19 using
    Oct-16 through Oct-19.

    Parameters
    ----------
    returns : DataFrame
        Returns for each ticker and date
    index_weights : DataFrame
        Index weight for each ticker and date
    shift_size : int
        The number of days between each rebalance
    chunk_size : int
        The number of days to look in the past for rebalancing

    Returns
    -------
    all_rebalance_weights : list of Ndarrays
        The ETF weights for each point they are rebalanced
    """
    assert returns.index.equals(index_weights.index)
    assert returns.columns.equals(index_weights.columns)
    assert shift_size > 0
    assert chunk_size >= 0

    rebalance_weights = []
    n_days = returns.shape[0]
    # Each window ends at `end_idx` (exclusive) and spans the prior chunk_size days.
    for end_idx in range(chunk_size, n_days, shift_size):
        window_returns = returns.iloc[end_idx - chunk_size:end_idx]
        window_covariance = get_covariance_returns(window_returns)
        # Track the index weights as of the last day inside the window.
        optimal = get_optimal_weights(window_covariance, index_weights.iloc[end_idx - 1])
        rebalance_weights.append(optimal)
    return rebalance_weights
project_tests.test_rebalance_portfolio(rebalance_portfolio)
# -
# Run the following cell to rebalance the portfolio using `rebalance_portfolio`.
chunk_size = 250
shift_size = 5
all_rebalance_weights = rebalance_portfolio(returns, index_weights, shift_size, chunk_size)
# ## Portfolio Turnover
# With the portfolio rebalanced, we need to use a metric to measure the cost of rebalancing the portfolio. `get_portfolio_turnover` calculates the annual portfolio turnover. The following formulas will be used:
#
# $ AnnualizedTurnover =\frac{SumTotalTurnover}{NumberOfRebalanceEvents} * NumberofRebalanceEventsPerYear $
#
# $ SumTotalTurnover =\sum_{t,n}{\left | x_{t,n} - x_{t+1,n} \right |} $ Where $ x_{t,n} $ are the weights at time $ t $ for equity $ n $.
#
# $ SumTotalTurnover $ is just a different way of writing $ \sum \left | x_{t_1,n} - x_{t_2,n} \right | $
# +
def get_portfolio_turnover(all_rebalance_weights, shift_size, rebalance_count, n_trading_days_in_year=252):
    """
    Calculate the annualized portfolio turnover.

    AnnualizedTurnover = SumTotalTurnover / NumberOfRebalanceEvents
                         * NumberOfRebalanceEventsPerYear
    where SumTotalTurnover = sum over t,n of |x_{t,n} - x_{t+1,n}|.

    Parameters
    ----------
    all_rebalance_weights : list of Ndarrays
        The ETF weights for each point they are rebalanced
    shift_size : int
        The number of days between each rebalance
    rebalance_count : int
        Number of times the portfolio was rebalanced
    n_trading_days_in_year: int
        Number of trading days in a year

    Returns
    -------
    portfolio_turnover : float
        The portfolio turnover
    """
    assert shift_size > 0
    assert rebalance_count > 0

    weights_over_time = pd.DataFrame(all_rebalance_weights)
    # |x_t - x_{t+1}| per asset; the final row has no successor, so its
    # NaN differences are treated as zero.
    weight_changes = (weights_over_time - weights_over_time.shift(-1)).fillna(0).values
    total_turnover = np.sum(np.abs(weight_changes))
    # shift_size * rebalance_count is the number of trading days covered.
    days_covered = shift_size * rebalance_count
    return total_turnover * n_trading_days_in_year / days_covered
project_tests.test_get_portfolio_turnover(get_portfolio_turnover)
# -
# Run the following cell to get the portfolio turnover from `get_portfolio_turnover`.
print(get_portfolio_turnover(all_rebalance_weights, shift_size, len(all_rebalance_weights) - 1))
# That's it! You've built a smart beta portfolio in part 1 and did portfolio optimization in part 2. You can now submit your project.
#
| PortfolioOptimization.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# language: python
# name: python3
# ---
# # ElasticNet Regression (L1/L2 penalty)
# * `simi5` -- ElasticNet, Coordinate Descent
# * `simi6` -- ElasticNet, SGD
#
# +
# add path
import sys; import os; sys.path.append(os.path.realpath("../"))
# general hyperparameter optimization settings
from seasalt import (select_the_best, refit_model)
from seasalt.si import (cv_settings, scorerfun, print_scores)
from sklearn.model_selection import RandomizedSearchCV
# -
# demo datasets
from datasets.demo2 import X_train, Y_train, fold_ids, X_valid, Y_valid, meta as meta_data
#meta_data
# model implementations
#from potpourri.simi4 import model, hyper, meta # Coordinate Descent
from potpourri.simi5 import model, hyper, meta # SGD
meta
# ## Train
# +
# %%time
rscv = RandomizedSearchCV(**{'estimator': model, 'param_distributions': hyper}, **cv_settings)
rscv.fit(X = X_train, y = Y_train) # Run CV
bestparam, summary = select_the_best(rscv) # find the "best" parameters
bestmodel = refit_model(model, bestparam, X_train, Y_train) # Refit the "best" model
# +
#rscv.cv_results_
# -
# ## Evaluate
# +
print("Infer/predict on validation set")
Y_pred = bestmodel.predict(X_valid)
print("\nOut of sample score")
print(scorerfun(Y_valid, Y_pred))
print("\nOut of sample score (Other metrics)")
print_scores(Y_pred, Y_valid)
print("\nBest model parameters")
print(bestparam)
print("\nIn-sample scores and model variants (from CV)")
summary
# -
# ### Parameters
bestmodel.steps[1][1].coef_
# ### Target vs Predicted
# +
import numpy as np
import matplotlib.pyplot as plt
# %matplotlib inline
plt.figure(figsize=(6,6))
plt.scatter(y=Y_pred, x=Y_valid);
#plt.scatter(x=np.log(Y_pred), y=np.log(Y_valid));
plt.xlabel('target');
plt.ylabel('predicted');
# -
# ## Debug, Memory, Misc
#del summary
#locals()
# %whos
| nbs/simi - ElasticNet.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# language: python
# name: python3
# ---
# # Examin the size of an environment, package by package
#
# **NOTE**: this is created mostly from:
#
# > [https://uwekorn.com/2020/09/08/trimming-down-pyarrow-conda-1-of-x.html](https://uwekorn.com/2020/09/08/trimming-down-pyarrow-conda-1-of-x.html)
# ## Import and utilities
# +
from pathlib import Path
from glob import glob
import json
import pandas as pd
import matplotlib.pyplot as plt
def get_clean_suffix(name):
    """Return the last non-numeric filename suffix of `name`
    (e.g. 'so' for 'libfoo.so.1'), or '' if there is none."""
    suffixes = [x for x in name.split(".")[1:] if not x.isnumeric()]
    # `suffixes or [""]` yields "" when every suffix was numeric or absent.
    return (suffixes or [""])[-1]


def gather_files(environment, verbose=False):
    """Gather the list of files installed in a conda environment.

    Reads every ``conda-meta/*.json`` package manifest under `environment`
    and records each listed regular (non-symlink) file.

    Parameters
    ----------
    environment : str or Path
        Root of the conda environment (must contain ``conda-meta``).
    verbose : bool
        If True, print files whose size could not be determined.

    Returns
    -------
    pandas.DataFrame
        One row per file with columns ``package``, ``name``,
        ``size`` (bytes) and ``suffix``.
    """
    files = []
    for meta in glob(f'{environment}/conda-meta/*.json'):
        with open(meta, "r") as f:
            info = json.load(f)
        for file in info["files"]:
            try:
                path = Path(f"{environment}/{file}")
                if not path.is_symlink():
                    files.append({
                        "package": info["name"],
                        "name": file,
                        "size": path.stat().st_size,
                        "suffix": get_clean_suffix(path.name)
                    })
            except OSError:
                # BUGFIX: was a bare `except:`, which also swallowed
                # KeyboardInterrupt/SystemExit. Only stat/filesystem errors
                # (e.g. files listed in the manifest but deleted on disk)
                # should be skipped.
                if verbose:
                    print(f"Package: {meta} | File: {file}")
    return pd.DataFrame(files)
# -
# ## Collect sizes
# %time fs = gather_files(Path("/opt/conda"))
fs["size_mb"] = fs["size"] / 1024 / 1024
fs.head()
# ## Explore sizes
fs["size_mb"].sum() / 1024
# + jupyter={"outputs_hidden": true}
from qgrid import show_grid
pkgs = fs.groupby("package")\
["size_mb"]\
.sum()\
.sort_values(ascending=False)\
.reset_index()
show_grid(pkgs)
# -
pkgs[pkgs["package"].str.contains("geos")]
| utils/size_explorer.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# language: python
# name: python3
# ---
# %matplotlib inline
#
# Audio I/O and Pre-Processing with torchaudio
# ============================================
#
# PyTorch is an open source deep learning platform that provides a
# seamless path from research prototyping to production deployment with
# GPU support.
#
# Significant effort in solving machine learning problems goes into data
# preparation. ``torchaudio`` leverages PyTorch’s GPU support, and provides
# many tools to make data loading easy and more readable. In this
# tutorial, we will see how to load and preprocess data from a simple
# dataset. Please visit
# `Audio I/O and Pre-Processing with torchaudio <https://pytorch.org/tutorials/beginner/audio_preprocessing_tutorial.html>`__ to learn more.
#
# For this tutorial, please make sure the ``matplotlib`` package is
# installed for easier visualization.
#
#
#
# Uncomment the following line to run in Google Colab
# # !pip install torchaudio
import torch
import torchaudio
import requests
import matplotlib.pyplot as plt
# Opening a file
# -----------------
#
# ``torchaudio`` also supports loading sound files in the wav and mp3 format. We
# call waveform the resulting raw audio signal.
#
#
#
# +
url = "https://pytorch.org/tutorials/_static/img/steam-train-whistle-daniel_simon-converted-from-mp3.wav"
r = requests.get(url)
with open('steam-train-whistle-daniel_simon-converted-from-mp3.wav', 'wb') as f:
f.write(r.content)
filename = "steam-train-whistle-daniel_simon-converted-from-mp3.wav"
waveform, sample_rate = torchaudio.load(filename)
print("Shape of waveform: {}".format(waveform.size()))
print("Sample rate of waveform: {}".format(sample_rate))
plt.figure()
plt.plot(waveform.t().numpy())
# -
# When you load a file in ``torchaudio``, you can optionally specify the backend to use either
# `SoX <https://pypi.org/project/sox/>`_ or `SoundFile <https://pypi.org/project/SoundFile/>`_
# via ``torchaudio.set_audio_backend``. These backends are loaded lazily when needed.
#
# ``torchaudio`` also makes JIT compilation optional for functions, and uses ``nn.Module`` where possible.
#
#
# Transformations
# ---------------
#
# ``torchaudio`` supports a growing list of
# `transformations <https://pytorch.org/audio/stable/transforms.html>`_.
#
# - **Resample**: Resample waveform to a different sample rate.
# - **Spectrogram**: Create a spectrogram from a waveform.
# - **GriffinLim**: Compute waveform from a linear scale magnitude spectrogram using
# the Griffin-Lim transformation.
# - **ComputeDeltas**: Compute delta coefficients of a tensor, usually a spectrogram.
# - **ComplexNorm**: Compute the norm of a complex tensor.
# - **MelScale**: This turns a normal STFT into a Mel-frequency STFT,
# using a conversion matrix.
# - **AmplitudeToDB**: This turns a spectrogram from the
# power/amplitude scale to the decibel scale.
# - **MFCC**: Create the Mel-frequency cepstrum coefficients from a
# waveform.
# - **MelSpectrogram**: Create MEL Spectrograms from a waveform using the
# STFT function in PyTorch.
# - **MuLawEncoding**: Encode waveform based on mu-law companding.
# - **MuLawDecoding**: Decode mu-law encoded waveform.
# - **TimeStretch**: Stretch a spectrogram in time without modifying pitch for a given rate.
# - **FrequencyMasking**: Apply masking to a spectrogram in the frequency domain.
# - **TimeMasking**: Apply masking to a spectrogram in the time domain.
#
# Each transform supports batching: you can perform a transform on a single raw
# audio signal or spectrogram, or many of the same shape.
#
# Since all transforms are ``nn.Modules`` or ``jit.ScriptModules``, they can be
# used as part of a neural network at any point.
#
#
#
# To start, we can look at the log of the spectrogram on a log scale.
#
#
#
# +
specgram = torchaudio.transforms.Spectrogram()(waveform)
print("Shape of spectrogram: {}".format(specgram.size()))
plt.figure()
plt.imshow(specgram.log2()[0,:,:].numpy(), cmap='gray')
# -
# Or we can look at the Mel Spectrogram on a log scale.
#
#
#
# +
specgram = torchaudio.transforms.MelSpectrogram()(waveform)
print("Shape of spectrogram: {}".format(specgram.size()))
plt.figure()
p = plt.imshow(specgram.log2()[0,:,:].detach().numpy(), cmap='gray')
# -
# We can resample the waveform, one channel at a time.
#
#
#
# +
new_sample_rate = sample_rate/10
# Since Resample applies to a single channel, we resample first channel here
channel = 0
transformed = torchaudio.transforms.Resample(sample_rate, new_sample_rate)(waveform[channel,:].view(1,-1))
print("Shape of transformed waveform: {}".format(transformed.size()))
plt.figure()
plt.plot(transformed[0,:].numpy())
# -
# As another example of transformations, we can encode the signal based on
# Mu-Law encoding. But to do so, we need the signal to be between -1 and
# 1. Since the tensor is just a regular PyTorch tensor, we can apply
# standard operators on it.
#
#
#
# Let's check if the tensor is in the interval [-1,1]
print("Min of waveform: {}\nMax of waveform: {}\nMean of waveform: {}".format(waveform.min(), waveform.max(), waveform.mean()))
# Since the waveform is already between -1 and 1, we do not need to
# normalize it.
#
#
#
# +
def normalize(tensor):
    # Center on zero mean, then scale so values lie in [-1, 1].
    centered = tensor - tensor.mean()
    return centered / centered.abs().max()
# Let's normalize to the full interval [-1,1]
# waveform = normalize(waveform)
# -
# Let’s encode the waveform.
#
#
#
# +
transformed = torchaudio.transforms.MuLawEncoding()(waveform)
print("Shape of transformed waveform: {}".format(transformed.size()))
plt.figure()
plt.plot(transformed[0,:].numpy())
# -
# And now decode.
#
#
#
# +
reconstructed = torchaudio.transforms.MuLawDecoding()(transformed)
print("Shape of recovered waveform: {}".format(reconstructed.size()))
plt.figure()
plt.plot(reconstructed[0,:].numpy())
# -
# We can finally compare the original waveform with its reconstructed
# version.
#
#
#
# +
# Compute median relative difference
err = ((waveform-reconstructed).abs() / waveform.abs()).median()
print("Median relative difference between original and MuLaw reconstucted signals: {:.2%}".format(err))
# -
# Functional
# ---------------
#
# The transformations seen above rely on lower level stateless functions for their computations.
# These functions are available under ``torchaudio.functional``. The complete list is available
# `here <https://pytorch.org/audio/functional.html>`_ and includes:
#
# - **istft**: Inverse short time Fourier Transform.
# - **gain**: Applies amplification or attenuation to the whole waveform.
# - **dither**: Increases the perceived dynamic range of audio stored at a
# particular bit-depth.
# - **compute_deltas**: Compute delta coefficients of a tensor.
# - **equalizer_biquad**: Design biquad peaking equalizer filter and perform filtering.
# - **lowpass_biquad**: Design biquad lowpass filter and perform filtering.
# - **highpass_biquad**:Design biquad highpass filter and perform filtering.
#
# For example, let's try the `mu_law_encoding` functional:
#
#
# +
mu_law_encoding_waveform = torchaudio.functional.mu_law_encoding(waveform, quantization_channels=256)
print("Shape of transformed waveform: {}".format(mu_law_encoding_waveform.size()))
plt.figure()
plt.plot(mu_law_encoding_waveform[0,:].numpy())
# -
# You can see how the output from ``torchaudio.functional.mu_law_encoding`` is the same as
# the output from ``torchaudio.transforms.MuLawEncoding``.
#
# Now let's experiment with a few of the other functionals and visualize their output. Taking our
# spectrogram, we can compute its deltas:
#
#
# +
computed = torchaudio.functional.compute_deltas(specgram.contiguous(), win_length=3)
print("Shape of computed deltas: {}".format(computed.shape))
plt.figure()
plt.imshow(computed.log2()[0,:,:].detach().numpy(), cmap='gray')
# -
# We can take the original waveform and apply different effects to it.
#
#
#
# +
gain_waveform = torchaudio.functional.gain(waveform, gain_db=5.0)
print("Min of gain_waveform: {}\nMax of gain_waveform: {}\nMean of gain_waveform: {}".format(gain_waveform.min(), gain_waveform.max(), gain_waveform.mean()))
dither_waveform = torchaudio.functional.dither(waveform)
print("Min of dither_waveform: {}\nMax of dither_waveform: {}\nMean of dither_waveform: {}".format(dither_waveform.min(), dither_waveform.max(), dither_waveform.mean()))
# -
# Another example of the capabilities in ``torchaudio.functional`` are applying filters to our
# waveform. Applying the lowpass biquad filter to our waveform will output a new waveform with
# the signal of the frequency modified.
#
#
# +
lowpass_waveform = torchaudio.functional.lowpass_biquad(waveform, sample_rate, cutoff_freq=3000)
print("Min of lowpass_waveform: {}\nMax of lowpass_waveform: {}\nMean of lowpass_waveform: {}".format(lowpass_waveform.min(), lowpass_waveform.max(), lowpass_waveform.mean()))
plt.figure()
plt.plot(lowpass_waveform.t().numpy())
# -
# We can also visualize a waveform with the highpass biquad filter.
#
#
#
# +
highpass_waveform = torchaudio.functional.highpass_biquad(waveform, sample_rate, cutoff_freq=2000)
print("Min of highpass_waveform: {}\nMax of highpass_waveform: {}\nMean of highpass_waveform: {}".format(highpass_waveform.min(), highpass_waveform.max(), highpass_waveform.mean()))
plt.figure()
plt.plot(highpass_waveform.t().numpy())
# -
# Migrating to torchaudio from Kaldi
# ----------------------------------
#
# Users may be familiar with
# `Kaldi <http://github.com/kaldi-asr/kaldi>`_, a toolkit for speech
# recognition. ``torchaudio`` offers compatibility with it in
# ``torchaudio.kaldi_io``. It can indeed read from kaldi scp, or ark file
# or streams with:
#
# - read_vec_int_ark
# - read_vec_flt_scp
# - read_vec_flt_arkfile/stream
# - read_mat_scp
# - read_mat_ark
#
# ``torchaudio`` provides Kaldi-compatible transforms for ``spectrogram``,
# ``fbank``, ``mfcc``, and ``resample_waveform`` with the benefit of GPU support, see
# `here <compliance.kaldi.html>`__ for more information.
#
#
#
# +
n_fft = 400.0
frame_length = n_fft / sample_rate * 1000.0
frame_shift = frame_length / 2.0
params = {
"channel": 0,
"dither": 0.0,
"window_type": "hanning",
"frame_length": frame_length,
"frame_shift": frame_shift,
"remove_dc_offset": False,
"round_to_power_of_two": False,
"sample_frequency": sample_rate,
}
specgram = torchaudio.compliance.kaldi.spectrogram(waveform, **params)
print("Shape of spectrogram: {}".format(specgram.size()))
plt.figure()
plt.imshow(specgram.t().numpy(), cmap='gray')
# -
# We also support computing the filterbank features from waveforms,
# matching Kaldi’s implementation.
#
#
#
# +
fbank = torchaudio.compliance.kaldi.fbank(waveform, **params)
print("Shape of fbank: {}".format(fbank.size()))
plt.figure()
plt.imshow(fbank.t().numpy(), cmap='gray')
# -
# You can create mel frequency cepstral coefficients from a raw audio signal
# This matches the input/output of Kaldi’s compute-mfcc-feats.
#
#
#
# +
mfcc = torchaudio.compliance.kaldi.mfcc(waveform, **params)
print("Shape of mfcc: {}".format(mfcc.size()))
plt.figure()
plt.imshow(mfcc.t().numpy(), cmap='gray')
# -
# Available Datasets
# -----------------
#
# If you do not want to create your own dataset to train your model, ``torchaudio`` offers a
# unified dataset interface. This interface supports lazy-loading of files to memory, download
# and extract functions, and datasets to build models.
#
# The datasets ``torchaudio`` currently supports are:
#
# - **VCTK**: Speech data uttered by 109 native speakers of English with various accents
# (`Read more here <https://homepages.inf.ed.ac.uk/jyamagis/page3/page58/page58.html>`_).
# - **Yesno**: Sixty recordings of one individual saying yes or no in Hebrew; each
# recording is eight words long (`Read more here <https://www.openslr.org/1/>`_).
# - **Common Voice**: An open source, multi-language dataset of voices that anyone can use
# to train speech-enabled applications (`Read more here <https://voice.mozilla.org/en/datasets>`_).
# - **LibriSpeech**: Large-scale (1000 hours) corpus of read English speech (`Read more here <http://www.openslr.org/12>`_).
#
#
#
# +
yesno_data = torchaudio.datasets.YESNO('./', download=True)
# A data point in Yesno is a tuple (waveform, sample_rate, labels) where labels is a list of integers with 1 for yes and 0 for no.
# Pick data point number 3 to see an example of the the yesno_data:
n = 3
waveform, sample_rate, labels = yesno_data[n]
print("Waveform: {}\nSample rate: {}\nLabels: {}".format(waveform, sample_rate, labels))
plt.figure()
plt.plot(waveform.t().numpy())
# -
# Now, whenever you ask for a sound file from the dataset, it is loaded in memory only when you ask for it.
# Meaning, the dataset only loads and keeps in memory the items that you want and use, saving on memory.
#
#
#
# Conclusion
# ----------
#
# We used an example raw audio signal, or waveform, to illustrate how to
# open an audio file using ``torchaudio``, and how to pre-process,
# transform, and apply functions to such waveform. We also demonstrated how
# to use familiar Kaldi functions, as well as utilize built-in datasets to
# construct our models. Given that ``torchaudio`` is built on PyTorch,
# these techniques can be used as building blocks for more advanced audio
# applications, such as speech recognition, while leveraging GPUs.
#
#
#
| PyTorch/Visual-Audio/Torchscript/audio_preprocessing_tutorial.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# language: python
# name: python3
# ---
import pandas as pd
import matplotlib.pyplot as plt
import seaborn as sns
# %matplotlib inline
sns.set()
# Read the movie table; loading 'genre' as a pandas categorical saves memory.
imdb_df = pd.read_csv('../data/imdb.csv', dtype = {'genre': 'category'})
# Peek at the first/last rows and summary statistics (describe() covers
# numeric columns only; include='all' adds object/categorical columns too).
imdb_df.head(3)
imdb_df.tail(3)
imdb_df.describe()
imdb_df.describe(include = 'all')
# Basic metadata: container types, per-column dtypes, row count and shape.
type(imdb_df)
type(imdb_df.star_rating)
imdb_df.dtypes
len(imdb_df)
imdb_df.shape
# Memory footprint; deep=True measures the actual string payloads, not just
# the pointer overhead.
imdb_df.info(memory_usage = 'deep')
imdb_df.memory_usage(deep = True)
imdb_df.memory_usage(deep = True).sum()
# Inspect the categorical 'genre' column: distinct values, their count,
# per-genre frequencies (including NaN), and the category index itself.
imdb_df.genre.unique()
imdb_df.genre.nunique()
imdb_df.genre.value_counts(dropna = False)
imdb_df.genre.cat.categories
# +
# Bar chart of movie counts per genre.
sns.set_context('talk')
imdb_df.genre.value_counts().plot(kind = 'bar', rot = 45, figsize = (15, 4))
plt.show()
# -
# Genre vs. content-rating contingency table, then plotted one line per rating.
pd.crosstab(imdb_df.genre, imdb_df.content_rating)
# +
sns.set_context('talk')
pd.crosstab(imdb_df.genre, imdb_df.content_rating).plot(figsize = (17, 6))
plt.show()
| pandas/reading-metadata.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# language: python
# name: python3
# ---
# <h1><center>Box Plot</center></h1><br />
import numpy as np
import pandas as pd
import matplotlib.pyplot as plt
from IPython.display import display, HTML
import matplotlib.dates as mdates
import matplotlib.cbook as cbook
import datetime as dt
from matplotlib.dates import DateFormatter, MonthLocator, DayLocator
import seaborn as sns
# %matplotlib inline
from matplotlib.backends.backend_pdf import PdfPages
from textwrap import fill
from matplotlib.ticker import FormatStrFormatter
from operator import add
# could also use holoview: http://holoviews.org/
list_csv_file = []
for i in range(90, 120):
list_csv_file.append("Results_model_run%s.csv" % i)
list_dataframe = []
for filename in list_csv_file:
list_dataframe.append(pd.read_csv(filename))
merged_dataframes = pd.concat(list_dataframe)
merged_dataframes['Repaired eol PV modules'] = (merged_dataframes['eol - new repaired weight'] +
merged_dataframes['eol - used repaired weight']) / 1E9
merged_dataframes['Sold eol PV modules'] = (merged_dataframes['eol - new sold weight'] +
merged_dataframes['eol - used sold weight']) / 1E9
merged_dataframes['Recycled eol PV modules'] = (merged_dataframes['eol - new recycled weight'] +
merged_dataframes['eol - used recycled weight']) / 1E9
merged_dataframes['Landfilled eol PV modules'] = (merged_dataframes['eol - new landfilled weight'] +
merged_dataframes['eol - used landfilled weight']) / 1E9
merged_dataframes['Stored eol PV modules'] = (merged_dataframes['eol - new stored weight'] +
merged_dataframes['eol - used stored weight']) / 1E9
merged_dataframes['Total waste'] = (
merged_dataframes['Repaired eol PV modules'] + merged_dataframes['Sold eol PV modules'] +
merged_dataframes['Recycled eol PV modules'] + merged_dataframes['Landfilled eol PV modules'] +
merged_dataframes['Stored eol PV modules'])
merged_dataframes['New product'] = merged_dataframes['New product'] / 1E9
merged_dataframes['Used product'] = merged_dataframes['Used product'] / 1E9
merged_dataframes = merged_dataframes.loc[merged_dataframes['Year'] == 2050]
merged_dataframes.to_csv("MergedData.csv")
# Load the aggregated results for the figure and convert the used-module
# fraction to percent for the secondary axis.
data_in = pd.read_csv("DataHistogram_used-new.csv")
data_in['Average fraction of used modules'] *= 100
sns.set(style="whitegrid", color_codes=True)
from pylab import rcParams
import matplotlib.ticker as mtick
from matplotlib.ticker import PercentFormatter
# Global font sizes for ticks and axis labels.
plt.rc('xtick', labelsize=17)
plt.rc('ytick', labelsize=17)
plt.rc('axes', labelsize=17)
rcParams['figure.figsize'] = 5, 5
# Grouped bar chart: installed capacity per reuse rate, split by module type;
# the palette picks the two extreme colors of a diverging palette.
g = sns.catplot(x="Reuse rate", y="Cumulative installed capacity in 2050 (GW)", hue='PV modules type',
                kind="bar", height=5.5, aspect=1.15, data=data_in, legend=False,
                palette=[sns.diverging_palette(220, 20, n=7)[0], sns.diverging_palette(220, 20, n=7)[-1]])
plt.legend(loc='upper left', bbox_to_anchor=(0.67, 0.92), ncol=1, fontsize=17)
# Secondary y-axis: average fraction of used modules as maroon diamonds, in %.
ax2 = plt.twinx()
sns.scatterplot(x="Reuse rate", y="Average fraction of used modules",
                data=data_in, color='maroon', marker="D", s=100)
ax2.grid(False)
ax2.yaxis.label.set_color('maroon')
ax2.tick_params(axis='y', colors='maroon')
ax2.yaxis.set_major_formatter(mtick.PercentFormatter())
#ax.set_ylabel('')
#for ax in g.axes.flat:
#    ax.yaxis.set_major_formatter(mtick.PercentFormatter())
import matplotlib as mpl
# Add minor horizontal gridlines halfway between major ticks on the bar axis.
for ax in g.axes.flat:
    ax.get_yaxis().set_minor_locator(mpl.ticker.AutoMinorLocator(2))
    # NOTE(review): grid(b=...) was renamed grid(visible=...) and the old
    # keyword is removed in matplotlib >= 3.6 - confirm the pinned version.
    ax.grid(b=True, which='minor', color='lightgrey', linewidth=0.5)
ax2.set_ylim(0, 31)
# NOTE(review): `figsize` is not a savefig() parameter (figure size is fixed
# at creation); depending on the matplotlib version this kwarg is ignored or
# raises a TypeError - verify.
plt.savefig("figure10.2.png", bbox_inches='tight', figsize=(5.5, 3.5), dpi=500)
| Fig4-b_.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# language: python
# name: python3
# ---
# + [markdown] pycharm={"name": "#%% md\n"}
# # Deep multimodal Two Stream Action Recognition
# + [markdown] pycharm={"name": "#%% md\n"}
# Import Deep Learning streams (Video and Pulse)
# + pycharm={"name": "#%%\n"}
import numpy as np
from streams.rgbi3d import rgbi3d
from streams.cnn_lstm import cnn_lstm
from streams.two_stream import two_stream
# + [markdown] pycharm={"name": "#%% md\n"}
# Firstly, we obtain the action list with the name and identifier that this solution is able to identify
# + pycharm={"name": "#%%\n"}
import json
with open("actions_list.json", "r") as file:
actions_list = json.loads(file.read())
# + [markdown] pycharm={"name": "#%% md\n"}
# ### Video Action Recognition Model
# Create the video model instance with a reduced spatial resolution (112x112) to analyze 64 frames videos
# recorded at 25 FPS (which represents 2.56 seconds clips). We also load the model trained
# weights to perform action recognition.
# + pycharm={"name": "#%%\n"}
video_model = rgbi3d(input_shape=(64, 112, 112, 3),
classes=10, endpoint_logit=False,
weights="weights/rgbi3d_model.hdf5")
# + [markdown] pycharm={"name": "#%% md\n"}
# #### 1. Load and preprocess an input video to perform action recognition
# Firstly, import the *load_video* function from the preprocessing module. Then provide a path to a video you want to
# analyze. We obtain a list of clips of 64 frames each that represent the video and a list with information of the timestamp
# of the end of each clip. This information will be used below to retrieve pulse data.
# + pycharm={"name": "#%%\n"}
from preprocessing.preprocessing import load_video
video_file_name = "20210114_152557-153300[152742.822000,152753.322000] (Subject 1).mp4"
video_clips, video_end_timestamp = load_video("data/{}".format(video_file_name),
height=112, width=112,
frames_wanted=64, step=64)
# + [markdown] pycharm={"name": "#%% md\n"}
# #### 2. Show the loaded video
# Show the first clip of the video of a person walking. Note that **matplotlib** is required to plot the images
# + pycharm={"name": "#%%\n"}
from utils.utils import show_video
show_video(video_clips[0], frames=5)
# + [markdown] pycharm={"name": "#%% md\n"}
# #### 3. Infer the activity using the optimized RGBI3D network
# Firstly, we create a function to infer the data with the DL model easily
# + pycharm={"name": "#%%\n"}
def predict_data(model, data, actions_list):
    """Run inference on `data` and print the recognized action with its confidence.

    Parameters
    ----------
    model : Keras-style model exposing ``predict(data, batch_size)``.
    data : input clips, forwarded verbatim to ``model.predict``.
    actions_list : dict mapping stringified class indices to action names.
    """
    # Average the per-clip class probabilities into one score vector.
    scores = model.predict(data, batch_size=8).mean(axis=0)
    # The recognized action is the class with the highest averaged score.
    best = np.argmax(scores, axis=-1)
    label = actions_list[str(best)]
    confidence = scores[best] * 100
    print("Action recognizer detected: {} - with a confidence of: {:0.2f}%".format(label,
                                                                                   confidence))
# + [markdown] pycharm={"name": "#%% md\n"}
# We analyze the video with the Video network. Note how the Video network identifies with a 98% confidence that someone is **cleaning**
# + pycharm={"name": "#%%\n"}
predict_data(model=video_model, data=video_clips, actions_list=actions_list)
# + [markdown] pycharm={"name": "#%% md\n"}
# ### Pulses (PPG) Action Recognition Model
# Create the pulses model instance with variable timesteps to provide more flexibility. Note that it was
# originally trained using around 55 timesteps and with features of size 8 (55x8 = 440 data points). Every pulse signal was recorded
# with a 66.67Hz sampling frequency (every 15ms). Therefore, around 440 data points represent 6.6 seconds
# of the signal. We also load the model trained weights to perform action recognition.
#
# + [markdown] pycharm={"name": "#%% md\n"}
# For the pulses model, we only considered 9 actions, excluding the *no_action* class, because it makes no sense with this
# kind of data
# + pycharm={"name": "#%%\n"}
pulses_model = cnn_lstm(num_classes=9,
timesteps=None, # Variable timesteps size
features=8,
endpoint_logit=False,
weights="weights/cnn-lstm_pulse.hdf5")
# + [markdown] pycharm={"name": "#%% md\n"}
# #### 1. Load and preprocess pulse data to perform action recognition
# Firstly, we retrieve the pulse signal depending on the retrieved video to get a synchronized signal.
# For this, the video file name provides information about the date and initial and end hour of the clip. With this
# information, we can retrieve the corresponding PPG signal of that period.
#
#
# We import with pandas the pulse signal located in the data folder. Then, we normalize the signal
# between negative 1 and 1.
# + pycharm={"name": "#%%\n"}
import pandas as pd
# PPG recording synchronized with the video; the 'blood_pressure' column holds
# the raw signal values.
pulse_data = "data/labeled_pulses.csv"
pulses = pd.read_csv(pulse_data)
# Min-max rescale the signal into the [-1, 1] range expected by the network.
numerator = pulses["blood_pressure"] - min(pulses["blood_pressure"])
denominator = max(pulses["blood_pressure"]) - min(pulses["blood_pressure"])
pulses["blood_pressure"] = 2 * (numerator / denominator) - 1
# + [markdown] pycharm={"name": "#%% md\n"}
# Concretely, the video name is formatted as follows:
# ```
# date_videostarttime-videoendtime[clipstarttime,clipendtime] (Subject ID)
# eg. 20210114_152557-153300[152854.072000,152858.822000] (Subject 1)
# ```
# **Important**, timestamp values on the retrieved pandas dataframe with the following time zone: GMT +1. If you are in
# a different time zone, *Review this*.
#
# We retrieve a 6.6 seconds long signal ending at the same time each video clip finishes.
# + pycharm={"name": "#%%\n"}
from preprocessing.preprocessing import load_pulses
n_timesteps = 55
n_features = 8
pulses_clips = np.empty((video_clips.shape[0], n_timesteps, n_features))
for i in range(len(video_end_timestamp)):
pulses_clips[i] = load_pulses(video_end_timestamp[i],
pulses,
n_timesteps=n_timesteps,
n_features=n_features)
# + [markdown] pycharm={"name": "#%% md\n"}
# #### 2. Show the loaded pulse signal
# We show the pulses signal of the person from the previous video walking.
# + pycharm={"name": "#%%\n"}
from utils.utils import plot_signal
plot_signal(pulses_clips[0], "Cleaning")
# -
# We analyze the PPG signal with the Pulses network. Note how the Pulse network identifies **cleaning** as the top action with a
# confidence of 28%.
# + pycharm={"name": "#%%\n"}
predict_data(model=pulses_model, data=pulses_clips, actions_list=actions_list)
# + [markdown] pycharm={"name": "#%% md\n"}
# ### Two Stream multimodal solution
# Finally, we create the Multimodal Two Stream architecture from the RGBI3D and 1DCNN+LSTM networks. We also load the weights
# of the WeighPerClass layer to weigh the contribution of every stream with respect to each action.
# + pycharm={"name": "#%%\n"}
multimodal_ts = two_stream(rgb_model=video_model,
pulses_model=pulses_model,
weights="weights/ts_weights_per_class.npy")
# + [markdown] pycharm={"name": "#%% md\n"}
# After using the multimodal architecture, we obtained that someone is cleaning with a confidence higher than 96%.
# + pycharm={"name": "#%%\n"}
predict_data(model=multimodal_ts, data=[video_clips, pulses_clips],
actions_list=actions_list)
# + [markdown] pycharm={"name": "#%% md\n"}
# #### 1. Critical action recognition
# The development of this multimodal architecture enabled the enhancement of the recognition of critical actions.
# Identify with high confidence when the patients suffer a potentially risky situation is crucial to
# notify caregivers, reducing false alarms.
#
# Below, we analyze two examples of critical actions (*Falling down* and *Lying on the floor*).
# + pycharm={"name": "#%%\n"}
video_file_name_fall = "20210114_152557-153300[153142.821000,153146.572000] (Subject 1).mp4"
video_file_name_floor = "20210114_152557-153300[153037.572000,153042.500000] (Subject 1).mp4"
video_clips_fall, video_end_timestamp_fall = load_video("data/{}".format(video_file_name_fall),
height=112, width=112,
frames_wanted=64, step=64)
video_clips_floor, video_end_timestamp_floor = load_video("data/{}".format(video_file_name_floor),
height=112, width=112,
frames_wanted=64, step=64)
# + pycharm={"name": "#%%\n"}
pulses_clips_fall = np.empty((video_clips_fall.shape[0], n_timesteps, n_features))
for i in range(len(video_end_timestamp_fall)):
pulses_clips_fall[i] = load_pulses(video_end_timestamp_fall[i],
pulses,
n_timesteps=n_timesteps,
n_features=n_features)
pulses_clips_floor = np.empty((video_clips_floor.shape[0], n_timesteps, n_features))
for i in range(len(video_end_timestamp_floor)):
pulses_clips_floor[i] = load_pulses(video_end_timestamp_floor[i],
pulses,
n_timesteps=n_timesteps,
n_features=n_features)
# + [markdown] pycharm={"name": "#%% md\n"}
# Once the input data is loaded (video and pulses), we evaluate the retrieved information using the
# Two Stream multimodal architecture. Both activities are identified with high confidence.
# + pycharm={"name": "#%%\n"}
show_video(video_clips_fall[0], frames=5)
predict_data(model=multimodal_ts, data=[video_clips_fall, pulses_clips_fall],
actions_list=actions_list)
# + pycharm={"name": "#%%\n"}
show_video(video_clips_floor[0], frames=5)
predict_data(model=multimodal_ts, data=[video_clips_floor, pulses_clips_floor],
actions_list=actions_list)
# + [markdown] pycharm={"name": "#%% md\n"}
# Finally, note how the Pulses stream is able to recognize with relatively high confidence critical activities such as
# **lying on the floor**. Taking into account the limitations about the information that this data can
# provide when identifying actions. But also, considering the high efficiency of the stream that
# analyzes the PPG (pulses signal) data.
# + pycharm={"name": "#%%\n"}
predict_data(model=pulses_model, data=pulses_clips_floor, actions_list=actions_list)
| demo_tutorial.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# language: python
# name: python3
# ---
# %matplotlib inline
import numpy as np # linear algebra
import seaborn as sns
sns.set(style='whitegrid')
import pandas as pd # data processing, CSV file I/O (e.g. pd.read_csv)
import matplotlib.pyplot as plt
import tensorflow as tf
# Load the iris table and keep only the first two classes for binary
# classification: rows 0-49 are Iris-setosa, rows 50-99 are Iris-versicolor.
iris = pd.read_csv('F:/Work/Python/Datasets/iris.csv')
iris.shape
iris = iris[:100]
iris.shape
iris.head()
# Encode the two species as 0/1 labels.
iris.Species = iris.Species.replace(to_replace=['Iris-setosa', 'Iris-versicolor'], value=[0, 1])
# Scatter the two classes by sepal size.
# Fix: versicolor occupies rows 50-99, so the slice starts at 50 (the previous
# iris[51:] silently dropped the first versicolor sample); the legend label
# typo 'Iris-versicolo' is corrected as well.
plt.scatter(iris[:50].SepalLengthCm, iris[:50].SepalWidthCm, label='Iris-setosa')
plt.scatter(iris[50:].SepalLengthCm, iris[50:].SepalWidthCm, label='Iris-versicolor')
plt.xlabel('SepalLength')
plt.ylabel('SepalWidth')
plt.legend(loc='best')
# Feature matrix (drop the id and label columns) and label vector.
X = iris.drop(labels=['Id', 'Species'], axis=1).values
y = iris.Species.values
# Reproducible 80/20 train/test split: sample 80 row indices without
# replacement, the complement becomes the test set.
seed = 5
np.random.seed(seed)
tf.set_random_seed(seed)
train_index = np.random.choice(len(X), round(len(X) * 0.8), replace=False)
test_index = np.array(list(set(range(len(X))) - set(train_index)))
train_X = X[train_index]
train_y = y[train_index]
test_X = X[test_index]
test_y = y[test_index]
def min_max_normalized(data):
    """Rescale each column of `data` to the [0, 1] range (min-max scaling).

    A column with a constant value divides by zero and yields NaN/inf,
    exactly as a plain element-wise divide would.
    """
    lo = data.min(axis=0)
    hi = data.max(axis=0)
    span = hi - lo
    return (data - lo) / span
# Normalized processing, must be placed after the data set segmentation,
# otherwise the test set will be affected by the training set
train_X = min_max_normalized(train_X)
test_X = min_max_normalized(test_X)
# Begin building the model framework.
# NOTE(review): this is TensorFlow 1.x graph-mode code (tf.Session,
# tf.placeholder, tf.train.*); it does not run under TF2 without tf.compat.v1.
# Declare the variables that need to be learned and initialization
# There are 4 features here, A's dimension is (4, 1)
A = tf.Variable(tf.random_normal(shape=[4, 1]))
b = tf.Variable(tf.random_normal(shape=[1, 1]))
init = tf.global_variables_initializer()
sess = tf.Session()
sess.run(init)
# Graph inputs: feature batches (any batch size x 4) and 0/1 targets.
data = tf.placeholder(dtype=tf.float32, shape=[None, 4])
target = tf.placeholder(dtype=tf.float32, shape=[None, 1])
# Declare the model you need to learn: a single linear layer (logits).
mod = tf.matmul(data, A) + b
# Declare loss function
# Use the sigmoid cross-entropy loss function,
# first doing a sigmoid on the model result and then using the cross-entropy loss function
loss = tf.reduce_mean(tf.nn.sigmoid_cross_entropy_with_logits(logits=mod, labels=target))
# Define the learning rate, batch_size etc.
learning_rate = 0.003
batch_size = 30
iter_num = 1500
# Define the optimizer
opt = tf.train.GradientDescentOptimizer(learning_rate)
# Define the goal
goal = opt.minimize(loss)
# Define the accuracy
# The default threshold is 0.5, rounded off directly
prediction = tf.round(tf.sigmoid(mod))
# Bool into float32 type
correct = tf.cast(tf.equal(prediction, target), dtype=tf.float32)
# Average
accuracy = tf.reduce_mean(correct)
# End of the definition of the model framework
# Start training model
# Define the variable that stores the result
loss_trace = []
train_acc = []
test_acc = []
for epoch in range(iter_num):
    # Generate random batch index
    batch_index = np.random.choice(len(train_X), size=batch_size)
    batch_train_X = train_X[batch_index]
    # Labels are converted to a column matrix to match the (None, 1) placeholder.
    batch_train_y = np.matrix(train_y[batch_index]).T
    sess.run(goal, feed_dict={data: batch_train_X, target: batch_train_y})
    temp_loss = sess.run(loss, feed_dict={data: batch_train_X, target: batch_train_y})
    # convert into a matrix, and the shape of the placeholder to correspond
    temp_train_acc = sess.run(accuracy, feed_dict={data: train_X, target: np.matrix(train_y).T})
    temp_test_acc = sess.run(accuracy, feed_dict={data: test_X, target: np.matrix(test_y).T})
    # record the result
    loss_trace.append(temp_loss)
    train_acc.append(temp_train_acc)
    test_acc.append(temp_test_acc)
    # output a progress line every 300 epochs
    if (epoch + 1) % 300 == 0:
        print('epoch: {:4d} loss: {:5f} train_acc: {:5f} test_acc: {:5f}'.format(epoch + 1, temp_loss,
                                                                                 temp_train_acc, temp_test_acc))
# Visualization of the results
# loss function
plt.plot(loss_trace)
plt.title('Cross Entropy Loss')
plt.xlabel('epoch')
plt.ylabel('loss')
plt.show()
# accuracy
plt.plot(train_acc, 'b-', label='train accuracy')
plt.plot(test_acc, 'k-', label='test accuracy')
plt.xlabel('epoch')
plt.ylabel('accuracy')
plt.title('Train and Test Accuracy')
plt.legend(loc='best')
plt.show()
| Capsule Networks - Learning/Tensorflow Logistic Regression.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# language: python
# name: python3
# ---
# # **Test your knowledge.**
#
# **Answer the following questions**
#
# Type down your answers then save it and send it to your instructor.
#
# ## LIMIT YOUR ANSWERS TO ONE CELL.
# ## DO NOT RUN (SHIFT + ENTER) MARKDOWN CELLS.
# In your own words, describe these data types.
# Integers:
# Floating Point:
# Strings:
# Lists:
# Tuples:
# Dictionaries:
# ### Numbers
# Write an equation that uses multiplication, division, an exponent, addition,
# and subtraction that is equal to 100.25.
#
# Hint: This is just to test your memory of the basic arithmetic commands, work backwards from 100.25
# Multiplication:
# Division:
# Exponent:
# Addition:
# Subtraction:
# What would you use to find a number’s square root, as well as its square?
# Answer (square root):
# Answer (square):
# ### Strings
# Given the string 'hello' give an index command that returns 'e'. Enter your code in the cell below:
# +
s = 'hello'
# Print out 'e' using indexing
# your code below
# -
# Reverse the string 'hello' using slicing:
# Given the string hello, give two methods of producing the letter 'o' using indexing.
# +
s ='hello'
# Print out the 'o'
# Method 1:
# -
# Method 2:
# ### Lists
# Build this list [0,0,0] two separate ways.
# Method 1:
# Method 2:
# Reassign 'hello' in this nested list to say 'goodbye' instead:
list3 = [1,2,[3,4,'hello']]
# your code below
# Sort the list below:
list4 = [5,3,4,6,1]
# your code below
# ### Dictionaries
#
# Using keys and indexing, grab the 'hello' from the following dictionaries:
d = {'simple_key':'hello'}
# Grab 'hello'
d = {'k1':{'k2':'hello'}}
# Grab 'hello'
d = {'k1':[{'nest_key':['this is deep',['hello']]}]}
# Grab hello
d = {'k1':[1,2,{'k2':['this is tricky',{'tough':[1,2,['hello']]}]}]}
# Grab hello
# Can you sort a dictionary? Why or why not?
# Answer:
# ### Tuples:
#
# What is the major difference between tuples and lists?
# Answer:
# In one sentence, how do you create a tuple?
# Answer:
# ### Sets
#
# In one sentence, what is unique about a set?
# Answer:
# Use a set to find the unique values of the list below:
list5 = [1,2,2,33,4,4,11,22,3,3,2]
# dont forget to limit your answer to one cell
# your code below
# ### Booleans
#
# What will be the resulting Boolean of the following pieces of code (answer first then you may check by typing it in!)
# 2 > 3
# Answer:
# 3 <= 2
# Answer:
# 3 == 2.0
# Answer:
# 3.0 == 3
# Answer:
# 4**0.5 != 2
# Answer:
| quizzes/long_quiz_1.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3.9.7 ('flymazerl')
# language: python
# name: python3
# ---
# +
import torch
import torch.nn as nn
import random
import numpy as np
import matplotlib.pyplot as plt
import gym
from collections import namedtuple, deque
from FlYMazeRL.agents.classical import CQLearner_acceptreject, CQLearner_esoftmax
from FlYMazeRL.gym.environment import ymaze_static
from FlYMazeRL.utils.generators import generate_params_from_fits
from FlYMazeRL.utils.visualization import draw_schedule
# Define a transition (state, action, next state, reward) to store in the replay memory as a named tuple
Transition = namedtuple("Transition", ["state", "action", "next_state", "reward"])
# Bounded FIFO buffer of Transition tuples used for experience replay.
class ReplayMemory(object):
    """Fixed-capacity circular store of transitions with uniform sampling."""

    def __init__(self, capacity):
        # A deque with maxlen silently evicts the oldest entry once full.
        self.memory = deque(maxlen=capacity)

    def push(self, *args):
        """Saves a transition."""
        transition = Transition(*args)
        self.memory.append(transition)

    def sample(self, batch_size):
        """Draw `batch_size` stored transitions uniformly, without replacement."""
        return random.sample(self.memory, batch_size)

    def __len__(self):
        """Number of transitions currently held."""
        return len(self.memory)
# Fully-connected network used to approximate the Q-function.
class DQN(nn.Module):
    """Multi-layer perceptron: ReLU after every layer except the output one,
    so the final layer emits raw (unsquashed) Q-values."""

    def __init__(self, input_size, hidden_layer_sizes, output_size):
        super(DQN, self).__init__()
        # Chain the layer widths: input -> hidden widths... -> output.
        widths = [input_size] + list(hidden_layer_sizes) + [output_size]
        self.layers = nn.ModuleList(
            nn.Linear(w_in, w_out) for w_in, w_out in zip(widths[:-1], widths[1:])
        )

    def forward(self, x):
        # ReLU between layers; the head produces the Q-value vector.
        *hidden, head = self.layers
        for layer in hidden:
            x = torch.relu(layer(x))
        return head(x)
# Define a DQN agent that will learn the Q-function with an epsilon-greedy policy
class DQNAgent(object):
    """Deep Q-learning agent: epsilon-greedy behavior policy, replay memory,
    and a target network that is synced on demand via update_target_model().

    Constructor arguments: state_size/action_size set the network's I/O
    widths and hidden_layer_sizes its hidden widths; gamma is the discount;
    epsilon/epsilon_decay/epsilon_min control exploration; learning_rate
    feeds Adam; batch_size is the replay sample size; memory_capacity bounds
    the replay buffer.
    """
    def __init__(self, state_size, action_size, hidden_layer_sizes, gamma, epsilon, epsilon_decay, epsilon_min, learning_rate, batch_size, memory_capacity):
        self.state_size = state_size
        self.action_size = action_size
        self.gamma = gamma
        self.epsilon = epsilon
        self.epsilon_decay = epsilon_decay
        self.epsilon_min = epsilon_min
        self.learning_rate = learning_rate
        self.batch_size = batch_size
        self.memory = ReplayMemory(memory_capacity)
        # policy_net is optimized; target_net is a frozen copy providing
        # bootstrap targets until the next explicit sync.
        self.policy_net = DQN(state_size, hidden_layer_sizes, action_size)
        self.target_net = DQN(state_size, hidden_layer_sizes, action_size)
        self.loss_func = nn.MSELoss()
        self.optimizer = torch.optim.Adam(self.policy_net.parameters(), lr=learning_rate)
        self.target_net.load_state_dict(self.policy_net.state_dict())
        self.target_net.eval()
    def select_action(self, state):
        # Epsilon-greedy: explore with probability epsilon, otherwise pick the
        # argmax Q-value action from the policy network.
        # NOTE(review): the explore branch returns a plain Python int while the
        # greedy branch returns a 1x1 LongTensor - downstream code must accept
        # both; confirm this mixed return type is intended.
        if random.random() < self.epsilon:
            return random.randint(0, self.action_size - 1)
        else:
            state = state.float().unsqueeze(0)
            return self.policy_net(state).max(1)[1].view(1, 1)
    def update_epsilon(self):
        # Multiplicative decay of the exploration rate down to epsilon_min.
        if self.epsilon > self.epsilon_min:
            self.epsilon *= self.epsilon_decay
    def update_memory(self, state, action, next_state, reward):
        # Store one (state, action, next_state, reward) transition.
        # NOTE(review): .float()/.unsqueeze() can return views of the caller's
        # tensor rather than copies; if the environment mutates its state
        # tensor in place, stored transitions may alias later values - verify.
        # convert everything to tensors before adding to the memory
        state = state.float().unsqueeze(0)
        next_state = next_state.float().unsqueeze(0)
        action = torch.tensor([action])
        reward = torch.tensor([reward])
        self.memory.push(state, action, next_state, reward)
    def update_model(self):
        # One DQN optimization step on a random replay batch; a no-op until
        # the buffer holds at least batch_size transitions.
        if len(self.memory) < self.batch_size:
            return
        transitions = self.memory.sample(self.batch_size)
        # Transpose the batch: list of Transitions -> Transition of tuples.
        batch = Transition(*zip(*transitions))
        # Mask of transitions whose next_state is non-terminal (not None).
        # NOTE(review): uint8 index masks are deprecated in modern PyTorch
        # (bool is expected); also update_memory never stores None, so this
        # mask is always all-ones here.
        non_final_mask = 1 - torch.tensor(tuple(map(lambda s: s is None, batch.next_state)), dtype=torch.uint8)
        non_final_next_states = torch.cat([s for s in batch.next_state if s is not None])
        state_batch = torch.cat(batch.state)
        action_batch = torch.stack(batch.action)
        reward_batch = torch.stack(batch.reward).float()
        # Compute Q(s_t, a) - the model computes Q(s_t), then we select the
        # columns of actions taken. These are the actions which would've been taken
        # for each batch state according to policy_net
        state_action_values = self.policy_net(state_batch).gather(1, action_batch)
        # Compute V(s_{t+1}) for all next states.
        next_state_values = torch.zeros(self.batch_size, device=torch.device('cpu'))
        next_state_values[non_final_mask] = self.target_net(non_final_next_states).max(1)[0].detach()
        # Compute the expected Q values
        # NOTE(review): next_state_values has shape (B,) while reward_batch is
        # stacked to (B, 1); their sum broadcasts to (B, B), which looks like
        # an unintended shape bug - verify the TD-target computation.
        expected_state_action_values = (next_state_values * self.gamma) + reward_batch
        # Compute loss
        loss = self.loss_func(state_action_values, expected_state_action_values.unsqueeze(1))
        # Optimize the model
        self.optimizer.zero_grad()
        loss.backward()
        self.optimizer.step()
    def update_target_model(self):
        # Copy the policy network's weights into the target network.
        self.target_net.load_state_dict(self.policy_net.state_dict())
        self.target_net.eval()
    def load_model(self, path):
        # Load policy-net weights from `path` and sync the target network.
        self.policy_net.load_state_dict(torch.load(path))
        self.policy_net.eval()
        self.target_net.load_state_dict(self.policy_net.state_dict())
        self.target_net.eval()
    def save_model(self, path):
        # Serialize the policy network's weights to `path`.
        torch.save(self.policy_net.state_dict(), path)
# create an environment using an existing learner model
class AdversaryEnvironment(gym.Env):
    """Gym environment in which an adversary chooses, each trial, which of the
    learner's two options are rewarded (4 joint assignments -> 4 actions).

    Observation (length `learner_state_size`, used here as 3): the mean of
    the learner's recent actions coded in {-1, +1}, followed by the
    learner's two current Q-values. The adversary's reward is that same
    mean of recent coded actions, i.e. it is paid for driving the learner
    toward action 1.

    NOTE(review): observation_space is declared Box(low=0, high=1), but
    state[0] can be negative (mean of +/-1-coded actions) and the Q-values
    are not obviously bounded by [0, 1] - confirm the declared bounds.
    NOTE(review): the same `self.state` tensor object is mutated in place
    and returned on every step; callers that keep references (e.g. a replay
    memory) may observe later values - verify.
    """
    def __init__(self,n_trials, learner, learner_state_size, integration_time,):
        self.learner = learner
        self.learner_state_size = learner_state_size
        self.n_trials = n_trials
        self.trial_number = 0
        # 4 adversary actions = the 4 ways of rewarding the two options.
        self.action_space = gym.spaces.Discrete(4)
        self.observation_space = gym.spaces.Box(low=0, high=1, shape=(learner_state_size,), dtype=np.float32)
        self.done = False
        self.learner_state = 0
        self.state = torch.zeros(learner_state_size)
        # Length of the sliding window of learner actions averaged into
        # state[0] and into the adversary's reward.
        self.integration_time = integration_time
        self.learner_actions = [0] * (integration_time - 1)
    def reset(self):
        # Restart the episode: zero all counters/state and reset the wrapped
        # learner (its environment, internal variables, and recorders).
        self.trial_number = 0
        self.done = False
        self.learner_state = 0
        self.state = torch.zeros(self.learner_state_size)
        self.learner_actions = [0] * (self.integration_time - 1)
        self.learner.env.reset()
        self.learner.reset_variables()
        self.learner.reset_recorder()
        return self.state
    def adv_action_to_learner_reward(self, action):
        # Map the discrete adversary action onto a per-option reward pair
        # [reward_for_option_0, reward_for_option_1].
        if action == 0:
            reward = [0,0]
        elif action == 1:
            reward = [0,1]
        elif action == 2:
            reward = [1,0]
        elif action == 3:
            reward = [1,1]
        return reward
    def step(self, action):
        # Translate the adversary's choice into the learner's reward schedule
        # for the NEXT trial, then advance the learner by one trial.
        learner_reward = self.adv_action_to_learner_reward(action)
        # update learner environment with learner reward
        if self.trial_number < self.n_trials-1:
            self.learner.env.schedule[self.trial_number+1] = learner_reward
        # take learner step
        self.learner_state,self.done = self.learner.trial_step(self.learner_state)
        # get learner action
        learner_action = self.learner.action_history[self.trial_number]
        # transform to -1 to 1
        learner_action = 2*learner_action - 1
        # update the sliding window of recent learner actions
        self.learner_actions.append(learner_action)
        self.learner_actions = self.learner_actions[1:]
        # create adversary state using action and q-values from learner
        self.state[0] = np.mean(self.learner_actions)
        self.state[1] = self.learner.q_history[self.trial_number][0]
        self.state[2] = self.learner.q_history[self.trial_number][1]
        # # get learner policy
        # policy = self.learner.vectorizedActionProbabilities(self.learner.alpha, self.learner.gamma, self.learner.weight, self.learner.intercept, self.action_history, self.reward_history)
        # update trial number
        self.trial_number += 1
        # get adversary reward
        adversary_reward = np.mean(self.learner_actions) #1 if learner_action == 1 else 0
        # return adversary state, adversary reward, done, and info
        return self.state, adversary_reward, self.done, {}
# test the agent on a simple task
def test_agent(agent, env):
state = env.reset()
total_reward = 0
while True:
action = agent.select_action(state)
next_state, reward, done, _ = env.step(action)
agent.update_memory(state, action, next_state, reward)
state = next_state
total_reward += reward
if done:
break
return total_reward
# train the agent on a simple task
def train_agent(agent, env, n_episodes):
    """Train `agent` on `env` with DQN learning for `n_episodes` episodes.

    Tracks the adversary's per-episode reward and the learner's total reward,
    and every 100 episodes plots the learner's schedule plus both reward
    histories (with 100-episode running averages once enough data exists).

    NOTE(review): agent.update_epsilon() is never called here, so the
    exploration rate stays fixed at its initial value - confirm intended.
    """
    adv_reward_history = []
    lrn_reward_history = []
    for episode in range(n_episodes):
        state = env.reset()
        total_reward = 0
        while True:
            action = agent.select_action(state)
            next_state, reward, done, _ = env.step(action)
            agent.update_memory(state, action, next_state, reward)
            # Learn only once a full batch can be sampled; the target network
            # is synced after every optimization step.
            if len(agent.memory) > agent.batch_size:
                agent.update_model()
                agent.update_target_model()
            state = next_state
            total_reward += reward
            if done:
                break
        adv_reward_history.append(total_reward)
        lrn_reward_history.append(np.sum(env.learner.reward_history))
        # Periodic diagnostics: schedule plot plus both reward curves.
        if episode % 100 == 0:
            draw_schedule(env.learner.env.schedule,[env.learner.action_history])
            if episode > 0:
                # plot the reward history and running average
                plt.plot(np.arange(len(adv_reward_history)), adv_reward_history, 'r-', label='adversary reward')
                plt.plot(np.arange(len(lrn_reward_history)), lrn_reward_history, 'b-', label='learner reward')
                if episode>100:
                    plt.plot(np.arange(len(adv_reward_history)), np.convolve(adv_reward_history, np.ones(100)/100, mode='full')[:-99], 'k--', label='running average (adversary)')
                    plt.plot(np.arange(len(lrn_reward_history)), np.convolve(lrn_reward_history, np.ones(100)/100, mode='full')[:-99], 'k--', label='running average (learner)')
                plt.xlabel('Episode')
                plt.ylabel('Reward')
                plt.legend()
                plt.box(False)
                plt.show()
        print("Episode: {}".format(episode)+"\tTotal Reward: {:0.3f}".format(total_reward)+"\tLearner Reward: {:0.0f}".format(np.sum(env.learner.reward_history)))
# define world variables
n_trials = 100
n_episodes = 1000
# create empty ymaze (all-zero schedule; the adversary fills in rewards per trial)
ymaze = ymaze_static(n_trials,0)
# schedule = generate_random_schedule_with_blocks(100,[0],[1])
# env = ymaze_static(100,schedule=schedule)
# load learner parameters drawn from previously fitted models
params, policyparams = generate_params_from_fits(agentClass=CQLearner_esoftmax,n_samples=1,sample_from_population=False)
# params = {'alpha':0.1, 'gamma':0.6}
# policyparams = {'weight':2,'intercept':0}
# create learner
learner = CQLearner_esoftmax(ymaze,params,policyparams,True)
# create adversary environment (state size 3, 10-trial action-integration window)
env = AdversaryEnvironment(n_trials, learner, 3, 10)
# create agent
# (state_size=3, action_size=4, hidden layers 128x3, gamma=0.9, epsilon=0.10,
#  epsilon_decay=0.5, epsilon_min=0.05, lr=1e-2, batch_size=512, memory=100000)
agent = DQNAgent(3,4,[128,128,128],0.9,0.10,0.5,0.05,1e-2,512,100000)
# train agent
train_agent(agent, env, n_episodes)
# -
# Draw fresh parameters for the accept-reject learner variant.
params, policyparams = generate_params_from_fits(agentClass=CQLearner_acceptreject,n_samples=1,sample_from_population=False)
# +
from FlYMazeRL.utils.evaluation import get_schedule_histories
from FlYMazeRL.utils.generators import generate_random_schedule_with_blocks
# Simulate the accept-reject learner on a random block schedule and visualize it.
schedule = generate_random_schedule_with_blocks(100,[0],[1])
env = ymaze_static(100,schedule=schedule)
draw_schedule(schedule,get_schedule_histories(env,CQLearner_acceptreject,1,params,policyparams))
# -
| analysis/notebooks/adversarial_network.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: conda-hq
# language: python
# name: conda-hq
# ---
# +
import glob
from os import path
import os
import sys
path_ = path.abspath('../scripts/')
if path_ not in sys.path:
sys.path.insert(0, path_)
import pickle
import astropy.coordinates as coord
from astropy.constants import G
from astropy.table import Table
import astropy.units as u
import matplotlib as mpl
import matplotlib.pyplot as plt
# %matplotlib inline
import numpy as np
from tqdm import tqdm
from schwimmbad import MultiPool
from hq.config import HQ_CACHE_PATH, config_to_alldata
from hq.plot import plot_two_panel, plot_phase_fold
from hq.data import get_rvdata
from hq.physics_helpers import period_at_surface, stellar_radius
from hq.log import logger
from helpers import get_metadata, get_rg_mask
from model_z import Model, lntruncnorm
from run_sampler import (logg_bincenters, teff_bincenters, mh_bincenters,
logg_binsize)
# -
cache_path = path.abspath('../cache/')
plot_path = path.abspath('../plots/')
# Load all data:
metadata = get_metadata()
# Keep only red-giant stars, selected by a cut in the (TEFF, LOGG) plane.
rg_mask = get_rg_mask(metadata['TEFF'], metadata['LOGG'])
metadata = metadata[rg_mask]
from os import path
from astropy.io import fits
def get_z_samples(apogee_ids, n_samples=256):
    """Load up to `n_samples` posterior period samples per star, as log10(P).

    Returns a (len(apogee_ids), n_samples) array; rows with fewer than
    `n_samples` posterior draws are padded with NaN.
    """
    sample_dir = path.join(HQ_CACHE_PATH, 'dr16/samples')
    out = np.full((len(apogee_ids), n_samples), np.nan)

    for row, star_id in enumerate(apogee_ids):
        # Sample files are sharded into sub-directories by the first 4
        # characters of the APOGEE id.
        fn = path.join(sample_dir, star_id[:4],
                       '{}.fits.gz'.format(star_id))
        tbl = fits.getdata(fn)
        n_keep = min(n_samples, len(tbl))
        out[row, :n_keep] = np.log10(tbl['P'][:n_keep])

    return out
# +
# Loop over a single logg bin (index 8) and load its period samples.
for i, ctr in enumerate(logg_bincenters[8:9]):
    # Bin edges centered on `ctr`.
    l = ctr - logg_binsize / 2
    r = ctr + logg_binsize / 2
    print(l, r)

    # Stars whose logg falls inside this (left-open, right-closed] bin.
    pixel_mask = ((metadata['LOGG'] > l) & (metadata['LOGG'] <= r))

    # Load samples for this bin:
    # logger.debug("{} {}: Loading samples".format(name, i))
    z_samples = get_z_samples(metadata['APOGEE_ID'][pixel_mask])
# # Run
# with MultiPool() as pool:
# run_pixel(name, i, ez_samples, '/dev/null', '/dev/null', pool,
# nwalkers=80)
# +
from scipy.optimize import minimize
import emcee
import pickle
def run_pixel(name, i, z_samples, cache_path, plot_path, pool,
              nwalkers=80, progress=False, overwrite=False):
    """Fit the hierarchical (muz, lnsigz) model for one pixel of stars.

    First finds a starting point with a derivative-free Powell minimization
    (cached to a .npy file), then runs an emcee ensemble sampler (cached to
    a pickle), and finally saves a walker-trace figure.

    Parameters
    ----------
    name, i : label and pixel index used to build cache/plot filenames.
    z_samples : per-star log10(P) posterior samples for this pixel.
    cache_path, plot_path : output directories for caches and figures.
    pool : worker pool handed to emcee for parallel log-prob evaluation.
    nwalkers : number of ensemble walkers.
    progress : show the emcee progress bar.
    overwrite : re-run the sampler even if a cached pickle exists.

    Returns
    -------
    fig, sampler : the trace figure and the emcee sampler (freshly run with
    its pool stripped, or reloaded from cache).
    """
    min_filename = path.join(cache_path, '{}_{:02d}_res.npy'.format(name, i))
    emcee_filename = path.join(cache_path,
                               '{}_{:02d}_emcee.pkl'.format(name, i))

    # Create a model instance so we can evaluate likelihood, etc.
    nparams = 2  # (muz, lnsigz)
    mod = Model(z_samples)

    if not path.exists(min_filename) and not path.exists(emcee_filename):
        # Initial parameters for optimization
        p0 = mod.pack_pars({'muz': np.log10(10**5.), 'lnsigz': np.log(4.)})

        logger.debug("{} {}: Starting minimize".format(name, i))
        # Powell is derivative-free — robust for this 2-parameter problem.
        res = minimize(lambda *args: -mod(*args), x0=p0, method='powell')
        min_x = res.x
        np.save(min_filename, min_x)
        logger.debug("{} {}: Done with minimize".format(name, i))

    if not path.exists(emcee_filename) or overwrite:
        min_x = np.load(min_filename)

        # initialization for all walkers: tight Gaussian ball around optimum
        all_p0 = emcee.utils.sample_ball(min_x, [1e-3] * nparams,
                                         size=nwalkers)
        # Was a leftover debug print("HERE"); use the module logger instead.
        logger.debug("{} {}: Starting emcee".format(name, i))
        sampler = emcee.EnsembleSampler(nwalkers=nwalkers,
                                        ndim=nparams,
                                        log_prob_fn=mod,
                                        pool=pool)
        pos, *_ = sampler.run_mcmc(all_p0, 512, progress=progress)

        # The pool is not picklable; drop it before caching the sampler.
        sampler.pool = None
        with open(emcee_filename, "wb") as f:
            pickle.dump(sampler, f)

    else:
        with open(emcee_filename, "rb") as f:
            sampler = pickle.load(f)

    # Plot walker traces:
    fig, axes = plt.subplots(nparams, 1, figsize=(8, 4*nparams),
                             sharex=True)
    for k in range(nparams):
        for walker in sampler.chain[..., k]:
            axes[k].plot(walker, marker='',
                         drawstyle='steps-mid', alpha=0.4, color='k')
    axes[0].set_title(str(i))
    fig.tight_layout()
    fig.savefig(path.join(plot_path, '{}_{:02d}_trace.png'.format(name, i)),
                dpi=250)

    return fig, sampler
# -
# Run
# MultiPool distributes log-prob evaluations over 4 worker processes;
# `i` and `z_samples` carry over from the logg-bin loop cell above.
with MultiPool(processes=4) as pool:
    _, sampler = run_pixel('test', i, z_samples,
                           cache_path, plot_path,
                           pool, nwalkers=80, progress=True,
                           overwrite=True)
| notebooks/Infer-P-hierarchical.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# language: python
# name: python3
# ---
import pandas as pd
import numpy as np
import seaborn as sns
import csv
# Load the two Upwork exports: per-payment records and per-freelancer totals.
data = pd.read_csv('upwork_Data.csv', delimiter = ',')
data_1 = pd.read_csv('upwork_Data_1.csv', delimiter = ',')
data_1.tail()
"https://erikrood.com/Python_References/create_new_col_pandas.html"
'Creating variable'
# +
# from datetime import datetime
# datetime_object = datetime.strptime('Jun 1 2005 1:33PM', '%b %d %Y %I:%M%p')
# -
# Parse the Date column and derive Month/Year columns for grouping.
data["New_Date"] = pd.to_datetime(data["Date"])
print(type(data["New_Date"]))
data['Month'] = data["New_Date"].dt.month
data['Year'] = data["New_Date"].dt.year
# https://stackoverflow.com/questions/28133018/convert-pandas-series-to-datetime-in-a-dataframe
# Drop columns that are not needed for the analysis.
data = data.drop(['Balance','PO'], axis=1)
data = data.drop(['Amount'], axis=1)
data.head()
# Total local-currency amount per year.
sns.barplot(x="Year", y="Amount in local currency", data=data, estimator=sum)
data.describe()
# Group multiple variable with target statistic
df = data.groupby(['Year', 'Month','Team'],as_index=False)['Amount in local currency'].sum()
df
# Grouping by a single variable; as_index=False keeps the result as a flat
# DataFrame instead of using the group key as the index.
data.groupby('Year', as_index=False).agg({"Amount in local currency":sum})
sns.barplot(x="Year", y="Amount in local currency", data=df, estimator=sum)
# +
# Total lifetime earnings per country (pivot over data_1).
df2 = pd.pivot_table(data_1,index=["Country"],values=['Life_Time_Earning'],aggfunc=np.sum)
df2.head()
# -
# Number of payments per (Year, Month, Team); note this overwrites the sum
# table `df` computed above.
df = data.groupby(['Year', 'Month','Team'],as_index=False)['Amount in local currency'].count()
df.head()
# +
# This syntax below will export df to csv and save it to desktop
df.to_csv('d:\\temp.csv', sep='\t')
# +
import pandas as pd
import os
from pydrive.auth import GoogleAuth
from pydrive.drive import GoogleDrive
# NOTE(review): GoogleAuth() with LocalWebserverAuth() commented out assumes
# cached credentials already exist — confirm before running unattended.
gauth = GoogleAuth()
# gauth.LocalWebserverAuth()
drive = GoogleDrive(gauth)
# -
# Write df to disk, then upload the CSV's text content to Google Drive.
df.to_csv("d:\\Upwork.csv")
with open("d:\\Upwork.csv", "r") as f:
    fn = os.path.basename(f.name)  # file name is used as the Drive title
    file_drive = drive.CreateFile({'title': fn})
    file_drive.SetContentString(f.read())
    file_drive.Upload()
print("The file:has been uploaded")
# +
# Upload file to folder
# file = drive.CreateFile({"parents": [{"kind": "https:\\drive.google.com\\drive\\folders\\1rt57nZS8xBQC4GNeQN3XzGR16M7x29KF", "id": folderid}]})
# file.SetContentFile(https:\\drive.google.com\\drive\\folders\\1rt57nZS8xBQC4GNeQN3XzGR16M7x29KF) # see carefully this is not SetContentString(f.read()) which is other method/function
# file.Upload({'convert': True}) # Convert csv to google sheet.
# -
| Upwork_Data Analysis_Excercise.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# language: python
# name: python3
# ---
# # Assignment 1: Logistic Regression
# Welcome to week one of this specialization. You will learn about logistic regression. Concretely, you will be implementing logistic regression for sentiment analysis on tweets. Given a tweet, you will decide if it has a positive sentiment or a negative one. Specifically you will:
#
# * Learn how to extract features for logistic regression given some text
# * Implement logistic regression from scratch
# * Apply logistic regression on a natural language processing task
# * Test using your logistic regression
# * Perform error analysis
#
# We will be using a data set of tweets. Hopefully you will get more than 99% accuracy.
# Run the cell below to load in the packages.
# ## Import functions and data
# run this cell to import nltk
import nltk
from os import getcwd
# ### Imported functions
#
# Download the data needed for this assignment. Check out the [documentation for the twitter_samples dataset](http://www.nltk.org/howto/twitter.html).
#
# * twitter_samples: if you're running this notebook on your local computer, you will need to download it using:
# ```Python
# nltk.download('twitter_samples')
# ```
#
# * stopwords: if you're running this notebook on your local computer, you will need to download it using:
# ```python
# nltk.download('stopwords')
# ```
#
# #### Import some helper functions that we provided in the utils.py file:
# * `process_tweet()`: cleans the text, tokenizes it into separate words, removes stopwords, and converts words to stems.
# * `build_freqs()`: this counts how often a word in the 'corpus' (the entire set of tweets) was associated with a positive label '1' or a negative label '0', then builds the `freqs` dictionary, where each key is a (word,label) tuple, and the value is the count of its frequency within the corpus of tweets.
# +
# add folder, tmp2, from our local workspace containing pre-downloaded corpora files to nltk's data path
# this enables importing of these files without downloading it again when we refresh our workspace
filePath = f"{getcwd()}/../tmp2/"
nltk.data.path.append(filePath)
# +
import numpy as np
import pandas as pd
from nltk.corpus import twitter_samples
from utils import process_tweet, build_freqs
# -
# ### Prepare the data
# * The `twitter_samples` contains subsets of 5,000 positive tweets, 5,000 negative tweets, and the full set of 10,000 tweets.
# * If you used all three datasets, we would introduce duplicates of the positive tweets and negative tweets.
# * You will select just the five thousand positive tweets and five thousand negative tweets.
# select the set of positive and negative tweets
all_positive_tweets = twitter_samples.strings('positive_tweets.json')
all_negative_tweets = twitter_samples.strings('negative_tweets.json')
len(all_positive_tweets)
# * Train test split: 20% will be in the test set, and 80% in the training set.
#
# +
# split the data into two pieces, one for training and one for testing (validation set)
test_pos = all_positive_tweets[4000:]
train_pos = all_positive_tweets[:4000]
test_neg = all_negative_tweets[4000:]
train_neg = all_negative_tweets[:4000]
train_x = train_pos + train_neg
test_x = test_pos + test_neg
# -
# * Create the numpy array of positive labels and negative labels.
# combine positive and negative labels
train_y = np.append(np.ones((len(train_pos), 1)), np.zeros((len(train_neg), 1)), axis=0)
test_y = np.append(np.ones((len(test_pos), 1)), np.zeros((len(test_neg), 1)), axis=0)
# Print the shape train and test sets
print("train_y.shape = " + str(train_y.shape))
print("test_y.shape = " + str(test_y.shape))
# * Create the frequency dictionary using the imported `build_freqs()` function.
# * We highly recommend that you open `utils.py` and read the `build_freqs()` function to understand what it is doing.
# * To view the file directory, go to the menu and click File->Open.
#
# ```Python
# for y,tweet in zip(ys, tweets):
# for word in process_tweet(tweet):
# pair = (word, y)
# if pair in freqs:
# freqs[pair] += 1
# else:
# freqs[pair] = 1
# ```
# * Notice how the outer for loop goes through each tweet, and the inner for loop steps through each word in a tweet.
# * The `freqs` dictionary is the frequency dictionary that's being built.
# * The key is the tuple (word, label), such as ("happy",1) or ("happy",0). The value stored for each key is the count of how many times the word "happy" was associated with a positive label, or how many times "happy" was associated with a negative label.
# +
# create frequency dictionary
freqs = build_freqs(train_x, train_y)
# check the output
print("type(freqs) = " + str(type(freqs)))
print("len(freqs) = " + str(len(freqs.keys())))
# -
# #### Expected output
# ```
# type(freqs) = <class 'dict'>
# len(freqs) = 11346
# ```
# ### Process tweet
# The given function `process_tweet()` tokenizes the tweet into individual words, removes stop words and applies stemming.
# test the function below
print('This is an example of a positive tweet: \n', train_x[0])
print('\nThis is an example of the processed version of the tweet: \n', process_tweet(train_x[0]))
# #### Expected output
# ```
# This is an example of a positive tweet:
# #FollowFriday @France_Inte @PKuchly57 @Milipol_Paris for being top engaged members in my community this week :)
#
# This is an example of the processes version:
# ['followfriday', 'top', 'engag', 'member', 'commun', 'week', ':)']
# ```
# # Part 1: Logistic regression
#
#
# ### Part 1.1: Sigmoid
# You will learn to use logistic regression for text classification.
# * The sigmoid function is defined as:
#
# $$ h(z) = \frac{1}{1+\exp^{-z}} \tag{1}$$
#
# It maps the input 'z' to a value that ranges between 0 and 1, and so it can be treated as a probability.
#
# <div style="width:image width px; font-size:100%; text-align:center;"><img src='../tmp2/sigmoid_plot.jpg' alt="alternate text" width="width" height="height" style="width:300px;height:200px;" /> Figure 1 </div>
# #### Instructions: Implement the sigmoid function
# * You will want this function to work if z is a scalar as well as if it is an array.
# <details>
# <summary>
# <font size="3" color="darkgreen"><b>Hints</b></font>
# </summary>
# <p>
# <ul>
# <li><a href="https://docs.scipy.org/doc/numpy/reference/generated/numpy.exp.html" > numpy.exp </a> </li>
#
# </ul>
# </p>
#
#
# UNQ_C1 (UNIQUE CELL IDENTIFIER, DO NOT EDIT)
def sigmoid(z):
    '''
    Logistic sigmoid, 1 / (1 + e^(-z)).

    Input:
        z: the input (a scalar or a numpy array)
    Output:
        the sigmoid of z, element-wise, in the open interval (0, 1)
    '''
    # np.exp broadcasts, so one expression handles scalars and arrays alike.
    return 1.0 / (1.0 + np.exp(-z))
# +
# Testing your function
if (sigmoid(0) == 0.5):
print('SUCCESS!')
else:
print('Oops!')
if (sigmoid(4.92) == 0.9927537604041685):
print('CORRECT!')
else:
print('Oops again!')
# -
# ### Logistic regression: regression and a sigmoid
#
# Logistic regression takes a regular linear regression, and applies a sigmoid to the output of the linear regression.
#
# Regression:
# $$z = \theta_0 x_0 + \theta_1 x_1 + \theta_2 x_2 + ... \theta_N x_N$$
# Note that the $\theta$ values are "weights". If you took the Deep Learning Specialization, we referred to the weights with the `w` vector. In this course, we're using a different variable $\theta$ to refer to the weights.
#
# Logistic regression
# $$ h(z) = \frac{1}{1+\exp^{-z}}$$
# $$z = \theta_0 x_0 + \theta_1 x_1 + \theta_2 x_2 + ... \theta_N x_N$$
# We will refer to 'z' as the 'logits'.
# ### Part 1.2 Cost function and Gradient
#
# The cost function used for logistic regression is the average of the log loss across all training examples:
#
# $$J(\theta) = -\frac{1}{m} \sum_{i=1}^m y^{(i)}\log (h(z(\theta)^{(i)})) + (1-y^{(i)})\log (1-h(z(\theta)^{(i)}))\tag{5} $$
# * $m$ is the number of training examples
# * $y^{(i)}$ is the actual label of the i-th training example.
# * $h(z(\theta)^{(i)})$ is the model's prediction for the i-th training example.
#
# The loss function for a single training example is
# $$ Loss = -1 \times \left( y^{(i)}\log (h(z(\theta)^{(i)})) + (1-y^{(i)})\log (1-h(z(\theta)^{(i)})) \right)$$
#
# * All the $h$ values are between 0 and 1, so the logs will be negative. That is the reason for the factor of -1 applied to the sum of the two loss terms.
# * Note that when the model predicts 1 ($h(z(\theta)) = 1$) and the label $y$ is also 1, the loss for that training example is 0.
# * Similarly, when the model predicts 0 ($h(z(\theta)) = 0$) and the actual label is also 0, the loss for that training example is 0.
# * However, when the model prediction is close to 1 ($h(z(\theta)) = 0.9999$) and the label is 0, the second term of the log loss becomes a large negative number, which is then multiplied by the overall factor of -1 to convert it to a positive loss value. $-1 \times (1 - 0) \times log(1 - 0.9999) \approx 9.2$ The closer the model prediction gets to 1, the larger the loss.
# verify that when the model predicts close to 1, but the actual label is 0, the loss is a large positive value
-1 * (1 - 0) * np.log(1 - 0.9999) # loss is about 9.2
# * Likewise, if the model predicts close to 0 ($h(z) = 0.0001$) but the actual label is 1, the first term in the loss function becomes a large number: $-1 \times log(0.0001) \approx 9.2$. The closer the prediction is to zero, the larger the loss.
# verify that when the model predicts close to 0 but the actual label is 1, the loss is a large positive value
-1 * np.log(0.0001) # loss is about 9.2
# #### Update the weights
#
# To update your weight vector $\theta$, you will apply gradient descent to iteratively improve your model's predictions.
# The gradient of the cost function $J$ with respect to one of the weights $\theta_j$ is:
#
# $$\nabla_{\theta_j}J(\theta) = \frac{1}{m} \sum_{i=1}^m(h^{(i)}-y^{(i)})x_j \tag{5}$$
# * 'i' is the index across all 'm' training examples.
# * 'j' is the index of the weight $\theta_j$, so $x_j$ is the feature associated with weight $\theta_j$
#
# * To update the weight $\theta_j$, we adjust it by subtracting a fraction of the gradient determined by $\alpha$:
# $$\theta_j = \theta_j - \alpha \times \nabla_{\theta_j}J(\theta) $$
# * The learning rate $\alpha$ is a value that we choose to control how big a single update will be.
#
# ## Instructions: Implement gradient descent function
# * The number of iterations `num_iters` is the number of times that you'll use the entire training set.
# * For each iteration, you'll calculate the cost function using all training examples (there are `m` training examples), and for all features.
# * Instead of updating a single weight $\theta_i$ at a time, we can update all the weights in the column vector:
# $$\mathbf{\theta} = \begin{pmatrix}
# \theta_0
# \\
# \theta_1
# \\
# \theta_2
# \\
# \vdots
# \\
# \theta_n
# \end{pmatrix}$$
# * $\mathbf{\theta}$ has dimensions (n+1, 1), where 'n' is the number of features, and there is one more element for the bias term $\theta_0$ (note that the corresponding feature value $\mathbf{x_0}$ is 1).
# * The 'logits', 'z', are calculated by multiplying the feature matrix 'x' with the weight vector 'theta'. $z = \mathbf{x}\mathbf{\theta}$
# * $\mathbf{x}$ has dimensions (m, n+1)
# * $\mathbf{\theta}$: has dimensions (n+1, 1)
# * $\mathbf{z}$: has dimensions (m, 1)
# * The prediction 'h', is calculated by applying the sigmoid to each element in 'z': $h(z) = sigmoid(z)$, and has dimensions (m,1).
# * The cost function $J$ is calculated by taking the dot product of the vectors 'y' and 'log(h)'. Since both 'y' and 'h' are column vectors (m,1), transpose the vector to the left, so that matrix multiplication of a row vector with column vector performs the dot product.
# $$J = \frac{-1}{m} \times \left(\mathbf{y}^T \cdot log(\mathbf{h}) + \mathbf{(1-y)}^T \cdot log(\mathbf{1-h}) \right)$$
# * The update of theta is also vectorized. Because the dimensions of $\mathbf{x}$ are (m, n+1), and both $\mathbf{h}$ and $\mathbf{y}$ are (m, 1), we need to transpose the $\mathbf{x}$ and place it on the left in order to perform matrix multiplication, which then yields the (n+1, 1) answer we need:
# $$\mathbf{\theta} = \mathbf{\theta} - \frac{\alpha}{m} \times \left( \mathbf{x}^T \cdot \left( \mathbf{h-y} \right) \right)$$
# <details>
# <summary>
# <font size="3" color="darkgreen"><b>Hints</b></font>
# </summary>
# <p>
# <ul>
# <li>use np.dot for matrix multiplication.</li>
# <li>To ensure that the fraction -1/m is a decimal value, cast either the numerator or denominator (or both), like `float(1)`, or write `1.` for the float version of 1. </li>
# </ul>
# </p>
#
#
# UNQ_C2 (UNIQUE CELL IDENTIFIER, DO NOT EDIT)
def gradientDescent(x, y, theta, alpha, num_iters):
    '''
    Full-batch gradient descent for logistic regression.

    Input:
        x: (m, n+1) feature matrix (first column is the bias feature)
        y: (m, 1) label vector matching x
        theta: (n+1, 1) initial weight vector
        alpha: learning rate
        num_iters: number of full-batch update steps
    Output:
        J: the final cost (a float)
        theta: the trained (n+1, 1) weight vector
    '''
    # number of training examples
    m = x.shape[0]

    for _ in range(num_iters):
        # Forward pass: logits, then probabilities via the sigmoid.
        h = sigmoid(np.dot(x, theta))

        # Vectorized cross-entropy cost:
        # J = -(1/m) * (y^T log(h) + (1-y)^T log(1-h))
        J = -(1 / m) * (np.dot(y.T, np.log(h))
                        + np.dot((1 - y).T, np.log(1 - h)))

        # Gradient step: theta <- theta - (alpha/m) * X^T (h - y)
        theta = theta - (alpha / m) * np.dot(x.T, h - y)

    return float(J), theta
# +
# Check the function
# Construct a synthetic test case using numpy PRNG functions
np.random.seed(1)
# X input is 10 x 3 with ones for the bias terms
tmp_X = np.append(np.ones((10, 1)), np.random.rand(10, 2) * 2000, axis=1)
# Y Labels are 10 x 1
tmp_Y = (np.random.rand(10, 1) > 0.35).astype(float)
# Apply gradient descent
tmp_J, tmp_theta = gradientDescent(tmp_X, tmp_Y, np.zeros((3, 1)), 1e-8, 700)
print(f"The cost after training is {tmp_J:.8f}.")
print(f"The resulting vector of weights is {[round(t, 8) for t in np.squeeze(tmp_theta)]}")
# -
# #### Expected output
# ```
# The cost after training is 0.67094970.
# The resulting vector of weights is [4.1e-07, 0.00035658, 7.309e-05]
# ```
# ## Part 2: Extracting the features
#
# * Given a list of tweets, extract the features and store them in a matrix. You will extract two features.
# * The first feature is the number of positive words in a tweet.
# * The second feature is the number of negative words in a tweet.
# * Then train your logistic regression classifier on these features.
# * Test the classifier on a validation set.
#
# ### Instructions: Implement the extract_features function.
# * This function takes in a single tweet.
# * Process the tweet using the imported `process_tweet()` function and save the list of tweet words.
# * Loop through each word in the list of processed words
# * For each word, check the `freqs` dictionary for the count when that word has a positive '1' label. (Check for the key (word, 1.0)
# * Do the same for the count for when the word is associated with the negative label '0'. (Check for the key (word, 0.0).)
#
# <details>
# <summary>
# <font size="3" color="darkgreen"><b>Hints</b></font>
# </summary>
# <p>
# <ul>
# <li>Make sure you handle cases when the (word, label) key is not found in the dictionary. </li>
# <li> Search the web for hints about using the `.get()` method of a Python dictionary. Here is an <a href="https://www.programiz.com/python-programming/methods/dictionary/get" > example </a> </li>
# </ul>
# </p>
#
# UNQ_C3 (UNIQUE CELL IDENTIFIER, DO NOT EDIT)
def extract_features(tweet, freqs):
    '''
    Build the (1, 3) feature row [bias, positive-count, negative-count].

    Input:
        tweet: the raw tweet text
        freqs: a dictionary mapping (word, label) tuples to corpus counts
    Output:
        x: a feature vector of dimension (1, 3)
    '''
    # Tokenize, stem, and strip stopwords/handles/URLs.
    tokens = process_tweet(tweet)

    # Sum the corpus frequency of each word under the positive (1) and
    # negative (0) labels; missing keys contribute 0.
    positive_total = sum(freqs.get((token, 1), 0) for token in tokens)
    negative_total = sum(freqs.get((token, 0), 0) for token in tokens)

    x = np.zeros((1, 3))
    x[0, 0] = 1  # bias term
    x[0, 1] = positive_total
    x[0, 2] = negative_total

    assert(x.shape == (1, 3))
    return x
freqs
# +
# Check your function
# test 1
# test on training data
tmp1 = extract_features(train_x[0], freqs)
print(tmp1)
# -
# #### Expected output
# ```
# [[1.00e+00 3.02e+03 6.10e+01]]
# ```
# test 2:
# check for when the words are not in the freqs dictionary
tmp2 = extract_features('blorb bleeeeb bloooob', freqs)
print(tmp2)
# #### Expected output
# ```
# [[1. 0. 0.]]
# ```
# ## Part 3: Training Your Model
#
# To train the model:
# * Stack the features for all training examples into a matrix `X`.
# * Call `gradientDescent`, which you've implemented above.
#
# This section is given to you. Please read it for understanding and run the cell.
# +
# collect the features 'x' and stack them into a matrix 'X'
X = np.zeros((len(train_x), 3))
for i in range(len(train_x)):
    X[i, :]= extract_features(train_x[i], freqs)

# training labels corresponding to X
Y = train_y

# Apply gradient descent.  The tiny learning rate (1e-9) compensates for the
# large raw word-count features (counts reach the thousands).
J, theta = gradientDescent(X, Y, np.zeros((3, 1)), 1e-9, 1500)
print(f"The cost after training is {J:.8f}.")
print(f"The resulting vector of weights is {[round(t, 8) for t in np.squeeze(theta)]}")
# -
# **Expected Output**:
#
# ```
# The cost after training is 0.24216529.
# The resulting vector of weights is [7e-08, 0.0005239, -0.00055517]
# ```
# # Part 4: Test your logistic regression
#
# It is time for you to test your logistic regression function on some new input that your model has not seen before.
#
# #### Instructions: Write `predict_tweet`
# Predict whether a tweet is positive or negative.
#
# * Given a tweet, process it, then extract the features.
# * Apply the model's learned weights on the features to get the logits.
# * Apply the sigmoid to the logits to get the prediction (a value between 0 and 1).
#
# $$y_{pred} = sigmoid(\mathbf{x} \cdot \theta)$$
# UNQ_C4 (UNIQUE CELL IDENTIFIER, DO NOT EDIT)
def predict_tweet(tweet, freqs, theta):
    '''
    Estimate the probability that `tweet` carries positive sentiment.

    Input:
        tweet: a string
        freqs: a dictionary mapping (word, label) tuples to frequencies
        theta: (3,1) vector of weights
    Output:
        y_pred: (1, 1) array holding the positive-class probability
    '''
    # Featurize the tweet, compute the logit, then squash through sigmoid.
    features = extract_features(tweet, freqs)
    logits = np.dot(features, theta)
    return sigmoid(logits)
# Run this cell to test your function
for tweet in ['I am happy', 'I am bad', 'this movie should have been great.', 'great', 'great great', 'great great great', 'great great great great']:
print( '%s -> %f' % (tweet, predict_tweet(tweet, freqs, theta)))
# **Expected Output**:
# ```
# I am happy -> 0.518580
# I am bad -> 0.494339
# this movie should have been great. -> 0.515331
# great -> 0.515464
# great great -> 0.530898
# great great great -> 0.546273
# great great great great -> 0.561561
# ```
# Feel free to check the sentiment of your own tweet below
my_tweet = 'I am learning :)'
predict_tweet(my_tweet, freqs, theta)
# ## Check performance using the test set
# After training your model using the training set above, check how your model might perform on real, unseen data, by testing it against the test set.
#
# #### Instructions: Implement `test_logistic_regression`
# * Given the test data and the weights of your trained model, calculate the accuracy of your logistic regression model.
# * Use your `predict_tweet()` function to make predictions on each tweet in the test set.
# * If the prediction is > 0.5, set the model's classification `y_hat` to 1, otherwise set the model's classification `y_hat` to 0.
# * A prediction is accurate when `y_hat` equals `test_y`. Sum up all the instances when they are equal and divide by `m`.
#
# <details>
# <summary>
# <font size="3" color="darkgreen"><b>Hints</b></font>
# </summary>
# <p>
# <ul>
# <li>Use np.asarray() to convert a list to a numpy array</li>
# <li>Use np.squeeze() to make an (m,1) dimensional array into an (m,) array </li>
# </ul>
# </p>
# UNQ_C5 (UNIQUE CELL IDENTIFIER, DO NOT EDIT)
def test_logistic_regression(test_x, test_y, freqs, theta):
    """
    Compute classification accuracy of the trained model on a test set.

    Input:
        test_x: a list of tweets
        test_y: (m, 1) vector with the corresponding labels for the list of tweets
        freqs: a dictionary with the frequency of each pair (or tuple)
        theta: weight vector of dimension (3, 1)
    Output:
        accuracy: (# of tweets classified correctly) / (total # of tweets)
    """
    # the list for storing predictions
    y_hat = []

    for tweet in test_x:
        # probability of the positive class for this tweet
        y_pred = predict_tweet(tweet, freqs, theta)
        # threshold at 0.5 to get a hard 0/1 label
        y_hat.append(1 if y_pred > 0.5 else 0)

    # y_hat is a plain list and test_y is an (m, 1) array: convert both to
    # 1-D arrays so '==' compares element-wise, then average the matches.
    # (The previous version wrapped the comparison in a one-element list
    # before np.sum, which only worked because np.sum flattens its input.)
    accuracy = np.mean(np.asarray(y_hat) == np.squeeze(test_y))

    return accuracy
tmp_accuracy = test_logistic_regression(test_x, test_y, freqs, theta)
print(f"Logistic regression model's accuracy = {tmp_accuracy:.4f}")
# #### Expected Output:
# ```0.9950```
# Pretty good!
# # Part 5: Error Analysis
#
# In this part you will see some tweets that your model misclassified. Why do you think the misclassifications happened? Specifically what kind of tweets does your model misclassify?
# Some error analysis done for you
print('Label Predicted Tweet')
for x,y in zip(test_x,test_y):
y_hat = predict_tweet(x, freqs, theta)
if np.abs(y - (y_hat > 0.5)) > 0:
print('THE TWEET IS:', x)
print('THE PROCESSED TWEET IS:', process_tweet(x))
print('%d\t%0.8f\t%s' % (y, y_hat, ' '.join(process_tweet(x)).encode('ascii', 'ignore')))
# Later in this specialization, we will see how we can use deep learning to improve the prediction performance.
# # Part 6: Predict with your own tweet
# Feel free to change the tweet below
my_tweet = 'This is a ridiculously bright movie. The plot was terrible but I was happy until the ending!'
print(process_tweet(my_tweet))
y_hat = predict_tweet(my_tweet, freqs, theta)
print(y_hat)
if y_hat > 0.5:
print('Positive sentiment')
else:
print('Negative sentiment')
| Logistic Regression on Sentimental Analysis.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# name: python3
# ---
# + [markdown] id="tF5VLcjwc_ei"
# # Parte 1: carga de datos y preparación
# + [markdown] id="eU67QglHdDTK"
# ## Crear un archivo Python y agregar las dependencias necesarias
# + id="Tg9sYExC1_xz"
import matplotlib
import matplotlib.pyplot as plt
import pandas as pd
import seaborn as sn
from sklearn.discriminant_analysis import LinearDiscriminantAnalysis
from sklearn.linear_model import LogisticRegression
from sklearn.metrics import confusion_matrix, classification_report
from sklearn.model_selection import train_test_split
from sklearn.preprocessing import LabelEncoder
# + id="_nLLs4MhfDqS"
def plot_confusion_matrix(test_y, y_pred, printMode=True):
    """Render the confusion matrix of (test_y, y_pred) as an annotated
    heatmap, optionally printing the raw matrix first."""
    matrix = confusion_matrix(test_y, y_pred)
    if printMode:
        print(matrix)
    sn.heatmap(matrix, annot=True)
# + id="YevMTNi2fgjo"
def load_data(path,printMode=True):
    """Read a CSV file (first row used as the header) into a DataFrame.

    Optionally echoes the raw cell values for a quick sanity check, then
    returns the loaded frame.
    """
    frame = pd.read_csv(path, header=0)
    if printMode:
        print(frame.values)
    return frame
# + [markdown] id="of5SR3KwdF3i"
# ## Leer el dataset desde el archivo CSV utilizando la librería Pandas. Y ver como esta compuesto.
# + colab={"base_uri": "https://localhost:8080/"} id="1cpbObG62E32" outputId="6fe36424-a0c2-41d7-c5d7-d070527f96d9"
df = load_data("./sample.csv")
# + [markdown] id="c6qYbMCBdH2v"
# ## Graficar los datos utilizando la librería.
# + colab={"base_uri": "https://localhost:8080/", "height": 265} id="oMHaQznn2GsJ" outputId="39506ec0-5c9e-4569-ae5b-9c46e2a75318"
colors = ("orange", "blue")
plt.scatter(df['x'], df['y'], s=300, c=df['label'],
cmap=matplotlib.colors.ListedColormap(colors))
plt.show()
# + [markdown] id="rfAnefyqdJ3V"
# ## Obtener a partir del dataset los datos y las clases.
# + id="c-KHw4NDfsiz"
attributes = ['x', 'y']
labels = ['label']
# + id="a3ptWpkX2O_1"
X = df[attributes].values
y = df[labels].values
# + [markdown] id="Z7cuj-eHdL9D"
# # Parte 2: entrenamiento y testing
# + [markdown] id="SZhO1ekfdN4B"
# ## Dividir el conjunto de datos en 2, uno para entrenamiento y otro para prueba.
# + id="iECbdk5j2Q8X"
train_X, test_X, train_y, test_y = train_test_split(X, y, test_size=0.25,
random_state=0, shuffle=True)
# + [markdown] id="VlvEbto7dPza"
# ## Crear un modelo de LDA y entrenarlo.
# + id="O5mvr0O02S8x" colab={"base_uri": "https://localhost:8080/"} outputId="bae78725-2b5f-47fa-ff05-1c9f2c8b0934"
lda = LinearDiscriminantAnalysis()
lda = lda.fit(train_X, train_y)
# + [markdown] id="NY2krvxmdRvS"
# # Parte 3: evaluación
# + [markdown] id="MptDI0DzdTj3"
# ## Predecir las clases para los datos del conjunto de prueba y ver los resultados.
# + colab={"base_uri": "https://localhost:8080/"} id="FftgcAZf2UKQ" outputId="81a69504-3bd7-4969-89a4-ece495d44be0"
y_pred = lda.predict(test_X)
print("Predicted vs Expected")
print(y_pred)
print(test_y)
# + [markdown] id="V_HYDKZAdWQe"
# ## Probar el modelo y ver el reporte. Observar las columnas y que significan.
# + colab={"base_uri": "https://localhost:8080/"} id="z_uvYlwt2Z-w" outputId="362025e4-231d-4d73-d702-411cb40ae167"
print(classification_report(test_y, y_pred, digits=3))
# + [markdown] id="pQPKplSFdYKb"
# ## Ver la matriz de confusión y analizar los resultados.
# + colab={"base_uri": "https://localhost:8080/", "height": 304} id="eHb7n3ug2bbj" outputId="b845e3eb-f7d0-48ce-abfc-3cdb592c0312"
plot_confusion_matrix(test_y, y_pred)
# + [markdown] id="nTeP3x-XdrDd"
# # Parte 4 (opcional): Regresión Logística
# + id="nDWU78eM2c04"
lr = LogisticRegression()
# + id="7grdGieu2gfD" colab={"base_uri": "https://localhost:8080/"} outputId="dd7c05af-ee14-498c-c4f2-61ba2a15977a"
lr = lr.fit(train_X, train_y)
# + colab={"base_uri": "https://localhost:8080/"} id="4ZmWn4uw2iJ8" outputId="ca25504c-7170-48e1-d7da-cc2faa1e9c79"
y_pred = lr.predict(test_X)
print("Predicted vs Expected")
print(y_pred)
print(test_y)
# + colab={"base_uri": "https://localhost:8080/"} id="0jYPPFX32m4e" outputId="86f86c01-f2b1-492e-dff3-206a6d4efe18"
print(classification_report(test_y, y_pred, digits=3))
# + colab={"base_uri": "https://localhost:8080/", "height": 304} id="ERfBzLDYdwho" outputId="20f40fa5-48da-4417-b377-733660fbcf6e"
plot_confusion_matrix(test_y, y_pred)
# + [markdown] id="3kbgyw4p2-vx"
# # Ejercicio 2 - Dataset sport-training.csv
#
# Realizar nuevamente el ejercicio del trabajo de aplicación 6 para la clasificación de deportistas utilizando scikit-learn. En cada paso comparar los resultados con los obtenidos utilizando RapidMiner.
# + colab={"base_uri": "https://localhost:8080/"} id="UgumQvst2qju" outputId="4aa3ce67-ddf5-41fc-d0a8-19ec91ff7ae9"
df = load_data("./sports_Training.csv")
# + [markdown] id="r4u-xLcyeSmQ"
# Eliminar filas cuyo valor para el atributo 'CapacidadDecision' estan fuera de los limites. Esto
# se puede hacer de la siguiente forma utilizando la libreria Pandas
# + id="YQDVZWqo3CfW"
df = df[(df['CapacidadDecision'] >= 3) &
(df['CapacidadDecision'] <= 100)]
# + [markdown] id="Iy-3Uue8eVeW"
# Transformar atributos en string a numeros
# + id="JEEYX45K3L_z" colab={"base_uri": "https://localhost:8080/"} outputId="e762cf16-d701-4d3c-fb8f-36972e74d9ca"
le = LabelEncoder()
# NOTE(review): at this point `y` still holds the labels from the *first*
# dataset (sample.csv); the sports labels are only assigned a few cells
# below. `y_encoded` is also never used afterwards — confirm the intended
# cell ordering.
y_encoded = le.fit_transform(y)
# + id="SNHcGUcUfVcy"
attributes = ['Edad', 'Fuerza', 'Velocidad', 'Lesiones', 'Vision', 'Resistencia',
'Agilidad', 'CapacidadDecision']
labels = ['DeportePrimario']
# + id="iWp0F0T_3jWh" colab={"base_uri": "https://localhost:8080/"} outputId="445385f8-6483-4d88-8aa7-80aa9fd3a125"
X = df[attributes].values
y = df[labels].values
print(df[labels].value_counts())
# + id="ljN1cFjt3ly_"
train_X, test_X, train_y, test_y = train_test_split(X, y, test_size=0.25,
random_state=0, shuffle=True)
# + id="HMWo4q693Sx5"
lr = LinearDiscriminantAnalysis()
# + id="-oMxbwt43iwy" colab={"base_uri": "https://localhost:8080/"} outputId="92086afc-094f-4c03-df18-de0850020891"
lr = lr.fit(train_X, train_y)
# + colab={"base_uri": "https://localhost:8080/"} id="y-1-kBx66NLF" outputId="b8cd7880-449b-403f-9d89-f6669d5247c3"
y_pred = lr.predict(test_X)
print("Predicted vs Expected")
print(y_pred)
print(test_y)
# + colab={"base_uri": "https://localhost:8080/"} id="hrQLCPDu6WmZ" outputId="f9b33642-3164-4cca-c7c1-18e3180590d0"
print(classification_report(test_y, y_pred, digits=3))
# + colab={"base_uri": "https://localhost:8080/", "height": 334} id="Jy2wgZH_e2GK" outputId="a5ef1d3d-a42e-42c4-8500-3774318acfeb"
plot_confusion_matrix(test_y, y_pred)
# + [markdown] id="FO36FeDrejtq"
# Usar los datos del archivo sports_Scoring.csv para clasificar los nuevos individuos utilizando el
# modelo entrenado anteriormente. Comparar los resultados con los obtenidos en el TA6.
# + colab={"base_uri": "https://localhost:8080/"} id="qHHv_vBi30Za" outputId="21a74014-9e30-4f93-9d54-83151c516bd5"
df_test = load_data("./sports_Scoring.csv")
# + id="0JeWjNBClsdF"
df_test = df_test[(df_test['CapacidadDecision'] >= 3) &
(df_test['CapacidadDecision'] <= 100)]
# + id="aDj8VXY9luoq"
x_test = df_test[attributes].values
# + colab={"base_uri": "https://localhost:8080/"} id="zTW7RYkr4MWH" outputId="827d7d22-07ec-4cfe-e780-869809eea31c"
y_pred = lr.predict(x_test)
print("Predicted vs Expected")
print(y_pred)
print(df_test)
# + id="kkG7hkStg_jl"
df_test["prediction(DeportePrimario)"] = y_pred
# + id="_XLiGDgahE49"
df_test.to_csv("output_python.csv")
# + colab={"base_uri": "https://localhost:8080/"} id="2GCxH9XwhI50" outputId="1fb9fa46-d761-4017-8701-1c293b397430"
output_rapidminer = load_data("./output_rapidminer.csv")
output_python = load_data("./output_python.csv")
# + colab={"base_uri": "https://localhost:8080/"} id="Y56g8u2ZiD_W" outputId="580f9f5a-8c86-4407-ceab-95986e24cf15"
print(output_python["prediction(DeportePrimario)"].head())
# + colab={"base_uri": "https://localhost:8080/"} id="mdaQD2T_iIx9" outputId="987fdd8c-b86c-49ca-a6d5-270c5e59964e"
print(output_rapidminer["prediction(DeportePrimario)"].head())
# + colab={"base_uri": "https://localhost:8080/"} id="NBoMzM9_hbRF" outputId="929df21e-a111-46f6-b15a-714b6645636e"
print(len(output_python["prediction(DeportePrimario)"]))
print(len(output_rapidminer["prediction(DeportePrimario)"]))
comparison = output_python["prediction(DeportePrimario)"] == output_rapidminer["prediction(DeportePrimario)"]
print(comparison)
# + colab={"base_uri": "https://localhost:8080/"} id="fa7P4nNvhj-X" outputId="0505ea65-dfbc-4125-bcd9-876f245794b8"
comparison.value_counts()
# + [markdown] id="otTpLPuIiWez"
# Dado que hay una diferencia en el tratamiento de datos los resultados no son identicos, de todas formas son bastante similares
| content/posts/ut/ut3/pd/pd5/notebook.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python [conda env:badeda]
# language: python
# name: conda-env-badeda-py
# ---
# +
import sys
sys.path.insert(0, "timm-efficientdet-pytorch")
sys.path.insert(0, "omegaconf")
import torch
import os
from datetime import datetime
import time
import random
import cv2
import pandas as pd
import numpy as np
import albumentations as A
import matplotlib.pyplot as plt
from albumentations.pytorch.transforms import ToTensorV2
from sklearn.model_selection import StratifiedKFold
from torch.utils.data import Dataset,DataLoader
from torch.utils.data.sampler import SequentialSampler, RandomSampler
from glob import glob
SEED = 42
def seed_everything(seed):
    """Seed every RNG in play (Python, hashing, NumPy, torch CPU and CUDA)
    for run-to-run reproducibility.

    cudnn.benchmark is disabled here: leaving it enabled lets cuDNN choose
    kernels by auto-tuning/timing, which makes results non-deterministic and
    defeats the purpose of this function (the original set it to True).
    """
    random.seed(seed)
    os.environ['PYTHONHASHSEED'] = str(seed)
    np.random.seed(seed)
    torch.manual_seed(seed)
    torch.cuda.manual_seed(seed)
    torch.backends.cudnn.deterministic = True
    torch.backends.cudnn.benchmark = False
seed_everything(SEED)
# +
marking = pd.read_csv('/home/hy/dataset/gwd/train.csv')
bboxs = np.stack(marking['bbox'].apply(lambda x: np.fromstring(x[1:-1], sep=',')))
for i, column in enumerate(['x', 'y', 'w', 'h']):
marking[column] = bboxs[:,i]
marking.drop(columns=['bbox'], inplace=True)
# +
skf = StratifiedKFold(n_splits=5, shuffle=True, random_state=42)
df_folds = marking[['image_id']].copy()
df_folds.loc[:, 'bbox_count'] = 1
df_folds = df_folds.groupby('image_id').count()
df_folds.loc[:, 'source'] = marking[['image_id', 'source']].groupby('image_id').min()['source']
df_folds.loc[:, 'stratify_group'] = np.char.add(
df_folds['source'].values.astype(str),
df_folds['bbox_count'].apply(lambda x: f'_{x // 15}').values.astype(str)
)
df_folds.loc[:, 'fold'] = 0
for fold_number, (train_index, val_index) in enumerate(skf.split(X=df_folds.index, y=df_folds['stratify_group'])):
df_folds.loc[df_folds.iloc[val_index].index, 'fold'] = fold_number
# -
# ## Albumentations
# +
def get_train_transforms():
    """Training-time augmentation pipeline (albumentations).

    Bounding boxes are supplied in pascal_voc (xmin, ymin, xmax, ymax)
    format and are transformed together with the image.
    """
    return A.Compose(
        [
            A.RandomSizedCrop(min_max_height=(800, 800), height=1024, width=1024, p=0.5),
            # Apply at most one photometric jitter per sample: HSV shift,
            # brightness/contrast, or gamma.
            A.OneOf([
                A.HueSaturationValue(hue_shift_limit=0.2, sat_shift_limit= 0.2,
                                     val_shift_limit=0.2, p=0.9),
                A.RandomBrightnessContrast(brightness_limit=0.2,
                                           contrast_limit=0.2, p=0.9),
                A.RandomGamma(p=0.9),
            ],p=0.9),
            A.ToGray(p=0.01),
            A.HorizontalFlip(p=0.5),
            A.VerticalFlip(p=0.5),
            A.Resize(height=1024, width=1024, p=1),
            # Random occlusion squares for regularization
            A.Cutout(num_holes=8, max_h_size=64, max_w_size=64, fill_value=0, p=0.5),
            ToTensorV2(p=1.0),
        ],
        p=1.0,
        bbox_params=A.BboxParams(
            format='pascal_voc',
            min_area=0,
            min_visibility=0,
            label_fields=['labels']
        )
    )
def get_valid_transforms():
    """Validation-time pipeline: deterministic resize plus tensor conversion.

    Boxes use pascal_voc (xmin, ymin, xmax, ymax) format, matching the
    training pipeline.
    """
    bbox_cfg = A.BboxParams(
        format='pascal_voc',
        min_area=0,
        min_visibility=0,
        label_fields=['labels'],
    )
    steps = [
        A.Resize(height=1024, width=1024, p=1.0),
        ToTensorV2(p=1.0),
    ]
    return A.Compose(steps, p=1.0, bbox_params=bbox_cfg)
# -
# ## Dataset
# +
TRAIN_ROOT_PATH = '/home/hy/dataset/gwd/train'
class DatasetRetriever(Dataset):
    """Wheat-detection dataset: loads an image and its boxes, optionally
    composing a 4-image cutmix mosaic, then applies albumentations."""

    def __init__(self, marking, image_ids, transforms=None, test=False):
        super().__init__()
        self.image_ids = image_ids    # array of image ids for this split
        self.marking = marking        # dataframe with per-box x/y/w/h rows
        self.transforms = transforms  # albumentations Compose or None
        self.test = test              # True disables cutmix (validation)

    def __getitem__(self, index: int):
        image_id = self.image_ids[index]
        # Validation always loads a plain image; training uses cutmix for
        # roughly half of the samples.
        if self.test or random.random() > 0.5:
            image, boxes = self.load_image_and_boxes(index)
        else:
            image, boxes = self.load_cutmix_image_and_boxes(index)
        # there is only one class
        labels = torch.ones((boxes.shape[0],), dtype=torch.int64)
        target = {}
        target['boxes'] = boxes
        target['labels'] = labels
        target['image_id'] = torch.tensor([index])
        if self.transforms:
            # Retry up to 10 times: aggressive crops can drop every box, and
            # a sample with zero boxes is not usable downstream.
            for i in range(10):
                sample = self.transforms(**{
                    'image': image,
                    'bboxes': target['boxes'],
                    'labels': labels
                })
                if len(sample['bboxes']) > 0:
                    image = sample['image']
                    target['boxes'] = torch.stack(tuple(map(torch.tensor, zip(*sample['bboxes'])))).permute(1, 0)
                    # Reorder xyxy -> yxyx columns for the detection head.
                    target['boxes'][:,[0,1,2,3]] = target['boxes'][:,[1,0,3,2]] #yxyx: be warning
                    break
        return image, target, image_id

    def __len__(self) -> int:
        return self.image_ids.shape[0]

    def load_image_and_boxes(self, index):
        """Read one RGB image scaled to [0, 1] and its boxes as xyxy."""
        image_id = self.image_ids[index]
        image = cv2.imread(f'{TRAIN_ROOT_PATH}/{image_id}.jpg', cv2.IMREAD_COLOR)
        image = cv2.cvtColor(image, cv2.COLOR_BGR2RGB).astype(np.float32)
        image /= 255.0
        records = self.marking[self.marking['image_id'] == image_id]
        boxes = records[['x', 'y', 'w', 'h']].values
        # convert (x, y, w, h) -> (xmin, ymin, xmax, ymax)
        boxes[:, 2] = boxes[:, 0] + boxes[:, 2]
        boxes[:, 3] = boxes[:, 1] + boxes[:, 3]
        return image, boxes

    def load_cutmix_image_and_boxes(self, index, imsize=1024):
        """
        This implementation of cutmix author: https://www.kaggle.com/nvnnghia
        Refactoring and adaptation: https://www.kaggle.com/shonenkov

        Builds a 2x2 mosaic from this image plus three random images, with
        the mosaic centre drawn away from the borders; box coordinates are
        shifted into the mosaic frame and clipped.
        """
        w, h = imsize, imsize
        s = imsize // 2
        xc, yc = [int(random.uniform(imsize * 0.25, imsize * 0.75)) for _ in range(2)]  # center x, y
        # this image plus 3 random others form the four mosaic quadrants
        indexes = [index] + [random.randint(0, self.image_ids.shape[0] - 1) for _ in range(3)]
        result_image = np.full((imsize, imsize, 3), 1, dtype=np.float32)
        result_boxes = []
        for i, index in enumerate(indexes):
            image, boxes = self.load_image_and_boxes(index)
            if i == 0:  # top left
                x1a, y1a, x2a, y2a = max(xc - w, 0), max(yc - h, 0), xc, yc  # xmin, ymin, xmax, ymax (large image)
                x1b, y1b, x2b, y2b = w - (x2a - x1a), h - (y2a - y1a), w, h  # xmin, ymin, xmax, ymax (small image)
            elif i == 1:  # top right
                x1a, y1a, x2a, y2a = xc, max(yc - h, 0), min(xc + w, s * 2), yc
                x1b, y1b, x2b, y2b = 0, h - (y2a - y1a), min(w, x2a - x1a), h
            elif i == 2:  # bottom left
                x1a, y1a, x2a, y2a = max(xc - w, 0), yc, xc, min(s * 2, yc + h)
                # NOTE(review): `max(xc, w)` looks suspicious — the other
                # quadrants use min(w, x2a - x1a) here; verify against the
                # original kernel.
                x1b, y1b, x2b, y2b = w - (x2a - x1a), 0, max(xc, w), min(y2a - y1a, h)
            elif i == 3:  # bottom right
                x1a, y1a, x2a, y2a = xc, yc, min(xc + w, s * 2), min(s * 2, yc + h)
                x1b, y1b, x2b, y2b = 0, 0, min(w, x2a - x1a), min(y2a - y1a, h)
            # paste the quadrant and shift its boxes into mosaic coordinates
            result_image[y1a:y2a, x1a:x2a] = image[y1b:y2b, x1b:x2b]
            padw = x1a - x1b
            padh = y1a - y1b
            boxes[:, 0] += padw
            boxes[:, 1] += padh
            boxes[:, 2] += padw
            boxes[:, 3] += padh
            result_boxes.append(boxes)
        result_boxes = np.concatenate(result_boxes, 0)
        # clip to the mosaic bounds and drop degenerate (zero-area) boxes
        np.clip(result_boxes[:, 0:], 0, 2 * s, out=result_boxes[:, 0:])
        result_boxes = result_boxes.astype(np.int32)
        result_boxes = result_boxes[np.where((result_boxes[:,2]-result_boxes[:,0])*(result_boxes[:,3]-result_boxes[:,1]) > 0)]
        return result_image, result_boxes
# +
fold_number = 0
print("Fold_number:", fold_number)
train_dataset = DatasetRetriever(
image_ids=df_folds[df_folds['fold'] != fold_number].index.values,
marking=marking,
transforms=get_train_transforms(),
test=False,
)
validation_dataset = DatasetRetriever(
image_ids=df_folds[df_folds['fold'] == fold_number].index.values,
marking=marking,
transforms=get_valid_transforms(),
test=True,
)
# -
class AverageMeter(object):
    """Tracks the most recent value plus a running (weighted) mean."""

    def __init__(self):
        self.reset()

    def reset(self):
        """Clear all accumulated statistics."""
        self.val = 0
        self.avg = 0
        self.sum = 0
        self.count = 0

    def update(self, val, n=1):
        """Record `val` observed `n` times and refresh the running mean."""
        self.val = val
        self.count += n
        self.sum += val * n
        self.avg = self.sum / self.count
def collate_fn(batch):
    """Regroup a list of per-sample tuples into per-field tuples.

    E.g. [(img1, tgt1, id1), (img2, tgt2, id2)] becomes
    ((img1, img2), (tgt1, tgt2), (id1, id2)).
    """
    fields = zip(*batch)
    return tuple(fields)
# ## TrainGlobalConfig
# +
class TrainGlobalConfig:
    """Hyper-parameters and bookkeeping options for one training run."""
    num_workers = 6
    batch_size = 3   # per-process batch size (DDP runs one loader per rank)
    n_epochs = 80
    lr = 0.0002*2
    # gradients are accumulated over this many steps before optimizer.step()
    grad_accumulation_steps = 8
    folder = '0716_effdet7-cutmix-augmix_1024_ddp_grad_'  # checkpoint/log dir
    # -------------------
    verbose = True
    verbose_step = 1   # print progress every N steps
    # -------------------
    # --------------------
    step_scheduler = False  # do scheduler.step after optimizer.step
    validation_scheduler = True  # do scheduler.step after validation stage loss
    # SchedulerClass = torch.optim.lr_scheduler.OneCycleLR
    # scheduler_params = dict(
    #     max_lr=0.001,
    #     epochs=n_epochs,
    #     steps_per_epoch=int(len(train_dataset) / batch_size),
    #     pct_start=0.1,
    #     anneal_strategy='cos',
    #     final_div_factor=10**5
    # )
    # Halve the LR when validation loss plateaus for more than one epoch.
    SchedulerClass = torch.optim.lr_scheduler.ReduceLROnPlateau
    scheduler_params = dict(
        mode='min',
        factor=0.5,
        patience=1,
        verbose=False,
        threshold=0.0001,
        threshold_mode='abs',
        cooldown=0,
        min_lr=1e-8,
        eps=1e-08
    )
    # --------------------
# -
# ## Model
# +
import argparse
from apex.parallel import DistributedDataParallel
from apex.parallel import convert_syncbn_model
from apex import amp
parser = argparse.ArgumentParser()
# FOR DISTRIBUTED: Parse for the local_rank argument, which will be supplied
# automatically by torch.distributed.launch.
parser.add_argument("--local_rank", default=0, type=int)
args = parser.parse_args()
# FOR DISTRIBUTED: If we are running under torch.distributed.launch,
# the 'WORLD_SIZE' environment variable will also be set automatically.
args.distributed = False
if 'WORLD_SIZE' in os.environ:
args.distributed = int(os.environ['WORLD_SIZE']) > 1
if args.distributed:
# FOR DISTRIBUTED: Set the device according to local_rank.
torch.cuda.set_device(args.local_rank)
# FOR DISTRIBUTED: Initialize the backend. torch.distributed.launch will provide
# environment variables, and requires that you use init_method=`env://`.
torch.distributed.init_process_group(backend='nccl',
init_method='env://')
torch.backends.cudnn.benchmark = True
# +
from effdet import get_efficientdet_config, EfficientDet, DetBenchTrain
from effdet.efficientdet import HeadNet
from torchtools.lr_scheduler import DelayerScheduler
from over9000 import RangerLars
config = get_efficientdet_config('tf_efficientdet_d7')
net = EfficientDet(config, pretrained_backbone=False)
checkpoint = torch.load('efficientdet_d7-f05bf714.pth')
net.load_state_dict(checkpoint)
config.num_classes = 1
config.image_size = 1024
net.class_net = HeadNet(config, num_outputs=config.num_classes, norm_kwargs=dict(eps=.001, momentum=.01))
model = DetBenchTrain(net, config)
#device = torch.device('cuda:0')
#model.to(device)
model.cuda()
param_optimizer = list(model.named_parameters())
no_decay = ['bias', 'LayerNorm.bias', 'LayerNorm.weight']
optimizer_grouped_parameters = [
{'params': [p for n, p in param_optimizer if not any(nd in n for nd in no_decay)], 'weight_decay': 0.001},
{'params': [p for n, p in param_optimizer if any(nd in n for nd in no_decay)], 'weight_decay': 0.0}
]
#optimizer = RangerLars(model.parameters(),lr=TrainGlobalConfig.lr)
optimizer = torch.optim.AdamW(model.parameters(), lr=TrainGlobalConfig.lr)
model = convert_syncbn_model(model)
model, optimizer = amp.initialize(model, optimizer, opt_level="O1")
model = DistributedDataParallel(model, delay_allreduce=True)
scheduler = TrainGlobalConfig.SchedulerClass(optimizer, **TrainGlobalConfig.scheduler_params)
#delay_epochs = 15
#total_epochs = TrainGlobalConfig.n_epochs
#base_scheduler = torch.optim.lr_scheduler.CosineAnnealingLR(optimizer, delay_epochs) # delay the scheduler for 15 steps
#scheduler = DelayerScheduler(optimizer, total_epochs - delay_epochs, base_scheduler)
# -
train_sampler = torch.utils.data.distributed.DistributedSampler(train_dataset)
val_sampler = torch.utils.data.distributed.DistributedSampler(validation_dataset)
train_loader = torch.utils.data.DataLoader(
train_dataset,
batch_size=TrainGlobalConfig.batch_size,
sampler=RandomSampler(train_sampler),
#sampler=train_sampler,
pin_memory=False,
drop_last=True,
num_workers=TrainGlobalConfig.num_workers,
collate_fn=collate_fn,
)
val_loader = torch.utils.data.DataLoader(
validation_dataset,
batch_size=TrainGlobalConfig.batch_size,
num_workers=TrainGlobalConfig.num_workers,
shuffle=False,
sampler=SequentialSampler(val_sampler),
#sampler=val_sampler,
pin_memory=False,
collate_fn=collate_fn,
)
# ## logger
# +
base_dir = f'./{TrainGlobalConfig.folder}'
if not os.path.exists(base_dir):
os.makedirs(base_dir)
log_path = f'{base_dir}/log.txt'
best_summary_loss = 10**5
# +
def log(message):
    """Emit `message` to stdout (when verbose) and append it to the run's
    log file at `log_path`."""
    if TrainGlobalConfig.verbose:
        print(message)
    with open(log_path, 'a+') as sink:
        sink.write(f'{message}\n')
import warnings
warnings.filterwarnings("ignore")
# -
def save(path):
    """Checkpoint the current training state to `path`.

    Bug fix: the original body referenced `self.model`, `self.optimizer`,
    etc., but this is a module-level function with no `self` — any call
    raised NameError. It now uses the module-level training objects, which
    is what the inline checkpointing code in the training loop does too.
    """
    # eval() so normalization buffers are stored in inference-ready form
    model.eval()
    torch.save({
        'model': model.state_dict(),
        'optimizer': optimizer.state_dict(),
        'amp': amp.state_dict(),
        'best_summary_loss': best_summary_loss,
        'epoch': epoch,
    }, path)
epoch = 0
import torch.nn as nn
# +
import torch.distributed as dist
def reduce_tensor(tensor):
    """Average `tensor` across all distributed workers.

    Fixes: the original divided by a hard-coded 2 (correct only for exactly
    two processes) — use the actual world size; `dist.reduce_op` is a
    deprecated alias — use `dist.ReduceOp`.
    """
    rt = tensor.clone()
    dist.all_reduce(rt, op=dist.ReduceOp.SUM)
    rt /= dist.get_world_size()
    return rt
# -
# Main DDP training loop: for each epoch — train with gradient accumulation,
# checkpoint the latest state (rank 0 only), validate, keep the three best
# checkpoints, and step the plateau scheduler on validation loss.
for e in range(TrainGlobalConfig.n_epochs):
    if TrainGlobalConfig.verbose:
        lr = optimizer.param_groups[0]['lr']
        timestamp = datetime.utcnow().isoformat()
        log(f'\n{timestamp}\nLR: {lr}')
    t = time.time()
    model.train()
    summary_loss = AverageMeter()
    for step, (images, targets, image_ids) in enumerate(train_loader):
        ##train##
        if TrainGlobalConfig.verbose:
            if step % TrainGlobalConfig.verbose_step == 0:
                print(
                    f'Train Step {step}/{len(train_loader)}, ' + \
                    f'summary_loss: {summary_loss.avg:.5f}, ' + \
                    f'time: {(time.time() - t):.5f}', end='\r'
                )
        images = torch.stack(images)
        #images = images.to(device).float()
        images = images.cuda().float()
        batch_size = images.shape[0]
        #boxes = [target['boxes'].to(device).float() for target in targets]
        #labels = [target['labels'].to(device).float() for target in targets]
        boxes = [target['boxes'].cuda().float() for target in targets]
        labels = [target['labels'].cuda().float() for target in targets]
        optimizer.zero_grad()
        loss_ori, _, _ = model(images, boxes, labels)
        # scale so accumulated gradients average over the accumulation window
        loss = loss_ori / TrainGlobalConfig.grad_accumulation_steps
        with amp.scale_loss(loss, optimizer) as scaled_loss:
            scaled_loss.backward()
        #loss.backward()
        #nn.utils.clip_grad_value_(model.parameters(), clip_value=2.0)
        #summary_loss.update(reduce_tensor(scaled_loss).detach().item(), batch_size)
        summary_loss.update(reduce_tensor(loss_ori).item(), batch_size)
        # step the optimizer only every grad_accumulation_steps batches
        if (step + 1) % TrainGlobalConfig.grad_accumulation_steps == 0:
            optimizer.step()
            optimizer.zero_grad()
            if TrainGlobalConfig.step_scheduler:
                scheduler.step()
    #optimizer.step()
    log(f'[RESULT]: Train. Epoch: {epoch}, summary_loss: {summary_loss.avg:.5f}, time: {(time.time() - t):.5f}')
    ##train & save##
    model.eval()
    # only rank 0 writes checkpoints to avoid concurrent writers
    if args.local_rank == 0:
        torch.save({
            'model': model.state_dict(),
            'optimizer': optimizer.state_dict(),
            'amp': amp.state_dict()
        }, f'{base_dir}/last-checkpoint.bin')
    ##valid##
    t = time.time()
    model.eval()
    summary_loss_valid = AverageMeter()
    t = time.time()
    for step, (images, targets, image_ids) in enumerate(val_loader):
        if TrainGlobalConfig.verbose:
            if step % TrainGlobalConfig.verbose_step == 0:
                print(
                    f'Val Step {step}/{len(val_loader)}, ' + \
                    f'summary_loss: {summary_loss_valid.avg:.5f}, ' + \
                    f'time: {(time.time() - t):.5f}', end='\r'
                )
        with torch.no_grad():
            images = torch.stack(images)
            batch_size = images.shape[0]
            images = images.cuda().float()
            boxes = [target['boxes'].cuda().float() for target in targets]
            labels = [target['labels'].cuda().float() for target in targets]
            loss, _, _ = model(images, boxes, labels)
            summary_loss_valid.update(reduce_tensor(loss).detach().item(), batch_size)
    log(f'[RESULT]: Val. Epoch: {epoch}, summary_loss: {summary_loss_valid.avg:.5f}, time: {(time.time() - t):.5f}')
    # save a "best" checkpoint on validation improvement, pruning all but
    # the three most recent best checkpoints
    if summary_loss_valid.avg < best_summary_loss:
        best_summary_loss = summary_loss_valid.avg
        model.eval()
        if args.local_rank == 0:
            torch.save({
                'model': model.state_dict(),
                'optimizer': optimizer.state_dict(),
                'amp': amp.state_dict()
            }, f'{base_dir}/best-checkpoint-{str(epoch).zfill(3)}epoch.bin')
            for path in sorted(glob(f'{base_dir}/best-checkpoint-*epoch.bin'))[:-3]:
                os.remove(path)
    if TrainGlobalConfig.validation_scheduler:
        print('summary_loss_valid.avg:',summary_loss_valid.avg)
        scheduler.step(metrics=summary_loss_valid.avg)
    epoch += 1
| training-efficientdet-forward-ddp-grad.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# language: python
# name: python3
# ---
# # Challenge 1
#
# In this challenge you will be working on **pokemons**. You will answer a series of questions in order to practice dataframe calculation, aggregation, and transformation.
#
# 
#
# Follow the instructions below and enter your code.
#
# ### Import all required libraries
# import libraries
# ### Import data set
#
# Import data set `Pokemon.csv` from the `your-code` directory of this lab. Read the data into a dataframe called `pokemon`.
#
# *Data set attributed to [<NAME>](https://www.kaggle.com/abcsds/pokemon/)*
# import data set
# ### Print first 10 rows of `pokemon`
# enter your code here
# When you look at a data set, you often wonder what each column means. Some open-source data sets provide descriptions of the data set. In many cases, data descriptions are extremely useful for data analysts to perform work efficiently and successfully.
#
# For the `Pokemon.csv` data set, fortunately, the owner provided descriptions which you can see [here](https://www.kaggle.com/abcsds/pokemon/home). For your convenience, we are including the descriptions below. Read the descriptions and understand what each column means. This knowledge is helpful in your work with the data.
#
# | Column | Description |
# | --- | --- |
# | # | ID for each pokemon |
# | Name | Name of each pokemon |
# | Type 1 | Each pokemon has a type, this determines weakness/resistance to attacks |
# | Type 2 | Some pokemon are dual type and have 2 |
# | Total | A general guide to how strong a pokemon is |
# | HP | Hit points, or health, defines how much damage a pokemon can withstand before fainting |
# | Attack | The base modifier for normal attacks (eg. Scratch, Punch) |
# | Defense | The base damage resistance against normal attacks |
# | SP Atk | Special attack, the base modifier for special attacks (e.g. fire blast, bubble beam) |
# | SP Def | The base damage resistance against special attacks |
# | Speed | Determines which pokemon attacks first each round |
# | Generation | Number of generation |
# | Legendary | True if Legendary Pokemon False if not |
# ### Obtain the distinct values across `Type 1` and `Type 2`
#
# Exctract all the values in `Type 1` and `Type 2`. Then create an array containing the distinct values across both fields.
# enter your code here
# ### Cleanup `Name` that contain "Mega"
#
# If you have checked out the pokemon names carefully enough, you should have found there are junk texts in the pokemon names which contain "Mega". We want to clean up the pokemon names. For instance, "VenusaurMega Venusaur" should be "Mega Venusaur", and "CharizardMega Charizard X" should be "Mega Charizard X".
# +
# enter your code here
# test transformed data
pokemon.head(10)
# -
# ### Create a new column called `A/D Ratio` whose value equals `Attack` divided by `Defense`
#
# For instance, if a pokemon has the Attack score 49 and Defense score 49, the corresponding `A/D Ratio` is 49/49=1.
# enter your code here
# ### Identify the pokemon with the highest `A/D Ratio`
# enter your code here
# ### Identify the pokemon with the lowest A/D Ratio
# enter your code here
# ### Create a new column called `Combo Type` whose value combines `Type 1` and `Type 2`.
#
# Rules:
#
# * If both `Type 1` and `Type 2` have valid values, the `Combo Type` value should contain both values in the form of `<Type 1>-<Type 2>`. For example, if `Type 1` value is `Grass` and `Type 2` value is `Poison`, `Combo Type` will be `Grass-Poison`.
#
# * If `Type 1` has a valid value but `Type 2` does not, `Combo Type` will be the same as `Type 1`. For example, if `Type 1` is `Fire` whereas `Type 2` is `NaN`, `Combo Type` will be `Fire`.
# enter your code here
# ### Identify the pokemons whose `A/D Ratio` are among the top 5
# enter your code here
# ### For the 5 pokemons printed above, aggregate `Combo Type` and use a list to store the unique values.
#
# Your end product is a list containing the distinct `Combo Type` values of the 5 pokemons with the highest `A/D Ratio`.
# enter your code here
# ### For each of the `Combo Type` values obtained from the previous question, calculate the mean scores of all numeric fields across all pokemons.
#
# Your output should look like below:
#
# 
# enter your code here
| module-2/lab-df-calculation-and-transformation/your-code/challenge-1.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# language: python
# name: python3
# ---
# these are common imports for every notebook
# pandas and numpy are for analysis
# matplotlib and seaborn are for visualization
import matplotlib.pyplot as plt
import numpy as np
import pandas as pd
import seaborn as sns
from scipy.interpolate import InterpolatedUnivariateSpline, UnivariateSpline
# ## Initial Datasets
# read dataset
qb = pd.read_csv('data/qb_yearly.csv')
qb.dtypes
# we don't need a few of these columns
qb = qb.drop(['gs', 'pos', 'pass_cmp_perc'], axis=1)
# drop seasons with less than 100 pass attempts
# this should filter out non-QBs who threw some passes
# as well as very marginal players
qb = qb.loc[qb['pass_att'] >= 100, :]
# +
# rename some columns
renames = {
'source_player_name': 'player',
'source_player_id': 'player_id',
'pass_adj_yds_per_att': 'aya',
'pass_adj_net_yds_per_att': 'anya'
}
qb = qb.rename(columns=renames)
# convert columns to string
qb['player'] = qb['player'].astype('string')
qb['player_id'] = qb['player_id'].astype('string')
# -
# check missing values
qb.loc[qb.isna().any(axis=1), :]
# ## QB Metrics: Adjusted Net Yards Per Attempt
# anya identifies all-time greats like Manning, Brady, Rodgers
# also highlights massive seasons like Mahomes 2018, Ryan 2016, Foles 2013
qb.sort_values('anya', ascending=False).head(10)
# let's look at how anya is distributed
# we have 960 QB seasons
# 25th percentile is is 4.6, median is 5.5, 75th is 6.44
qb['anya'].describe()
# looks like anya is normally distributed
# skew and kurtosis near zero, histogram looks normal
from scipy.stats import skew, kurtosis
print(kurtosis(qb['anya']))
print(skew(qb['anya']))
qb['anya'].hist()
# ## Create Age Curves with "Delta Method"
# ### Unadjusted Delta Method
# delta method starts with calculating the change or delta in a metric
# from one year to the next
# here, we will start with adjusted net yards per attempt
# will be easier if we sort the data at the beginning
qb = qb.sort_values(['player_id', 'season_year'])
# create two new columns
# anya_lag shows the anya from the previous year
# anya_d shows the change in anya from the previous year
# a positive anya_d means improved, negative means regressed
qb['anya_lag'] = qb.groupby(['player_id'])['anya'].shift(1)
qb['anya_d'] = qb['anya'] - qb['anya_lag']
# the delta method doesn't allow for gaps in seasons
# so we also need to measure the change in season_year
qb['season_lag'] = qb.groupby(['player_id'])['season_year'].shift(1)
qb['season_d'] = qb['season_year'] - qb['season_lag']
# now we can filter out the na rows
# which are the first row of that player in the dataset
qb = qb.loc[~qb.isna().any(axis=1), :]
# we can also filter out rows where season_d > 1
# so we ensure consecutive seasons
qb = qb.loc[qb['season_d'] == 1, :]
# now we'll make a dataframe of age and anya_d
qb_age_curve = (
qb.groupby('age')['anya_d']
.agg(['count', 'mean'])
.reset_index()
)
qb_age_curve.plot(x='age', y='mean', kind='scatter')
# ### Weighted Delta Method
# as before, we will use adjusted net yards / attempt as the metric
# will be easier if we sort the data at the beginning
# that way we can visually see the lag
qb = qb.sort_values(['player_id', 'season_year'])
# create two new columns
# anya_lag shows the anya from the previous year
# anya_d shows the change in anya from the previous year
# a positive anya_d means improved, negative means regressed
qb['anya_lag'] = qb.groupby(['player_id'])['anya'].shift(1)
qb['anya_d'] = qb['anya'] - qb['anya_lag']
# the delta method doesn't allow for gaps in seasons
# so we also need to measure the change in season_year
qb['season_lag'] = qb.groupby(['player_id'])['season_year'].shift(1)
qb['season_d'] = qb['season_year'] - qb['season_lag']
# now we can filter out the na rows
# which are the first row of that player in the dataset
qb = qb.loc[~qb.isna().any(axis=1), :]
# we can also filter out rows where season_d > 1
# so we ensure consecutive seasons
qb = qb.loc[qb['season_d'] == 1, :]
# Weighted mean of anya_d per age: each season is weighted by its pass
# attempts, so high-volume seasons contribute more to the average.
qb_age_curve['anya_d_wm'] = (
    qb
    .groupby('age')
    .apply(lambda df_: np.average(df_.anya_d, weights=df_.pass_att))
)
qb_age_curve
# Bug fix: the original plotted y='weighted_mean', a column that was never
# created (KeyError); the column assigned above is 'anya_d_wm'.
qb_age_curve.reset_index().plot(x='age', y='anya_d_wm', kind='scatter')
# +
# polynomial fit
# -
# Cubic polynomial fit of mean anya_d by age.
# Bug fixes: the original read `qb_age_curve.anya_d_mean`, but after
# .agg(['count', 'mean']).reset_index() the mean column is named 'mean' and
# 'age' is a regular column (the index is a RangeIndex); it also plotted
# undefined names `x`/`y` (NameError). Define x/y from the frame instead.
x = qb_age_curve['age']
y = qb_age_curve['mean']
poly_params = np.polyfit(x, y, 3)
poly_3 = np.poly1d(poly_params)
xpoly = np.linspace(x.min(), x.max(), 100)
ypoly = poly_3(xpoly)
plt.plot(x, y, 'o', xpoly, ypoly)
# ## Create Age Curves with Peak Method
# +
# idea here is to identify the player's peak year and then
# express every other season as a % of the player's peak
# so if Manning's best season was 10 aya
# a season with 9.2 aya would be 92 (we are using 1-100 scale)
# -
# as before, we will use adjusted net yards / attempt as the metric
# will be easier if we sort the data at the beginning
# that way we can visually check the calculations
qb = qb.sort_values(['player_id', 'season_year'])
# create two new columns
# peak shows the maximum anya for the player
# normally, groupby produces one row per group
# but we want the peak value for every row
# transform produces a series of the same length as the original series
# so if there are 5 Aikman rows, it sets the peak in all of those rows
display(qb.groupby(['player_id'])['anya'].max().head())
display(qb.groupby(['player_id'])['anya'].transform('max').head())
qb['peak'] = qb.groupby(['player_id'])['anya'].transform('max')
# anya_d expresses this row's anya as a percentage of the player's peak
# (floored to an int on the 1-100 scale) — a ratio, not a literal difference
from math import floor
qb['anya_d'] = qb.apply(lambda df_: floor((df_.anya / df_.peak) * 100), axis=1)
# now we'll make a dataframe of age and anya_d
# we want to use the weighted average of anya_d
# meaning that a QB that throws 600 passes will contribute
# more to the average than one who throws 350 passes.
# (this cell computes the unweighted count/mean; the weighted version follows)
qb_age_curve = (
    qb.query('(age > 21) & (age < 40)')
    .groupby('age')
    .agg({'anya_d': ['count', 'mean']})
)
# flatten the (column, stat) MultiIndex into names like 'anya_d_mean'
qb_age_curve.columns = ['_'.join([el for el in c if el])
                        for c in qb_age_curve.columns.to_flat_index()]
# cubic polynomial fit of mean anya_d by age
poly_params = np.polyfit(qb_age_curve.index, qb_age_curve.anya_d_mean, 3)
poly_3 = np.poly1d(poly_params)
# NOTE(review): `x` and `y` here still come from earlier cells — verify they
# match qb_age_curve.index / anya_d_mean, otherwise scatter and fit disagree.
xpoly = np.linspace(x.min(), x.max(), 100)
ypoly = poly_3(xpoly)
fig, ax = plt.subplots(figsize=(9, 5))
plt.plot(x, y, 'o', xpoly, ypoly)
plt.xticks(range(21, 40))
# try the same plot with a spline
x = qb_age_curve.index
y = qb_age_curve['anya_d_mean']
# smoothing spline (s controls the smoothing tradeoff)
spl = UnivariateSpline(x, y, s=25)
xx = np.linspace(x.min(), x.max(), 100)
plt.plot(x, y, 'bo', xx, spl(xx))
# interpolating spline passes through every data point exactly
x = qb_age_curve.index
y = qb_age_curve['anya_d_mean']
spl = InterpolatedUnivariateSpline(x, y)
xx = np.linspace(x.min(), x.max(), 100)
plt.plot(x, y, 'bo', xx, spl(xx))
# weighted mean
# weight each age's anya_d by pass attempts so high-volume seasons count more
qb_age_curve['anya_d_wm'] = (
    qb
    .groupby('age')
    .apply(lambda df_: np.average(df_.anya_d, weights=df_.pass_att))
)
x = qb_age_curve.index
y = qb_age_curve.anya_d_wm
# cubic polynomial fit of the weighted means
poly_params = np.polyfit(x, y, 3)
poly_3 = np.poly1d(poly_params)
xx = np.linspace(x.min(), x.max(), 100)
yy = poly_3(xx)
fig, ax = plt.subplots(figsize=(9, 5))
plt.plot(x, y, 'o', xx, yy)
plt.xticks(range(21, 40))
# try the same plot with a spline
x = qb_age_curve.index
y = qb_age_curve['anya_d_wm']
spl = UnivariateSpline(x, y, s=25)
xx = np.linspace(x.min(), x.max(), 100)
yy = spl(xx)
fig, ax = plt.subplots(figsize=(9, 5))
plt.plot(x, y, 'o', xx, yy)
plt.xticks(range(21, 40))
# interpolating spline (no smoothing)
x = qb_age_curve.index
y = qb_age_curve['anya_d_wm']
spl = InterpolatedUnivariateSpline(x, y)
xx = np.linspace(x.min(), x.max(), 100)
yy = spl(xx)
fig, ax = plt.subplots(figsize=(9, 5))
plt.plot(x, y, 'o', xx, yy)
plt.xticks(range(21, 40))
# ## Helper Functions
# calculate fantasy points
def qb_points(row, add_bonus=False):
    """Calculates qb fantasy points from a dataframe row.

    Scoring: 1 pt per 25 passing yards (0.04/yd), 4 per passing TD,
    -1 per INT, 1 pt per 10 rushing yards, 6 per rushing TD.
    NOTE: our dataset does not have fumbles.
    """
    total = (row.pass_yds * .04
             + row.pass_td * 4
             - row.pass_int
             + row.rush_yds * .10
             + row.rush_td * 6)
    # optional DraftKings-style 300-yard passing bonus
    bonus = 3 if add_bonus and row.pass_yds >= 300 else 0
    return total + bonus
# add fantasy points
def add_fantasy_points(df):
    """Adds fantasy points columns to dataframe.

    'fpts' is standard scoring; 'dkpts' includes the 300-yard passing bonus.
    Mutates df in place and returns it.
    """
    df['fpts'] = df.apply(lambda row: qb_points(row), axis=1)
    df['dkpts'] = df.apply(lambda row: qb_points(row, add_bonus=True), axis=1)
    return df
def yearly_stats(df):
    """Aggregate per-game rows to season totals, one row per
    (nflid, player, season_year)."""
    stat_columns = [
        'pass_att', 'pass_cmp', 'pass_int', 'pass_td', 'pass_yds',
        'rush_att', 'rush_td', 'rush_yds', 'air_yards', 'fpts', 'dkpts',
    ]
    grouped = df.groupby(['nflid', 'player', 'season_year'])
    return grouped[stat_columns].sum()
def age_as_of_game(df):
    """Player age as of game date"""
    def _age_in_years(row):
        # timedelta in days divided by 365 gives (approximate) fractional years
        return (row.game_date - row.birthdate).days / 365
    return df.apply(_age_in_years, axis=1)
def age_as_of_season(df):
    """Player age as of season start (Sept 1)"""
    # create index that is cross join of nflid and seasons
    idx = pd.MultiIndex.from_product(
        [df.nflid.unique(), df.season_year.unique()],
        names = ["nflid", "season_year"]
    )
    # NOTE(review): pd.DataFrame(idx) builds columns from the index levels and
    # reset_index() then adds a spurious integer 'index' column; the join below
    # matches df's *index* against the new frame's 'nflid' column — verify this
    # produces the intended cross join (pd.DataFrame(index=idx) is the usual
    # pattern) and that duplicate 'season_year' columns don't collide.
    df = pd.DataFrame(idx).reset_index().join(df, how='left', on='nflid')
    return (
        df
        # season start is defined as Sept 1 of the season year
        .assign(start_date=lambda df_: df_.season_year.apply(lambda x: datetime(x, 9, 1)))
        # fractional age in years: days between birthdate and season start / 365
        .assign(age=lambda df_: df_.apply(lambda row: (row.start_date - row.birthdate).days / 365, axis=1))
        .drop(['birthdate', 'start_date'], axis=1)
        .set_index(['nflid', 'season_year'])
    )
| Chapter 4 - Quarterback Performance-Copy1.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: py37
# language: python
# name: py37
# ---
# ### CIFAR Image Classification with 0-augmented Neural ODEs
# +
import torch
import torch.nn as nn
from torch.utils.data import DataLoader
from torchvision import datasets, transforms
import pytorch_lightning as pl
from pytorch_lightning.loggers import WandbLogger
from pytorch_lightning.metrics.functional import accuracy
from utils import get_cifar_dloaders, CIFARLearner
from torchdyn.models import *; from torchdyn import *
# -
# use GPU when available, otherwise fall back to CPU
device = torch.device("cuda:0" if torch.cuda.is_available() else "cpu")
trainloader, testloader = get_cifar_dloaders(batch_size=64)
# #### Define the model
# +
# vector field of the neural ODE: a conv net over 42 channels
# (3 CIFAR channels + 39 zero-augmented channels added by Augmenter below)
func = nn.Sequential(nn.GroupNorm(42, 42),
                     nn.Conv2d(42, 42, 3, padding=1, bias=False),
                     nn.Softplus(),
                     nn.Conv2d(42, 42, 3, padding=1, bias=False),
                     nn.Softplus(),
                     nn.GroupNorm(42, 42),
                     nn.Conv2d(42, 42, 1)
                     ).to(device)

# adaptive-step dopri5 solver with adjoint sensitivity, integrated on t in [0, 1]
nde = NeuralDE(func,
               solver='dopri5',
               sensitivity='adjoint',
               atol=1e-4,
               rtol=1e-4,
               s_span=torch.linspace(0, 1, 2)).to(device)

# NOTE: the first noop `Augmenter` is used only to keep the `nde` at index `2`. Used to extract NFEs in CIFARLearner.
model = nn.Sequential(Augmenter(1, 0), # does nothing
                      Augmenter(1, 39),
                      nde,
                      nn.Conv2d(42, 6, 1),
                      nn.AdaptiveAvgPool2d(4),
                      nn.Flatten(),
                      nn.Linear(6*16, 10)).to(device)
# +
learn = CIFARLearner(model, trainloader, testloader)
trainer = pl.Trainer(max_epochs=20, gpus=1)
trainer.fit(learn)
| dissecting-neural-odes/image_classification/cifar_zero_aug.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# name: python3
# ---
# + [markdown] id="eMdSKtYSCyuK" colab_type="text"
# **20 Basic Numpy Exercises - From Array Creation to making an array Immutable**
# + id="FiVH4UMGCqbo" colab_type="code" colab={}
import numpy as np
# + [markdown] id="GY3gfKrDC71b" colab_type="text"
# **1. Check your version**
# + id="g2p4mkoyC6PL" colab_type="code" outputId="701aa4df-b7de-4c77-babe-8b316edd69af" colab={"base_uri": "https://localhost:8080/", "height": 35}
np.__version__
# + [markdown] id="MDgUf_0jDDc9" colab_type="text"
# **2. Create a 3x3 Array filled with zeroes**
# + id="D4TFoOtHDBWm" colab_type="code" outputId="81115347-b945-4855-da6a-2d607c7b7b10" colab={"base_uri": "https://localhost:8080/", "height": 69}
arr_0 = np.zeros((3,3))
arr_0
# + [markdown] id="omy3QhstDdmB" colab_type="text"
# **3. Create a 3x3 Array filled with the number 7**
# + id="cbiiRotODZrQ" colab_type="code" outputId="04172a7b-f51a-48e4-fac3-5b329eb52084" colab={"base_uri": "https://localhost:8080/", "height": 69}
arr_7 = np.full((3,3),7)
arr_7
# + [markdown] id="tRj_0nqGDo54" colab_type="text"
# **4. Create a vector of at least size 5 filled with Random Values between -1 and 1**
# + id="rBNcPivfDlIB" colab_type="code" outputId="7bf80602-2f4c-4a78-f607-0c1505476b98" colab={"base_uri": "https://localhost:8080/", "height": 35}
v = np.random.uniform(-1,1,(1,5))
v
# + [markdown] id="oX8wMvKHEpj7" colab_type="text"
# **5. Multiply the two matricies (A and B) below. (Assign the answer to the variable ANS) - Run the cell below to check your answer**
# + id="zmDsMgngET3Z" colab_type="code" colab={}
A = np.array([[.15,.55,.7]])
B = np.array([[1,2,3],[4,5,6],[7,8,9]])
# + id="8FZ9VQoEFeT2" colab_type="code" colab={}
ans = np.dot(A,B)
# + id="kOj-s1Zh1jGQ" colab_type="code" outputId="6494849e-65a0-4388-d72c-55651b66bf92" colab={"base_uri": "https://localhost:8080/", "height": 35}
# Checking the answer!
# NOTE(review): exact float == comparison is fragile; np.isclose would be safer
assert np.sum(ans) == 25.95,'Try again'
print('Good job!', ans)
# + [markdown] id="0YnJi3rJGrtI" colab_type="text"
# **6. Combine the [ans] array created in #5 with the following 2 arrays below.**
# + id="e2PYHY9B13l_" colab_type="code" colab={}
first_row = np.array([[0.1, 0.1, 9.9]])
last_row = np.array([[9.9, 9.9, 0.1]])
# + id="clumLdOIGBWF" colab_type="code" outputId="1589e4bc-b479-4eed-8632-9791d7d1c3fd" colab={"base_uri": "https://localhost:8080/", "height": 69}
new_ans = np.vstack((first_row, ans ,last_row))
new_ans
# + id="loiKI3GK3BeE" colab_type="code" outputId="1e742bcd-41c5-4227-bb46-11b52678134a" colab={"base_uri": "https://localhost:8080/", "height": 69}
new_ans = np.concatenate((first_row, ans ,last_row),axis=0)
new_ans
# + [markdown] id="EL27ehnpJeUH" colab_type="text"
# **7. Swap the first and last columns in the combined array.**
# + id="yv4zks8KJjQ2" colab_type="code" outputId="59f85011-612e-4402-a2df-771867a3b74f" colab={"base_uri": "https://localhost:8080/", "height": 69}
# reorder columns [0, 1, 2] -> [2, 1, 0] by concatenating reshaped column
# slices (fancy indexing new_ans[:, ::-1] would achieve the same)
swapped_ans = np.concatenate(
    (new_ans[:,2].reshape(3,1),new_ans[:,1].reshape(3,1),new_ans[:,0].reshape(3,1)),axis=1)
swapped_ans
# + [markdown] id="O7Sx84hyJj50" colab_type="text"
# **8. Swap the first and the middle row in the combined array**
# + id="1Vf7zinuJklS" colab_type="code" outputId="e7ae2766-3ca3-42d0-81e4-f95b34d6e24f" colab={"base_uri": "https://localhost:8080/", "height": 69}
row_swap = np.vstack((swapped_ans[1,:],swapped_ans[0,:],swapped_ans[2,:]))
row_swap
# + [markdown] id="kVsk92t7HENc" colab_type="text"
# **9. Save the combined array from #8 to file.**
# + id="-X4QNhXU7lRK" colab_type="code" colab={}
np.savetxt('my_txt_array.txt', row_swap, delimiter=' ')
# + id="IQRs0uhmHMtW" colab_type="code" colab={}
np.savetxt('my_csv_array.csv', row_swap, delimiter=',')
# + [markdown] id="mBw0SDE7HNTi" colab_type="text"
# **10. Load the saved array from file into a new array.**
# + id="Hl2mJaWIHUaU" colab_type="code" colab={"base_uri": "https://localhost:8080/", "height": 69} outputId="c71cbf06-e92f-400a-d4b9-3d9214017cb8"
load_txt = np.loadtxt('my_txt_array.txt')
load_txt
# + id="HubTnmE67760" colab_type="code" colab={"base_uri": "https://localhost:8080/", "height": 69} outputId="ebf8071f-d9d6-4dc2-9bdd-ea97cfbecd97"
load_csv = np.genfromtxt('my_csv_array.csv', delimiter=',')
load_csv
# + [markdown] id="HiZmZHxvHUzF" colab_type="text"
# **11. Convert all array elements to float32**
# + id="AUPnyfl3Htsn" colab_type="code" colab={}
fp32_arr = row_swap.astype(np.float32)
# + [markdown] id="jeRWPICJHuCc" colab_type="text"
# **12. Find the mean of the middle row of the following array.**
# + id="y2sU7jBcVSee" colab_type="code" colab={"base_uri": "https://localhost:8080/", "height": 138} outputId="b7f35130-0765-44f0-cdd5-8bce4c438b27"
# For Questions 12-19 use the following array
arr = np.random.randint(1,10,(7,7))
arr
# + id="GXC-rDnQH6A0" colab_type="code" colab={"base_uri": "https://localhost:8080/", "height": 35} outputId="85a60323-f523-4537-b149-22a402e19c48"
arr[len(arr)//2,:].mean()
# + [markdown] id="5EbuyanKH5Wp" colab_type="text"
# **13. Find the Variance of the array.**
# + id="BLL70Jz-H-GE" colab_type="code" colab={"base_uri": "https://localhost:8080/", "height": 35} outputId="377a9324-2935-4c88-b7d5-f568de91a650"
arr.var()
# + [markdown] id="AJNCtHopH_A0" colab_type="text"
# **14. Slice the array into a new array of just it's middle column.**
# + id="UUXFbAGXIGlt" colab_type="code" colab={"base_uri": "https://localhost:8080/", "height": 35} outputId="667435c3-46c7-4335-8f4f-332e366c0f12"
arr[:,len(arr[0])//2]
# If you need it to still be the same shape, you would want to reshape it with
# .reshape(len(arr[0]),1)
# + [markdown] id="LHarnAoYIaXz" colab_type="text"
# **15. Create a new array of all the elements on the left-to-right, top-to-bottom diagonal.**
# + id="8Qj9--CmUSF3" colab_type="code" colab={"base_uri": "https://localhost:8080/", "height": 35} outputId="2b6c4932-0094-4eec-999f-ed1689f25aaf"
np.diag(arr)
# + [markdown] id="Hz3DyOHrInL_" colab_type="text"
# **16. Find the most frequently occurring value in the array.**
# + id="obOjRKimIm9x" colab_type="code" colab={"base_uri": "https://localhost:8080/", "height": 35} outputId="e48dcb19-4e17-449c-b556-b3c093e617f8"
# bincount counts occurrences of each value; argmax picks the most frequent
np.argmax(np.bincount(arr.flatten()))
# + [markdown] id="iEaiGazWIuho" colab_type="text"
# **17. Normalize the array**
# + id="MdlWxvFbI9s1" colab_type="code" colab={"base_uri": "https://localhost:8080/", "height": 259} outputId="014dc2d2-766b-41b4-c43b-fa3cc3e8ea6a"
# max-scaling: divide by the array maximum so values fall in (0, 1]
norm_arr = arr / np.max(arr)
norm_arr
# + [markdown] id="IKYO0V5-JKbV" colab_type="text"
# **18. Round the normalized array down to the nearest int.**
# + id="JZH3ehC4JZ5a" colab_type="code" colab={"base_uri": "https://localhost:8080/", "height": 138} outputId="8861b56e-dbe3-4090-8b68-3a351e4e1c9c"
np.floor(norm_arr)
# + [markdown] id="wtHZA8xlI-Fj" colab_type="text"
# **19. Make the array immutable**
# + id="XNOyy_jAJJrA" colab_type="code" colab={}
norm_arr.flags.writeable = False
# + [markdown] id="EWXWqrsvIH0h" colab_type="text"
# **20. What do you get when you multiply the following arrays `[True, True, False] * [False, False, True]`?**
# + id="pPtmWCHiJBIk" colab_type="code" colab={"base_uri": "https://localhost:8080/", "height": 69} outputId="43a51c8a-3caf-4731-bba0-b4d03d3d3935"
arr = np.array([1,1,0])
# build two complementary boolean arrays
f_arr = arr == 1
s_arr = arr < 1
print(f_arr, s_arr)
print()
# elementwise product of booleans acts as logical AND -> all False here
print(f_arr * s_arr)
| numpy/Numpy_Exercises - Solutions.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3 (ipykernel)
# language: python
# name: python3
# ---
# +
def somme_carrees(*nombres):
    """Return the sum of the squares of all arguments."""
    return sum(valeur * valeur for valeur in nombres)

somme_carrees(0,1,2,3,4,5,6,7,8,9)
# +
# demo of the two iteration styles; `liste` is empty so neither loop runs
liste = []
# index-based iteration (elementEnCours = "current element")
for indice in range(len(liste)):
    elementEnCours = liste[indice]
# direct iteration over the elements
for element in liste:
    elementEnCours = element
    pass
# -
# <strong>Résolution des équations du second dégré.</strong>
# +
import math
def resoudre(**equation):
    """Solve the quadratic equation a*x^2 + b*x + c = 0 over the reals.

    Expects keyword arguments 'a', 'b' and 'c'.
    Returns (x1, x2) when the discriminant is positive, (x12,) when it is
    zero, and None (after printing a message) when there is no real root.
    """
    a = equation['a']
    b = equation['b']
    c = equation['c']
    # discriminant of the quadratic
    delta = (b * b) - (4 * a * c)
    if delta > 0:
        x1 = (-b + math.sqrt(delta)) / (2 * a)
        x2 = (-b - math.sqrt(delta)) / (2 * a)
        return (x1, x2)
    elif delta == 0:
        x12 = -b / (2 * a)
        return (x12,)
    else:
        # fixed: was an f-string with no placeholders (pointless f prefix);
        # made the None return explicit
        print('Solution impossible dans R')
        return None
# -
resoudre(c=1,b=2,a=1)
| Jour 4/resolution exercices.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# language: python
# name: python3
# ---
# # Data Preprocessing
#
# >1. filter out non-US countries
# >2. fill 'NaN' Province/State values with Country/Region values
# >3. (optionally) apply log transformation to target values
# +
# load and clean data
import pandas as pd
train_all = pd.read_csv("/root/data/train.csv")
def preprocess(
    frame: pd.DataFrame,
    log_transform: bool = False
):
    """Filter to US rows, index by 'Date', optionally log1p-transform targets.

    Args:
        frame: raw training frame with 'Date', 'Country/Region',
            'ConfirmedCases' and 'Fatalities' columns.
        log_transform: when True, replace the two target columns with
            log1p of their values.
    Returns:
        a new Date-indexed frame restricted to 'Country/Region' == 'US'.
    """
    # fixed: numpy is only imported much later in this notebook, so the
    # log_transform=True path raised NameError; import locally instead
    import numpy as np

    # set index
    new_frame = frame.set_index('Date')
    # filter countries
    new_frame = new_frame[new_frame['Country/Region'] == 'US']
    # convert target values to log scale
    if log_transform:
        new_frame[['ConfirmedCases', 'Fatalities']] = np.log1p(
            new_frame[['ConfirmedCases', 'Fatalities']].values
        )
    return new_frame
def split(
    df: pd.DataFrame,
    date: str = '2020-03-20'
):
    """Split a Date-indexed frame into (rows before `date`, rows on/after)."""
    before_mask = df.index < date
    return df.loc[before_mask], df.loc[~before_mask]
# keep only data from 2020-03-01 on, then split train/test at 2020-03-20
train_all = preprocess(train_all)
_, train_all = split(train_all, date = '2020-03-01')
train, test = split(train_all)
# -
# # Data Exploration
# +
# plot confirmed cases and fatalities in train
import matplotlib.pyplot as plt
from gluonts.dataset.util import to_pandas
from gluonts.dataset.common import ListDataset
# cumulative (summed across states) train/test series by date
cum_train = train.groupby('Date').sum()
cum_test = test.groupby('Date').sum()

def plot_observations(
    target: str = 'ConfirmedCases'
):
    """Plot the cumulative US `target` series: train portion, then the test
    portion appended after the train/test boundary (vertical purple line)."""
    fig = plt.figure(figsize=(15, 6.1), facecolor="white", edgecolor='k')
    # wrap the pandas series as single-entry GluonTS ListDatasets (daily freq)
    train_ds = ListDataset(
        [{"start": cum_train.index[0], "target": cum_train[target].values}],
        freq = "D",
    )
    test_ds = ListDataset(
        [{"start": cum_test.index[0], "target": cum_test[target].values}],
        freq = "D",
    )
    for tr, te in zip(train_ds, test_ds):
        tr = to_pandas(tr)
        te = to_pandas(te)
        tr.plot(linewidth=2, label = f'train {target}')
        # prepend the last train point so the two line segments connect
        tr[-1:].append(te).plot(linewidth=2, label = f'test {target}')
    plt.axvline(cum_train.index[-1], color='purple') # end of train dataset
    plt.title(f'Cumulative number of {target} in the US', fontsize=16)
    plt.legend(fontsize=16)
    plt.grid(which="both")
    plt.show()

plot_observations('ConfirmedCases')
plot_observations('Fatalities')
# -
# # Data Augmentation
#
# >1. Categorical feature for 'Province/State'
# 2. Categorical feature for 'Country/Region'
#
# +
from sklearn.preprocessing import OrdinalEncoder
# state-level healthcare CSV exports; header=2 skips the title rows and
# skipfooter trims the footnotes at the bottom of each file
expend = pd.read_csv(
    '/root/data/state_level_data/healthcare_expenditures_2014.csv',
    header=2,
    skipfooter=16,
    engine='python'
)
# strip the leading '$' and convert dollar amounts to int
expend['Total Health Spending'] = expend['Total Health Spending'].str.lstrip('$').astype(int)

expend_per_c = pd.read_csv(
    '/root/data/state_level_data/healthcare_expenditures_per_capita_2014.csv',
    header=2,
    skipfooter=34,
    engine='python'
)
expend_per_c = expend_per_c.applymap(lambda x: x.lstrip('$'))
# NOTE(review): assigning .astype(int) into a .loc column slice may not change
# the stored dtypes (chained-assignment caveat) — verify dtypes downstream
expend_per_c.loc[:, expend_per_c.columns != 'Location'] = expend_per_c.loc[:, expend_per_c.columns != 'Location'].astype(int)

private = pd.read_csv(
    '/root/data/state_level_data/private_health_spending_2014.csv',
    header=2,
    skipfooter=13,
    engine='python'
)
private['Total Private Health Insurance Spending'] = private['Total Private Health Insurance Spending'].str.lstrip('$').astype(int)

private_per_c = pd.read_csv(
    '/root/data/state_level_data/private_spending_per_capita_2014.csv',
    header=2,
    skipfooter=16,
    engine='python'
)
private_per_c['Per Capita Private Health Insurance Spending'] = private_per_c['Per Capita Private Health Insurance Spending'].str.lstrip('$').astype(int)

hospital = pd.read_csv(
    '/root/data/state_level_data/hospital_expenses_2018.csv',
    header = 2,
    skipfooter=14,
    engine='python'
)
# impute missing hospital-expense values with the column mode
hospital.loc[hospital['State/Local Government Hospitals'].isna(), 'State/Local Government Hospitals'] = \
    hospital.loc[~hospital['State/Local Government Hospitals'].isna(), 'State/Local Government Hospitals'].mode()[0]
hospital.loc[hospital['For-Profit Hospitals'].isna(), 'For-Profit Hospitals'] = \
    hospital.loc[~hospital['For-Profit Hospitals'].isna(), 'For-Profit Hospitals'].mode()[0]
hospital = hospital.applymap(lambda x: x.lstrip('$'))
hospital.loc[:, hospital.columns != 'Location'] = hospital.loc[:, hospital.columns != 'Location'].astype(int)
def join(
    df: pd.DataFrame,
    health_df: pd.DataFrame
):
    """Inner-join state-level health data onto df by state name.

    `df` is Date-indexed with a 'Province/State' column; `health_df` carries
    a 'Location' column. The merge key from health_df is dropped and the
    Date index restored. Asserts that no nulls remain after the join.
    """
    merged = (
        df.reset_index()
        .merge(health_df,
               left_on='Province/State',
               right_on='Location',
               how='inner')
        .set_index('Date')
        .drop(columns=['Location'])
    )
    assert not merged.isnull().sum().any()
    return merged
def encode(
    df: pd.DataFrame
):
    """Ordinal-encode the 'Province/State' column in place.

    Returns the mutated frame together with the fitted OrdinalEncoder so
    callers can map state names back and forth.
    """
    encoder = OrdinalEncoder()
    state_values = df['Province/State'].values.reshape(-1, 1)
    df['Province/State'] = encoder.fit_transform(state_values)
    return df, encoder
join_df = join(train_all, expend)
join_df = join(join_df, expend_per_c)
join_df = join(join_df, private)
join_df = join(join_df, private_per_c)
join_df = join(join_df, hospital)
all_df, enc = encode(join_df)
train_df, test_df = split(all_df)
_, val_df = split(all_df, date = '2020-03-15')
# +
from gluonts.dataset.common import ListDataset
from gluonts.dataset.field_names import FieldName
import typing
# columns that are not static real-valued model features
not_real_cols = ['Id', 'Province/State', 'Country/Region', 'Lat', 'Long', 'ConfirmedCases', 'Fatalities']
REAL_VARS = [col for col in all_df.columns if col not in not_real_cols]

def build_dataset(
    frame: pd.DataFrame,
    target: str = 'Fatalities',
    cat_vars: typing.List[str] = ['Province/State'],
    real_vars: typing.List[str] = REAL_VARS
):
    """Build a GluonTS ListDataset with one daily series per state, attaching
    static categorical (state code) and static real (health-spending) features."""
    return ListDataset(
        [
            {
                FieldName.START: df.index[0],
                FieldName.TARGET: df[target].values,
                FieldName.FEAT_STATIC_CAT: df[cat_vars].values[0],
                FieldName.FEAT_STATIC_REAL: df[real_vars].values[0]
            }
            for g, df in frame.groupby(by=['Province/State'])
        ],
        freq = "D",
    )

# datasets per target, built on train-only and on all data
training_data_fatalities = build_dataset(train_df)
training_data_cases = build_dataset(train_df, target = 'ConfirmedCases')
training_data_fatalities_all = build_dataset(all_df)
training_data_cases_all = build_dataset(all_df, target = 'ConfirmedCases')
val_data_fatalities = build_dataset(val_df)
val_data_cases = build_dataset(val_df, target = 'ConfirmedCases')
# -
# # Fit DeepAR Model Estimates
#
# The DeepAR model was proposed by <NAME>, <NAME>, and <NAME> in "DeepAR: Probabilistic Forecasting with Autoregressive Recurrent Networks" (https://arxiv.org/abs/1704.04110). The approach trains an autoregressive RNN to produces time-variant parameters of a specified distribution on a large collection of related time series. The learned distribution can then be used to produce probabilistic forecasts. Here we use the authors' *GluonTS* implementation (https://gluon-ts.mxnet.io/index.html).
#
# We believe the probabilistic nature of the DeepAR forecasts is a feature that differentiates our approach from others we have seen so far. Specifically, the ability to provide both confidence intervals and point estimates allows one to better understand the range of possible trajectories, from the worst-case scenario, to the best-case scenario, to the expected scenario.
#
# *TODO:*
# > 1. Experiment with adding learned Box Cox transformation before learning Negative Binomial paramaters?
# +
from gluonts.model.deepar import DeepAREstimator
from gluonts.trainer import Trainer
from gluonts.distribution import NegativeBinomialOutput
import mxnet as mx
import numpy as np
# set random seeds for reproducibility
mx.random.seed(42)
np.random.seed(42)

def fit(
    training_data: ListDataset,
    validation_data: ListDataset = None,
    use_real_vars: bool = True,
    pred_length: int = 5,
    epochs: int = 20,
    weight_decay: float = 5e-5
):
    """Train a DeepAR model with NegativeBinomial output on daily data.

    Returns (predictor, trained_net) — callers must unpack or index [0].
    Reads the module-level `train` frame for the state cardinality.
    """
    estimator = DeepAREstimator(
        freq="D",
        prediction_length=pred_length,
        use_feat_static_real = use_real_vars,
        use_feat_static_cat = True,
        # cardinality of the single categorical feature (number of states)
        cardinality = [train['Province/State'].nunique()],
        distr_output=NegativeBinomialOutput(),
        trainer=Trainer(
            epochs=epochs,
            learning_rate=0.001,
            batch_size=64,
            weight_decay=weight_decay
        ),
    )
    _, trained_net, predictor = estimator.train_model(
        training_data = training_data,
        validation_data = validation_data
    )
    return predictor, trained_net

#predictor_fatalities, net = fit(training_data_fatalities, val_data_fatalities, use_real_vars = False)
#predictor_fatalities_aug, aug_net = fit(training_data_fatalities, val_data_fatalities)
#predictor_cases = fit(training_data_cases, val_data_cases, epochs=20)
# NOTE(review): only the fit below is active — later cells that reference
# predictor_fatalities, predictor_fatalities_aug or net will raise NameError.
predictor_fatalities_all = fit(training_data_fatalities_all, pred_length=7, use_real_vars = False)
#predictor_cases_all = fit(training_data_cases_all, pred_length=7)
# -
# # Plot predictions from fit model parameters
# +
from gluonts.dataset.util import to_pandas
import matplotlib.pyplot as plt
from typing import List
def plot_forecast(
    predictor,
    location: str = 'New York',
    target: str = 'Fatalities',
    cat_vars: str = 'Province/State',
    real_vars: typing.List[str] = REAL_VARS,
    log_preds: bool = False,
    fontsize: int = 16
):
    """Plot one state's training series plus the predictor's probabilistic
    forecast (50%/90% intervals); set log_preds=True if the model was trained
    on log1p-transformed targets."""
    fig = plt.figure(figsize=(15, 6.1), facecolor="white", edgecolor='k')
    # plot train observations, true observations from public test set, and forecasts
    # map the state name to the ordinal code used in the encoded frames
    location_tr = enc.transform(np.array(location).reshape(1,-1))[0]
    tr_df = train_df[train_df['Province/State'].values == location_tr]
    train_obs = ListDataset(
        [{
            FieldName.START: tr_df.index[0],
            FieldName.TARGET: tr_df[target].values,
            FieldName.FEAT_STATIC_CAT: tr_df[cat_vars].values[0].reshape(1,),
            FieldName.FEAT_STATIC_REAL: tr_df[real_vars].values[0]
        }],
        freq = "D",
    )
    te_df = test_df[test_df['Province/State'].values == location_tr]
    test_gt = ListDataset(
        [{"start": te_df.index[0], "target": te_df[target].values}],
        freq = "D",
    )
    for train_series, gt, forecast in zip(train_obs, test_gt, predictor.predict(train_obs)):
        train_series = to_pandas(train_series)
        #gt = to_pandas(gt)
        if log_preds:
            # undo the log1p transform applied during preprocessing
            train_series = np.expm1(train_series)
            gt = np.expm1(gt)
            forecast.samples = np.expm1(forecast.samples)
        train_series.plot(linewidth=2, label = 'train series')
        #gt.plot(linewidth=2, label = 'test ground truth')
        forecast.plot(color='g', prediction_intervals=[50.0, 90.0])
    plt.title(f'Cumulative number of forecasted {target} in {location}', fontsize=fontsize)
    plt.legend(fontsize = fontsize)
    plt.grid(which='both')
    plt.show()

plot_forecast(predictor_fatalities_all[0], 'New York')
plot_forecast(predictor_fatalities_all[0], 'California')
plot_forecast(predictor_fatalities_all[0], 'Washington')
plot_forecast(predictor_fatalities_all[0], 'Texas')
plot_forecast(predictor_fatalities_all[0], 'Louisiana')
# -
# # Calculate metrics on public test
# +
from gluonts.evaluation.backtest import make_evaluation_predictions
from gluonts.evaluation import Evaluator
import json
all_data = build_dataset(all_df)

# evaluate fatalities predictor
# NOTE(review): `predictor_fatalities` and `predictor_fatalities_aug` are only
# produced by fit() calls that are commented out above — running this cell
# as-is raises NameError; re-enable those fits first.
forecast_iterable, ts_iterable = make_evaluation_predictions(
    dataset=all_data,
    predictor=predictor_fatalities,
    num_samples=100
)
evaluator = Evaluator(quantiles=[0.1, 0.5, 0.9])
agg_metrics, item_metrics = evaluator(ts_iterable, forecast_iterable, num_series=len(all_data))
print('Unaugmented Model Fatalities Predictor Metrics: ')
print(json.dumps(agg_metrics, indent=4))

# evaluate the covariate-augmented fatalities predictor
forecast_iterable, ts_iterable = make_evaluation_predictions(
    dataset=all_data,
    predictor=predictor_fatalities_aug,
    num_samples=100
)
agg_metrics, item_metrics = evaluator(ts_iterable, forecast_iterable, num_series=len(all_data))
print('Augmented Model Fatalities Predictor Metrics: ')
print(json.dumps(agg_metrics, indent=4))
# -
# # Interrogate importance of covariates
#
# +
from sklearn import manifold
from matplotlib.ticker import NullFormatter
import typing
# get list of countries with most fatalities
def visualize_embedding(
    trained_net,
    states: typing.List[str] = None
):
    """Project the trained DeepAR state-embedding matrix to 2-D with t-SNE
    and scatter/annotate the requested states (all states when None)."""
    # visualize 2-D projection of learned State embedding space with TSNE
    embedding = trained_net.collect_params()[f'{trained_net.name}_featureembedder0_cat_0_embedding_weight'].data()
    proj = manifold.TSNE(init='pca', random_state = 0).fit_transform(embedding.asnumpy())
    # plot
    fig = plt.figure(figsize=(15, 6.1), facecolor="white", edgecolor='k')
    ax = plt.gca()
    if states is None:
        states = enc.categories_[0]
    for state in states:
        # row index of this state in the encoder's category order
        idx = np.where(enc.categories_[0] == state)[0][0]
        plt.scatter(
            proj[idx, 0],
            proj[idx, 1],
            cmap=plt.cm.Spectral,
        )
        ax.annotate(
            state,
            (proj[idx, 0], proj[idx, 1]),
            fontsize=16,
        )
    ax.xaxis.set_major_formatter(NullFormatter())
    ax.yaxis.set_major_formatter(NullFormatter())
    plt.title(f"TSNE Visualization of Learned Embedding Space for States", fontsize=16)
    plt.show()

# NOTE(review): `net` comes from a fit() call that is commented out above —
# this call raises NameError unless that fit is re-enabled.
visualize_embedding(net, states = ['California', 'New York', 'Washington', 'Louisiana', 'Texas', 'New Jersey'])
# -
| notebooks/COVID-US-Forecasting.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python [default]
# language: python
# name: python3
# ---
# # Tensorflow 101
import numpy as np
import tensorflow as tf
# Taken from <NAME>'s Coursera Deep Learning series
#
# https://www.coursera.org/learn/deep-neural-network/lecture/zcZlH/tensorflow
# Implement forward path
# TensorFlow figures out backprop
# TF1-style graph mode: build the cost graph over variable w, then run
# gradient-descent steps inside a session
w = tf.Variable(0, dtype=tf.float32)
cost = tf.add(tf.add(w ** 2, tf.multiply(-10.0, w)), 25)  # (w - 5)^2, minimum at w = 5
learning_rate = 0.01
train = tf.train.GradientDescentOptimizer(learning_rate).minimize(cost)

init = tf.global_variables_initializer()
session = tf.Session()
session.run(init)
print(session.run(w))

# a single optimizer step
session.run(train)
print(session.run(w))

# 1000 steps converge toward w = 5
for _ in range(1000):
    session.run(train)
print(session.run(w))

# An alternative way of identifying the `cost` is to use overloaded operations
# This is equivalent to
# tf.add(tf.add(w ** 2, tf.multiply(-10.0, w)), 25)
# NOTE: rebinding `cost` here does not change `train`, which was built
# from the original cost tensor
cost = w ** 2 - 10 * w + 25
# ### Training data (with input)
# +
coefficients = np.array([[1.], [-10.], [25.]])

# placeholder lets us feed the polynomial coefficients at run time
x = tf.placeholder(tf.float32, [3, 1])
cost = x[0][0] * w ** 2 + x[1][0] * w + x[2][0]
train = tf.train.GradientDescentOptimizer(learning_rate).minimize(cost)

init = tf.global_variables_initializer()
session = tf.Session()
session.run(init)

for _ in range(1000):
    session.run(train, feed_dict={x: coefficients})
print(session.run(w))
# -

# context-manager form closes the session automatically
with tf.Session() as session:
    session.run(init)
    for _ in range(1000):
        session.run(train, feed_dict={x: coefficients})
    print(session.run(w))
| deep-learning/Tensorflow-101.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# language: python
# name: python3
# ---
# <center>
#
# # 文本表示模型:TF-IDF
#
# <br>
#
# > ## 分类模型:DNN
# </center>
# <img src='../static/img/logo.jpg' align='right' style="width:260px;height:60px;display:block"/>
# 
#
#
# tf-idf(英语:term frequency–inverse document frequency)是一种用于信息检索与文本挖掘的常用加权技术。tf-idf是一种统计方法,用以评估一字词对于一个文件集或一个语料库中的其中一份文件的重要程度。字词的重要性随着它在文件中出现的次数成正比增加,但同时会随着它在语料库中出现的频率成反比下降。tf-idf加权的各种形式常被搜索引擎应用,作为文件与用户查询之间相关程度的度量或评级。除了tf-idf以外,互联网上的搜索引擎还会使用基于链接分析的评级方法,以确定文件在搜索结果中出现的顺序。
# ### TF-IDF算法步骤:
#
# 第一步,计算词频:
#
# 
#
#
# 考虑到文章有长短之分,为了便于不同文章的比较,进行"词频"标准化。
#
# 
#
#
# 第二步,计算逆文档频率:
#
# 这时,需要一个语料库(corpus),用来模拟语言的使用环境。
#
# 
#
#
# 如果一个词越常见,那么分母就越大,逆文档频率就越小越接近0。分母之所以要加1,是为了避免分母为0(即所有文档都不包含该词)。log表示对得到的值取对数。
#
# 第三步,计算TF-IDF:
#
# 
#
#
# 可以看到,TF-IDF与一个词在文档中的出现次数成正比,与该词在整个语言中的出现次数成反比。所以,自动提取关键词的算法就很清楚了,就是计算出文档的每个词的TF-IDF值,然后按降序排列,取排在最前面的几个词。
#1.导入包和模块
from sklearn.feature_extraction.text import TfidfVectorizer
# sample corpus of three documents
# fixed: a missing comma previously concatenated the last two string
# literals into a single document ("The dog.The fox"), leaving only two
# documents; also corrected the "dag" typo in the sample sentence
text=["The quick brown fox jumped over the lazy dog.",
      "The dog.",
      "The fox"
      ]
# 2. create the transform (TF-IDF vectorizer)
vectorizer=TfidfVectorizer()
# 3. tokenize and build the vocabulary
vectorizer.fit(text)
# 4. show the vocabulary and each feature's idf weight
print("特征:",vectorizer.vocabulary_)
print("特征的IDF:",vectorizer.idf_)
# 5. encode a single document / the whole corpus
vector=vectorizer.transform([text[0]])
X=vectorizer.fit_transform(text)
print('TF-IDF矩阵:',X.toarray())
# 6. summarize the encoded document
print('shape:',vector.shape)
print(vector.toarray())
| Day10-NLP/TF-IDF_DNN.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: conda_amazonei_mxnet_p36
# language: python
# name: conda_amazonei_mxnet_p36
# ---
# ### Get the Personalize boto3 Client
# +
# Core imports: boto3 for AWS access, json for pretty-printing API
# responses, time for the polling loops used throughout this notebook.
# NOTE(review): numpy and pandas are imported but unused in the visible
# cells — confirm before removing.
import boto3
import json
import numpy as np
import pandas as pd
import time

# Control-plane client (datasets, solutions, campaigns) and data-plane
# client (real-time recommendation requests).
personalize = boto3.client('personalize')
personalize_runtime = boto3.client('personalize-runtime')
# -

# ### Specify a Bucket and Data Output Location

bucket = "personalizedemozixiqi"  # name of S3 bucket
filename = "tripadvisor_data/tripadvisor.csv"  # replace with a name that you want to save the dataset under
# Upload the local CSV to s3://<bucket>/<filename> (same relative path).
boto3.Session().resource('s3').Bucket(bucket).Object(filename).upload_file(filename)

# ### Create Schema

# +
# Avro-style schema describing one interaction row.  USER_ID, ITEM_ID and
# TIMESTAMP are the mandatory columns; the RATING* fields are extra
# metadata columns present in the TripAdvisor CSV.
schema = {
    "type": "record",
    "name": "Interactions",
    "namespace": "com.amazonaws.personalize.schema",
    "fields": [
        {
            "name": "USER_ID",
            "type": "string"
        },
        {
            "name": "ITEM_ID",
            "type": "string"
        },
        {
            "name": "OVERALL_RATING",
            "type": "int"
        },
        {
            "name": "RATING1",
            "type": "int"
        },
        {
            "name": "RATING2",
            "type": "int"
        },
        {
            "name": "RATING3",
            "type": "int"
        },
        {
            "name": "RATING4",
            "type": "int"
        },
        {
            "name": "TIMESTAMP",
            "type": "long"
        },
    ],
    "version": "1.0"
}

create_schema_response = personalize.create_schema(
    name = "TRIPADVISOR",
    schema = json.dumps(schema)
)

# Keep the schema ARN for dataset creation below.
schema_arn = create_schema_response['schemaArn']
print(json.dumps(create_schema_response, indent=2))
# -
# ### Create and Wait for Dataset Group
# #### Create Dataset Group
# +
# Create the dataset group that will contain the interactions dataset.
create_dataset_group_response = personalize.create_dataset_group(
    name = "tripadvisor-dataset-group"
)

dataset_group_arn = create_dataset_group_response['datasetGroupArn']
print(json.dumps(create_dataset_group_response, indent=2))
# -

# #### Wait for Dataset Group to Have ACTIVE Status

# Poll once a minute until the dataset group reaches a terminal state,
# giving up after at most 3 hours.
max_time = time.time() + 3*60*60  # 3 hours
while time.time() < max_time:
    describe_dataset_group_response = personalize.describe_dataset_group(
        datasetGroupArn = dataset_group_arn
    )
    status = describe_dataset_group_response["datasetGroup"]["status"]
    print("DatasetGroup: {}".format(status))

    # Both statuses are terminal; keep the original stop-on-failure
    # behavior (no exception raised on "CREATE FAILED").
    if status in ("ACTIVE", "CREATE FAILED"):
        break

    time.sleep(60)
# ### Create Dataset
# +
dataset_type = "INTERACTIONS"
# Create the interactions dataset inside the dataset group, bound to the
# schema registered above.
create_dataset_response = personalize.create_dataset(
    name = "tripadvisor-dataset",
    datasetType = dataset_type,
    datasetGroupArn = dataset_group_arn,
    schemaArn = schema_arn
)

dataset_arn = create_dataset_response['datasetArn']
print(json.dumps(create_dataset_response, indent=2))
# -

# ### Prepare, Create, and Wait for Dataset Import Job

# #### Attach Policy to S3 Bucket

# +
s3 = boto3.client("s3")

# Bucket policy allowing the Personalize service principal to read the
# uploaded CSV (GetObject) and list the bucket.
policy = {
    "Version": "2012-10-17",
    "Id": "PersonalizeS3BucketAccessPolicy",
    "Statement": [
        {
            "Sid": "PersonalizeS3BucketAccessPolicy",
            "Effect": "Allow",
            "Principal": {
                "Service": "personalize.amazonaws.com"
            },
            "Action": [
                "s3:GetObject",
                "s3:ListBucket"
            ],
            "Resource": [
                "arn:aws:s3:::{}".format(bucket),
                "arn:aws:s3:::{}/*".format(bucket)
            ]
        }
    ]
}

s3.put_bucket_policy(Bucket=bucket, Policy=json.dumps(policy))
# -

# #### Create Personalize Role

# +
iam = boto3.client("iam")

role_name = "PersonalizeRoleTripAdvisor"
# Trust policy letting the Personalize service assume this role.
assume_role_policy_document = {
    "Version": "2012-10-17",
    "Statement": [
        {
            "Effect": "Allow",
            "Principal": {
                "Service": "personalize.amazonaws.com"
            },
            "Action": "sts:AssumeRole"
        }
    ]
}

create_role_response = iam.create_role(
    RoleName = role_name,
    AssumeRolePolicyDocument = json.dumps(assume_role_policy_document)
)

# AmazonPersonalizeFullAccess provides access to any S3 bucket with a name that includes "personalize" or "Personalize"
# if you would like to use a bucket with a different name, please consider creating and attaching a new policy
# that provides read access to your bucket or attaching the AmazonS3ReadOnlyAccess policy to the role
policy_arn = "arn:aws:iam::aws:policy/service-role/AmazonPersonalizeFullAccess"
iam.attach_role_policy(
    RoleName = role_name,
    PolicyArn = policy_arn
)

time.sleep(60)  # wait for a minute to allow IAM role policy attachment to propagate

role_arn = create_role_response["Role"]["Arn"]
print(role_arn)
# -
# #### Create Dataset Import Job
# +
# Kick off the import of the S3 CSV into the Personalize dataset, using
# the role created above for read access.
create_dataset_import_job_response = personalize.create_dataset_import_job(
    jobName = "tripadvisor-dataset-import-job",
    datasetArn = dataset_arn,
    dataSource = {
        "dataLocation": "s3://{}/{}".format(bucket, filename)
    },
    roleArn = role_arn
)

dataset_import_job_arn = create_dataset_import_job_response['datasetImportJobArn']
print(json.dumps(create_dataset_import_job_response, indent=2))
# -

# #### Wait for Dataset Import Job to Have ACTIVE Status

# Poll once a minute until the import finishes (up to 3 hours).
max_time = time.time() + 3*60*60  # 3 hours
while time.time() < max_time:
    describe_dataset_import_job_response = personalize.describe_dataset_import_job(
        datasetImportJobArn = dataset_import_job_arn
    )
    status = describe_dataset_import_job_response["datasetImportJob"]['status']
    print("DatasetImportJob: {}".format(status))

    # Terminal states: stop polling on success or failure.
    if status in ("ACTIVE", "CREATE FAILED"):
        break

    time.sleep(60)
# ### Select Recipe
# List available recipes, then pin the one used for this demo.
list_recipes_response = personalize.list_recipes()
# popularity-count recipe selected for demo purposes
# (the original comment claimed aws-hrnn, which did not match the ARN).
recipe_arn = "arn:aws:personalize:::recipe/aws-popularity-count"
list_recipes_response

# ### Create and Wait for Solution

# #### Create Solution

# +
create_solution_response = personalize.create_solution(
    name = "tripadvisor-solution",
    datasetGroupArn = dataset_group_arn,
    recipeArn = recipe_arn
)

solution_arn = create_solution_response['solutionArn']
print(json.dumps(create_solution_response, indent=2))
# -

# #### Create Solution Version

# +
# A solution version is one concrete training run of the solution.
create_solution_version_response = personalize.create_solution_version(
    solutionArn = solution_arn
)

solution_version_arn = create_solution_version_response['solutionVersionArn']
print(json.dumps(create_solution_version_response, indent=2))
# -

# #### Wait for Solution Version to Have ACTIVE Status

# Poll once a minute until training reaches a terminal state (up to 3 h).
max_time = time.time() + 3*60*60  # 3 hours
while time.time() < max_time:
    describe_solution_version_response = personalize.describe_solution_version(
        solutionVersionArn = solution_version_arn
    )
    status = describe_solution_version_response["solutionVersion"]["status"]
    print("SolutionVersion: {}".format(status))

    if status in ("ACTIVE", "CREATE FAILED"):
        break

    time.sleep(60)
# #### Get Metrics of Solution
# +
# Fetch the offline evaluation metrics Personalize computed for the
# trained solution version.
get_solution_metrics_response = personalize.get_solution_metrics(
    solutionVersionArn = solution_version_arn
)

print(json.dumps(get_solution_metrics_response, indent=2))
# -
# ### Create and Wait for Campaign
# #### Create Campaign
# +
# Deploy the trained solution version behind a real-time endpoint
# (a "campaign") with a minimum of 1 provisioned transaction/second.
create_campaign_response = personalize.create_campaign(
    name = "DEMO-campaign",
    solutionVersionArn = solution_version_arn,
    minProvisionedTPS = 1
)

campaign_arn = create_campaign_response['campaignArn']
print(json.dumps(create_campaign_response, indent=2))
# -

# #### Wait for Campaign to Have ACTIVE Status

# Poll once a minute until the campaign is deployed (up to 3 hours).
max_time = time.time() + 3*60*60  # 3 hours
while time.time() < max_time:
    describe_campaign_response = personalize.describe_campaign(
        campaignArn = campaign_arn
    )
    status = describe_campaign_response["campaign"]["status"]
    print("Campaign: {}".format(status))

    if status in ("ACTIVE", "CREATE FAILED"):
        break

    time.sleep(60)
# ### Get Recommendations
# #### Generate Recommendation Based On User Interaction Event
# +
user_id = 1234
item_id = 5678

# Request the top-5 recommendations for this user/item event.
# Uses the `campaign_arn` captured when "DEMO-campaign" was created above,
# instead of the original hard-coded, account-specific ARN (which pointed
# at the same campaign).
get_recommendations_response = personalize_runtime.get_recommendations(
    campaignArn = campaign_arn,
    userId = str(user_id),
    itemId = str(item_id),
    numResults = 5
)

item_list = get_recommendations_response['itemList']
print("Recommendations: {}".format(json.dumps(item_list, indent=2)))
# -
# ### Solution Creation With AutoML
# +
# Create a solution with performAutoML=True so Personalize chooses the
# recipe automatically instead of using an explicit recipeArn.
create_solution_response = personalize.create_solution(
    name = "tripadvisor-solution-automl",
    datasetGroupArn = dataset_group_arn,
    performAutoML = True,
)

automl_solution_arn = create_solution_response['solutionArn']
print(json.dumps(create_solution_response, indent=2))
# +
# Train one version of the AutoML solution.
create_solution_version_response = personalize.create_solution_version(
    solutionArn = automl_solution_arn
)

automl_solution_version_arn = create_solution_version_response['solutionVersionArn']
print(json.dumps(create_solution_version_response, indent=2))
# -

# Poll once a minute until AutoML training finishes (up to 3 hours).
max_time = time.time() + 3*60*60  # 3 hours
while time.time() < max_time:
    describe_solution_version_response = personalize.describe_solution_version(
        solutionVersionArn = automl_solution_version_arn
    )
    status = describe_solution_version_response["solutionVersion"]["status"]
    print("SolutionVersion: {}".format(status))

    if status in ("ACTIVE", "CREATE FAILED"):
        break

    time.sleep(60)
# +
# Offline metrics for the AutoML-trained solution version.
get_solution_metrics_response = personalize.get_solution_metrics(
    solutionVersionArn = automl_solution_version_arn
)

print(json.dumps(get_solution_metrics_response, indent=2))
# -

# NOTE(review): hard-coded, account-specific solution ARN below — this
# call only works in the original author's account (990615222287).
personalize.describe_solution(solutionArn="arn:aws:personalize:us-east-1:990615222287:solution/tripadvisor-solution-automl")
# ### Create Campaign
# +
# Deploy the AutoML solution version behind a campaign.
# NOTE(review): the original pinned a solution-version ARN from a previous
# run ("…/tripadvisor-solution-automl/34e23e30"); using the version created
# in the cell above makes the notebook reproducible in any account.
create_campaign_response = personalize.create_campaign(
    name = "automl-campaign-1",
    solutionVersionArn = automl_solution_version_arn,
    minProvisionedTPS = 1
)

campaign_arn = create_campaign_response['campaignArn']
print(json.dumps(create_campaign_response, indent=2))
# -

# ### Wait For Campaign To Have Active Status

# Poll once a minute until the campaign is deployed (up to 3 hours).
max_time = time.time() + 3*60*60  # 3 hours
while time.time() < max_time:
    describe_campaign_response = personalize.describe_campaign(
        campaignArn = campaign_arn
    )
    status = describe_campaign_response["campaign"]["status"]
    print("Campaign: {}".format(status))

    if status in ("ACTIVE", "CREATE FAILED"):
        break

    time.sleep(60)
# ### Personalized Ranking
# +
# Train a personalized-ranking solution on the same dataset group.
# Uses the `dataset_group_arn` variable instead of the original
# hard-coded, account-specific ARN (same dataset group).
create_solution_response = personalize.create_solution(
    name = "tripadvisor-solution-personalized-ranking-1",
    datasetGroupArn = dataset_group_arn,
    recipeArn = 'arn:aws:personalize:::recipe/aws-personalized-ranking',
)

pr_solution_arn = create_solution_response['solutionArn']
print(json.dumps(create_solution_response, indent=2))
# +
create_solution_version_response = personalize.create_solution_version(
    solutionArn = pr_solution_arn
)

solution_version_arn = create_solution_version_response['solutionVersionArn']
print(json.dumps(create_solution_version_response, indent=2))
# -

# Poll the version created above (the original watched a hard-coded
# version ARN from a previous run instead of the fresh one).
max_time = time.time() + 3*60*60  # 3 hours
while time.time() < max_time:
    describe_solution_version_response = personalize.describe_solution_version(
        solutionVersionArn = solution_version_arn
    )
    status = describe_solution_version_response["solutionVersion"]["status"]
    print("SolutionVersion: {}".format(status))

    if status in ("ACTIVE", "CREATE FAILED"):
        break

    time.sleep(60)
# +
# Deploy the personalized-ranking solution version created above.
# NOTE(review): the original pinned a version ARN from a previous run
# ("…/tripadvisor-solution-personalized-ranking-1/03fe3c55"); the
# `solution_version_arn` variable holds the freshly created version.
create_campaign_response = personalize.create_campaign(
    name = "personalized-ranking-campaign-1",
    solutionVersionArn = solution_version_arn,
    minProvisionedTPS = 1
)

campaign_arn = create_campaign_response['campaignArn']
print(json.dumps(create_campaign_response, indent=2))
# -

# Poll the campaign just created (original used a hard-coded ARN for the
# same campaign name).
max_time = time.time() + 3*60*60  # 3 hours
while time.time() < max_time:
    describe_campaign_response = personalize.describe_campaign(
        campaignArn = campaign_arn
    )
    status = describe_campaign_response["campaign"]["status"]
    print("Campaign: {}".format(status))

    if status in ("ACTIVE", "CREATE FAILED"):
        break

    time.sleep(60)
# +
user_id = 1234
candidate_items = [1468, 1235, 3654, 7852, 8354, 7949, 5352, 9673, 5763, 5767]

# Re-rank the candidate items for this user using the ranking campaign
# created above (replaces the original hard-coded, account-specific ARN
# for the same "personalized-ranking-campaign-1").
personalized_ranking_response = personalize_runtime.get_personalized_ranking(
    campaignArn = campaign_arn,
    userId = str(user_id),
    inputList = [str(item_id) for item_id in candidate_items],
)

# Items are returned in ranked order, best first.
for item in personalized_ranking_response['personalizedRanking']:
    print(item['itemId'])
# -
| single_aspect.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# language: python
# name: python3
# ---
# %matplotlib inline
# Limit printed floats to 3 decimal places so demo output stays readable.
import numpy as np
np.set_printoptions(precision=3)
# This notebook shows some basic linear algebra operations in python. We will use the linear algebra package in numpy. Detailed documentation can be found in this [link](https://numpy.org/doc/stable/reference/routines.linalg.html).
# # Vector Operation
# ## Dot product
# Dot product of two real vectors: 1*4 + 2*5 + 3*6 = 32.
a = [1, 2, 3]
b = [4, 5, 6]
real_dot = np.dot(a, b)
print(real_dot)
# Dot product with complex vectors.  np.dot does NOT conjugate either
# argument, so the Hermitian inner product <a, a> needs an explicit
# np.conj on one side.
a = [1 + 1j*2, 3 + 1j*0.5]
b = [2, 4]
print(np.dot(a, b))
print(np.dot(a, np.conj(a)))
# # Matrix
# ## Multiplication
# Square case: matrix multiplication is not commutative, AB != BA.
A = np.array([[1, 3],
              [2, 1]])
B = np.array([[4, 1],
              [2, 2]])
AB = np.matmul(A, B)
BA = np.matmul(B, A)
print("AB = \n", AB)
print("BA = \n", BA)
# Rectangular case: (2x3)@(3x2) is 2x2 while (3x2)@(2x3) is 3x3,
# so AB and BA do not even share a shape.
A = np.array([[1, 3, 1],
              [2, 1, 2]])
B = np.array([[4, 1],
              [2, 2],
              [7, 3]])
print("A: ", A.shape, ", B: ", B.shape)
print("AB = \n", np.matmul(A, B))
print("BA = \n", np.matmul(B, A))
# ## Determinant
# Determinant of a 2x2 matrix: 1*4 - 2*3 = -2.
A = np.array([[1, 2],
              [3, 4]])
det2 = np.linalg.det(A)
print("det(A) = {0:.2f}".format(det2))
# Determinant of a 3x3 matrix (cofactor expansion gives -12).
A = np.array([[1, 2, 1],
              [2, 1, 2],
              [1, 3, 5]])
det3 = np.linalg.det(A)
print("det(A) = {0:.2f}".format(det3))
# Determinant of a random 5x5 matrix, shown in scientific notation.
A = np.random.random([5, 5])  # A random 5x5 matrix
print("A = \n", A)
print("det(A) = {0:.4e}".format(np.linalg.det(A)))
# ## Inverse
def _show_inverse(mat):
    # Print a matrix, its inverse, and their product (which should be the
    # identity up to floating-point round-off); return the inverse.
    inv = np.linalg.inv(mat)
    print("A = \n", mat)
    print("A^{-1} = \n", inv)
    print("AA^{-1} = \n", np.matmul(mat, inv))
    return inv

# Inverse of a fixed 2x2 matrix.
A = np.array([[1, 2],
              [3, 4]])
Ainv = _show_inverse(A)
# Same check for a random (almost surely invertible) 4x4 matrix.
A = np.random.random([4, 4])  # A random 4x4 matrix
Ainv = _show_inverse(A)
# ## Eigenvalue and Eigenvector
# Real matrix with complex eigenvalues 1 ± 1j.
A = np.array([[1, -1],
              [1, 1]])
w, v = np.linalg.eig(A)
print("A = \n", A)
for i, lam in enumerate(w):
    print("eigenvalue {0:d}: {1:.3f}, eigenvector: {2}".format(i, lam, v[:, i]))
# Eigen-decomposition of a random 4x4 matrix (eigenvalues may come in
# complex-conjugate pairs).
A = np.random.random([4, 4])  # A random 4x4 matrix
w, v = np.linalg.eig(A)
print("A = \n", A)
for i, lam in enumerate(w):
    print("eigenvalue {0:d}: {1:.3f}, eigenvector: {2}".format(i, lam, v[:, i]))
# ### Degeneracy
# The eigenvectors for the degenerate eigenvalue are orthogonal to each other
# Symmetric matrix with a repeated eigenvalue: the eigenvectors returned
# for the degenerate eigenvalue are orthogonal to each other.
A = np.array([[1, 0, 1],
              [0, 2, 0],
              [1, 0, 1]])
w, v = np.linalg.eig(A)
print("A = \n", A)
for i, lam in enumerate(w):
    print("eigenvalue {0:d}: {1:.3f}, eigenvector: {2}".format(i, lam, v[:, i]))
# A defective matrix: eigenvalue 1 is repeated but has only one
# independent eigenvector, so the two returned eigenvectors are
# linearly dependent.
A = np.array([[1, 1],
              [0, 1]])
w, v = np.linalg.eig(A)
print("A = \n", A)
for i, lam in enumerate(w):
    print("eigenvalue {0:d}: {1:.3f}, eigenvector: {2}".format(i, lam, v[:, i]))
| demos/LinearAlgebra.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# language: python
# name: python3
# ---
# %matplotlib inline
#
# # Out-of-core classification of text documents
#
#
# This is an example showing how scikit-learn can be used for classification
# using an out-of-core approach: learning from data that doesn't fit into main
# memory. We make use of an online classifier, i.e., one that supports the
# partial_fit method, that will be fed with batches of examples. To guarantee
# that the features space remains the same over time we leverage a
# HashingVectorizer that will project each example into the same feature space.
# This is especially useful in the case of text classification where new
# features (words) may appear in each batch.
#
# +
# Authors: <NAME> <<EMAIL>>
# @FedericoV <https://github.com/FedericoV/>
# License: BSD 3 clause
from glob import glob
import itertools
import os.path
import re
import tarfile
import time
import sys
import numpy as np
import matplotlib.pyplot as plt
from matplotlib import rcParams
from html.parser import HTMLParser
from urllib.request import urlretrieve
from sklearn.datasets import get_data_home
from sklearn.feature_extraction.text import HashingVectorizer
from sklearn.linear_model import SGDClassifier
from sklearn.linear_model import PassiveAggressiveClassifier
from sklearn.linear_model import Perceptron
from sklearn.naive_bayes import MultinomialNB
def _not_in_sphinx():
    # Hack to detect whether we are running by the sphinx builder
    # (presumably the sphinx execution namespace lacks `__file__`;
    # the heuristic relies on that — TODO confirm).
    return '__file__' in globals()
# -
# Reuters Dataset related routines
# --------------------------------
#
# The dataset used in this example is Reuters-21578 as provided by the UCI ML
# repository. It will be automatically downloaded and uncompressed on first
# run.
#
#
# +
class ReutersParser(HTMLParser):
    """Stream a Reuters-21578 SGML file and yield one document at a time.

    Each document is a dict with 'title' (str), 'body' (str) and
    'topics' (list of str) keys.  Tag handling is dispatched dynamically:
    HTMLParser lowercases tag names, and handle_starttag/handle_endtag
    route them to methods named ``start_<tag>`` / ``end_<tag>``.
    """

    def __init__(self, encoding='latin-1'):
        HTMLParser.__init__(self)
        self._reset()
        self.encoding = encoding

    def handle_starttag(self, tag, attrs):
        # Route <tag> to self.start_<tag>(attrs) when such a handler exists;
        # otherwise fall back to a no-op.
        handler = getattr(self, 'start_' + tag, lambda x: None)
        handler(attrs)

    def handle_endtag(self, tag):
        # Route </tag> to self.end_<tag>() when such a handler exists.
        handler = getattr(self, 'end_' + tag, lambda: None)
        handler()

    def _reset(self):
        # Clear all per-document accumulation state.
        self.in_title = self.in_body = 0
        self.in_topics = self.in_topic_d = 0
        self.title = ""
        self.body = ""
        self.topic_d = ""
        self.topics = []

    def parse(self, fd):
        self.docs = []
        for chunk in fd:
            self.feed(chunk.decode(self.encoding))
            # Yield every document completed by this chunk, then clear the
            # buffer so no document is emitted twice.
            for doc in self.docs:
                yield doc
            self.docs = []
        self.close()

    def handle_data(self, data):
        # Accumulate character data into whichever section is open;
        # body takes priority over title, title over topic entries.
        if self.in_body:
            self.body += data
        elif self.in_title:
            self.title += data
        elif self.in_topic_d:
            self.topic_d += data

    def start_reuters(self, attributes):
        pass

    def end_reuters(self):
        # Collapse whitespace runs, record the finished document, and
        # reset state for the next <REUTERS> element.
        self.body = re.sub(r'\s+', r' ', self.body)
        self.docs.append({'title': self.title,
                          'body': self.body,
                          'topics': self.topics})
        self._reset()

    def start_title(self, attributes):
        self.in_title = 1

    def end_title(self):
        self.in_title = 0

    def start_body(self, attributes):
        self.in_body = 1

    def end_body(self):
        self.in_body = 0

    def start_topics(self, attributes):
        self.in_topics = 1

    def end_topics(self):
        self.in_topics = 0

    def start_d(self, attributes):
        self.in_topic_d = 1

    def end_d(self):
        self.in_topic_d = 0
        self.topics.append(self.topic_d)
        self.topic_d = ""
def stream_reuters_documents(data_path=None):
    """Iterate over documents of the Reuters dataset.

    The Reuters archive will automatically be downloaded and uncompressed if
    the `data_path` directory does not exist.

    Documents are represented as dictionaries with 'body' (str),
    'title' (str), 'topics' (list(str)) keys.
    """

    # NOTE(review): plain-http UCI mirror; confirm it is still reachable
    # before relying on the auto-download.
    DOWNLOAD_URL = ('http://archive.ics.uci.edu/ml/machine-learning-databases/'
                    'reuters21578-mld/reuters21578.tar.gz')
    ARCHIVE_FILENAME = 'reuters21578.tar.gz'

    if data_path is None:
        # Default to scikit-learn's data cache directory.
        data_path = os.path.join(get_data_home(), "reuters")
    if not os.path.exists(data_path):
        """Download the dataset."""
        print("downloading dataset (once and for all) into %s" %
              data_path)
        os.mkdir(data_path)

        def progress(blocknum, bs, size):
            # urlretrieve reporthook: overwrite one progress line in place
            # (suppressed when executed by the sphinx builder).
            total_sz_mb = '%.2f MB' % (size / 1e6)
            current_sz_mb = '%.2f MB' % ((blocknum * bs) / 1e6)
            if _not_in_sphinx():
                sys.stdout.write(
                    '\rdownloaded %s / %s' % (current_sz_mb, total_sz_mb))

        archive_path = os.path.join(data_path, ARCHIVE_FILENAME)
        urlretrieve(DOWNLOAD_URL, filename=archive_path,
                    reporthook=progress)
        if _not_in_sphinx():
            sys.stdout.write('\r')
        print("untarring Reuters dataset...")
        tarfile.open(archive_path, 'r:gz').extractall(data_path)
        print("done.")

    # Parse every .sgm file in the data directory, yielding documents one
    # at a time so memory use stays constant.
    parser = ReutersParser()
    for filename in glob(os.path.join(data_path, "*.sgm")):
        for doc in parser.parse(open(filename, 'rb')):
            yield doc
# -
# Main
# ----
#
# Create the vectorizer and limit the number of features to a reasonable
# maximum
#
#
# +
# The hashing trick keeps the feature space fixed (2**18 dimensions)
# across batches; alternate_sign=False keeps the counts non-negative,
# which MultinomialNB requires.
vectorizer = HashingVectorizer(decode_error='ignore', n_features=2 ** 18,
                               alternate_sign=False)

# Iterator over parsed Reuters SGML files.
data_stream = stream_reuters_documents()

# We learn a binary classification between the "acq" class and all the others.
# "acq" was chosen as it is more or less evenly distributed in the Reuters
# files. For other datasets, one should take care of creating a test set with
# a realistic portion of positive instances.
all_classes = np.array([0, 1])
positive_class = 'acq'

# Here are some classifiers that support the `partial_fit` method
partial_fit_classifiers = {
    'SGD': SGDClassifier(max_iter=5),
    'Perceptron': Perceptron(),
    'NB Multinomial': MultinomialNB(alpha=0.01),
    'Passive-Aggressive': PassiveAggressiveClassifier(),
}
def get_minibatch(doc_iter, size, pos_class=positive_class):
    """Extract a minibatch of examples, return a tuple X_text, y.

    Note: size is before excluding invalid docs with no topics assigned.
    """
    batch = []
    # Pull up to `size` documents; keep only those with at least one topic.
    for doc in itertools.islice(doc_iter, size):
        if doc['topics']:
            batch.append(('{title}\n\n{body}'.format(**doc),
                          pos_class in doc['topics']))
    if not len(batch):
        return np.asarray([], dtype=int), np.asarray([], dtype=int)
    X_text, y = zip(*batch)
    return X_text, np.asarray(y, dtype=int)
def iter_minibatches(doc_iter, minibatch_size):
    """Yield (X_text, y) minibatches until the document stream runs dry."""
    minibatch = get_minibatch(doc_iter, minibatch_size)
    while len(minibatch[0]):
        yield minibatch
        minibatch = get_minibatch(doc_iter, minibatch_size)
# test data statistics
test_stats = {'n_test': 0, 'n_test_pos': 0}

# First we hold out a number of examples to estimate accuracy
n_test_documents = 1000
tick = time.time()
X_test_text, y_test = get_minibatch(data_stream, 1000)
parsing_time = time.time() - tick
tick = time.time()
# Vectorize the held-out documents once; X_test is reused for every
# accuracy evaluation in the training loop below.
X_test = vectorizer.transform(X_test_text)
vectorizing_time = time.time() - tick
test_stats['n_test'] += len(y_test)
test_stats['n_test_pos'] += sum(y_test)
print("Test set is %d documents (%d positive)" % (len(y_test), sum(y_test)))
def progress(cls_name, stats):
    """Report progress information, return a string."""
    duration = time.time() - stats['t0']
    # Build the report from fixed-width segments, then join them.
    parts = ["%20s classifier : \t" % cls_name,
             "%(n_train)6d train docs (%(n_train_pos)6d positive) " % stats,
             "%(n_test)6d test docs (%(n_test_pos)6d positive) " % test_stats,
             "accuracy: %(accuracy).3f " % stats,
             "in %.2fs (%5d docs/s)" % (duration, stats['n_train'] / duration)]
    return "".join(parts)
# Per-classifier bookkeeping: training counts, accuracy history versus
# number of examples, and accuracy history versus wall-clock runtime.
cls_stats = {}

for cls_name in partial_fit_classifiers:
    stats = {'n_train': 0, 'n_train_pos': 0,
             'accuracy': 0.0, 'accuracy_history': [(0, 0)], 't0': time.time(),
             'runtime_history': [(0, 0)], 'total_fit_time': 0.0}
    cls_stats[cls_name] = stats

get_minibatch(data_stream, n_test_documents)
# Discard test set

# We will feed the classifier with mini-batches of 1000 documents; this means
# we have at most 1000 docs in memory at any time.  The smaller the document
# batch, the bigger the relative overhead of the partial fit methods.
minibatch_size = 1000

# Create the data_stream that parses Reuters SGML files and iterates on
# documents as a stream.
minibatch_iterators = iter_minibatches(data_stream, minibatch_size)
total_vect_time = 0.0

# Main loop : iterate on mini-batches of examples
for i, (X_train_text, y_train) in enumerate(minibatch_iterators):

    tick = time.time()
    X_train = vectorizer.transform(X_train_text)
    total_vect_time += time.time() - tick

    for cls_name, cls in partial_fit_classifiers.items():
        tick = time.time()
        # update estimator with examples in the current mini-batch
        cls.partial_fit(X_train, y_train, classes=all_classes)

        # accumulate test accuracy stats
        cls_stats[cls_name]['total_fit_time'] += time.time() - tick
        cls_stats[cls_name]['n_train'] += X_train.shape[0]
        cls_stats[cls_name]['n_train_pos'] += sum(y_train)
        # Time the evaluation on the held-out set separately from fitting.
        tick = time.time()
        cls_stats[cls_name]['accuracy'] = cls.score(X_test, y_test)
        cls_stats[cls_name]['prediction_time'] = time.time() - tick
        acc_history = (cls_stats[cls_name]['accuracy'],
                       cls_stats[cls_name]['n_train'])
        cls_stats[cls_name]['accuracy_history'].append(acc_history)
        run_history = (cls_stats[cls_name]['accuracy'],
                       total_vect_time + cls_stats[cls_name]['total_fit_time'])
        cls_stats[cls_name]['runtime_history'].append(run_history)

        # Report progress every third mini-batch.
        if i % 3 == 0:
            print(progress(cls_name, cls_stats[cls_name]))
    if i % 3 == 0:
        print('\n')
# -
# Plot results
# ------------
#
# The plot represents the learning curve of the classifier: the evolution
# of classification accuracy over the course of the mini-batches. Accuracy is
# measured on the first 1000 samples, held out as a validation set.
#
# To limit the memory consumption, we queue examples up to a fixed amount
# before feeding them to the learner.
#
#
# +
def plot_accuracy(x, y, x_legend):
    """Plot accuracy as a function of x on the current matplotlib figure."""
    xs = np.array(x)
    ys = np.array(y)
    # Label the axes before drawing the curve itself.
    plt.title('Classification accuracy as a function of %s' % x_legend)
    plt.xlabel('%s' % x_legend)
    plt.ylabel('Accuracy')
    plt.grid(True)
    plt.plot(xs, ys)
rcParams['legend.fontsize'] = 10
cls_names = list(sorted(cls_stats.keys()))

# Plot accuracy evolution
plt.figure()
for _, stats in sorted(cls_stats.items()):
    # Plot accuracy evolution with #examples
    accuracy, n_examples = zip(*stats['accuracy_history'])
    plot_accuracy(n_examples, accuracy, "training examples (#)")
    ax = plt.gca()
    ax.set_ylim((0.8, 1))
plt.legend(cls_names, loc='best')

plt.figure()
for _, stats in sorted(cls_stats.items()):
    # Plot accuracy evolution with runtime
    accuracy, runtime = zip(*stats['runtime_history'])
    plot_accuracy(runtime, accuracy, 'runtime (s)')
    ax = plt.gca()
    ax.set_ylim((0.8, 1))
plt.legend(cls_names, loc='best')

# Plot fitting times: one bar per classifier plus one for vectorization.
plt.figure()
fig = plt.gcf()
cls_runtime = [stats['total_fit_time']
               for cls_name, stats in sorted(cls_stats.items())]
cls_runtime.append(total_vect_time)
cls_names.append('Vectorization')
bar_colors = ['b', 'g', 'r', 'c', 'm', 'y']

ax = plt.subplot(111)
rectangles = plt.bar(range(len(cls_names)), cls_runtime, width=0.5,
                     color=bar_colors)

ax.set_xticks(np.linspace(0, len(cls_names) - 1, len(cls_names)))
ax.set_xticklabels(cls_names, fontsize=10)
# Leave 20% headroom above the tallest bar for the value annotations.
ymax = max(cls_runtime) * 1.2
ax.set_ylim((0, ymax))
ax.set_ylabel('runtime (s)')
ax.set_title('Training Times')
def autolabel(rectangles):
    """Annotate each bar with its height, printed to four decimals."""
    # NOTE: relies on the module-level `ax` axis being the current one.
    for rect in rectangles:
        height = rect.get_height()
        x_mid = rect.get_x() + rect.get_width() / 2.
        ax.text(x_mid, 1.05 * height, '%.4f' % height,
                ha='center', va='bottom')
# Rotate tick labels, annotate bars, and render the training-time figure.
plt.setp(plt.xticks()[1], rotation=30)
autolabel(rectangles)
plt.tight_layout()
plt.show()

# Plot prediction times: per-classifier scoring time plus the one-off
# parsing and vectorization costs of the test set.
plt.figure()
cls_runtime = []
cls_names = list(sorted(cls_stats.keys()))
for cls_name, stats in sorted(cls_stats.items()):
    cls_runtime.append(stats['prediction_time'])
cls_runtime.append(parsing_time)
cls_names.append('Read/Parse\n+Feat.Extr.')
cls_runtime.append(vectorizing_time)
cls_names.append('Hashing\n+Vect.')

ax = plt.subplot(111)
rectangles = plt.bar(range(len(cls_names)), cls_runtime, width=0.5,
                     color=bar_colors)

ax.set_xticks(np.linspace(0, len(cls_names) - 1, len(cls_names)))
ax.set_xticklabels(cls_names, fontsize=8)
plt.setp(plt.xticks()[1], rotation=30)
# Headroom above the tallest bar for the annotations.
ymax = max(cls_runtime) * 1.2
ax.set_ylim((0, ymax))
ax.set_ylabel('runtime (s)')
ax.set_title('Prediction Times (%d instances)' % n_test_documents)
autolabel(rectangles)
plt.tight_layout()
plt.show()
| sklearn/sklearn learning/demonstration/auto_examples_jupyter/applications/plot_out_of_core_classification.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3 (ipykernel)
# language: python
# name: python3
# ---
# # Vector Data I/O in Python
#
# Reading data into Python is usually the first step of an analysis workflow. There are various different GIS data formats available such as [Shapefile](https://en.wikipedia.org/wiki/Shapefile), [GeoJSON](https://en.wikipedia.org/wiki/GeoJSON), [KML](https://en.wikipedia.org/wiki/Keyhole_Markup_Language), and [GPKG](https://en.wikipedia.org/wiki/GeoPackage). [Geopandas](http://geopandas.org/io.html) is capable of reading data from all of these formats (plus many more).
#
# This tutorial will show some typical examples how to read (and write) data from different sources. The main point in this section is to demonstrate the basic syntax for reading and writing data using short code snippets. You can find the example data sets in the data-folder. However, most of the example databases do not exists, but you can use and modify the example syntax according to your own setup.
# ## File formats
#
# In geopandas, we use the generic function [read_file()](http://geopandas.org/reference.html#geopandas.read_file) for reading in different data formats. In the background, Geopandas uses [fiona.open()](https://fiona.readthedocs.io/en/latest/fiona.html#fiona.open) when reading in data. Esri Shapefile is the default file format. For other file formats we need to specify which driver to use for reading in the data.
#
# You can check the supported format drivers through geopandas, or directly from fiona:
# +
import geopandas as gpd

# Check supported format drivers (dict mapping driver name -> modes,
# e.g. 'r' read-only, 'rw' read/write).
gpd.io.file.fiona.drvsupport.supported_drivers

# Same as:
#import fiona
#fiona.supported_drivers
# -
# ### Read / write Shapefile
# +
import geopandas as gpd

# Read file from Shapefile (the default driver, so none is specified)
fp = "data/finland_municipalities.shp"
data = gpd.read_file(fp)

# Write to Shapefile (just make a copy)
outfp = "temp/finland_municipalities.shp"
data.to_file(outfp)
# -

# ### Read / write GeoJSON

# +
# Read file from GeoJSON
fp = "data/finland_municipalities.gjson"
data = gpd.read_file(fp, driver="GeoJSON")

# Write to GeoJSON (just make a copy)
outfp = "temp/finland_municipalities.gjson"
data.to_file(outfp, driver="GeoJSON")
# -
# ### Read / write KML
# +
# Enable KML support.  BUG FIX: the write below uses driver="KML", but
# only 'LIBKML' was whitelisted originally, so to_file(driver="KML")
# failed with an unsupported-driver error; both drivers are enabled now.
gpd.io.file.fiona.drvsupport.supported_drivers['LIBKML'] = 'rw'
gpd.io.file.fiona.drvsupport.supported_drivers['KML'] = 'rw'

# Read file from KML
fp = "data/finland_municipalities.kml"
data = gpd.read_file(fp)

# Write to KML (just make a copy)
outfp = "temp/finland_municipalities.kml"
data.to_file(outfp, driver="KML")
# -
# ### Read / write Geopackage
# +
# Read file from Geopackage
fp = "data/finland_municipalities.gpkg"
data = gpd.read_file(fp)

# Write to Geopackage (just make a copy)
outfp = "temp/finland_municipalities.gpkg"
data.to_file(outfp, driver="GPKG")
# -

# ### Read / write GeoDatabase

# +
# Read one layer from an Esri File Geodatabase (read-only OpenFileGDB driver)
fp = "data/finland.gdb"
data = gpd.read_file(fp, driver="OpenFileGDB", layer='municipalities')

# Write to same FileGDB (just add a new layer) - requires additional package installations(?)
# NOTE(review): the "FileGDB" write driver presumably needs Esri's SDK
# installed separately — confirm before relying on this cell.
outfp = "data/finland.gdb"
data.to_file(outfp, driver="FileGDB", layer="municipalities_copy")
# -

# ### Read / write MapInfo Tab

# +
# Read file from MapInfo Tab
fp = "data/finland_municipalities.tab"
data = gpd.read_file(fp, driver="MapInfo File")

# Write to MapInfo Tab (just make a copy)
# (original comment incorrectly said "Write to same FileGDB")
outfp = "temp/finland_municipalities.tab"
data.to_file(outfp, driver="MapInfo File")
# -
# ## Databases
#
# Example syntax for reading and writing data from/to databases.
# ### Read PostGIS database using psycopg2
# +
import geopandas as gpd
import psycopg2

# Create connection to database with psycopg2 module (update params according your db).
# BUG FIX: psycopg2.connect() returns a single connection object, not a
# (connection, cursor) tuple — the original tuple-unpacking raised a
# TypeError before any query could run.  No cursor is needed here;
# geopandas only requires the connection.
conn = psycopg2.connect(dbname='my_postgis_database',
                        user='my_usrname',
                        password='<PASSWORD>',
                        host='123.22.432.16', port=5432)

# Specify sql query
sql = "SELECT * FROM MY_TABLE;"

# Read data from PostGIS into a GeoDataFrame
data = gpd.read_postgis(sql=sql, con=conn)
# -
# ### Read / write PostGIS database using SqlAlchemy + GeoAlchemy
# +
from sqlalchemy.engine.url import URL
from sqlalchemy import create_engine
from sqlalchemy import MetaData
from sqlalchemy.orm import sessionmaker
from geoalchemy2 import WKTElement, Geometry

# Update with your db parameters
HOST = '123.234.345.16'
DB = 'my_database'
USER = 'my_user'
PORT = 5432
PWD = '<PASSWORD>'

# Database info.
# BUG FIX: the original passed the bare placeholder `<PASSWORD>` as the
# password argument, which is a SyntaxError; use the PWD credential
# defined above instead.
db_url = URL(drivername='postgresql+psycopg2', host=HOST, database=DB,
             username=USER, port=PORT, password=PWD)

# Create engine
engine = create_engine(db_url)

# Init Metadata
meta = MetaData()

# Load table definitions from db
meta.reflect(engine)

# Create session
Session = sessionmaker(bind=engine)
session = Session()

# ========================
# Read data from PostGIS
# ========================

# Specify sql query
sql = "SELECT * FROM finland;"

# Pull the data
data = gpd.read_postgis(sql=sql, con=engine)

# Close session
session.close()

# =========================================
# Write data to PostGIS (make a copy table)
# =========================================

# Coordinate Reference System (srid)
crs = 4326

# Target table
target_table = 'finland_copy'

# Convert Shapely geometries to WKTElements into column 'geom' (default in PostGIS)
data['geom'] = data['geometry'].apply(lambda row: WKTElement(row.wkt, srid=crs))

# Drop Shapely geometries
data = data.drop('geometry', axis=1)

# Write to PostGIS (overwrite if table exists, be careful with this!)
# Possible behavior: 'replace', 'append', 'fail'
data.to_sql(target_table, engine, if_exists='replace', index=False)
# -
# ### Read / write Spatialite database
# +
import geopandas as gpd
import sqlite3
import shapely.wkb as swkb
from sqlalchemy import create_engine, event

# DB path
dbfp = 'L2_data/Finland.sqlite'
# Name for the table
tbl_name = 'finland'
# SRID (crs of your data)
srid = 4326

# Parse Geometry type of the input Data ('data' comes from an earlier cell)
gtype = data.geom_type.unique()
assert len(gtype) == 1, "Mixed Geometries! Cannot insert into SQLite table."
geom_type = gtype[0].upper()

# Initialize database engine.
# BUG FIX: 'sqlite' was an undefined name (NameError at runtime); the DB-API
# module imported above is sqlite3.
engine = create_engine('sqlite:///{db}'.format(db=dbfp), module=sqlite3)

# Initialize table without geometries
geo = data.drop(['geometry'], axis=1)
with sqlite3.connect(dbfp) as conn:
    geo.to_sql(tbl_name, conn, if_exists='replace', index=False)

# Enable spatialite extension
with sqlite3.connect(dbfp) as conn:
    conn.enable_load_extension(True)
    conn.load_extension("mod_spatialite")
    conn.execute("SELECT InitSpatialMetaData(1);")
    # Add geometry column with specified CRS with defined geometry type having two dimensions.
    # BUG FIX: the table name and geometry type must be quoted SQL string
    # literals; unquoted they reach SpatiaLite as bare identifiers and fail.
    conn.execute(
        "SELECT AddGeometryColumn('{table}', 'wkb_geometry',\
        {srid}, '{geom_type}', 2);".format(table=tbl_name, srid=srid, geom_type=geom_type)
    )

# Convert Shapely geometries into well-known-binary format
data['geometry'] = data['geometry'].apply(lambda geom: swkb.dumps(geom))
# Push to database (overwrite if table exists)
data.to_sql(tbl_name, engine, if_exists='replace', index=False)
# -
# ## Read Web Feature Service (WFS)
#
# This script was used to generate input data for this tutorial (FileGDB and tab were created separately). Source: Statistics finland WFS.
# +
import geopandas as gpd
import requests
import geojson
from pyproj import CRS

# WFS backend url. Data from Statistics Finland (CC BY 4.0):
# https://www.stat.fi/org/avoindata/paikkatietoaineistot_en.html
url = 'http://geo.stat.fi/geoserver/tilastointialueet/wfs'

# GetFeature request parameters; the municipalities layer is requested as JSON
params = {
    'service': 'WFS',
    'version': '2.0.0',
    'request': 'GetFeature',
    'typeName': 'tilastointialueet:kunta4500k',
    'outputFormat': 'json',
}

# Fetch data from WFS using requests
r = requests.get(url, params=params)

# Parse the GeoJSON payload into a GeoDataFrame; the source CRS is EPSG:3067
data = gpd.GeoDataFrame.from_features(geojson.loads(r.content), crs="EPSG:3067")
# -
# Inspect the fetched layer
data.head()
# Prepare data for writing to various file formats
data = data.drop(columns=["bbox"])
# Check crs
data.crs
# +
# filename (used as the stem for every output file)
layer_name = "finland_municipalities"
# enable writing kml
gpd.io.file.fiona.drvsupport.supported_drivers['LIBKML'] = 'rw'
# drivers and extensions for different file formats
# NOTE(review): 'gjson' and 'libkml' are unconventional extensions
# ('.geojson' / '.kml' are the usual ones) -- kept as-is for compatibility.
drivers = {'ESRI Shapefile': 'shp',
           'GeoJSON': 'gjson',
           'LIBKML': 'libkml',
           'GPKG': 'gpkg',
           }
# Write layer to different file formats
for driver, extension in drivers.items():
    # Create file path and file name
    file_name = "data/{0}.{1}".format(layer_name, extension)
    # Write data using correct driver
    data.to_file(file_name, driver=driver)
    print("Created file", file_name)
# -
# -
| source/notebooks/L2/00-data-io.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# language: python
# name: python3
# ---
# +
# %matplotlib inline
import logging
import emcee
import h5py
import numpy as np
import matplotlib.pyplot as plt
from os import path
import sys
sys.path.insert(0, path.abspath('../'))
from src import workdir, parse_model_parameter_file
from src.emulator import Emulator
from src.mcmc import Chain
# -
# Index of the validation (closure-test) data set to analyse
validationIdx = 19
# Pseudo-experimental data generated from known model parameters
exp_path = "../validation_data/IPGlasmaDiffraction_{:02d}/Bayesian_output.txt".format(validationIdx)
# Model parameter dictionary (names, ranges) for the emulator/MCMC
model_par = "../model_parameter_dict_examples/IPGlasmaDiffraction.txt"
training_set = "../training_data"
# Build the MCMC chain with a 7-principal-component emulator (npc=7)
mymcmc = Chain(expdata_path=exp_path, model_parafile=model_par,
               training_data_path=training_set, npc=7)
# Ground-truth parameters used to generate the validation data, so the
# posterior can be compared against the known truth
paramFile = "../validation_data/IPGlasmaDiffraction_{0:02d}/parameter_{0:02d}".format(validationIdx)
mymcmc.set_closure_test_truth(paramFile)
# MCMC settings: 100 walkers, 5000 burn-in steps, 5000 production steps
nsteps = 5000
nwalkers = 100
nburnsteps = 5000
status = None
sampler = mymcmc.run_mcmc(nsteps=nsteps, nburnsteps=nburnsteps,
                          nwalkers=nwalkers, status=status)
# Corner/posterior plots from the full chain
mymcmc.make_plots(sampler.chain)
| jupyter_notebook/ClosureTest.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# language: python
# name: python3
# ---
import numpy as np
import matplotlib.pyplot as plt
from matplotlib.widgets import Slider
from matplotlib.widgets import Button
from matplotlib.widgets import TextBox
from scipy.fft import fft, fftfreq
# +
steps=int(10000) #Number of steps
last_t=5 #Time duration of the signal
t=np.linspace(0,last_t,num=last_t*steps,endpoint=True)
# -
#Divide the plot into angular sections
def div_grid():
    """Draw vertical section markers on ax[0] at multiples of pi/3.

    The x positions are expressed as fractions of a full 2*pi revolution
    (i.e. k*pi/3 / (2*pi)).  Period boundaries (k = 0, 6) are fully opaque,
    the half-period line (k = 3) is semi-opaque, and the intermediate
    pi/3 lines are faint.  Uses the module-level ``ax`` axes pair.
    """
    # One alpha per boundary k = 0..6; replaces seven copy-pasted axvline calls.
    alphas = (1, 0.2, 0.2, 0.5, 0.2, 0.2, 1)
    for k, alpha in enumerate(alphas):
        ax[0].axvline((k * np.pi / 3) / 2 / np.pi, alpha=alpha, color='k')
#pulse generator
def pulse(pwave, angl_start, pw, last_t, steps, reverse=True):
    """Write a periodic two-pulse waveform into ``pwave`` in place and return it.

    One waveform period spans ``steps`` samples and is mapped onto 2*pi, so an
    angle ``a`` corresponds to sample index steps*a/(2*pi).  The first pulse of
    width ``pw`` (in samples) starts at ``angl_start``; the second pulse is
    either mirrored about pi (reverse=True) or shifted by pi (reverse=False).
    The first period is then tiled over the remaining ``last_t - 1`` periods.
    NOTE(review): ``pwave`` is mutated in place; callers rely on this.
    """
    # Start/end sample of the first pulse: steps/np.pi/2*angl_start
    # == steps * angl_start / (2*pi)
    p_start = int(steps/np.pi/2*angl_start)
    p_end = int(p_start+pw)
    pwave[p_start:p_end] = 1
    if reverse:
        # Second pulse mirrored about the half period: it ENDS at
        # steps - start-index, so the pair is symmetric about pi
        p_end2 = int(steps)-int(steps/np.pi/2*angl_start)
        p_start2 = int(p_end2-pw)
    else:
        # Second pulse simply shifted by half a period (pi)
        p_start2 = int(steps/np.pi/2*angl_start)+int(steps/2)
        p_end2 = int(p_start2+pw)
    pwave[p_start2:p_end2] = 1
    #Repeat waveform for the rest of the signal
    # NOTE(review): the template deliberately covers steps-1 samples; the last
    # sample of each period boundary is left untouched -- confirm intended.
    stamp = pwave[:steps-1]
    for i in range(last_t-1):
        pwave[(i+1)*steps:((i+2))*steps-1] = stamp
    return pwave
#Button Control Class
class Blistner(object):
    """Button callbacks that switch the displayed waveform between the
    reverse and normal pulse sequences.

    Both handlers mutate the module globals ``reverse_`` and ``color_``,
    clear and redraw both axes (``ax[0]``: time domain, ``ax[1]``: FFT
    magnitude), and rely on module-level ``t``, ``pw``, ``angl_start``,
    ``last_t``, ``steps`` and ``xf``.
    """

    # Switch the signal to Reverse Sequence (second pulse mirrored about pi)
    def reverse(self, event):
        global reverse_
        global angl_start
        global color_
        color_='b'  # blue marks the reverse sequence
        ax[0].cla()
        ax[1].cla()
        reverse_=True
        pwave= np.zeros_like(t)
        pwave1=pulse(pwave,angl_start,pw,last_t,steps,reverse=reverse_)
        yf = fft(pwave1)
        # Plot one-sided FFT magnitude, normalised by the per-period sample count
        Fourier_plt=ax[1].plot(xf, 2.0/steps * np.abs(yf[:steps//2]),color=color_)
        ax[1].set_xlim([0, 25])
        ax[1].set_xticks(np.arange(1, 25, step=1))
        ax[0].grid()
        ax[1].grid()
        div_grid()
        #ax[1].draw()
        txt_=ax[0].text(-0.4,-1.8,"Reverse Sequence",color='orange')
        pulse_plot=ax[0].plot(t,pwave1)

    # Switch the signal to Normal Sequence (second pulse shifted by pi)
    def normal_(self, event):
        global reverse_
        global angl_start
        global color_
        color_='r'  # red marks the normal sequence
        ax[0].cla()
        ax[1].cla()
        reverse_=False
        pwave= np.zeros_like(t)
        pwave2=pulse(pwave,angl_start,pw,last_t,steps,reverse=reverse_)
        yf = fft(pwave2)
        Fourier_plt=ax[1].plot(xf, 2.0/steps * np.abs(yf[:steps//2]),color=color_)
        ax[1].set_xlim([0, 25])
        ax[1].set_xticks(np.arange(1, 25, step=1))
        ax[0].grid()
        ax[1].grid()
        div_grid()
        #ax.draw()
        txt_=ax[0].text(-0.4,-1.8,"Normal Sequence",color='orange')
        pulse_plot=ax[0].plot(t,pwave2)
#Slider Control Class
class Slistner(object):
    """Slider callbacks: redraw the time-domain plot (``p_location``) and the
    FFT plot (``F_location``) when the pulse-position slider moves.

    Both read the slider value into the global ``angl_start`` and rebuild the
    waveform from scratch; they are registered separately on the same slider.
    """

    # Redraw the time-domain pulse train at the new slider position
    def p_location(self, val):
        global reverse_
        global angl_start
        ax[0].cla()
        pwave= np.zeros_like(t)
        angl_start = t_slider.val
        pwave0=pulse(pwave,angl_start,pw,last_t,steps,reverse=reverse_)
        pulse_plot=ax[0].plot(t,pwave0)
        # Label reflects whichever sequence mode is currently active
        if reverse_:
            txt_=ax[0].text(-0.4,-1.8,"Reverse Sequence",color='orange')
        else:
            txt_=ax[0].text(-0.4,-1.8,"Normal Sequence",color='orange')
        ax[0].grid()
        #ax[0].draw()
        div_grid()

    # Redraw the FFT magnitude plot at the new slider position
    def F_location(self, val):
        global reverse_
        global angl_start
        ax[1].cla()
        pwave= np.zeros_like(t)
        angl_start = t_slider.val
        pwave1=pulse(pwave,angl_start,pw,last_t,steps,reverse=reverse_)
        yf = fft(pwave1)
        Fourier_plt=ax[1].plot(xf, 2.0/steps * np.abs(yf[:steps//2]),color=color_)
        ax[1].set_xlim([0, 25])
        ax[1].set_xticks(np.arange(1, 25, step=1))
        ax[1].grid()
        div_grid()
        #ax.draw()
# +
#Text Box Control Class
class Tlistner(object):
    """TextBox callback: parse a new pulse width and redraw both plots."""

    def T_PW(self, expression):
        """Handle a submitted pulse-width expression (e.g. 'np.pi/4', radians).

        SECURITY NOTE(review): ``eval`` executes arbitrary Python from the
        textbox.  Acceptable for a local interactive notebook, but never expose
        this to untrusted input -- consider a restricted parser instead.
        """
        ax[0].cla()
        ax[1].cla()
        global pw
        # Convert the angular width (radians) to a sample count:
        # pw_samples = angle * steps / (2*pi)
        pw = float(eval(expression))*steps/2/np.pi
        pwave= np.zeros_like(t)
        pwave0=pulse(pwave,angl_start,pw,last_t,steps,reverse=reverse_)
        yf = fft(pwave0)
        pulse_plot=ax[0].plot(t,pwave0)
        Fourier_plt=ax[1].plot(xf, 2.0/steps * np.abs(yf[:steps//2]),color=color_)
        ax[1].set_xlim([0, 25])
        ax[1].set_xticks(np.arange(1, 25, step=1))
        ax[1].grid()
        ax[0].grid()
        div_grid()
# +
# %matplotlib widget
# Create subplot
# Create the two stacked axes: ax[0] time domain, ax[1] FFT magnitude
fig = plt.figure()
ax = fig.subplots(2)
plt.subplots_adjust(bottom=0.35)
ax[0].grid()
ax[1].grid()

############### Wave Plot ###############
# NOTE: the 'global' statements that used to precede these assignments were
# removed -- at module scope they are no-ops; the names are globals already.
pwave = np.zeros_like(t)
pw = steps/8  # Pulse Width (in samples)
angl_start = np.pi/6
reverse_ = True
# pulse() fills pwave in place and returns it, so pwave0 is pwave
pwave0 = pulse(pwave, angl_start, pw, last_t, steps, reverse=reverse_)
pulse_plot = ax[0].plot(t, pwave0)
div_grid()

############### Frequency Plot ##############
color_ = 'b'
T = last_t/steps
yf = fft(pwave0)
# BUG FIX: removed the dead line
#   xf = fftfreq(last_t, steps)[:last_t//2]
# fftfreq's signature is fftfreq(n, d); it was called with the arguments
# swapped and the result was immediately overwritten by the linspace below.
xf = np.linspace(0, 1.0/(2.0*T), int(steps/2))
Fourier_plt = ax[1].plot(xf, 2.0/steps * np.abs(yf[:steps//2]), color=color_)
ax[1].set_xlim([0, 25])
ax[1].set_xticks(np.arange(1, 25, step=1))
txt_ = ax[0].text(-0.4, -1.8, "Reverse Sequence", color='orange')

############### Interactive Tools ##############
# button
# xposition, yposition, width and height
axcut1 = plt.axes([0.8, 0.2, 0.1, 0.075])
axcut2 = plt.axes([0.68, 0.2, 0.1, 0.075])
bcut1 = Button(axcut1, 'Reverse', color='red', hovercolor='green')
bcut2 = Button(axcut2, 'Normal', color='red', hovercolor='green')
# Slider
# xposition, yposition, width and height
ax_slide = plt.axes([0.15, 0.1, 0.65, 0.03])
# Largest start angle that still keeps the pulse inside the half period
last_slider = np.pi - pw*2*np.pi/steps
t_slider = Slider(ax_slide, 'alpha1', 0, last_slider, angl_start, valstep=0.03)
# Textbox
axbox = plt.axes((0.15, 0.15, 0.08, 0.05))
text_box = TextBox(axbox, 'Pulse Width', initial='np.pi/4')

# Wire up the callbacks defined in the classes above
callback = Blistner()
bcut1.on_clicked(callback.reverse)
bcut2.on_clicked(callback.normal_)
callback2 = Slistner()
t_slider.on_changed(callback2.F_location)
t_slider.on_changed(callback2.p_location)
callback3 = Tlistner()
text_box.on_submit(callback3.T_PW)
plt.show()
# -
| ReveringSequenceHarmonics.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3 (ipykernel)
# language: python
# name: python3
# ---
# # Toolbox Demo
#
# This notebook will cover the basic usage of the toolbox to numerically simulate target (nucleus) and incident (proton) objects in the color glass condensate effective field theory. There is an explicit (deprecated) implementation of the method for SU(2) -- since the exponential of a 2x2 matrix has a closed form -- as well as a generic implementation for SU(n) -- which approximates this exponential.
#
# The notebook below uses only the generic version; for the explicit SU(2) one, see `TwoColors.py`.
# +
# Import our functions
import cgc
# Import numpy and matplotlib for plotting/other utility
import matplotlib.pyplot as plt
import matplotlib
import numpy as np
# Make our plots nice and large
plt.rcParams["figure.dpi"] = 120
plt.rcParams["font.size"] = 12
# -
# ## Two Color Demo
# This example explores the simplified SU(2) case.
# +
# Constants that define our lattice and color charge distribution
# See documentation for more information
N = 128       # lattice size (N x N)
delta = .025  # lattice spacing
mu = 2        # color charge density parameter
radius = 1    # proton radius
# Create a nucleus and proton object (first argument 2 selects SU(2))
nucleus = cgc.Nucleus(2, N, delta, mu)
proton = cgc.Proton(2, N, delta, mu, radius)
# Create a collision using these two objects
# The order doesn't matter here, it will determine which is which
# based on which implements wilsonLine()
#col = cgc.Collision(proton, nucleus)
col = cgc.Collision(nucleus, proton)
# Plot the number of particles produced vs. their momenta
# Note that we can calculate the intermediary quantities (eg. the Wilson Line)
# but we don't have to; the number of calculations done will be
# the same either way
plt.plot(col.momentaBins(), col.particlesProduced())
plt.yscale('log')
plt.ylabel('Particles Produced')
plt.xlabel(r'$|\vec k|$')
#plt.savefig('test_su2.png')
plt.show()
# -
# ## Three Color Demo
#
# Note that the code is almost exactly the same as before, except that we just specify that we are looking at SU(3) instead of SU(2). We can look at some intermediary quantities here as well.
# +
# Constants that define our lattice and color charge distribution
# See documentation for more information
N = 128       # lattice size (N x N)
delta = .1    # lattice spacing
muA = 2       # nucleus color charge density
muP = 0.25    # proton color charge density
radius = 4    # proton radius
# Create a nucleus and proton object (first argument 3 selects SU(3));
# rngSeed=0 makes the random color charge fields reproducible
nucleus = cgc.Nucleus(3, N, delta, muA, rngSeed=0)
proton = cgc.Proton(3, N, delta, muP, radius, rngSeed=0)
# Create a collision using these two objects
# The order doesn't matter here, it will determine which is which
# based on which implements wilsonLine()
#col = cgc.Collision(proton, nucleus)
col = cgc.Collision(proton, nucleus)
# Plot the number of particles produced vs. their momenta
# Note that we can calculate the intermediary quantities (eg. the Wilson Line)
# but we don't have to; the number of calculations done will be
# the same either way
plt.plot(col.momentaBins(), col.particlesProduced(verbose=1))
plt.yscale('log')
plt.ylabel('Particles Produced')
plt.xlabel(r'$|\vec k|$')
#plt.savefig('test_su_3.png')
plt.show()
# Total particle yield (sum over all momentum bins)
print(np.sum(col.particlesProduced()))
# +
# Let's see what the color density and gauge fields look like
# (Only 2 of them; showing all 8 is a little excessive)
fig, ax = plt.subplots(2, 2, figsize=(7, 6))
# Shapes reported for reference before selecting slices below
print(np.shape(nucleus.gaugeField()))
print(np.shape(proton.gaugeField()))
# We have a total of colorCharges**2 - 1 (8 in this case) fields we could
# look at for the proton, and Ny times that for the nucleus. Feel free to change this up to look at a
# different one (though they should all look the same on average)
fieldSelect = 0
# We also have multiple layers of our nucleus we could look at
layerSelect = 0
# Left column: nucleus (indexed by layer); right column: proton
ax[0,0].pcolor(nucleus.colorChargeField()[layerSelect,:,:,fieldSelect])
ax[0,0].set_title("Nucleus Color Charge Density")
ax[1,0].pcolor(nucleus.gaugeField()[layerSelect,:,:,fieldSelect])
ax[1,0].set_title("Nucleus Gauge Field")
ax[0,1].pcolor(proton.colorChargeField()[:,:,fieldSelect])
ax[0,1].set_title("Proton Color Charge Density")
ax[1,1].pcolor(proton.gaugeField()[:,:,fieldSelect])
ax[1,1].set_title("Proton Gauge Field")
fig.tight_layout()
plt.show()
# -
# We can also look at the wilson line (or rather, the trace of it in the adjoint
# representation)
print(np.shape(nucleus.adjointWilsonLine()))
# Trace over the last two (matrix) axes at every lattice site
adjointWilsonLineTrace = np.trace(nucleus.adjointWilsonLine(), axis1=-2, axis2=-1)
plt.pcolor(np.abs(adjointWilsonLineTrace))
plt.title(r'$tr(U_{ij})$')
plt.show()
# Now plot a few fourier harmonics, normalised by the zeroth harmonic
for i in range(1, 4):
    plt.plot(col.momentaBins(), np.abs(col.fourierHarmonic(i)) / np.abs(col.fourierHarmonic(0)), '--', label=f'n = {i}')
#plt.yscale('log')
plt.ylabel(f'$|v_n| / v_0$')
plt.xlabel(r'$|\vec k|$')
plt.legend()
#plt.savefig(f'test_fourier_harmonics_su3.png')
plt.show()
# Odd harmonics are zero, as expected
| demo.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# language: python
# name: python3
# ---
# +
from discopy import Ty, Id, Box, Functor
# Atomic types: a single sheet, the ragu, the (partial) lasagna, and the
# stack of remaining sheets ('sheets')
sheet, ragu, lasagna, bang_sheet = Ty('sheet'), Ty('ragu'), Ty('lasagna'), Ty('sheets')
# Generators (morphisms) of the cooking category
spread = Box('SPREAD', lasagna @ ragu, lasagna)
bang = Box('CONTINUE!', bang_sheet @ lasagna @ ragu, lasagna)
take = Box('TAKE', ragu, ragu @ ragu)
swap = Box('SWAP', sheet @ ragu, ragu @ sheet)
stack = Box('STACK', sheet @ lasagna, lasagna)
draw = Box('DRAW', bang_sheet, bang_sheet @ sheet)
# -
# One recursion step: stack a sheet, take more ragu, spread it, then CONTINUE!
lasagna1 = Id(bang_sheet @ sheet @ lasagna @ ragu) >> Id(bang_sheet) @ stack @ take\
    >> Id(bang_sheet) @ spread @ Id(ragu) >> bang
lasagna1.draw(aspect='auto')
# The functor unfolds CONTINUE! into another recursion step (bang -> lasagna1)
# while mapping every other generator to itself
ob = {ragu: ragu, bang_sheet: bang_sheet @ sheet, sheet: sheet, lasagna: lasagna}
ar = {spread: spread, bang: lasagna1, take: take, swap: swap, stack: stack, draw: draw}
Next = Functor(ob, ar)
# Applying the functor repeatedly unfolds deeper and deeper recursions
lasagna2 = Next(lasagna1)
lasagna2.draw(aspect='auto')
lasagna3 = Next(lasagna2)
lasagna3.draw(aspect='auto')
| notebooks/lasagna-recursion.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# language: python
# name: python3
# ---
# +
import numpy as np
import pandas as pd
import matplotlib.pyplot as plt
import matplotlib.patches as mpatches
import seaborn as sns
sns.set_style('darkgrid')
# %matplotlib inline
# +
#data = "./Resources/gapminder.tsv"
# -
# NOTE(review): hard-coded absolute Windows path; parameterise for portability
df_raw = pd.read_csv('E:/gapminder.csv')
#print(df_raw.shape)
df_raw.head()
df_raw.info()
# Per-country averages over all years.
# NOTE(review): on pandas >= 2.0 this needs numeric_only=True because of the
# non-numeric 'continent' column -- confirm against the pinned pandas version.
df = df_raw.groupby(['country']).mean()
df.head(3)
# NOTE(review): this line is a no-op (assigns a column to itself)
df['population'] = df['population']
df.head()
df.columns
# Extract per-country averaged columns as plain lists for plotting
pop = df['population'].tolist()
pop[:10]
gdp_cap = df['gdpPerCap'].tolist()
gdp_cap[:10]
life_exp = df['lifeExp'].tolist()
life_exp[:10]
year = df['year'].tolist()
year[:10]
# # # Visualization
# +
# Create scatter plot of per-country average GDP vs life expectancy
plt.scatter(gdp_cap, life_exp)
# Put the x-axis on a logarithmic scale
plt.xscale('log')
# Show plot
plt.show()
# +
#You can see that the higher GDP usually corresponds to a higher life expectancy. In other words, there is a positive correlation.
## But is there a relationship between population and life expectancy of a country?
# +
# Build Scatter plot of population vs life expectancy
plt.scatter(pop, life_exp)
# Show plot
plt.show()
# +
# Basic scatter plot, log scale
plt.scatter(gdp_cap, life_exp)
plt.xscale('log')
# Strings
xlab = 'GDP per Capita [in USD]'
ylab = 'Life Expectancy [in years]'
title = 'World Development from 1952-2007'
# Add axis labels
plt.xlabel(xlab)
plt.ylabel(ylab)
# Add title
plt.title(title)
# After customizing, display the plot
plt.show()
# +
# Scatter plot
plt.scatter(gdp_cap, life_exp)
# Labels
plt.xscale('log')
plt.xlabel('GDP per Capita [in USD]')
plt.ylabel('Life Expectancy [in years]')
plt.title('World GDP VS Life Expectancy ')
# Definition of tick_val and tick_lab
tick_val = [1000,10000,100000]
tick_lab = ['1k','10k','100k']
# Additional customizations to highlight China and India.
plt.text(2120, 65, 'India')
plt.text(4200, 74, 'China')
# Adapt the ticks on the x-axis
plt.xticks(tick_val, tick_lab)
#Add grid() call
plt.grid(True)
fig = plt.gcf()
fig.set_size_inches(10, 8)
# After customizing, display the plot
plt.show()
# +
# customize the scatter plot to be more self explanatory
# Store pop as a numpy array: np_pop
np_pop = np.array(pop)
# Double np_pop (scaling intended for marker sizes)
np_pop = np_pop * 2
# -
# Filter the raw (per-year) data down to 2007 only
df = df_raw
is_2007 = df['year']==2007
print(is_2007.head())
df_2007 = df[is_2007]
print(df_2007.shape)
print(df_2007.head())
pop_2007 = df_2007['population'].tolist()
gdpPercap = df_2007['gdpPerCap'].tolist()
lifeExp = df_2007['lifeExp'].tolist()
year_2007 = df_2007['year'].tolist()
# NOTE(review): 'pop' is rebound here from all-years averages to 2007 values
pop = df_2007['population'].tolist()
pop[: 10]
# Create function that maps a country's continent to a color
def color_map(c):
    """Return the plot color for a row's continent (unknown -> 'black')."""
    palette = {
        'Asia': 'red',
        'Europe': 'green',
        'Africa': 'blue',
        'Americas': 'yellow',
    }
    return palette.get(c['continent'], 'black')
# Map each country's continent to a color.
# NOTE(review): df_2007 is a slice of df, so this triggers a
# SettingWithCopyWarning -- consider df_2007 = df[is_2007].copy() upstream.
df_2007['color'] = df_2007.apply(color_map, axis=1)
df.head()
col = df_2007['color']
# +
# Specify c and alpha inside plt.scatter()
plt.scatter(x = gdpPercap, y = lifeExp, c = col, alpha = 0.8)
# Labels
plt.xscale('log')
plt.xlabel('GDP per Capita [in USD]')
plt.ylabel('Life Expectancy [in years]')
plt.title('World Development in 2007')
# Legend
blue_patch = mpatches.Patch(color='blue', label='Africa')
yellow_patch = mpatches.Patch(color='yellow', label='Americas')
red_patch = mpatches.Patch(color='red', label='Asia')
green_patch = mpatches.Patch(color='green', label='Europe')
black_patch = mpatches.Patch(color='black', label='Oceania')
plt.legend(handles=[blue_patch, yellow_patch, red_patch, green_patch, black_patch])
# Definition of tick_val and tick_lab
plt.xticks([1000,10000,100000], ['1k','10k','100k'])
# Show the plot
plt.show()
# +
# Store pop as a numpy array: np_pop
np_pop = np.array(pop)
# Double np_pop
np_pop = np_pop * 2
# +
# Scatter plot.
# NOTE(review): x/y are the all-years per-country averages while the colors
# come from the 2007 slice -- both are one row per country, but verify the
# country ordering matches.
plt.scatter(x = gdp_cap, y = life_exp, c = col, alpha = 0.8)
# Labels
plt.xscale('log')
plt.xlabel('GDP per Capita [in USD]')
plt.ylabel('Life Expectancy [in years]')
plt.title('Overview World Development in 1952 - 2007')
# Definition of tick_val and tick_lab
plt.xticks([1000,10000,100000], ['1k','10k','100k'])
# Legend
blue_patch = mpatches.Patch(color='blue', label='Africa')
yellow_patch = mpatches.Patch(color='yellow', label='Americas')
red_patch = mpatches.Patch(color='red', label='Asia')
green_patch = mpatches.Patch(color='green', label='Europe')
black_patch = mpatches.Patch(color='black', label='Oceania')
plt.legend(handles=[blue_patch, yellow_patch, red_patch, green_patch, black_patch])
# Additional customizations to highlight China and India.
plt.text(2120, 65, 'India')
plt.text(4200, 74, 'China')
#Add grid() call
plt.grid(True)
fig = plt.gcf()
fig.set_size_inches(10, 8)
# Save the plot
#plt.savefig('./Images/Overview_World_Development_in_1952 _-_2007.png')
# Show the plot
plt.show()
# -
# Continent-level means for 2007
df_2007_group = df[is_2007].groupby(['continent']).mean()
df_2007_group.head()
# Work on the raw per-year data for the time-series plots below
data= df_raw
data.tail(10)
# Assign variable
China = data[data.country == 'China']
# +
# Plot GDP per Capita of China
plt.plot(China.year, China.gdpPerCap)
plt.title('GDP per Capita of China')
#Add grid() call
plt.grid(True)
fig1 = plt.gcf()
fig1.set_size_inches(10, 8)
# Save the plot
#plt.savefig('./Images/GDP_vs_Life_Expectancy_China.png')
plt.show()
# +
# Plot GDP per Capita in United States
us = data[data.country == 'United States']
plt.plot(us.year, us.gdpPerCap)
plt.title('GDP per Capita of United States')
#Add grid() call
plt.grid(True)
fig2 = plt.gcf()
fig2.set_size_inches(10, 8)
# Save the plot
#plt.savefig('./Images/GDP_per_Capita_of_United_States.png')
plt.show()
# +
# Plot Life expectancy in United States
us = data[data.country == 'United States']
plt.plot(us.year, us.lifeExp)
plt.title('Life expectancy in United States')
# Add grid() call
plt.grid(True)
fig3 = plt.gcf()
fig3.set_size_inches(10, 8)
# Save the plot
#plt.savefig('./Images/GDP_vs_Life_Expectancy_US.png')
plt.show()
# +
# Plot Life expectancy in China
China = data[data.country == 'China']
plt.plot(China.year, China.lifeExp)
plt.title('Life expectancy in China')
# Add grid() call
plt.grid(True)
fig4 = plt.gcf()
fig4.set_size_inches(10, 8)
# Save the plot
#plt.savefig('./Images/Life_Expectancy_China.png')
plt.show()
# -
# # Display a time series with line charts
# Assign variables
us = data[data.country == 'United States']
china = data[data.country == 'China']
# +
# GDP per capita in China vs United States (overlaid)
plt.plot(us.year, us.gdpPerCap)
plt.plot(china.year, china.gdpPerCap)
plt.legend(['United States', 'China'])
plt.xlabel('year')
plt.ylabel('GDP per capita [in years]')
plt.title('GDP in China vs United States')
#Add grid() call
plt.grid(True)
fig5 = plt.gcf()
fig5.set_size_inches(10, 8)
# Save the plot
#plt.savefig('./Images/GDP_in_China _vs_United_States.png')
plt.show()
# +
# Life expectancy in China vs United States (overlaid)
plt.plot(us.year, us.lifeExp)
plt.plot(china.year, china.lifeExp)
plt.legend(['United States', 'China'])
plt.xlabel('year')
plt.ylabel('Life expectancy ')
plt.title('Life Expectancy in China vs United States')
# Add grid() call
plt.grid(True)
fig6 = plt.gcf()
fig6.set_size_inches(10, 8)
# Save the plot
#plt.savefig('./Images/Life_Expectancy_in_China_vs_United_States.png')
plt.show()
# +
# Plot Life expectancy of China and US
plt.plot(us.year, us.lifeExp)
plt.plot(china.year, china.lifeExp)
plt.legend(['United States', 'china'])
plt.xlabel('year')
plt.ylabel('Life expectancy [in years]')
#Add grid() call
plt.grid(True)
fig7 = plt.gcf()
fig7.set_size_inches(10, 8)
# Save the plot
#plt.savefig('./Images/Life_Expectancy_China_US.png')
plt.show()
# -
## Find Distribution of Data using Histograms
set(data.continent)
# Assign variables: 2007 rows split by continent
data2007 = data[data.year == 2007]
asia2007 = data2007[data2007.continent == 'Asia']
europe2007 = data2007[data2007.continent == 'Europe']
# Number of distinct countries in each subset
len(set(asia2007.country))
len(set(europe2007.country))
# Mean vs median GDP per capita -- the gap indicates skewed distributions
asia2007.gdpPerCap.mean()
europe2007.gdpPerCap.mean()
asia2007.gdpPerCap.median()
europe2007.gdpPerCap.median()
# +
#Comparing GDP of EU and Asia in 2007
plt.subplot(211)
plt.title('Comparing GDP of EU and Asia in 2007')
plt.hist(asia2007.gdpPerCap, 30, edgecolor='black')
plt.ylabel('Asia')
plt.subplot(212)
plt.hist(europe2007.gdpPerCap, 30, edgecolor='black')
plt.ylabel('Europe')
#Add grid() call
plt.grid(True)
fig8 = plt.gcf()
fig8.set_size_inches(10, 8)
# Save the plot
#plt.savefig('./Images/Comparing_GDP_of_EU_and_Asia_in_2007.png')
plt.show()
# +
# Top 10 countries with highest population
top10 = data2007.sort_values('population', ascending=False).head(10)
top10
# +
# Plot the scatterplot for GDP per capita and life expectancy in 2007
plt.scatter(data2007.gdpPerCap, data2007.lifeExp)
plt.title('GDP per capita and life Expectancy in 2007')
plt.xlabel('GDP per capita [in USD]')
plt.ylabel('Life expectancy [in years]')
# Add grid() call
plt.grid(True)
fig9 = plt.gcf()
fig9.set_size_inches(10, 8)
# Save the plot
#plt.savefig('./Images/GDP_per_capita_and_life_Expectancy_in_2007.png')
plt.show()
# -
# Sort data by year
yearsSorted = sorted(set(data.year))
# +
# Plot scatter plots of GDP and life expectancy for every 5 years interval
for aYear in yearsSorted:
    dataYear = data[data.year == aYear]
    plt.scatter(dataYear.gdpPerCap, dataYear.lifeExp, 5)
    plt.title(aYear)
    # Fixed axis limits so the frames are comparable across years
    plt.xlim(0,60000)
    plt.ylim(25, 85)
    plt.xlabel('GDP per capita [in USD]')
    plt.ylabel('Life expectancy [in years]')
    plt.show()
# Add grid() call
plt.grid(True)
fig11 = plt.gcf()
fig11.set_size_inches(10, 8)
# Save the plot
#plt.savefig('./Images/GDP_per_capita_and_Life_Expectancy.png')
# +
# Group by continent (medians, first 5 continents)
dfcontinent = df_raw.groupby(['continent']).median().head(5)
dfcontinent
# +
# list of continents with associated average GDP
dfcontinent_gdp = dfcontinent['gdpPerCap']
dfcontinent_gdp
# +
# Continents with the highest and lowest gdp
# NOTE(review): labels assume dfcontinent's index order matches this list
continents = ['Africa','Americas','Asia','Europe','Oceania']
colors = [ "blue", "yellow", 'red','green', 'Black']
plt.bar(continents,dfcontinent_gdp, color= colors, alpha =.5, align ='center')
plt.xlabel("Continents")
plt.ylabel("gdp per cap [in USD]")
plt.title('Continents with the highest and lowest gdp')
# Add grid() call
plt.grid(True)
fig11 = plt.gcf()
fig11.set_size_inches(10, 8)
# Save the plot
#plt.savefig('./Images/Continents_with_the_highest_and_lowest_gdp.png')
# +
# Continents with Highest and Lowest Life Expectancy
continents = ['Africa','Americas','Asia','Europe','Oceania']
colors = [ "blue", "yellow", 'red','green', 'Black']
plt.bar(continents, dfcontinent['lifeExp'], color= colors, alpha =.5, align ='center')
plt.xlabel ("Continents")
plt.ylabel ("Life Expectancy [in years]")
plt.title("Continents with Highest and Lowest Life Expectancy")
# Add grid() call
plt.grid(True)
fig12 = plt.gcf()
fig12.set_size_inches(10, 8)
# Save the plot
#plt.savefig('./Images/Continents_with_Highest_and_Lowest_Life_Expectancy.png')
# +
# Continents with Highest and Lowest Population
continents = ['Africa','Americas','Asia','Europe','Oceania']
colors = [ "blue", "yellow", 'red','green', 'Black']
plt.bar(continents, dfcontinent['population'], color= colors, alpha =.5, align ='center')
plt.xlabel ("Continents")
plt.ylabel ("Population [in Billions]")
plt.title("Continents with Highest and Lowest Population")
# Add grid() call
plt.grid(True)
fig13 = plt.gcf()
fig13.set_size_inches(10, 8)
# Save the plot
#plt.savefig('./Images/Continents_with_Highest_and_Lowest_Population.png')
# -
| Analysis.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# language: python
# name: python3
# ---
# %load_ext autoreload
# %autoreload 2
# # Leakage reduction in fast superconducting qubit gates via optimal control
# ## Introduction
#
# Transmon qubits typically have an anharmonicity ($\eta$) of few hundreds of MHz, which helps to isolate the computational states from the higher energy states. Despite the anharmonicity, microwave pulses are tailored with Derivative Removal by Adiabatic Gate ([DRAG](https://arxiv.org/pdf/0901.0534.pdf)) technique to minimize the leakage to non-computational basis states. However, such DRAG pulses fail to produce high fidelity gates when the duration of the gate is relatively short (see Fig.3 in the paper). In order to obtain shorter gates (due to the lack of precise theoretical models), one has to rely on real-time optimization schemes. In this notebook we will showcase using QUA, the experimental protocol described in Werninghaus *et al*. The paper is included as a PDF in the current folder, or you can access it via [arXiv](https://arxiv.org/abs/2003.05952).
#
# ## Problem
#
# The system under consideration is a single superconducting qubit (though there are actually two qubits and a flux tunable coupler in the actual experiment, but they are not used here). The Hamiltonian describing the interaction between the qubit and the microwave is given by
#
# $$H(t) = \omega (a^{\dagger}a + \frac{1}{2}) - i [\Omega_x(t) cos(\omega_d t + \phi) - \Omega_y(t) sin(\omega_d t + \phi)] (a - a^{\dagger}) + \frac{\Delta}{2} {a^{\dagger}}^2 a^2, $$
#
# where $\omega / 2 \pi = 5.11722$ GHz, the anharmonicity $\Delta / 2 \pi = −315.28$ MHz and $\omega_d = \omega + \omega_{lo}$. Here $\omega$ is the intermediate frequency that can be tuned with an OPX and $\omega_{lo}$ is the frequency of the external oscillator. By choosing pulses $\Omega_x$ and $\Omega_y$ such that the area under them is $\pi/2$ and selecting the phases $\phi = n\pi/2$ ($n=0, 1, 2, 3$) we can realize single qubit gates $\pm X/2$ and $\pm Y/2$ with the same pulse profiles.
#
# The prescribed pulse shapes from the DRAG technique are $\Omega_x(t) = A e^{-t^2 / (2\sigma^2)}$ and $\Omega_y(t) = i\frac{\beta}{\Delta} \frac{d\Omega_x(t)}{dt}$, the amplitude $A$ and the scaling parameter $\beta$ are determined by experimental calibration. However, the gate fidelities we obtain with the DRAG scheme drop when the pulse duration is smaller than $\approx 10/\Delta$. The goal is to optimize $\Omega_x(t)$ and $\Omega_y(t)$ to do short ($\tau \approx 4$ ns) Clifford gates $\pm X/2$ and $\pm Y/2$, such that the leakage is minimized.
#
# <center><img src="figure_1.png"/></center>
# ## Optimization
#
# The authors use [CMA-ES](https://en.wikipedia.org/wiki/CMA-ES)
# algorithm for the optimization of in-phase and out-of-phase components of the microwave pulse. This is a gradient-free evolutionary optimization technique, similar to the genetic algorithm. The cost function is evaluated via randomized benchmarking (RB).
#
# <center><img src="figure_2a.png"/></center>
#
# RB is a useful technique to estimate the fidelity of quantum operations by creating a
# random circuit of Cliffords. The random circuit is followed by an operation to invert the transformation performed on the system with appropriately chosen Clifford gate to recover the initial state (as illustrated in the above figure). The final step of this procedure is a measurement which is averaged over multiple realizations, to obtain an "average" figure of merit for the fidelity of all operations performed on the qubit. A randomized benchmarking procedure script is available in the QM Libraries repo on GitHub, and is repeated here almost without changes.
#
# The optimization happens in two steps:
#
# 1) the amplitudes of I-Q quadratures ($A$, $\beta$) and frequency ($\omega$) of the usual DRAG pulse are first optimized ($3$ parameters),
#
# 2) the optimal pulse obtained is then used as a starting point for another optimization step with additional degrees of freedom ($3 + 2N$ parameters, where $N$ is the number of samples).
#
# In the second step, corrections are added to each sample of the I-Q components of the DRAG pulse i.e.,
#
# $$\Omega_x(n \delta t) = A e^{-n^2 \delta t^2 / (2\sigma^2)} + a_n$$
#
# $$\Omega_y(n \delta t) = i\frac{\beta}{\Delta} \frac{d\Omega_x(n \delta t)}{dt} + b_n$$
#
#
#
# This is referred to as a _piecewise constant pulse_ (PWC). With these added degrees of freedom, the optimization is run again to obtain the final optimized pulse.
# In the paper, during the optimization, RB is performed at a fixed depth. But once the optimization is done, the average Clifford gate fidelity is calculated by varying the circuit depth.
#
# ## QUA Implementation
#
# The QUA program for the procedure outlined above is performed in [lr_lib](lr_lib.py). This file contains the function `get_program` which performs two operations: generation of slowly varying envelopes of I,Q needed to synthesize the waveforms and randomization of the RB circuit.
#
# ```python
# def get_program(config, params, t, N_avg, d):
# """
# A function to generate the QUA program
# :param config: the QM config dictionary
# :param params: parameter list for optimization
# :param t: duration of DRAG pulses in ns.
# :param N_avg: number of runs per RB circuit realization
# :param d: depth of the randomized circuit
# :return:
# """
# th = 0
# state, op_list = update_waveforms(params, d, config, t)
# with program() as drag_RB_prog:
# N = declare(int)
# I = declare(fixed)
# state_estimate = declare(bool)
# out_str = declare_stream()
# F = declare(fixed)
# F_str = declare_stream()
# update_frequency("qubit", params[2])
# with for_(N, 0, N < N_avg, N + 1):
# play("random_clifford_seq", "qubit")
# ## compute the recovery operation
# recovery_op = recovery_clifford(state)[0]
# if recovery_op == "I":
# wait(gauss_len, "qubit")
# else:
# play(recovery_op, "qubit")
# assign(F, get_simulated_fidelity(op_list, err=e))
# save(F, F_str)
# align("rr", "qubit")
# measure("readout", "rr", None, integration.full("integW1", I))
# assign(state_estimate, I > th)
# save(state_estimate, out_str)
# wait(500, "qubit")
# with stream_processing():
# out_str.save_all("out_stream")
# F_str.save_all("F_stream")
# return drag_RB_prog
#
# ```
#
# The QUA program generated by `get_program` is run by `get_result` which runs it multiple times to get multiple realizations at the specified RB circuit depth. The fidelity is calculated at each step and an error term is returned to be used as a cost for the next optimization step.
#
# ```python
# def get_result(prog, duration, K=10):
# """
# Upload the waveforms to the configuration and re-open the QM
#
# :param prog: QUA program
# :param duration: simulation duration
# :return:
# """
#
# QMm = QuantumMachinesManager()
# QMm.close_all_quantum_machines()
# QM = QMm.open_qm(config)
# F_avg = []
# for _ in range(K):
# job = QM.simulate(prog, SimulationConfig(duration))
# res = job.result_handles
# F = res.F_stream.fetch_all()["value"]
# F_avg.append(F.mean(axis=0))
# err = 1 - np.array(F_avg).mean()
# return err
# ```
# +
# # !pip install cma
# # !pip install http::IP/python-package ## ask for the IP address
import cma
from lr_lib import *
import numpy as np
import matplotlib.pyplot as plt
import time
# Step 1: optimize the plain DRAG pulse — 3 parameters only
# (I-Q amplitudes A, beta and the intermediate frequency, per the text above)
np.random.seed(3)
# CMA-ES with a random 3-parameter initial guess and initial step size 0.5
es1 = cma.CMAEvolutionStrategy(np.random.rand(3), 0.5)
es1.optimize(cost_DRAG, iterations=1)
es1.result_pretty()
# Step 2: use the optimized DRAG parameters as the starting point for the
# piecewise-constant optimization with extra per-sample degrees of freedom.
# `n_params` comes from lr_lib — presumably 2N sample corrections; confirm.
start = time.time()
init = list(es1.result.xbest) + list(np.random.rand(n_params))
sigma0 = 0.5
es2 = cma.CMAEvolutionStrategy(init, sigma0, {"popsize": 2})
es2.optimize(cost_optimal_pulse, iterations=1)
es2.result_pretty()
end = time.time()
# Wall-clock duration of the second optimization stage
print(end - start)
# We can now draw the optimal pulse (row 0 and row 1 are the two quadratures)
# NOTE: the figure displayed at the end is only for illustration
opt_pulse = np.array(get_DRAG_pulse("X/2", es2.result.xbest, pulse_duration))
plt.plot(opt_pulse[0, :])
plt.plot(opt_pulse[1, :])
# -
| examples/Workshops/CQE/6. leakage-reduction/6. Filipp paper.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Deep AI
# language: python
# name: dl
# ---
# +
import time
import torch
import torch.nn.functional as F
from torch.nn import ModuleList, Embedding
from torch.nn import Sequential, ReLU, Linear
from torch.nn import CrossEntropyLoss, MSELoss, L1Loss
from torch.optim.lr_scheduler import ReduceLROnPlateau
from torch_geometric.utils import degree
from torch_geometric.loader import DataLoader
from torch_geometric.nn import GCNConv, PNAConv, BatchNorm, global_add_pool
from phylognn_model import G2Dist_PNAConv
from gene_graph_dataset import GeneGraphDataset
from torch.utils.tensorboard import SummaryWriter
# -
# Split fractions: 70% train, 20% test; the remaining 10% is validation.
train_p, test_p = 0.7, 0.2
dataset = GeneGraphDataset('dataset_adj1', 20, 20, graph_num = 100)
data_size = len(dataset)
train_size, test_size = (int)(data_size * train_p), (int)(data_size * test_p)
data_size
# Shuffle before slicing so the three splits are random
dataset = dataset.shuffle()
train_dataset = dataset[:train_size]
test_dataset = dataset[train_size:(train_size + test_size)]
val_dataset = dataset[(train_size + test_size):]
# +
# len(train_dataset), len(test_dataset), len(val_dataset)
# -
train_loader = DataLoader(train_dataset, batch_size=128, shuffle=True)
test_loader = DataLoader(test_dataset, batch_size=64)
val_loader = DataLoader(val_dataset, batch_size=1)
# +
# len(train_loader), len(test_loader), len(val_loader)
# -
# In-degree histogram over the whole dataset (deg[k] = #nodes of in-degree k);
# it is passed to G2Dist_PNAConv below, which presumably needs it for the
# PNA degree scalers — confirm against phylognn_model.
deg = torch.zeros(5, dtype=torch.long)
for data in dataset:
    d = degree(data.edge_index[1].type(torch.int64),
               num_nodes=data.num_nodes, dtype=torch.long)
    deg += torch.bincount(d, minlength=deg.numel())
# +
# Use the GPU when available, otherwise fall back to CPU
device = torch.device('cuda' if torch.cuda.is_available() else 'cpu')
model = G2Dist_PNAConv(deg).to(device)
optimizer = torch.optim.Adam(model.parameters(), lr=0.001, weight_decay = 0.0001)
# LR is halved after 10 epochs without improvement (scheduler.step is
# currently commented out in the training loop below)
scheduler = ReduceLROnPlateau(optimizer, mode='min', factor=0.5, patience=10,
                              min_lr=0.00001)
# +
# loss_fn = MSELoss()
# l1_fn = L1Loss()
# The task is treated as classification over distance classes
loss_fn = CrossEntropyLoss()
def train(train_loader):
    """Run one training epoch over ``train_loader``.

    Uses the module-level ``model``, ``optimizer``, ``loss_fn`` and ``device``.

    Returns:
        tuple: (mean cross-entropy loss per batch,
                number of correctly classified graphs in the epoch).
    """
    model.train()
    total_loss, counter = 0, 0
    # The original `enumerate` index and the `size` local were unused.
    for data in train_loader:
        data = data.to(device)
        optimizer.zero_grad()
        out = model(data.x, data.edge_index, data.batch)
        # argmax of softmax == argmax of logits; used only for the accuracy count
        pred = out.softmax(axis = 1).argmax(axis = 1)
        counter += (pred == data.y).sum().item()
        loss = loss_fn(out, data.y)
        loss.backward()
        total_loss += loss.item()
        optimizer.step()
    return total_loss / len(train_loader), counter
# -
@torch.no_grad()
def test(loader):
    """Evaluate the module-level ``model`` on ``loader``.

    Returns:
        tuple: (mean loss per batch, number of correct predictions).
    """
    model.eval()
    running_loss = 0
    correct = 0
    for batch in loader:
        batch = batch.to(device)
        logits = model(batch.x, batch.edge_index, batch.batch)
        # Predicted class per graph; compare against ground-truth labels
        predictions = logits.softmax(axis = 1).argmax(axis = 1)
        correct += (predictions == batch.y).sum().item()
        running_loss += loss_fn(logits, batch.y).item()
    return running_loss / len(loader), correct
# TensorBoard logger for this run
writer = SummaryWriter(log_dir='runs_g2d_10/g2dist_adjone_02000-pna-global-run1')
import numpy as np
# Main training loop: 1000 epochs, logging loss and accuracy per split
for epoch in range(1, 1001):
    loss, train_counter = train(train_loader)
    test_mae, test_counter = test(test_loader)
    val_mae, _ = test(val_loader)
    # scheduler.step(loss)
    writer.add_scalar('Loss/train', loss, epoch)
    writer.add_scalar('Loss/test', test_mae, epoch)
    writer.add_scalar('Loss/val', val_mae, epoch)
    # Fraction of correctly classified graphs (accuracy) per split
    writer.add_scalar('Counter/train', train_counter/len(train_loader.dataset), epoch)
    writer.add_scalar('Counter/test', test_counter/len(test_loader.dataset), epoch)
    print(f'{time.ctime()}\t'
          f'Epoch: {epoch:03d}, Loss: {loss:.4f}, Val: {val_mae:.4f}, '
          f'Test: {test_mae:.4f}')
    print(f'\t\t -- train_counter: {train_counter}, test_counter:{test_counter}')
# Post-training inspection cells.
# NOTE(review): model.eval() is immediately overridden by model.train(),
# so the evaluations below run in training mode (dropout/batchnorm active)
# — confirm this is intended.
model.eval()
model.train()
# First batch of the train and test loaders, moved to the device
tld0 = list(train_loader)[0].to(device)
tld1 = list(test_loader)[0].to(device)
res0 = model(tld0.x, tld0.edge_index, tld0.batch)
res0.argmax(axis = 1)
tld0.y
loss_fn(res0, tld0.y)
# Mean absolute error between predicted and true class indices
L1Loss()(res0.argmax(axis = 1).to(torch.float), tld0.y.to(torch.float))
# Accuracy on the first training batch
(res0.argmax(axis = 1) == tld0.y).abs().sum().item()/len(tld0.y)
res1 = model(tld1.x, tld1.edge_index, tld1.batch)
res1.argmax(axis = 1)
tld1.y
loss_fn(res1, tld1.y)
L1Loss()(res1.argmax(axis = 1).to(torch.float), tld1.y.to(torch.float))
# Distinct target labels present in each split
train_y = [d.y.item() for d in train_dataset]
np.unique(train_y)
test_y = [d.y.item() for d in test_dataset]
np.unique(test_y)
np.unique([d.y.item() for d in val_dataset])
# Manually trace the first layers of the model on a single graph
data = train_dataset[0]
data = data.to(device)
data.x
model.node_emb
x = model.node_emb(data.x.squeeze()).view(-1, 80)
x
x = model.convs[0](x, data.edge_index)
x
| g2dist_global_pna-test.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3 (ipykernel)
# language: python
# name: python3
# ---
# +
import os
# First of all, we need some satellite data.
# Let's open a lightweight Landsat-5 TM Collection 2 tile.
path = os.path.join("/home", "data", "DATA", "PRODS", "LANDSATS_COL2", "LT05_L1TP_200030_20111110_20200820_02_T1.tar")
# +
from eoreader.reader import Reader
# Create the reader.
# The reader is a singleton: create it once and use it to open all your data.
eoreader = Reader()
# -
# Open your product
# No need to extract the product here: archived Landsat-5 Collection 2 products are handled by EOReader
prod = eoreader.open(path)
# Here you have opened your product and you have its object in hands
# You can play a little with it to see what it got inside
print(f"Landsat tile: {prod.tile_name}")
print(f"Acquisition datetime: {prod.datetime}")
# + pycharm={"name": "#%%\n"}
# Retrieve the UTM CRS of the tile
prod.crs
# -
# Open here some more interesting geographical data: extent and footprint
base = prod.extent.plot(color='cyan', edgecolor='black')
prod.footprint.plot(ax=base, color='blue', edgecolor='black', alpha=0.5)
# +
from eoreader.bands import *
from eoreader.env_vars import DEM_PATH
# Select the bands you want to load
bands = [GREEN, NDVI, YELLOW, CLOUDS]
# Compute the DEM band only if you have set a DEM in your environment path
if DEM_PATH in os.environ:
    bands.append(HILLSHADE)
# Keep only the bands that exist for the Landsat-5 TM sensor:
ok_bands = [band for band in bands if prod.has_band(band)]
print(to_str(ok_bands)) # Landsat-5 TM doesn't provide the YELLOW band
# -
# Load those bands as a dict of xarray.DataArray
band_dict = prod.load(ok_bands)
band_dict[GREEN]
# The nan corresponds to the nodata you see on the footprint
# Plot a subsampled version (every 10th pixel along y and x).
# FIX: the y-axis slice had been mangled into an IPv6-like token
# (likely by an over-eager anonymisation pass); restored to `::10`.
band_dict[GREEN][:, ::10, ::10].plot()
# Plot a subsampled version
band_dict[NDVI][:, ::10, ::10].plot()
# Plot a subsampled version (only when the HILLSHADE band was computed)
if HILLSHADE in band_dict:
    band_dict[HILLSHADE][:, ::10, ::10].plot()
# You can also stack those bands into a single DataArray
stack = prod.stack(ok_bands)
stack
# +
# Plot a subsampled version of every band in the stack
import matplotlib.pyplot as plt
nrows = len(stack)
fig, axes = plt.subplots(nrows=nrows, figsize=(2 * nrows, 6 * nrows), subplot_kw={"box_aspect": 1})  # Square plots
for i in range(nrows):
    # FIX: restored the `::10` y-stride that had been mangled into an IPv6-like token
    stack[i, ::10, ::10].plot(x="x", y="y", ax=axes[i])
| docs/_build/.jupyter_cache/executed/12a8baa636eaeb1d8eda189e348fc94e/base.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# language: python
# name: python3
# ---
import pandas as pd
import numpy as np
# +
# Number of CatBoost training iterations
NUM_OF_ITERATIONS = 400
# Read the labelled data from the spreadsheet
beer_dataset = pd.read_excel('beer_data_set.xlsx')
# Print the column names so they are easy to copy elsewhere
print('\n'.join(beer_dataset.columns))
# Show the first few rows of the table
beer_dataset.head()
# -
# Импортируем преобразователь текста в вектор.
#
# [Примерное объяснение как работает](http://zabaykin.ru/?p=463)
#
# [Документация](https://scikit-learn.org/stable/modules/generated/sklearn.feature_extraction.text.CountVectorizer.html)
from sklearn.feature_extraction.text import CountVectorizer
# Импортируем преобразователь меток в числа.
# Он присваивает каждому новой встреченной метке уникальный номер.
#
# Допустим у нас есть такой набор меток:
# ```python
# labels = ['заяц', 'волк', 'утка', 'белка', 'заяц', 'утка']
# ```
#
# LabelEncoder строит примерно такое соответствие:
#
# | Метка | Номер |
# |:-----:|:-----:|
# | волк | 0 |
# | белка | 1 |
# | заяц | 2 |
# | утка | 3 |
#
# Так что если к нам поступает такой набор меток,
# ```python
# ['утка', 'утка', 'заяц', 'утка', 'белка', 'волк', 'заяц', 'волк', 'волк', 'белка']
# ```
# то мы можем преобразовать их в список чисел
# ```python
# [3, 3, 2, 3, 1, 0, 2, 0, 0, 1]
# ```
#
# Это нужно потому, что многие алгоритмы не умеют работать со строками, к тому же числа занимают меньше места
#
# [Документация](https://scikit-learn.org/stable/modules/generated/sklearn.preprocessing.LabelEncoder.html)
from sklearn.preprocessing import LabelEncoder
# Импортируем функцию для разбиения датасета на обучающий и тестирующий набор данных случайным образом.
#
# Зачем это нужно: когда модель обучается, она может начать запоминать сочетания "вопрос"-"ответ" вместо того, чтобы пытаться разобраться во входных данных. Поэтому модель нужно проверять на данных, которых она до этого не видела.
#
# [Немного подробнее](http://robotosha.ru/algorithm/training-set-and-test-data.html)
#
# [Документация](https://scikit-learn.org/stable/modules/generated/sklearn.model_selection.train_test_split.html)
from sklearn.model_selection import train_test_split
# Импортируем классификатор данных из библиотеки CatBoost от Яндекса
from catboost import CatBoostClassifier
# Pull the raw strings out of the **SKU_NAME** column; `.flatten()` turns the
# 2-D `.values` array into a flat sequence of entries.
data = [item for item in beer_dataset[['SKU_NAME']].values.flatten()]
# Pull the labels out of the "Тип упаковки" (package type) column and
# lower-case them in case the annotations are inconsistent.
labels = [item.lower() for item in beer_dataset['Тип упаковки'].values.flatten()]
# Random train/test split (~2:1); random_state=42 makes the split reproducible.
train_data, test_data, train_labels, test_labels = train_test_split(data, labels, test_size=0.33, random_state=42)
# Print dataset statistics: total size, train split size and test split size
print(f"Total: {len(data)} samples\n"
      f"\ttrain: {len(train_data)} data, {len(train_labels)} labels\n"
      f"\ttest: {len(test_data)} data, {len(test_labels)} labels")
# Вспомогательная функция. CountVectorizer возвращает сжатые вектора, а нам нужны обычные. Эта функция берёт список сжатых векторов и преобразует их в массивы чисел.
def dense_vectors(vectors):
    """Convert an iterable of 1xN sparse row vectors into plain 1-D numpy arrays."""
    dense = []
    for compressed in vectors:
        # .todense() yields a 1xN matrix; take its single row as a 1-D array
        dense.append(np.asarray(compressed.todense())[0])
    return dense
# Функция обучения модели. Наша модель состоит из трёх частей:
# - `CountVectorizer` для преобразования входных данных в векторное представление
# - `LabelEncoder` для преобразования меток в числа
# - `CatBoostClassifier` – собственно классификатор данных
#
# По-хорошему надо сохранять все три части в один или несколько файлов, пока для тестирования сохраняем только последнее.
#
# Входные данные в виде списка строк переводятся в нижний регистр, разбиваются на токены с помощью регулярного выражения `(?u)\b\w\w+\b|[0-9\.,]+[\%\w]|\w+`. Посмотреть как работает это выражение можно посмотреть [здесь](https://regex101.com/r/Puyk9J/1). Оно разбивает строки на список подстрок примерно так
#
#
# |Строка| Токены |
# |:------------------------------|:-----------------------------------:|
# | Пиво БагБир 0.5л ст/бут | ['Пиво', 'БагБир', '0.5л', 'ст', 'бут'] |
# | Пиво БАГ-БИР св.ст/б 0.5л | ['Пиво', 'БАГ', 'БИР', 'св', '.с', 'т', 'б', '0.5л'] |
# | Пиво BAGBIER светлое 4,9% 1.5л | ['Пиво', 'BAGBIER', 'светлое', '4,9%', '1.5л'] |
# | Пиво БАГ-БИР св.ПЭТ 2.5л | ['Пиво', 'БАГ', 'БИР', 'св', '.П', 'ЭТ', '2.5л'] |
# | Пиво БАГ БИР ГОЛЬДЕН светлое ПЭТ 4% 1,5л | ['Пиво', 'БАГ', 'БИР', 'ГОЛЬДЕН', 'светлое', 'ПЭТ', '4%', '1,5л'] |
# | Пиво БАГ-БИР ГОЛЬДЕН св.4% ПЭТ 2.5л | ['Пиво', 'БАГ', 'БИР', 'ГОЛЬДЕН', 'св', '.4%', 'ПЭТ', '2.5л'] |
# | Пиво БАГ БИР БОК темное 4% 1.5л | ['Пиво', 'БАГ', 'БИР', 'БОК', 'темное', '4%', '1.5л'] |
# | Нап.пивн.BAGBIER 4,6% ст/б 0.5л | ['Нап', '.п', 'ивн', '.B', 'AGBIER', '4,6%', 'ст', 'б', '0.5л'] |
# | Нап.пивн.БАГ БИР 4,2% ст/б 0.5л | ['Нап', '.п', 'ивн', '.Б', 'АГ', 'БИР', '4,2%', 'ст', 'б', '0.5л'] |
# | Пиво BRAHMA 4.6% св.ст/б 0.33л | ['Пиво', 'BRAHMA', '4.6%', 'св', '.с', 'т', 'б', '0.33л'] |
#
# Потом мы переводим данные в сжатые вектора, а из них получаем простые вектора с помощью фунции `dense_vectors`. Затем создаём LabelEncoder() и заставляем его запомнить наши метки.
#
# Теперь можно заняться самым главным: обучить классификатор.
#
# Мы создаём CatBoostClassifier с настраиваемым параметром iterations (число итераций) и обучаем его через вызов метода `fit` ([документация](https://catboost.ai/docs/concepts/python-reference_catboostclassifier_fit.html)). Этот метод принимает обучающие данные в виде списка векторов (`vectorized_data`) и список закодированных меток.
#
# После того, как обучились, возвращаем кортеж вида `(CountVectorizer, LabelEncoder, CatBoostClassifier)`
def build_model(data, labels, iterations=200):
    """Train the full classification pipeline.

    Returns a tuple (CountVectorizer, LabelEncoder, CatBoostClassifier):
    the fitted text vectorizer, the fitted label encoder and the trained
    classifier.
    """
    # Encode string labels as integer class ids.
    label_encoder = LabelEncoder()
    target = label_encoder.fit_transform(labels)
    # Tokenize the raw SKU strings into bag-of-words count vectors;
    # the token pattern also captures volume/percentage tokens like "0.5л", "4,9%".
    text_vectorizer = CountVectorizer(lowercase=True,
                                      analyzer="word",
                                      token_pattern=r"(?u)\b\w\w+\b|[0-9\.,]+[\%\w]|\w+")
    features = dense_vectors(text_vectorizer.fit_transform(data))
    # Fit the gradient-boosting classifier on the dense count vectors.
    booster = CatBoostClassifier(iterations=iterations, task_type = "GPU")
    booster.fit(features, target, silent=False)
    return (text_vectorizer, label_encoder, booster)
# Train the model (vectorizer + label encoder + CatBoost classifier)
model = build_model(train_data, train_labels, iterations=NUM_OF_ITERATIONS)
# Генератор отчёта по нашей модели. Скармливаем ей модель, тестовые данные и метки.
#
# Метод `.predict` ([документация](https://catboost.ai/docs/concepts/python-reference_catboostclassifier_predict.html)) классификатора `CatBoostClassifier` принимает список векторов (входные данные) и возвращает для них самые вероятные ответы в виде чисел float, которые нужно преобразовать к целым числам.
#
# Метод `.predict_proba` ([документация](https://catboost.ai/docs/concepts/python-reference_catboostclassifier_predict_proba.html)) классификатора `CatBoostClassifier` принимает список векторов (входные данные) и возвращает для них вероятности полученных выше ответов. Полученные вероятности умножаем на 100 и преобразуем в строки вида `95%, 80%, 91%`.
#
# После этого берём полученные из метода `.predict` ответы и раскодируем их с помощью метода `.inverse_transform` кодировщика `LabelEncoder`, который преобразует список чисел в список строк.
#
# Из полученных выше ответов, входных данных и правильных ответов из тестового датасета делаем табличку (`table`) и преобразуем её в удобный для нас вид `pandas.DataFrame`.
def validate_model(model, valid_data, valid_labels):
    """Run the trained pipeline on held-out samples and return a comparison
    DataFrame: input text, predicted label, confidence and the true label."""
    vectorizer, le, classifier = model
    # Vectorize the validation texts exactly as during training
    vectors = dense_vectors(vectorizer.transform(valid_data))
    # Most likely class per sample, as integer codes...
    codes = classifier.predict(vectors).flatten().astype('int64')
    # ...and the confidence of that choice, rendered as a percentage string
    confidence = np.rint(100*classifier.predict_proba(vectors).max(axis=1))
    percent = (f"{int(p)}%" for p in confidence)
    # Decode integer codes back into human-readable labels
    decoded = le.inverse_transform(codes).flatten()
    rows = zip(valid_data, decoded, percent, valid_labels)
    return pd.DataFrame(rows, columns=["Запись", "Предсказание", "Уверенность" ,"Правильный результат"])
# Validate the model on the held-out test split and show a comparison table
validation = validate_model(model, test_data, test_labels)
validation.head(20)
# Accuracy: count the rows where the prediction column matches the
# ground-truth column, then divide by the test set size and print.
valid = len(validation[validation['Предсказание'] == validation['Правильный результат']])
total = len(validation)
print(f"Valid: {valid} from {total} ({100*valid/total}%)")
# Persist the trained classifier to disk.
# NOTE(review): only the CatBoost model is saved; the fitted CountVectorizer
# and LabelEncoder are also required to reproduce predictions — consider
# pickling them as well.
# +
vectorizer, le, classifier = model
classifier.save_model(f"beer_container_catboost_{NUM_OF_ITERATIONS}_iterations.cbm")
| beer_containers.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# name: python3
# ---
# + [markdown] id="view-in-github" colab_type="text"
# <a href="https://colab.research.google.com/github/Sankha1998/SVM-VT/blob/master/Extract_Decision_Boundary_For_Linear_SVM_(2D).ipynb" target="_parent"><img src="https://colab.research.google.com/assets/colab-badge.svg" alt="Open In Colab"/></a>
# + id="rwSzUxETpA0z" colab_type="code" colab={"base_uri": "https://localhost:8080/", "height": 74} outputId="8d4f84fd-4b47-4e08-b521-41b2762b5d47"
# %matplotlib inline
import pandas as pd
from sklearn import datasets
import numpy as np
import matplotlib.pyplot as plt
import seaborn as sns;
from sklearn.preprocessing import StandardScaler
s=StandardScaler()
import plotly.graph_objects as go
from sklearn import svm
# + id="-s1csj7hrPOi" colab_type="code" colab={}
# Load the iris dataset bundled with seaborn
df = sns.load_dataset("iris")
# + id="us6pqRWyrRUZ" colab_type="code" colab={"base_uri": "https://localhost:8080/", "height": 206} outputId="ece9b864-d767-4614-fa71-26aeab123d36"
df.head()
# + id="175tH0b4rRZH" colab_type="code" colab={}
# Encode the three species names as integer class labels
df['species'].replace({'setosa':0,'versicolor':1,'virginica':2},inplace=True)
# + id="Iwo8zSNdrRXj" colab_type="code" colab={}
# Features: columns 2 and 3 (petal measurements);
# binary target: 1.0 when species is not class 2 (virginica)
x=df.iloc[:,[2,3]].values
y = (df.species!=2).astype(np.float64)
# + id="wCe_jTAxEzgV" colab_type="code" colab={}
# Standardize the two features (zero mean, unit variance)
x=s.fit_transform(x)
# + id="DBthBv_kJgPC" colab_type="code" colab={"base_uri": "https://localhost:8080/", "height": 691} outputId="98140f93-e7d5-46b2-9720-3884f076f1cb"
from sklearn.svm import SVC
# Fit a linear SVM and inspect the learned parameters
clf = SVC(C = 1, kernel = 'linear')
clf.fit(x, y)
print('w = ',clf.coef_)
print('b = ',clf.intercept_)
print('Indices of support vectors = ', clf.support_)
print('Support vectors = ', clf.support_vectors_)
print('Number of support vectors for each class = ', clf.n_support_)
print('Coefficients of the support vector in the decision function = ', np.abs(clf.dual_coef_))
# + [markdown] id="r1hqT6pO0MUI" colab_type="text"
# a = -W[0]/W[1]
# b = I[0]/W[1]
# W=svc.coef_[0] the intercept I=svc.intercept_
#
# y = a*x - b
# + id="qf-kMQIh0JZw" colab_type="code" colab={}
w = clf.coef_
i = clf.intercept_
# + id="1dSDGMq_Jvtq" colab_type="code" colab={}
# Slope and intercept of the decision boundary in (standardized) feature space
a = -w[0][0]/w[0][1]
b = i[0]/w[0][1]
# + id="jYIDBkdEGMpE" colab_type="code" colab={}
from sklearn.svm import SVC
# + id="MNTU82SOwFAB" colab_type="code" colab={}
def svmplot(x, y, c):
    """Fit a linear SVC with penalty C=c on (x, y) and plot the data points,
    the support vectors and the decision boundary with plotly."""
    clf = SVC(C = c, kernel = 'linear')
    clf.fit(x, y)
    w = clf.coef_
    i = clf.intercept_
    # Decision boundary x2 = a*x1 - b, derived from w.x + i = 0
    a = -w[0][0]/w[0][1]
    b = i[0]/w[0][1]
    trace = go.Scatter(x=x[:,0], y=x[:,1], mode='markers', marker={'color': y})
    # BUG FIX: this trace was previously assigned to `trace1` and immediately
    # overwritten by the boundary line, so the support vectors never appeared.
    # Plot them with a fixed color (the original per-point color array `y`
    # has the wrong length for the support-vector subset).
    trace1 = go.Scatter(x=clf.support_vectors_[:, 0],
                        y=clf.support_vectors_[:, 1],
                        mode='markers', marker={'color': 'black'})
    trace2 = go.Scatter(x=x[:,0], y=a*x[:,0] - b, mode='lines')
    data = [trace, trace1, trace2]
    layout = go.Layout(title = 'SVM Plot')
    fig = go.Figure(data=data, layout=layout)
    fig.show()
# + id="MWGwL_jTH3NW" colab_type="code" colab={"base_uri": "https://localhost:8080/", "height": 542} outputId="99c2cc3d-a17b-4ddf-e477-6d495c2999c4"
# Soft margin (small C) vs hard margin (large C) for "not virginica"
svmplot(x,y,0.1)
# + id="iIXRRlsGHs8Z" colab_type="code" colab={"base_uri": "https://localhost:8080/", "height": 542} outputId="5f96c4ae-eec4-4ea7-80d5-eadee7b501b3"
svmplot(x,y,100)
# + id="8YVe8GKPH0QN" colab_type="code" colab={}
# Switch the binary target to "is not setosa" and repeat
y = (df.species!=0).astype(np.float64)
# + id="a9cPogL0Hkab" colab_type="code" colab={"base_uri": "https://localhost:8080/", "height": 542} outputId="41fd4f8d-f140-41c7-e7c0-497aa1051f6a"
svmplot(x,y,0.01)
# + id="fwDdpoFAHkhA" colab_type="code" colab={"base_uri": "https://localhost:8080/", "height": 542} outputId="bb056230-1333-4d8d-d8de-88b619dc06c5"
svmplot(x,y,100)
| Extract_Decision_Boundary_For_Linear_SVM_(2D).ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# language: python
# name: python3
# ---
# + [markdown] id="m5tUoHe9FRhs"
# ## Import Libraries
# + id="Dr7BCHS-nIRW" colab={"base_uri": "https://localhost:8080/"} outputId="bc22af7e-8fe6-4e08-f613-785ffb5f287d"
# !pip install transformers==4.3.3
import pandas as pd
import numpy as np
import tensorflow as tf
import torch
from torch.nn import BCEWithLogitsLoss, BCELoss
from torch.utils.data import TensorDataset, DataLoader, RandomSampler, SequentialSampler
from keras.preprocessing.sequence import pad_sequences
from sklearn.model_selection import train_test_split
from sklearn.preprocessing import MultiLabelBinarizer
from sklearn.metrics import classification_report, confusion_matrix, multilabel_confusion_matrix, f1_score, accuracy_score
from transformers import AutoModel, AutoTokenizer
import pickle
from transformers import *
from tqdm import tqdm, trange
from ast import literal_eval
# + id="_3VYEJpwW_Qb" colab={"base_uri": "https://localhost:8080/", "height": 195} outputId="6ad80b69-2372-43e3-d838-2e0e15ec180b"
raw_df = pd.read_csv('train_with_sarcasm.csv')
raw_df.head()
# + id="RWuN_xLGdAoK" colab={"base_uri": "https://localhost:8080/", "height": 246} outputId="fff58ee2-9c82-4a73-d187-f43aa92d9ab5"
print(raw_df.shape)
df = raw_df[["content", "Pro Trump", "Pro Biden", "Neutral", "sarcasm_labels"]]
df = df[df['content'].notna()]
df = df[df['Pro Biden'].notna()]
df = df[df['Pro Trump'].notna()]
df = df[df['Neutral'].notna()]
df = df[df['sarcasm_labels'].notna()]
print(df.shape)
df = df.astype({"Pro Trump": int, "Pro Biden": int, "Neutral": int})
df.drop_duplicates(subset='content', keep='first', inplace=True)
print(df.shape)
df.head()
# + id="BEXi8QFKqC4v"
df["sarcasm"] = 0
for i in range(len(df)):
if df["sarcasm_labels"].iloc[i] == "sarcasm":
df["sarcasm"].iloc[i] = 1
elif df["sarcasm_labels"].iloc[i] != "regular":
print(f"ERROR - unexpected label at index {i}")
break
# + colab={"base_uri": "https://localhost:8080/", "height": 343} id="exwW7kBTqw05" outputId="785556b5-7283-4b91-dc63-bf6dec8880d2"
df = df[["content", "Pro Trump", "Pro Biden", "Neutral", "sarcasm"]]
df.head(10)
# + id="UhjnJEwKnISB" colab={"base_uri": "https://localhost:8080/"} outputId="7c73f426-abc8-4424-9b9a-887f7b481603"
device_name = tf.test.gpu_device_name()
if device_name != '/device:GPU:0':
raise SystemError('GPU device not found')
print('Found GPU at: {}'.format(device_name))
# + id="uorMX_zrnISM" colab={"base_uri": "https://localhost:8080/", "height": 35} outputId="c38b73d1-aec4-4b10-b231-7e6970a0ec54"
device = torch.device("cuda" if torch.cuda.is_available() else "cpu")
n_gpu = torch.cuda.device_count()
torch.cuda.get_device_name(0)
# + [markdown] id="dLcetMjZFjSH"
# ## Load and Preprocess Training Data
# + [markdown] id="2dal0ggBcYdD"
# Dataset will be tokenized then split into training and validation sets. The validation set will be used to monitor training. For testing a separate test set will be loaded for analysis.
# + id="6AhWrzX7nITB" colab={"base_uri": "https://localhost:8080/"} outputId="c1859f4e-c48b-4e23-febb-e662bf6c9b55"
print('Unique comments: ', df.content.nunique() == df.shape[0])
print('Null values: ', df.isnull().values.any())
# df[df.isna().any(axis=1)]
# + id="OkKpz_9eJRt7" colab={"base_uri": "https://localhost:8080/"} outputId="d13a7b4a-ab07-4936-a8d9-25ef779621b7"
print('average sentence length: ', df.content.str.split().str.len().mean())
print('stdev sentence length: ', df.content.str.split().str.len().std())
# + id="UVI59S9VaAfB" colab={"base_uri": "https://localhost:8080/"} outputId="b34c1474-dbfd-4de6-8ebf-421b2bf8d58d"
cols = df.columns
label_cols = ['Pro Trump', 'Pro Biden', 'Neutral']
num_labels = len(label_cols)
print('Label columns: ', label_cols)
# + id="xzgA5qQgYIBZ" colab={"base_uri": "https://localhost:8080/"} outputId="32afd1fb-4630-4ae3-e893-776285694315"
print('Count of 1 per label: \n', df[label_cols].sum(), '\n') # Label counts, may need to downsample or upsample
print('Count of 0 per label: \n', df[label_cols].eq(0).sum())
# + id="uFpSd4JzaAae"
df = df.sample(frac=1).reset_index(drop=True) #shuffle rows
# + id="0DF3ddjej5vd" colab={"base_uri": "https://localhost:8080/", "height": 195} outputId="e9824c1e-8ae9-40b4-89b1-2b99aa6cc11c"
df['one_hot_labels'] = list(df[label_cols].values)
df.head()
# + id="MlhHifh5bW7e"
labels = list(df.one_hot_labels.values)
comments = list(df.content.values)
# + [markdown] id="IlMHfElhGJzc"
# Load the pretrained tokenizer that corresponds to your choice in model. e.g.,
#
# ```
# BERT:
# tokenizer = BertTokenizer.from_pretrained('bert-base-uncased', do_lower_case=True)
#
# XLNet:
# tokenizer = XLNetTokenizer.from_pretrained('xlnet-base-cased', do_lower_case=False)
#
# RoBERTa:
# tokenizer = RobertaTokenizer.from_pretrained('roberta-base', do_lower_case=False)
# ```
#
# + id="HNsEu-vUur-4" colab={"base_uri": "https://localhost:8080/", "height": 153, "referenced_widgets": ["a536daa501474fd6acf32e2149ef57bd", "fb12c51b8f024f5796468a78bc157314", "2a61f39b4ccc4267bba1795dd7e1207f", "267e589ae22b47a19d83c2155c266b1a", "ddfd363d9acc42878debf8512e5a31a9", "e763356f0f2946cd93a10894e55538d8", "f07094209a7f458f80bec6bce278549c", "e04cf2a3712c4d8daec2080172a8d92c"]} outputId="7dec2699-8548-4c84-fe8a-1b7aa07f49ed"
max_length = 100
tokenizer = BertTokenizer.from_pretrained('bert-base-uncased', do_lower_case=True) # tokenizer
encodings = tokenizer.batch_encode_plus(comments,max_length=max_length,pad_to_max_length=True) # tokenizer's encoding method
print('tokenizer outputs: ', encodings.keys())
# + id="gFzJJFlkVNNK"
# len(encodings['token_type_ids'][0]), len(encodings['token_type_ids'][100])
# + id="l6CCLSjfur-9"
input_ids = encodings['input_ids'] # tokenized and encoded sentences
token_type_ids = encodings['token_type_ids'] # token type ids
attention_masks = encodings['attention_mask'] # attention masks
# + id="vSOFbThlYcpb" colab={"base_uri": "https://localhost:8080/"} outputId="02600246-d4e6-407e-9839-4a9cbbba0fe2"
# Identifying indices of 'one_hot_labels' entries that only occur once - this will allow us to stratify split our training data later
label_counts = df.one_hot_labels.astype(str).value_counts()
one_freq = label_counts[label_counts==1].keys()
one_freq_idxs = sorted(list(df[df.one_hot_labels.astype(str).isin(one_freq)].index), reverse=True)
print('df label indices with only one instance: ', one_freq_idxs)
# + id="CQQ7CoOag_r7"
# Gathering single instance inputs to force into the training set after stratified split
one_freq_input_ids = [input_ids.pop(i) for i in one_freq_idxs]
one_freq_token_types = [token_type_ids.pop(i) for i in one_freq_idxs]
one_freq_attention_masks = [attention_masks.pop(i) for i in one_freq_idxs]
one_freq_labels = [labels.pop(i) for i in one_freq_idxs]
# + [markdown] id="r9PxAt48HRRj"
# Be sure to handle all classes during validation using "stratify" during train/validation split:
# + id="WPFaq4ufnIT2"
# Use train_test_split to split our data into train and validation sets
train_inputs, validation_inputs, train_labels, validation_labels, train_token_types, validation_token_types, train_masks, validation_masks = train_test_split(input_ids, labels, token_type_ids,attention_masks,
random_state=2020, test_size=0.20, stratify = labels)
# Add one frequency data to train data
train_inputs.extend(one_freq_input_ids)
train_labels.extend(one_freq_labels)
train_masks.extend(one_freq_attention_masks)
train_token_types.extend(one_freq_token_types)
# Convert all of our data into torch tensors, the required datatype for our model
train_inputs = torch.tensor(train_inputs)
train_labels = torch.tensor(train_labels)
train_masks = torch.tensor(train_masks)
train_token_types = torch.tensor(train_token_types)
validation_inputs = torch.tensor(validation_inputs)
validation_labels = torch.tensor(validation_labels)
validation_masks = torch.tensor(validation_masks)
validation_token_types = torch.tensor(validation_token_types)
# + id="ZRnuLna-nIT4"
# Select a batch size for training. For fine-tuning with XLNet, the authors recommend a batch size of 32, 48, or 128. We will use 32 here to avoid memory issues.
batch_size = 8
# Create an iterator of our data with torch DataLoader. This helps save on memory during training because, unlike a for loop,
# with an iterator the entire dataset does not need to be loaded into memory
train_data = TensorDataset(train_inputs, train_masks, train_labels, train_token_types)
train_sampler = RandomSampler(train_data)
train_dataloader = DataLoader(train_data, sampler=train_sampler, batch_size=batch_size)
validation_data = TensorDataset(validation_inputs, validation_masks, validation_labels, validation_token_types)
validation_sampler = SequentialSampler(validation_data)
validation_dataloader = DataLoader(validation_data, sampler=validation_sampler, batch_size=batch_size)
# + id="iiFRnP_ZTBFa"
torch.save(validation_dataloader,'validation_data_loader')
torch.save(train_dataloader,'train_data_loader')
# + [markdown] id="ncGteBuSFuZM"
# ## Load Model & Set Params
# + [markdown] id="Z0dL-Bz_NrGj"
# Load the appropriate model below, each model already contains a single dense layer for classification on top.
#
#
#
# ```
# BERT:
# model = BertForSequenceClassification.from_pretrained("bert-base-uncased", num_labels=num_labels)
#
# XLNet:
# model = XLNetForSequenceClassification.from_pretrained("xlnet-base-cased", num_labels=num_labels)
#
# RoBERTa:
# model = RobertaForSequenceClassification.from_pretrained('roberta-base', num_labels=num_labels)
# ```
#
#
# + id="Ujk4k16DnIT6" colab={"base_uri": "https://localhost:8080/"} outputId="c0883eb3-cf0d-461d-f89d-5e62af5208c9"
# Load model, the pretrained model will include a single linear classification layer on top for classification.
# model = AutoModel.from_pretrained("vinai/bertweet-base", num_labels=num_labels)
model = BertForSequenceClassification.from_pretrained("bert-base-uncased", num_labels=3)
# model = XLNetForSequenceClassification.from_pretrained("xlnet-base-cased", num_labels=num_labels)
# model = RobertaForSequenceClassification.from_pretrained('roberta-base', num_labels=num_labels)
# model.load_state_dict(torch.load('./models/bert_main'))
model.cuda()
# + [markdown] id="jGE4gv9qfhRG"
# Setting custom optimization parameters for the AdamW optimizer https://huggingface.co/transformers/main_classes/optimizer_schedules.html
# + id="GsV8zwWYnIT9"
# setting custom optimization parameters. You may implement a scheduler here as well.
param_optimizer = list(model.named_parameters())
no_decay = ['bias', 'gamma', 'beta']
# no_decay = []
optimizer_grouped_parameters = [
{'params': [p for n, p in param_optimizer if not any(nd in n for nd in no_decay)],
'weight_decay_rate': 0.01},
{'params': [p for n, p in param_optimizer if any(nd in n for nd in no_decay)],
'weight_decay_rate': 0.0}
]
# + id="aOomZIEIoHOL"
optimizer = AdamW(optimizer_grouped_parameters,lr=3e-5,correct_bias=True)
# optimizer = AdamW(model.parameters(),lr=2e-5) # Default optimization
# + [markdown] id="JRQQZ8zIFzLW"
# ## Train Model
# + id="uDLZmEC_oKo3" colab={"base_uri": "https://localhost:8080/"} outputId="631a5a89-18f8-4232-f670-084164118de9"
# Store our loss and accuracy for plotting
train_loss_set = []
# Number of training epochs (authors recommend between 2 and 4)
epochs = 8
# trange is a tqdm wrapper around the normal python range
for _ in trange(epochs, desc="Epoch"):
# Training
# Set our model to training mode (as opposed to evaluation mode)
model.train()
# Tracking variables
tr_loss = 0 #running loss
nb_tr_examples, nb_tr_steps = 0, 0
# Train the data for one epoch
for step, batch in enumerate(train_dataloader):
# Add batch to GPU
batch = tuple(t.to(device) for t in batch)
# Unpack the inputs from our dataloader
b_input_ids, b_input_mask, b_labels, b_token_types = batch
# Clear out the gradients (by default they accumulate)
optimizer.zero_grad()
# print(b_labels)
# # Forward pass for multiclass classification
# outputs = model(b_input_ids, token_type_ids=None, attention_mask=b_input_mask, labels=b_labels)
# loss = outputs[0]
# logits = outputs[1]
# Forward pass for multilabel classification
outputs = model(b_input_ids, token_type_ids=None, attention_mask=b_input_mask)
logits = outputs[0]
loss_func = BCEWithLogitsLoss()
loss = loss_func(logits.view(-1,num_labels),b_labels.type_as(logits).view(-1,num_labels)) #convert labels to float for calculation
loss_func = BCELoss()
loss = loss_func(torch.sigmoid(logits.view(-1,num_labels)),b_labels.type_as(logits).view(-1,num_labels)) #convert labels to float for calculation
train_loss_set.append(loss.item())
# Backward pass
loss.backward()
# Update parameters and take a step using the computed gradient
optimizer.step()
# scheduler.step()
# Update tracking variables
tr_loss += loss.item()
nb_tr_examples += b_input_ids.size(0)
nb_tr_steps += 1
print("Train loss: {}".format(tr_loss/nb_tr_steps))
###############################################################################
# Validation
# Put model in evaluation mode to evaluate loss on the validation set
model.eval()
# Variables to gather full output
logit_preds,true_labels,pred_labels,tokenized_texts = [],[],[],[]
# Predict
for i, batch in enumerate(validation_dataloader):
batch = tuple(t.to(device) for t in batch)
# Unpack the inputs from our dataloader
b_input_ids, b_input_mask, b_labels, b_token_types = batch
with torch.no_grad():
# Forward pass
outs = model(b_input_ids, token_type_ids=None, attention_mask=b_input_mask)
b_logit_pred = outs[0]
pred_label = torch.sigmoid(b_logit_pred)
b_logit_pred = b_logit_pred.detach().cpu().numpy()
pred_label = pred_label.to('cpu').numpy()
b_labels = b_labels.to('cpu').numpy()
tokenized_texts.append(b_input_ids)
logit_preds.append(b_logit_pred)
true_labels.append(b_labels)
pred_labels.append(pred_label)
# Flatten outputs
pred_labels = [item for sublist in pred_labels for item in sublist]
true_labels = [item for sublist in true_labels for item in sublist]
# Calculate Accuracy
threshold = 0.5
pred_bools = [pl>threshold for pl in pred_labels]
true_bools = [tl==1 for tl in true_labels]
val_f1_accuracy = f1_score(true_bools,pred_bools,average='micro')*100
val_flat_accuracy = accuracy_score(true_bools, pred_bools)*100
print('F1 Validation Accuracy: ', val_f1_accuracy)
print('Flat Validation Accuracy: ', val_flat_accuracy)
# + id="aiBeiBSRoOuz"
torch.save(model.state_dict(), 'bert_model_toxic')
# + [markdown] id="_7dd2GE3F4yK"
# ## Load and Preprocess Test Data
# + id="bW6MsCuFHeOE"
#DELETE LATER
# test_df = pd.read_csv('testset_new.csv')
test_df = pd.read_csv('test_main.csv')
print(test_df.shape)
test_df.dropna()
print(test_df.shape)
# test_df.drop(columns='content', inplace=True)
# test_df = test_df.rename(columns={'content': 'tweet', 'Against Trump': 'Anti Trump', 'Against Biden': 'Anti Biden'})
test_label_cols = ['Pro Trump', 'Pro Biden', 'Neutral']
test_df['Pro Trump'] = np.nan
test_df['Pro Biden'] = np.nan
test_df['Neutral'] = np.nan
print(test_label_cols)
test_df.head()
# + id="77rjCrMGpYxz"
test_df = test_df[~test_df[test_label_cols].eq(-1).any(axis=1)] #remove irrelevant rows/comments with -1 values
test_df['one_hot_labels'] = list(test_df[test_label_cols].values)
test_df.head()
# + id="1a41OmU2i7qp"
# Gathering input data
test_labels = list(test_df.one_hot_labels.values)
test_comments = list(test_df.content.values)
# print(test_comments)
# + id="amySMO8EQzf2"
# Encoding input data
# %%time
test_encodings = tokenizer.batch_encode_plus(test_comments,max_length=max_length,pad_to_max_length=True)
test_input_ids = test_encodings['input_ids']
test_token_type_ids = test_encodings['token_type_ids']
test_attention_masks = test_encodings['attention_mask']
# + id="hqOfi9fkRaRN"
# Make tensors out of data
test_inputs = torch.tensor(test_input_ids)
test_labels = torch.tensor(test_labels)
test_masks = torch.tensor(test_attention_masks)
test_token_types = torch.tensor(test_token_type_ids)
# Create test dataloader
test_data = TensorDataset(test_inputs, test_masks, test_labels, test_token_types)
test_sampler = SequentialSampler(test_data)
test_dataloader = DataLoader(test_data, sampler=test_sampler, batch_size=8)
# Save test dataloader
torch.save(test_dataloader,'test_data_loader')
# + [markdown] id="PFTWxCA_GBau"
# ## Prediction and Metrics
# + id="NPvrL6OFSQvf"
# Test
# Put model in evaluation mode to evaluate loss on the validation set
model.eval()
#track variables
logit_preds,true_labels,pred_labels,tokenized_texts = [],[],[],[]
# Predict
for i, batch in enumerate(test_dataloader):
batch = tuple(t.to(device) for t in batch)
# Unpack the inputs from our dataloader
b_input_ids, b_input_mask, b_labels, b_token_types = batch
with torch.no_grad():
# Forward pass
outs = model(b_input_ids, token_type_ids=None, attention_mask=b_input_mask)
b_logit_pred = outs[0]
pred_label = torch.sigmoid(b_logit_pred)
b_logit_pred = b_logit_pred.detach().cpu().numpy()
pred_label = pred_label.to('cpu').numpy()
b_labels = b_labels.to('cpu').numpy()
tokenized_texts.append(b_input_ids)
logit_preds.append(b_logit_pred)
true_labels.append(b_labels)
pred_labels.append(pred_label)
# Flatten outputs
tokenized_texts = [item for sublist in tokenized_texts for item in sublist]
pred_labels = [item for sublist in pred_labels for item in sublist]
true_labels = [item for sublist in true_labels for item in sublist]
# Converting flattened binary values to boolean values
true_bools = [tl==1 for tl in true_labels]
# + [markdown] id="bQeGWqeMzAoZ"
# We need to threshold our sigmoid function outputs which range from [0, 1]. Below I use 0.50 as a threshold.
# + [markdown] id="8eoOvfa5zjqm"
# # Metrics
#
# + id="BZcZUcYOxxmM" colab={"base_uri": "https://localhost:8080/"} outputId="0d791075-60c0-46b5-a98f-10b287aab38e"
pred_bools = [pl>0.50 for pl in pred_labels] #boolean output after thresholding
# Print and save classification report
# print('F1 Validation Accuracy: ', val_f1_accuracy)
# print('Flat Validation Accuracy: ', val_flat_accuracy,'\n')
print('Test F1 Accuracy: ', f1_score(true_bools, pred_bools,average='micro'))
print('Test Flat Accuracy: ', accuracy_score(true_bools, pred_bools),'\n')
clf_report = classification_report(true_bools,pred_bools,target_names=test_label_cols)
pickle.dump(clf_report, open('classification_report.txt','wb')) #save report
print(clf_report)
# + [markdown] id="5rLqrHK87eir"
# ## Output Dataframe
# + id="CJBkRdGN1hzx"
idx2label = dict(zip(range(6),label_cols))
print(idx2label)
# + id="QZUglV_A4BF_"
# Getting indices of where boolean one hot vector true_bools is True so we can use idx2label to gather label names
true_label_idxs, pred_label_idxs=[],[]
for vals in true_bools:
true_label_idxs.append(np.where(vals)[0].flatten().tolist())
for vals in pred_bools:
pred_label_idxs.append(np.where(vals)[0].flatten().tolist())
# + id="OOGhXM3R4a91"
# Gathering vectors of label names using idx2label
true_label_texts, pred_label_texts = [], []
for vals in true_label_idxs:
if vals:
true_label_texts.append([idx2label[val] for val in vals])
else:
true_label_texts.append(vals)
for vals in pred_label_idxs:
if vals:
pred_label_texts.append([idx2label[val] for val in vals])
else:
pred_label_texts.append(vals)
# + id="5HaqV6pn_HCG"
# Decoding input ids to comment text
comment_texts = [tokenizer.decode(text,skip_special_tokens=True,clean_up_tokenization_spaces=False) for text in tokenized_texts]
# + id="R7kk0Mgl1L-T"
# Converting lists to df
comparisons_df = pd.DataFrame({'comment_text': comment_texts, 'true_labels': true_label_texts, 'pred_labels':pred_label_texts})
comparisons_df.to_csv('comparisons.csv')
comparisons_df.sample(20)
# + id="bRVf66L3boV4"
def temp(x):
    """Return the first element of `x`, or ``np.nan`` when `x` is empty/falsy.

    Used to collapse a (possibly empty) list of predicted label names down to
    a single sentiment value per row.
    """
    return x[0] if x else np.nan
test_df['sentiment'] = comparisons_df['pred_labels']
test_df['sentiment'] = test_df['sentiment'].apply(lambda col: temp(col))
test_df.head()
# + id="0NBUWMFbePtS"
print(test_df.shape)
test_df.dropna(subset=['sentiment'], inplace=True)
print(test_df.shape)
# + id="g2aO-5jDhMkU"
test_df.groupby(['country', 'sentiment']).agg(['count', 'sum'])
# + id="aGYjVTLGe_qV"
test_df = test_df[test_df['sentiment'] != 'Neutral']
print(test_df.shape)
# + id="36c9ntxJfMpy"
del test_df['Pro Trump']
del test_df['Pro Biden']
del test_df['Neutral']
del test_df['one_hot_labels']
# + id="ecLHLdFqfkPU"
test_df.head()
# + id="NIb0M3o_m4lf"
test_df.to_csv('tempBeforeSolr.csv')
# + [markdown] id="c6mDy1lw0S4y"
# Doing this may result in trade-offs between precision, flat accuracy, and micro-F1 accuracy. You may tune the threshold however you want.
# + id="PHlhb2lvar8V"
# Calculate Accuracy - maximize F1 accuracy by tuning threshold values. First with 'macro_thresholds' on the order of e^-1 then with 'micro_thresholds' on the order of e^-2
macro_thresholds = np.array(range(1,10))/10
f1_results, flat_acc_results = [], []
for th in macro_thresholds:
pred_bools = [pl>th for pl in pred_labels]
test_f1_accuracy = f1_score(true_bools,pred_bools,average='micro')
test_flat_accuracy = accuracy_score(true_bools, pred_bools)
f1_results.append(test_f1_accuracy)
flat_acc_results.append(test_flat_accuracy)
best_macro_th = macro_thresholds[np.argmax(f1_results)] #best macro threshold value
micro_thresholds = (np.array(range(10))/100)+best_macro_th #calculating micro threshold values
f1_results, flat_acc_results = [], []
for th in micro_thresholds:
pred_bools = [pl>th for pl in pred_labels]
test_f1_accuracy = f1_score(true_bools,pred_bools,average='micro')
test_flat_accuracy = accuracy_score(true_bools, pred_bools)
f1_results.append(test_f1_accuracy)
flat_acc_results.append(test_flat_accuracy)
best_f1_idx = np.argmax(f1_results) #best threshold value
# Printing and saving classification report
print('Best Threshold: ', micro_thresholds[best_f1_idx])
print('Test F1 Accuracy: ', f1_results[best_f1_idx])
print('Test Flat Accuracy: ', flat_acc_results[best_f1_idx], '\n')
best_pred_bools = [pl>micro_thresholds[best_f1_idx] for pl in pred_labels]
clf_report_optimized = classification_report(true_bools,best_pred_bools, target_names=label_cols)
pickle.dump(clf_report_optimized, open('classification_report_optimized.txt','wb'))
print(clf_report_optimized)
| Sarcasm Detection/BERT_Main_with_Sarcasm_Detection.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: a3dbr
# language: python
# name: a3dbr
# ---
# +
# %load_ext autoreload
# %autoreload 2
from matplotlib.path import Path
import numpy as np
import matplotlib.pyplot as plt
import cv2
from PIL import Image
import argparse
import os, sys
sys.path.append(os.path.dirname(os.getcwd()))
import polygon_primitives.file_writer as fw
from image_processing import extract_window_wall_ratio, utils, contour_extraction
from scipy.spatial import Delaunay
# -
# %pwd
# First, we set the image and parameter directories, as well as the merged polygons file path. We load the merged polygons, as we also initialize a dictionary for the Cameras. The Camera class stores all information related to the camera, i.e. intrinsic and extrinsic camera parameters.
# +
#Example file
filename = "DJI_0081.JPG"
directory = "../data/Drone_Flight/"
facade_file = "../data/Drone_Flight/merged.txt"
image_dir = directory + "RGB/"
param_dir = directory + "params/"
predictions_dir = directory + "predictions/"
offset = np.loadtxt(param_dir + "offset.txt",usecols=range(3))
#Initializes a dictionary of Camera classes. See utils.py for more information.
camera_dict = utils.create_camera_dict(param_dir, offset=offset)
#Loads pmatrices and image filenamees
p_matrices = np.loadtxt(param_dir + 'pmatrix.txt', usecols=range(1,13))
#Loads the merged polygons, as well as a list of facade types (i.e. roof, wall, or floor)
merged_polygons, facade_type_list, file_format = fw.load_merged_polygon_facades(filename=facade_file)
#Offset adjustment parameter
height_adj = np.array([0.0, 0.0, 108])
offset = offset + height_adj
# -
# Next, we extract the contours for the window predictions, by taking the window prediction points and using them to create a shapely polygon.
# +
window_file = predictions_dir + "DJI_0081_Windows.png"
print("Window predictions: ")
image = cv2.imread(window_file)
plt.imshow(image)
plt.show()
#Extract the contours of the window file
contours = contour_extraction.extract_contours(window_file)
#Create polygons from the window contours
window_polygons = utils.convert_polygons_shapely(contours)
def plot_shapely_polys(image_file, polys):
    """Plot the exterior rings of shapely polygons onto the current figure.

    Parameters
    ----------
    image_file : unused; kept only for backward compatibility with callers.
    polys : iterable of shapely polygons to draw.
    """
    for poly in polys:
        # Lightly simplify each polygon so the plotted outline is less noisy.
        # (The original assigned `s = poly` and immediately overwrote it with
        # the simplified polygon — dead code, removed.)
        s = poly.simplify(0.1, preserve_topology=True)
        x, y = s.exterior.xy
        plt.plot(x, y)
    plt.show()
print("Extracted contours: ")
plt.imshow(image)
plot_shapely_polys(window_file, window_polygons)
# -
# Finally, for each window point, we obtain its 3D coordinates and use them to calculate the window to wall ratio.
# +
camera = camera_dict[filename]
pmatrix = camera.calc_pmatrix()
image_file = utils.load_image(image_dir + filename)
#Projects the merged polygon facades onto the camera image
projected_facades, projective_distances = extract_window_wall_ratio.project_merged_polygons(
merged_polygons, offset, pmatrix)
#Creates a dictionary mapping the facade to the windows contained within them, keyed by facade index
facade_window_map = extract_window_wall_ratio.get_facade_window_map(
window_polygons, projected_facades, projective_distances)
#Creates a list of all the facades in the merged polygon
facades = []
for poly in merged_polygons:
facades = facades + poly
facade_indices = list(facade_window_map.keys())
for i in facade_indices:
#Computes window to wall ratio
win_wall_ratio = extract_window_wall_ratio.get_window_wall_ratio(
projected_facades[i], facades[i], facade_window_map[i])
#Output printing:
print("Facade index: " + str(i))
print("Window-to-wall ratio: " + str(win_wall_ratio))
#Uncomment this line to plot the windows and facades on the image
# extract_window_wall_ratio.plot_windows_facade(projected_facades[i], facade_window_map[i], image_file)
# -
| Example_Notebooks/Window_Wall_Ratio_Example_ST.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python [Root]
# language: python
# name: Python [Root]
# ---
# +
# %matplotlib inline
import numpy as np
import pandas as pd
import os, sys
import warnings
warnings.filterwarnings('ignore')
from sklearn.decomposition import PCA
from sklearn.cross_validation import train_test_split, KFold
from sklearn.ensemble import RandomForestRegressor
from sklearn.metrics import mean_absolute_error
import xgboost as xgb
basepath = os.path.expanduser('~/Desktop/src/AllState_Claims_Severity/')
sys.path.append(os.path.join(basepath, 'src'))
np.random.seed(12)
# -
# Load the AllState claims data (train/test/sample submission).
train = pd.read_csv(os.path.join(basepath, 'data/raw/train.csv'))
test = pd.read_csv(os.path.join(basepath, 'data/raw/test.csv'))
sample_sub = pd.read_csv(os.path.join(basepath, 'data/raw/sample_submission.csv'))
# append train and test
data = pd.concat((train, test))
# target variable (log-transformed loss; predictions are exp'd back later)
y = np.log(train.loss)
# categorical and continuous variables
categorical_variables = [col for col in data.columns if 'cat' in col]
continuous_variables = [col for col in data.columns if 'cont' in col]
data_cont = data[continuous_variables]
pca = PCA(n_components=10, whiten=True)
pca.fit(data_cont)
data_rem = pca.transform(data_cont)
print('Explained variance by the components {}'.format(np.sum(pca.explained_variance_ratio_)))
# NOTE(review): data_rem (the PCA-transformed features) is computed above but
# never used — train_/test_ are sliced from the raw data_cont. Confirm
# whether the PCA projection was intended to feed the models below.
train_ = data_cont[:len(train)]
test_ = data_cont[len(train):]
itrain, itest = train_test_split(range(len(train)), test_size=0.2, random_state=1231)
# +
X_train = train_.iloc[itrain]
X_test = train_.iloc[itest]
y_train = y.iloc[itrain]
y_test = y.iloc[itest]
# -
def cv(X_train, y_train):
    """Run 3-fold cross-validation with a default RandomForestRegressor,
    printing the MAE of each fold on the original (exponentiated) scale.

    Assumes `y_train` holds log-transformed targets, matching how `y` is
    built elsewhere in this notebook.
    """
    folds = KFold(len(X_train), n_folds=3, random_state=12313)
    for fold_idx, (fit_idx, hold_idx) in enumerate(folds):
        print('Fold: {}'.format(fold_idx))
        fit_X, fit_y = X_train.iloc[fit_idx], y_train.iloc[fit_idx]
        hold_X, hold_y = X_train.iloc[hold_idx], y_train.iloc[hold_idx]
        forest = RandomForestRegressor(n_jobs=-1, random_state=123111)
        forest.fit(fit_X, fit_y)
        predictions = forest.predict(hold_X)
        print('MAE on unseen examples: {}'.format(
            mean_absolute_error(np.exp(hold_y), np.exp(predictions))))
def get_correlated_features(df, numerical_columns):
    """
    Find pairs of highly correlated numerical features.

    Arguments
    ---------
    df: DataFrame containing (at least) `numerical_columns`.
    numerical_columns: names of the numerical columns to inspect.

    Returns
    -------
    List of (col_a, col_b) tuples whose absolute Pearson correlation exceeds
    0.8. Each qualifying pair appears twice, once in each order, because the
    full (symmetric) correlation matrix is scanned.
    """
    df_cont = df[numerical_columns]
    correlated_pairs = []
    df_corr = df_cont.corr()
    index = df_corr.index.values
    for i in range(len(df_corr)):
        for j in range(len(numerical_columns)):
            if i == j:
                continue  # skip the diagonal (self-correlation is always 1)
            # BUG FIX: the original indexed the raw data (df_cont.iloc[i, j]),
            # comparing data *values* against 0.8 instead of correlations.
            if abs(df_corr.iloc[i, j]) > 0.8:
                correlated_pairs.append((index[i], numerical_columns[j]))
    return correlated_pairs
correlated_pairs = get_correlated_features(data, continuous_variables)
def remove_correlated_pairs(X_train, y_train, X_test, y_test, correlated_pairs):
    """For each correlated pair, report CV and held-out MAE after dropping the
    first column, the second column, and both columns.

    Output order and wording match the original (three evaluations per pair,
    each followed by a dashed separator). The original repeated the same
    evaluate block three times verbatim; it is factored into one helper.
    """
    def _evaluate(features, description):
        # One CV run plus a held-out fit/score for a candidate feature subset.
        cv(X_train[features], y_train)
        est = RandomForestRegressor(n_jobs=-1, random_state=123111)
        est.fit(X_train[features], y_train)
        yhat = est.predict(X_test[features])
        print(description)
        print('MAE on unseen examples {}'.format(mean_absolute_error(np.exp(y_test), np.exp(yhat))))
        print('-'*50)

    columns = X_train.columns
    for col1, col2 in correlated_pairs:
        print('Pair: {0}, {1}'.format(col1, col2))
        _evaluate(columns.drop([col1]), 'First feature removed')
        _evaluate(columns.drop([col2]), 'Second feature removed')
        _evaluate(columns.drop([col1, col2]), 'Both features removed')
        print('\n\n')
remove_correlated_pairs(X_train, y_train, X_test, y_test, correlated_pairs)
| notebooks/Experiment - 4.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python [conda env:general]
# language: python
# name: general
# ---
# + [markdown] pycharm={"name": "#%% md\n"}
# - 导包
#
#
# + pycharm={"name": "#%%\n"}
import torch
import torch.nn as nn
import numpy as np
from torch.autograd import Variable
import torch.nn.functional as F
# + pycharm={"name": "#%%\n"}
layer1 = nn.Linear(in_features=10,out_features=5,bias=True)
x = Variable(torch.randn(1,10),requires_grad = True)
layer1(x)
# + pycharm={"name": "#%%\n"}
layer1.weight
# + pycharm={"name": "#%%\n"}
layer1.bias
# + [markdown] pycharm={"name": "#%% md\n"}
# ## 堆叠线性层
# + pycharm={"name": "#%%\n"}
layer1 = nn.Linear(10,5)
layer2 = nn.Linear(5,2)
layer2(layer1(x))
# + [markdown] pycharm={"name": "#%% md\n"}
# ## 非线性激活函数
# + pycharm={"name": "#%%\n"}
sample_data = Variable(torch.Tensor([[1,2,-1,-2]]))
myRelu = nn.ReLU()
myRelu(sample_data)
# + pycharm={"name": "#%%\n"}
import torch.nn.functional as F
sample_data = Variable(torch.Tensor([[1,2,-1,-2]]))
f = F.relu(sample_data)
f
# + [markdown] pycharm={"name": "#%% md\n"}
# ## 神经网络
# + pycharm={"name": "#%%\n"}
class model(nn.Module):
    """Minimal two-layer feed-forward network: Linear -> ReLU -> Linear."""

    def __init__(self, input_size, hidden_size, output_size):
        super(model, self).__init__()
        self.layer1 = nn.Linear(input_size, hidden_size)
        self.layer2 = nn.Linear(hidden_size, output_size)

    def forward(self, input):
        # BUG FIX: the original named this method `__forward__`, which
        # nn.Module never dispatches to — calling model(x) raised
        # NotImplementedError. PyTorch routes model(x) to `forward`.
        out = self.layer1(input)
        out = F.relu(out)
        out = self.layer2(out)
        return out
# + [markdown] pycharm={"name": "#%% md\n"}
# ## Loss
# + pycharm={"name": "#%%\n"}
# MSE
loss = nn.MSELoss()
input = Variable(torch.randn(3,5),requires_grad = True)
target = Variable(torch.randn(3,5))
output = loss(input,target)
output.backward()
output
# + pycharm={"name": "#%%\n"}
# CrossEntropyLoss
loss = nn.CrossEntropyLoss()
input = Variable(torch.randn(3, 5), requires_grad=True)
target = Variable(torch.LongTensor(3).random_(5))
output = loss(input, target)
output.backward()
output
# + [markdown] pycharm={"name": "#%% md\n"}
# ## 优化器
#
# + pycharm={"name": "#%%\n"}
# Standard SGD training skeleton using the CrossEntropyLoss module bound to
# `loss` in the previous cell.
# NOTE(review): `datasets` is not defined anywhere in this notebook — this
# cell is illustrative and will raise NameError if run as-is.
import torch.optim as optim
model = model(3,5,3)
optimizer = optim.SGD(model.parameters(), lr=1e-3, momentum=0.9)
for input, target in datasets:
    optimizer.zero_grad()
    output = model(input)
    # BUG FIX: the original wrote `loss = loss(output, target)`, rebinding
    # the criterion to a tensor and crashing on the second iteration. Keep
    # the per-batch loss under its own name.
    batch_loss = loss(output, target)
    batch_loss.backward()
    optimizer.step()
| pytorch/DeepLearningwithPyTorch_Code/DLwithPyTorch-master/Chapter03/chapter03.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# language: python
# name: python3
# ---
# # Get your data ready for training
# This module defines the basic [`DataBunch`](/basic_data.html#DataBunch) object that is used inside [`Learner`](/basic_train.html#Learner) to train a model. This is the generic class, that can take any kind of fastai [`Dataset`](https://pytorch.org/docs/stable/data.html#torch.utils.data.Dataset) or [`DataLoader`](https://pytorch.org/docs/stable/data.html#torch.utils.data.DataLoader). You'll find helpful functions in the data module of every application to directly create this [`DataBunch`](/basic_data.html#DataBunch) for you.
# + hide_input=true
from fastai.gen_doc.nbdoc import *
from fastai.basics import *
# + hide_input=true
# Render the auto-generated API documentation for the DataBunch class.
show_doc(DataBunch)
# -
# It also ensures all the dataloaders are on `device` and applies `tfms` to them as batches are drawn (like normalization). `path` is used internally to store temporary files, `collate_fn` is passed to the pytorch `Dataloader` (replacing the one there) to explain how to collate the samples picked for a batch. By default, it applies data to the object sent (see in [`vision.image`](/vision.image.html#vision.image) or the [data block API](/data_block.html) why this can be important).
#
# `train_dl`, `valid_dl` and optionally `test_dl` will be wrapped in [`DeviceDataLoader`](/basic_data.html#DeviceDataLoader).
# ### Factory method
# + hide_input=true
# Document the DataBunch.create factory method.
show_doc(DataBunch.create)
# -
# `num_workers` is the number of CPUs to use, `tfms`, `device` and `collate_fn` are passed to the init method.
# + hide_input=true
# Emit a warning admonition box in the rendered documentation page.
jekyll_warn("You can pass regular pytorch Dataset here, but they'll require more attributes than the basic ones to work with the library. See below for more details.")
# -
# ### Visualization
# + hide_input=true
# Document the batch-visualization helper.
show_doc(DataBunch.show_batch)
# -
# ### Grabbing some data
# + hide_input=true
# Data-access helpers: fetch a dataloader, one batch, one item, or run a sanity check.
show_doc(DataBunch.dl)
# + hide_input=true
show_doc(DataBunch.one_batch)
# + hide_input=true
show_doc(DataBunch.one_item)
# + hide_input=true
show_doc(DataBunch.sanity_check)
# -
# ### Empty [`DataBunch`](/basic_data.html#DataBunch) for inference
# + hide_input=true
# Inference-time serialization helpers for an empty DataBunch.
show_doc(DataBunch.export)
# + hide_input=true
show_doc(DataBunch.load_empty, full_name='load_empty')
# -
# This method should be used to create a [`DataBunch`](/basic_data.html#DataBunch) at inference, see the corresponding [tutorial](/tutorial.inference.html).
# ### Dataloader transforms
# + hide_input=true
# Document adding a transform to all wrapped dataloaders.
show_doc(DataBunch.add_tfm)
# -
# Adds a transform to all dataloaders.
# ## Using a custom Dataset in fastai
# If you want to use your pytorch [`Dataset`](https://pytorch.org/docs/stable/data.html#torch.utils.data.Dataset) in fastai, you may need to implement more attributes/methods if you want to use the full functionality of the library. Some functions can easily be used with your pytorch [`Dataset`](https://pytorch.org/docs/stable/data.html#torch.utils.data.Dataset) if you just add an attribute, for others, the best would be to create your own [`ItemList`](/data_block.html#ItemList) by following [this tutorial](/tutorial.itemlist.html). Here is a full list of what the library will expect.
# ### Basics
# First of all, you obviously need to implement the methods `__len__` and `__getitem__`, as indicated by the pytorch docs. Then the most needed things would be:
# - `c` attribute: it's used in most functions that directly create a [`Learner`](/basic_train.html#Learner) ([`tabular_learner`](/tabular.data.html#tabular_learner), [`text_classifier_learner`](/text.learner.html#text_classifier_learner), [`unet_learner`](/vision.learner.html#unet_learner), [`create_cnn`](/vision.learner.html#create_cnn)) and represents the number of outputs of the final layer of your model (also the number of classes if applicable).
# - `classes` attribute: it's used by [`ClassificationInterpretation`](/vision.learner.html#ClassificationInterpretation) and also in [`collab_learner`](/collab.html#collab_learner) (best to use [`CollabDataBunch.from_df`](/collab.html#CollabDataBunch.from_df) than a pytorch [`Dataset`](https://pytorch.org/docs/stable/data.html#torch.utils.data.Dataset)) and represents the unique tags that appear in your data.
# - maybe a `loss_func` attribute: that is going to be used by [`Learner`](/basic_train.html#Learner) as a default loss function, so if you know your custom [`Dataset`](https://pytorch.org/docs/stable/data.html#torch.utils.data.Dataset) requires a particular loss, you can put it.
#
# ### For a specific application
# In text, your dataset will need to have a `vocab` attribute that should be an instance of [`Vocab`](/text.transform.html#Vocab). It's used by [`text_classifier_learner`](/text.learner.html#text_classifier_learner) and [`language_model_learner`](/text.learner.html#language_model_learner) when building the model.
#
# In tabular, your dataset will need to have a `cont_names` attribute (for the names of continuous variables) and a `get_emb_szs` method that returns a list of tuple `(n_classes, emb_sz)` representing, for each categorical variable, the number of different codes (don't forget to add 1 for nan) and the corresponding embedding size. Those two are used with the `c` attribute by [`tabular_learner`](/tabular.data.html#tabular_learner).
# ### Functions that really won't work
# To make those last functions work, you really need to use the [data block API](/data_block.html) and maybe write your own [custom ItemList](/tutorial.itemlist.html).
# - [`DataBunch.show_batch`](/basic_data.html#DataBunch.show_batch) (requires `.x.reconstruct`, `.y.reconstruct` and `.x.show_xys`)
# - [`Learner.predict`](/basic_train.html#Learner.predict) (requires `x.set_item`, `.y.analyze_pred`, `.y.reconstruct` and maybe `.x.reconstruct`)
# - [`Learner.show_results`](/basic_train.html#Learner.show_results) (requires `x.reconstruct`, `y.analyze_pred`, `y.reconstruct` and `x.show_xyzs`)
# - `DataBunch.set_item` (requires `x.set_item`)
# - [`Learner.backward`](/basic_train.html#Learner.backward) (uses `DataBunch.set_item`)
# - [`DataBunch.export`](/basic_data.html#DataBunch.export) (requires `export`)
# + hide_input=true
# Document the DeviceDataLoader wrapper class itself.
show_doc(DeviceDataLoader)
# -
# Put the batches of `dl` on `device` after applying an optional list of `tfms`. `collate_fn` will replace the one of `dl`. All dataloaders of a [`DataBunch`](/basic_data.html#DataBunch) are of this type.
# ### Factory method
# + hide_input=true
# Document the DeviceDataLoader.create factory method.
show_doc(DeviceDataLoader.create)
# -
# The given `collate_fn` will be used to put the samples together in one batch (by default it grabs their data attribute). `shuffle` means the dataloader will take the samples randomly if that flag is set to `True`, or in the right order otherwise. `tfms` are passed to the init method. All `kwargs` are passed to the pytorch [`DataLoader`](https://pytorch.org/docs/stable/data.html#torch.utils.data.DataLoader) class initialization.
# ### Methods
# + hide_input=true
# Remaining DeviceDataLoader methods plus the DatasetType enumerator.
show_doc(DeviceDataLoader.add_tfm)
# + hide_input=true
show_doc(DeviceDataLoader.remove_tfm)
# + hide_input=true
show_doc(DeviceDataLoader.new)
# + hide_input=true
show_doc(DeviceDataLoader.proc_batch)
# + hide_input=true
show_doc(DatasetType, doc_string=False)
# -
# Internal enumerator to name the training, validation and test dataset/dataloader.
# ## Undocumented Methods - Methods moved below this line will intentionally be hidden
# Intentionally undocumented method (see the section header above this line).
show_doc(DeviceDataLoader.collate_fn)
# ## New Methods - Please document or move to the undocumented section
| docs_src/basic_data.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python [conda root]
# language: python
# name: conda-root-py
# ---
# +
import sys
from pyspark import SparkContext, SparkConf
if __name__ == "__main__":
    # create Spark context with Spark configuration
    conf = SparkConf().setAppName("Spark Count")
    sc = SparkContext(conf=conf)

    # get threshold (argv[1] = input path, argv[2] = minimum word count)
    threshold = int(sys.argv[2])

    # read in text file and split each document into words
    tokenized = sc.textFile(sys.argv[1]).flatMap(lambda line: line.split(" "))

    # count the occurrence of each word
    wordCounts = tokenized.map(lambda word: (word, 1)).reduceByKey(lambda v1, v2: v1 + v2)

    # filter out words with fewer than threshold occurrences
    filtered = wordCounts.filter(lambda pair: pair[1] >= threshold)

    # count characters of the surviving words
    # (the original chained a redundant identity .map(lambda c: c) — removed)
    charCounts = (filtered.flatMap(lambda pair: pair[0])
                          .map(lambda c: (c, 1))
                          .reduceByKey(lambda v1, v2: v1 + v2))

    # Bug fix: the original bound the result to `list`, shadowing the builtin.
    char_count_list = charCounts.collect()
    print(repr(char_count_list)[1:-1])

# Display the SparkContext (notebook cell echo; only defined when run as main).
sc
| Untitled1.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# language: python
# name: python3
# ---
# # Medium
# Suppose an array sorted in ascending order is rotated at some pivot unknown to you beforehand.
#
# (i.e., [0,0,1,2,2,5,6] might become [2,5,6,0,0,1,2]).
#
# You are given a target value to search. If found in the array return true, otherwise return false.
#
# **Example 1:**
#
# Input: nums = [2,5,6,0,0,1,2], target = 0
# Output: true
#
# **Example 2:**
#
# Input: nums = [2,5,6,0,0,1,2], target = 3
# Output: false
#
# **Follow up:**
#
# - This is a follow up problem to Search in Rotated Sorted Array, where nums may contain duplicates.
# - Would this affect the run-time complexity? How and why?
# # Thought
#
# When nums[left] == nums[right] we cannot tell which half is sorted, which degrades the worst case to O(n). Duplicates therefore make the time complexity differ from LeetCode 33.
#
class Solution:
    """LeetCode 81 — search in a rotated sorted array that may contain duplicates."""

    def search(self, nums: List[int], target: int) -> bool:
        """Return True iff `target` occurs in `nums`.

        Standard binary search on a rotated array. When duplicates make it
        impossible to tell which half is sorted (nums[left] == nums[mid] ==
        nums[right]) both ends are shrunk by one, which degrades the worst
        case to O(n).

        Bug fix: the original comparison logic lost elements (e.g. it
        reported False for nums=[4,5,6,7,0,1,2], target=5) and left a
        debug print in the loop.
        """
        left, right = 0, len(nums) - 1
        while left <= right:
            mid = (left + right) // 2
            if nums[mid] == target:
                return True
            if nums[left] == nums[mid] == nums[right]:
                # Can't decide which side is sorted; discard both duplicates.
                left += 1
                right -= 1
            elif nums[left] <= nums[mid]:
                # Left half [left, mid] is sorted.
                if nums[left] <= target < nums[mid]:
                    right = mid - 1
                else:
                    left = mid + 1
            else:
                # Right half [mid, right] is sorted.
                if nums[mid] < target <= nums[right]:
                    left = mid + 1
                else:
                    right = mid - 1
        return False
| 81. Search in Rotated Sorted Array II.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# language: python
# name: python3
# ---
# + [markdown] id="ZoWs-Ug0VVih"
# # Recommendations in Keras using triplet loss
# Along the lines of BPR [1].
#
# [1] <NAME>, et al. "BPR: Bayesian personalized ranking from implicit feedback." Proceedings of the Twenty-Fifth Conference on Uncertainty in Artificial Intelligence. AUAI Press, 2009.
#
# This is implemented (more efficiently) in LightFM (https://github.com/lyst/lightfm). See the MovieLens example (https://github.com/lyst/lightfm/blob/master/examples/movielens/example.ipynb) for results comparable to this notebook.
#
# ## Set up the architecture
# A simple dense layer for both users and items: this is exactly equivalent to latent factor matrix when multiplied by binary user and item indices. There are three inputs: users, positive items, and negative items. In the triplet objective we try to make the positive item rank higher than the negative item for that user.
#
# Because we want just one single embedding for the items, we use shared weights for the positive and negative item inputs (a siamese architecture).
#
# This is all very simple but could be made arbitrarily complex, with more layers, conv layers and so on. I expect we'll be seeing a lot of papers doing just that.
#
# + id="Ehv1HJfNVViz" outputId="32cf3b33-5af2-41ef-c482-45d59dfe8ac3"
"""
Triplet loss network example for recommenders
"""
from __future__ import print_function
import numpy as np
from keras import backend as K
from keras.models import Model
from keras.layers import Embedding, Flatten, Input, merge
from keras.optimizers import Adam
import data
import metrics
def identity_loss(y_true, y_pred):
    """Pass-through Keras loss: the model's output already *is* the loss.

    `0 * y_true` keeps the (dummy) targets referenced in the graph without
    affecting the value, since Keras requires a y_true argument.
    """
    return K.mean(y_pred - 0 * y_true)
def bpr_triplet_loss(X):
    """BPR triplet loss for a (positive-item, negative-item, user) latent triple.

    Loss = 1 - sigmoid(u·p - u·n): small when the user's affinity for the
    positive item exceeds their affinity for the negative item.
    """
    positive_item_latent, negative_item_latent, user_latent = X

    # BPR loss
    loss = 1.0 - K.sigmoid(
        K.sum(user_latent * positive_item_latent, axis=-1, keepdims=True) -
        K.sum(user_latent * negative_item_latent, axis=-1, keepdims=True))

    return loss
def build_model(num_users, num_items, latent_dim):
    """Build the siamese triplet network.

    Three inputs (positive item, negative item, user id); one shared item
    embedding so positive and negative items map into the same latent space.
    The model's single output is the BPR triplet loss itself, trained via
    `identity_loss`.

    NOTE(review): uses the Keras 1.x `merge` function and `input=`/`output=`
    Model kwargs, which were removed in Keras 2 — confirm the pinned Keras
    version before reuse.
    """
    positive_item_input = Input((1, ), name='positive_item_input')
    negative_item_input = Input((1, ), name='negative_item_input')

    # Shared embedding layer for positive and negative items
    item_embedding_layer = Embedding(
        num_items, latent_dim, name='item_embedding', input_length=1)

    user_input = Input((1, ), name='user_input')

    positive_item_embedding = Flatten()(item_embedding_layer(
        positive_item_input))
    negative_item_embedding = Flatten()(item_embedding_layer(
        negative_item_input))
    user_embedding = Flatten()(Embedding(
        num_users, latent_dim, name='user_embedding', input_length=1)(
        user_input))

    # The "loss layer": merges the three embeddings through bpr_triplet_loss.
    loss = merge(
        [positive_item_embedding, negative_item_embedding, user_embedding],
        mode=bpr_triplet_loss,
        name='loss',
        output_shape=(1, ))

    model = Model(
        input=[positive_item_input, negative_item_input, user_input],
        output=loss)
    model.compile(loss=identity_loss, optimizer=Adam())

    return model
# + [markdown] id="LgYn0JfKVVi6"
# ## Load and transform data
# We're going to load the Movielens 100k dataset and create triplets of (user, known positive item, randomly sampled negative item).
#
# The success metric is AUC: in this case, the probability that a randomly chosen known positive item from the test set is ranked higher for a given user than a ranomly chosen negative item.
# + id="6hpUgQQTVVi7" outputId="352f7c6d-2044-4877-c3ea-8a759b2a5f73"
latent_dim = 100   # size of the user/item embedding vectors
num_epochs = 10

# Read data (MovieLens 100k interaction matrices — see the `data` helper module)
train, test = data.get_movielens_data()
num_users, num_items = train.shape

# Prepare the test triplets
test_uid, test_pid, test_nid = data.get_triplets(test)

model = build_model(num_users, num_items, latent_dim)

# Print the model structure
print(model.summary())

# Sanity check, should be around 0.5 (untrained model ranks randomly)
print('AUC before training %s' % metrics.full_auc(model, test))
# + [markdown] id="CwIn5EXVVVi9"
# ## Run the model
# Run for a couple of epochs, checking the AUC after every epoch.
# + id="juLku8-fVVi-" outputId="bf115138-957b-4695-a56e-cb2ab9f04a26"
for epoch in range(num_epochs):

    print('Epoch %s' % epoch)

    # Sample triplets from the training data (fresh negatives each epoch)
    uid, pid, nid = data.get_triplets(train)

    X = {
        'user_input': uid,
        'positive_item_input': pid,
        'negative_item_input': nid
    }

    # Targets are ignored by identity_loss; the ones are just a placeholder.
    model.fit(X,
              np.ones(len(uid)),
              batch_size=64,
              nb_epoch=1,
              verbose=0,
              shuffle=True)

    print('AUC %s' % metrics.full_auc(model, test))
# + [markdown] id="g-5lv2wWVVi_"
# The AUC is in the low-90s. At some point we start overfitting, so it would be a good idea to stop early or add some regularization.
| _source/raw/triplet_keras.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# language: python
# name: python3
# ---
# + deletable=true editable=true
# %matplotlib inline
# + deletable=true editable=true
import os
import sys
# Modify the path
sys.path.append("..")
import yellowbrick as yb
import matplotlib.pyplot as plt
from sklearn.datasets import load_boston
# + [markdown] deletable=true editable=true
# ## Using Yellowbrick to Boston Dataset
# + [markdown] deletable=true editable=true
# This is a user study to test YellowBrick library by exploring the Boston Dataset in sklearn.
# + deletable=true editable=true
#loading the dataset, extracting the data, targets and feature names
# NOTE(review): load_boston was removed from scikit-learn 1.2 — this cell
# only runs on older sklearn versions.
boston = load_boston()
X = boston.data[:, None, 0]   # keep a 2-D (n_samples, 1) shape for sklearn
# target contains the price
y = boston.target
# Use only one feature
features = boston.feature_names
# + deletable=true editable=true
print (boston.DESCR)
# + deletable=true editable=true
y.shape
# + [markdown] deletable=true editable=true
# ### Creating a LinearRegression model by splitting the data and targets into train & test datasets
# + deletable=true editable=true
## from sklearn.model_selection import train_test_split as tts
from sklearn.linear_model import LinearRegression
from yellowbrick.regressor import ResidualsPlot

model = LinearRegression()

# Split the data into training/testing sets
# (simple hold-out: the last 20 rows form the test set)
X_train = X[:-20]
X_test = X[-20:]

# Split the targets into training/testing sets
y_train = y[:-20]
y_test = y[-20:]
# + deletable=true editable=true
X_train.shape
# + deletable=true editable=true
# Train the model using the training sets
model.fit(X_train, y_train)

# The coefficients
print('Coefficients: \n', model.coef_)
# Explained variance score: 1 is perfect prediction
# NOTE(review): LinearRegression.score returns R^2, not explained variance.
print('Variance score: %.2f' % model.score(X_test, y_test))

# Plot outputs
plt.scatter(X_test, y_test, color='green')
plt.plot(X_test, model.predict(X_test), color='blue', linewidth=3)
plt.xticks(())
plt.yticks(())

plt.show()
# + deletable=true editable=true
from sklearn.model_selection import cross_val_predict
# 10-fold cross-validated predictions for every sample
y_pred = cross_val_predict(model, X, y, cv=10)
# + deletable=true editable=true
y_pred.shape
# + deletable=true editable=true
y.shape
# + deletable=true editable=true
# Measured-vs-predicted scatter with the identity line for reference
fig,ax = plt.subplots()
ax.scatter(y, y_pred)
ax.plot([y.min(), y.max()], [y.min(), y.max()], 'k--', lw=4)
ax.set_xlabel('Measured')
ax.set_ylabel('Predicted')
# + deletable=true editable=true
from yellowbrick.regressor import PredictionError
from yellowbrick.regressor import RegressionScoreVisualizer

y = y.reshape(-1, 1)
sviz = PredictionError(model)
# NOTE(review): score is being fed (y, y_pred) rather than (X, y) —
# confirm against the yellowbrick PredictionError API.
sviz.score(y, y_pred)
sviz.poof()
# + deletable=true editable=true
from sklearn.neighbors import KNeighborsRegressor
knn = KNeighborsRegressor()
knn.fit(X_train, y_train)
# + deletable=true editable=true
sviz = PredictionError(knn)
sviz.score(y, y_pred)
sviz.poof()
| examples/balavenkatesan/testing.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# name: python3
# ---
# + [markdown] id="view-in-github" colab_type="text"
# <a href="https://colab.research.google.com/github/M4rkAdrian/CPEN21A-1-2/blob/main/Prelim_Exam.ipynb" target="_parent"><img src="https://colab.research.google.com/assets/colab-badge.svg" alt="Open In Colab"/></a>
# + [markdown] id="5MWvrVmUoHtw"
# Prelim Exam
# + colab={"base_uri": "https://localhost:8080/"} id="JSh4dSM1oLUv" outputId="81de4c23-1d4f-4c98-9f8c-fc5bc4cabbb6"
class Student:
    """Simple record of a student's enrollment details."""

    def __init__(self, Name, Student_No, Age, School, Course):
        self.Name = Name
        self.Student_No = Student_No
        self.Age = Age
        self.School = School
        self.Course = Course

    def Info(self):
        """Print each detail on its own line, in declaration order."""
        for value in (self.Name, self.Student_No, self.Age,
                      self.School, self.Course):
            print(value)


Myself = Student("<NAME>", 202101472, 18,
                 "Cavite State University Delas Alas Campus",
                 "Bachelor of Science in Computer Engineering")
Myself.Info()
| Prelim_Exam.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# language: python
# name: python3
# ---
# +
import argparse
import copy
import sys
sys.path.append('../../')
import sopa.src.models.odenet_cifar10.layers as cifar10_models
from sopa.src.models.odenet_cifar10.utils import *
# +
parser = argparse.ArgumentParser()

# Architecture params
# NOTE(review): type=eval on a CLI flag executes arbitrary input — consider
# a safer boolean parser.
parser.add_argument('--is_odenet', type=eval, default=True, choices=[True, False])
parser.add_argument(
    '--network', type=str, default='premetanode10',
    choices=['metanode34', 'metanode18', 'metanode10', 'metanode6', 'metanode4',
             'premetanode34', 'premetanode18', 'premetanode10', 'premetanode6',
             'premetanode4'])
parser.add_argument('--in_planes', type=int, default=64)

# Each option family is repeated for the three layer groups below.
_LAYER_GROUPS = ('resblock', 'odeblock', 'bn1')

# Type of layer's output normalization
for _group in _LAYER_GROUPS:
    parser.add_argument('--normalization_%s' % _group, type=str, default='NF',
                        choices=['BN', 'GN', 'LN', 'IN', 'NF'])
parser.add_argument('--num_gn_groups', type=int, default=32,
                    help='Number of groups for GN normalization')

# Type of layer's weights normalization
for _group in _LAYER_GROUPS:
    parser.add_argument('--param_normalization_%s' % _group, type=str, default='PNF',
                        choices=['WN', 'SN', 'PNF'])

# Type of activation
for _group in _LAYER_GROUPS:
    parser.add_argument('--activation_%s' % _group, type=str, default='ReLU',
                        choices=['ReLU', 'GeLU', 'Softsign', 'Tanh', 'AF'])

args, unknown_args = parser.parse_known_args()
# +
# Initialize Neural ODE model
# Resolve the string-valued CLI choices into actual layer constructors,
# then instantiate the requested (pre)metanode architecture.
config = copy.deepcopy(args)

norm_layers = (get_normalization(config.normalization_resblock),
               get_normalization(config.normalization_odeblock),
               get_normalization(config.normalization_bn1))
param_norm_layers = (get_param_normalization(config.param_normalization_resblock),
                     get_param_normalization(config.param_normalization_odeblock),
                     get_param_normalization(config.param_normalization_bn1))
act_layers = (get_activation(config.activation_resblock),
              get_activation(config.activation_odeblock),
              get_activation(config.activation_bn1))

# Look up the network class by name in the odenet_cifar10 layers module.
model = getattr(cifar10_models, config.network)(norm_layers, param_norm_layers, act_layers,
                                                config.in_planes, is_odenet=config.is_odenet)
# -
model
| examples/cifar10/Build the model.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# name: python3
# ---
# + [markdown] id="view-in-github"
# <a href="https://colab.research.google.com/github/Spandan-Madan/Harvard_BAI/blob/main/assignment_1/colab_test.ipynb" target="_parent"><img src="https://colab.research.google.com/assets/colab-badge.svg" alt="Open In Colab"/></a>
# + colab={"base_uri": "https://localhost:8080/"} id="aLnC5242NF8P" executionInfo={"status": "ok", "timestamp": 1613524786242, "user_tz": 300, "elapsed": 292, "user": {"displayName": "<NAME>", "photoUrl": "", "userId": "18305993989659529139"}} outputId="2add21bf-d7ff-484a-ae9c-104de74a8b10"
from google.colab import drive
# Mount Google Drive into the Colab VM (triggers an interactive auth prompt).
drive.mount('/content/drive')
# + id="uxM6SAJ-NPzM" executionInfo={"status": "ok", "timestamp": 1613524786828, "user_tz": 300, "elapsed": 187, "user": {"displayName": "<NAME>", "photoUrl": "", "userId": "18305993989659529139"}}
import os
# Work from the course repository checkout stored on Drive.
os.chdir('/content/drive/MyDrive/Harvard_BAI')
# + id="JjHrCUjVXA2a" executionInfo={"status": "ok", "timestamp": 1613524787581, "user_tz": 300, "elapsed": 320, "user": {"displayName": "<NAME>", "photoUrl": "", "userId": "18305993989659529139"}}
### CREATE A PERSONAL ACCESS TOKEN FROM YOUR GITHUB ACCOUNT ####
# Go to Settings ---> Developer Settings ---> Generate new access token
# For scope just select the checkbox "repo".
# Copy the access token.
# Create a file (OUTSIDE YOUR GITHUB REPO) in your google drive access_token.txt and paste your access token to this file.
# I made mine at /content/drive/MyDrive/access_token.txt, which is the path below. If you make your file at another path, use that path below.
# + id="ZNP_CmRySKAs" executionInfo={"status": "ok", "timestamp": 1613524787928, "user_tz": 300, "elapsed": 189, "user": {"displayName": "<NAME>", "photoUrl": "", "userId": "18305993989659529139"}}
# Read the GitHub personal access token stored outside the repo (see notes above).
with open('/content/drive/MyDrive/access_token.txt','r') as F:
    contents = F.readlines()
# + id="k2Rv8UpFUo9B" executionInfo={"status": "ok", "timestamp": 1613524788344, "user_tz": 300, "elapsed": 229, "user": {"displayName": "<NAME>", "photoUrl": "", "userId": "18305993989659529139"}}
token = contents[0]  # first line of the file; may carry a trailing newline — TODO confirm
# + id="9t1e5nXXkrdF"
# !git config --global user.email "<YOUR EMAIL HERE>"
# !git config --global user.name "<YOUR GITHUB USERNAME HERE>"
# + id="p-bp2rIZkuz_"
# !git add assignment_2/Assignment2.ipynb
# !git add assignment_2/update_github.ipynb
# !git add assignment_2/todos.md
# + colab={"base_uri": "https://localhost:8080/"} id="ihMFaQH2Nbgj" executionInfo={"status": "ok", "timestamp": 1613524796844, "user_tz": 300, "elapsed": 442, "user": {"displayName": "<NAME>", "photoUrl": "", "userId": "18305993989659529139"}} outputId="20a34151-eeed-4c1a-88f0-118749d5d7c5"
# !git commit -m "Updating Assignment2 + Todos"
# + id="NV7bGbYyQ8Dk"
# + id="MTdR75I7RZX6"
# THE FIRST TIME YOU RUN THIS CODE, UNCOMMENT LINES BELOW. AFTER THAT COMMENT THEM BACK.
# # !git remote rm origin
# # !git remote add origin https://Spandan-Madan:$token@github.com/Spandan-Madan/Harvard_BAI.git
# + colab={"base_uri": "https://localhost:8080/"} id="cT6nlp_-NgEa" executionInfo={"status": "ok", "timestamp": 1613524804347, "user_tz": 300, "elapsed": 3334, "user": {"displayName": "<NAME>", "photoUrl": "", "userId": "18305993989659529139"}} outputId="63619454-036d-409b-c2ef-db5ada0370a8"
# !git push -u origin main
# + id="zzzv2-YBizvP"
| assignment_2/update_github.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# language: python
# name: python3
# ---
# Nessie Demo
# ===========
# This demo showcases how to use Nessie python API along with Spark
#
# Initialize Pyspark + Nessie environment
# ----------------------------------------------
# +
import os

import findspark
from pyspark.sql import *
from pyspark import SparkConf, SparkContext
from py4j.java_gateway import java_import

findspark.init()

# Spark session wired to a local Nessie server (port 19120) with the Delta
# Lake catalog and Nessie's log-store/log-handler overrides installed.
spark = SparkSession.builder \
        .config("spark.jars", "../../clients/deltalake/spark3/target/nessie-deltalake-spark3-0.2.2-SNAPSHOT.jar") \
        .config("spark.sql.execution.pyarrow.enabled", "true") \
        .config("spark.hadoop.fs.defaultFS", 'file://' + os.getcwd() + '/spark_warehouse') \
        .config("spark.hadoop.nessie.url", "http://localhost:19120/api/v1") \
        .config("spark.hadoop.nessie.ref", "main") \
        .config("spark.sql.catalog.spark_catalog", "org.apache.spark.sql.delta.catalog.DeltaCatalog") \
        .config("spark.sql.extensions", "io.delta.sql.DeltaSparkSessionExtension") \
        .config("spark.delta.logFileHandler.class", "com.dremio.nessie.deltalake.NessieLogFileMetaParser") \
        .config("spark.delta.logStore.class", "com.dremio.nessie.deltalake.NessieLogStore") \
        .getOrCreate()

sc = spark.sparkContext
jvm = sc._gateway.jvm

# Make Delta's Java-side classes callable from Python through py4j.
java_import(jvm, "org.apache.spark.sql.delta.DeltaLog")
java_import(jvm, "io.delta.tables.DeltaTable")
# -
# Set up nessie branches
# ----------------------------
#
# - Branch `main` already exists
# - Create branch `dev`
# - List all branches (pipe JSON result into jq)
# !nessie branch dev
# !nessie --verbose branch
# Create tables under dev branch
# -------------------------------------
#
# Creating two tables under the `dev` branch:
# - region
# - nation
#
# It is not yet possible to create table using pyspark and iceberg, so Java code
# is used instead
# +
hadoop_conf = sc._jsc.hadoopConfiguration()
# Point subsequent table writes at the `dev` branch.
hadoop_conf.set("nessie.ref", "dev")

region_df = spark.read.load("data/region.parquet")
region_df.write.format("delta").save("spark_warehouse/testing/region")

nation_df = spark.read.load("data/nation.parquet")
nation_df.write.format("delta").save("spark_warehouse/testing/nation")
# -
# Check generated tables
# ----------------------------
#
# Check tables generated under the dev branch (and that the main branch does not
# have any tables)
# !nessie contents --list
# !nessie contents --list --ref dev
# !nessie --verbose branch
# Dev promotion
# -------------
#
# Promote dev branch promotion to main
# !nessie merge dev --force
# !nessie contents --list
# !nessie --verbose branch
# Create `etl` branch
# ----------------------
#
# - Create a branch `etl` out of `main`
# - add data to nation
# - alter region
# - create table city
# - query the tables in `etl`
# - query the tables in `main`
# - promote `etl` branch to `main`
# !nessie branch etl main
# Switch the default Nessie reference to the new `etl` branch.
hadoop_conf.set("nessie.ref", "etl")

Nation = Row("N_NATIONKEY", "N_NAME", "N_REGIONKEY", "N_COMMENT")
new_nations = spark.createDataFrame([
    Nation(25, "SYLDAVIA", 3, "King Ottokar's Sceptre"),
    Nation(26, "SAN THEODOROS", 1, "The Picaros")])
new_nations.write.option('hadoop.nessie.ref', 'etl').format("delta").mode("append").save("testing.nation")

# changing the default branch
hadoop_conf.set('nessie.ref', 'etl')

base_table = os.getcwd() + "/spark_warehouse/testing/"
spark.sql("ALTER TABLE delta.`" + base_table + "region` ADD COLUMNS (R_ABBREV STRING)")

# Creating city table
sc.getConf().set("spark.hadoop.nessie.ref", "etl")
spark.sql("CREATE TABLE city (C_CITYKEY BIGINT, C_NAME STRING, N_NATIONKEY BIGINT, C_COMMNT STRING) USING delta PARTITIONED BY (N_NATIONKEY) LOCATION 'spark_warehouse/testing/city'")

from pynessie import init
nessie = init()

# Inspect keys/branches via the Nessie python client, then merge etl -> main.
nessie.list_keys('main').entries

[i.name for i in nessie.list_keys('etl').entries]

{i.name:i.hash_ for i in nessie.list_references()}

nessie.merge('main', 'etl')

{i.name:i.hash_ for i in nessie.list_references()}
# Create `experiment` branch
# --------------------------------
#
# - create `experiment` branch from `main`
# - drop `nation` table
# - add data to `region` table
# - compare `experiment` and `main` tables
# !nessie branch experiment main
# +
# changing the default branch
hadoop_conf.set('nessie.ref', 'experiment')
jvm.DeltaLog.clearCache()

# Delete all rows of `nation` on the experiment branch via the Java Delta API.
deltaTable = jvm.DeltaTable.forPath("spark_warehouse/testing/nation")
deltaTable.delete()
# -
spark.sql("set spark.hadoop.nessie.ref=experiment")
spark.sql('INSERT INTO TABLE delta.`' + base_table + 'region` VALUES (5, "AUSTRALIA", "Let\'s hop there", "AUS")')
spark.sql('INSERT INTO TABLE delta.`' + base_table + 'region` VALUES (6, "ANTARTICA", "It\'s cold", "ANT")')
# !nessie contents --list --ref experiment
spark.sql("select * from delta.`" + base_table + "region`").toPandas()
# The branch used for Delta queries should be changed manually to query a
# different branch
hadoop_conf.set('nessie.ref', 'main')
jvm.DeltaLog.clearCache()
spark.sql("set spark.hadoop.nessie.ref=main")
# NOTE(review): absolute path below is machine-specific (/home/ryan/...) —
# parameterize before reuse.
spark.sql("select * from delta.`/home/ryan/workspace/nessie/python/demo/spark_warehouse/testing/region`").toPandas()
| python/demo/nessie-delta-demo.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# language: python
# name: python3
# ---
# Digital temperature sensor
# ======================
#
# For this exercise, you need a ds18b20 digital temperature sensor and a 4.7k ohm resistor.
#
# First you need to add this line to /boot/config.txt:
#
# ```
# dtoverlay=w1-gpio
# ```
#
# Reboot and connect it up:
#
# * 4.7k resistor connects between pi pin 1 and pi pin 7
# * ds18b20 pin 1 to pi pin 6
# * ds18b20 pin 2 to pi pin 7
# * ds18b20 pin 3 to pi pin 1
#
# Notice you can add multiple temperature sensors. read_temp takes as its parameter the index of the temperature to check, indexed from 0.
# +
from IPython.display import HTML
import os
import glob
import time
# Sysfs directory where 1-wire slave devices appear; the ds18b20 family code is 28.
base_dir = '/sys/bus/w1/devices/'
device_folders = sorted(glob.glob(base_dir + '28*'))
def read_temp_raw(id):
    """Return the raw w1_slave file lines for the sensor at index `id`."""
    sensor_path = device_folders[id] + '/w1_slave'
    with open(sensor_path, 'r') as sensor_file:
        return sensor_file.readlines()
def read_temp(id):
    """Read sensor `id` and return its temperature in Fahrenheit.

    Retries until the device reports a valid CRC ('YES' at the end of the
    first line), then parses the 't=' millidegrees-Celsius field from the
    second line.
    """
    lines = read_temp_raw(id)
    while lines[0].strip()[-3:] != 'YES':
        time.sleep(0.2)
        # Bug fix: the retry called read_temp_raw() without the sensor index,
        # which raised TypeError on any CRC failure.
        lines = read_temp_raw(id)
    equals_pos = lines[1].find('t=')
    if equals_pos != -1:
        temp_string = lines[1][equals_pos+2:]
        temp_c = float(temp_string) / 1000.0
        temp_f = temp_c * 9.0 / 5.0 + 32.0
        return temp_f  # Fahrenheit only (Celsius available as temp_c)
# Live-updating strip chart: SmoothieChart on the notebook frontend polls
# read_temp(0) every 1.5 s via Jupyter.notebook.kernel.execute and appends
# each value to the time series; errors stop the polling loop.
HTML("""
<script type="text/javascript" src="js/smoothie.js"></script>
<script type="text/Javascript">
var running = false;
var sc = new SmoothieChart({
interpolation: 'linear', millisPerPixel: 75,
grid: { strokeStyle:'rgb(125, 0, 0)', fillStyle:'rgb(60, 0, 0)',
lineWidth: 1, millisPerLine: 250, verticalSections: 6 },
labels: { fillStyle:'rgb(255, 255, 255)' } });
var line1 = new TimeSeries();
sc.addTimeSeries(line1,
{ strokeStyle:'rgb(0, 255, 0)', fillStyle:'rgba(0, 255, 0, 0.4)', lineWidth:3 });
sc.streamTo(document.getElementById("graphcanvas1"));
function append_value(out) {
if(out.msg_type == 'error') {
running = false;
} else {
var value = out.content.data["text/plain"];
line1.append(new Date().getTime(), value);
}
}
function watch_input() {
if(running) {
Jupyter.notebook.kernel.execute("read_temp(0)",
{iopub: {output: append_value}}, {silent: false});
setTimeout(watch_input, 1500);
}
}
setTimeout(function() { running = true; watch_input(); }, 3000);
</script>
<canvas id="graphcanvas1" width="700" height="200"/>
""")
# -
| learner4.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3.5
# language: python
# name: python3
# ---
import pandas as pd
import numpy as np
# Load the real-estate valuation data set; the CSV is expected in the working
# directory.
df_realestate = pd.read_csv('RealEstate.csv')
# Quick exploratory checks: dimensions, sample rows, dtypes, missing values.
df_realestate.shape
df_realestate.head()
df_realestate.dtypes
df_realestate.isnull().sum()
# 'No' is just a row identifier with no predictive signal.
df_realestate.drop(['No'], axis=1, inplace=True)
df_realestate.corr()
# Split into features X and target y (house price of unit area).
df_realestate_X = df_realestate.drop(['Y house price of unit area'], axis=1)
df_realestate_y = df_realestate[['Y house price of unit area']]
print(df_realestate_X.shape)
print(df_realestate_y.shape)
from sklearn.preprocessing import StandardScaler
# Standardise features and target separately (zero mean, unit variance).
sc_X=StandardScaler()
df_realestate_X1 = sc_X.fit_transform(df_realestate_X)
sc_y=StandardScaler()
df_realestate_y1 = sc_y.fit_transform(df_realestate_y)
import statsmodels.formula.api as sm
# NOTE(review): recent statsmodels exposes OLS via statsmodels.api rather than
# statsmodels.formula.api -- confirm against the installed version.
# Prepend an intercept column of ones; 414 is the row count of this CSV
# (checked via df_realestate.shape above).
X=np.append(arr=np.ones((414,1)).astype(int), values=df_realestate_X1, axis=1)
X_opt=X[:,1:]
X_opt.shape
y=df_realestate_y1
# Fit OLS on all predictors and inspect the summary (p-values).
regressor_OLS = sm.OLS(endog=y, exog=X_opt).fit()
regressor_OLS.summary()
# Refit keeping only the first five predictor columns.
X_opt=X[:,1:6]
X_opt.shape
regressor_OLS = sm.OLS(endog=y, exog=X_opt).fit()
regressor_OLS.summary()
df_realestate_X1[:,1:6]
from sklearn.model_selection import train_test_split
X_train, X_test, y_train, y_test = train_test_split(df_realestate_X1, df_realestate_y1, test_size=0.3, random_state=0)
from sklearn.linear_model import LinearRegression, LogisticRegression, SGDRegressor, Lasso, Ridge
from sklearn.neighbors import KNeighborsRegressor, KNeighborsClassifier
from sklearn.svm import SVR, SVC
from sklearn.metrics import mean_absolute_error, accuracy_score
from scipy.stats import mode
# ### Average Ensembling
# +
# Average ensembling: train three heterogeneous regressors (1-NN, Lasso,
# polynomial SVR) and predict with the unweighted mean of their outputs.
knn_regressor = KNeighborsRegressor(1)
lasso_regressor = Lasso()
svr_regressor = SVR(kernel="poly", degree=5)
knn_regressor.fit(X_train, y_train)
lasso_regressor.fit(X_train, y_train)
svr_regressor.fit(X_train, y_train)
y_pred1 = knn_regressor.predict(X_test)
y_pred2 = lasso_regressor.predict(X_test)
y_pred3 = svr_regressor.predict(X_test)
# Reshape every prediction to (n_test, 1).  Fix: use y_test.shape[0] instead of
# the hard-coded 125 (= 30% of 414) so the cell keeps working if the dataset
# size or test_size changes.
y_pred1 = y_pred1.reshape(y_test.shape[0], 1)
y_pred2 = y_pred2.reshape(y_test.shape[0], 1)
y_pred3 = y_pred3.reshape(y_test.shape[0], 1)
avg_pred = np.mean([y_pred1,y_pred2,y_pred3], axis =0)
# -
# Compare the ensemble's error with each individual model's error.
print("Average Ensembler Mean Absolute Error:", mean_absolute_error(y_test, avg_pred))
print("KNN Mean Absolute Error:", mean_absolute_error(y_test, y_pred1))
print("Lasso Mean Absolute Error:", mean_absolute_error(y_test, y_pred2))
print("SVR Mean Absolute Error:", mean_absolute_error(y_test, y_pred3))
# ### Weighted Average Ensembling
# +
# Weighted-average ensembling: same three regressors, with weights 0.40 (KNN),
# 0.20 (Lasso), 0.40 (SVR).
knn_regressor = KNeighborsRegressor(1)
lasso_regressor = Lasso()
svr_regressor = SVR(kernel="poly", degree=5)
knn_regressor.fit(X_train, y_train)
lasso_regressor.fit(X_train, y_train)
svr_regressor.fit(X_train, y_train)
y_pred1 = knn_regressor.predict(X_test)
y_pred2 = lasso_regressor.predict(X_test)
y_pred3 = svr_regressor.predict(X_test)
# Fix: use y_test.shape[0] rather than the hard-coded 125 so the reshapes
# track the actual test-set size.
y_pred1 = y_pred1.reshape(y_test.shape[0], 1)
y_pred2 = y_pred2.reshape(y_test.shape[0], 1)
y_pred3 = y_pred3.reshape(y_test.shape[0], 1)
weighted_pred = (y_pred1*0.40+y_pred2*0.20+y_pred3*0.40)
# -
print("Weightage Average Ensembler Mean Absolute Error:", mean_absolute_error(y_test, weighted_pred))
print("KNN Mean Absolute Error:", mean_absolute_error(y_test, y_pred1))
print("Lasso Mean Absolute Error:", mean_absolute_error(y_test, y_pred2))
print("SVR Mean Absolute Error:", mean_absolute_error(y_test, y_pred3))
# ### Max Voting Ensembling
# +
# Max-voting needs class labels, so bin the continuous price into 5 buckets.
category = pd.cut(df_realestate['Y house price of unit area'],bins=[0,20,40,60,100,120],labels=['20','40','60','100','120'])
X_train, X_test, y_train, y_test = train_test_split(df_realestate_X1, category, test_size=0.3, random_state=0)
# Three heterogeneous classifiers for the voting ensemble.
knn_classifier = KNeighborsClassifier(1)
logistic_regressor = LogisticRegression()
svc_classifier = SVC(kernel="poly", degree=5)
knn_classifier.fit(X_train, y_train)
logistic_regressor.fit(X_train, y_train)
svc_classifier.fit(X_train, y_train)
y_pred1 = knn_classifier.predict(X_test)
y_pred2 = logistic_regressor.predict(X_test)
y_pred3 = svc_classifier.predict(X_test)
# Majority vote per test sample via scipy.stats.mode.
# NOTE(review): mode(...)[0][0] relies on the pre-1.9 scipy return shape
# (2-D mode array); newer scipy changed this via keepdims -- confirm the
# installed version.
maxvoted_pred = []
for i in range(y_test.shape[0]):
    maxvoted_pred.append(mode([y_pred1[i], y_pred2[i], y_pred3[i]])[0][0])
print(maxvoted_pred)
# -
print("Max Voted Ensembler Accuracy:", accuracy_score(y_test, maxvoted_pred)*100)
print("KNN Accuracy:", accuracy_score(y_test, y_pred1)*100)
print("Logistic Regression Accuracy:", accuracy_score(y_test, y_pred2)*100)
print("SVC Accuracy:", accuracy_score(y_test, y_pred3)*100)
# #### sklearn implementation for MaxVoting
# +
from sklearn.ensemble import VotingClassifier

# Hard (majority-rule) voting over the three classifiers trained above,
# using sklearn's built-in implementation instead of the manual loop.
voting_members = [('KNN', knn_classifier),
                  ('Logistic Regression', logistic_regressor),
                  ('SVC', svc_classifier)]
maxVotingClassifier = VotingClassifier(estimators=voting_members, voting='hard')
maxVotingClassifier.fit(X_train, y_train)
y_pred = maxVotingClassifier.predict(X_test)
print("Sklearn Max Voting Classifier Accuracy:", accuracy_score(y_test, y_pred)*100)
# -
| EnsembleTechniques1.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# language: python
# name: python3
# ---
# # _Crash course_ de _conda_
# ## Objetivos
#
# - Aprender a utilizar o _conda_ para gerenciar a instalação de pacotes, dependências e ambientes;
# - Entender conceitos de distribuição, módulo e repositório;
# - Compreender comandos do _conda_ e praticá-los em terminal;
# ## Pré-requisitos
#
# - [Instalação](https://docs.anaconda.com/anaconda/install/) da distribuição Anaconda (recomenda-se Python 3.x);
# - Acesso simples a um terminal ou ao _Anaconda Prompt_;
# ## Introdução
#
# - _conda_ é um gerenciador de pacotes, dependências e ambientes para múltiplas linguagens;
#
# - Pacote de código aberto executável em Windows, macOS e Linux;
#
# - Seu objetivo é fornecer suporte à rápida instalação, desinstalação e atualização de pacotes
#
# - Embora criado para Python, hoje pode ser aplicado a softwares escritos em R, Ruby, Lua, Java, Javascript, C++ etc.
#
# - Atualmente, há cerca de 7500 pacotes gerenciados no repositório [repo.anaconda.com](http://repo.anaconda.com) que são mantidos pela empresa Anaconda Inc.
# ### Benefícios do _conda_ para cientistas de dados
#
# - Fornece pacotes pré-construídos que evitam a necessidade de lidar com compiladores ou saber como configurar uma ferramenta específica;
#
# - Gerencia instalações de ferramentas que são mais difíceis de instalar (como TensorFlow) com apenas um passo;
#
# - Permite que você forneça seu ambiente a outras pessoas em diferentes plataformas, assim oferecendo suporte à reprodutibilidade de fluxos de trabalho de pesquisa;
#
# - Permite o uso de outras ferramentas de gerenciamento de pacotes, tais como _pip_, dentro de ambientes _conda_ onde uma biblioteca ou ferramentas ainda não foram empacotadas para _conda_;
#
# - Fornece bibliotecas e ferramentas de ciência de dados comumente usadas, tais como _R_, _NumPy_, _SciPy_ e _TensorFlow_. Estes são construídos usando bibliotecas específicas de hardware otimizadas (como MKL da Intel, ou CUDA da NVIDIA) que aceleram o desempenho sem alterações de código.
# ## Conceitos fundamentais
#
# - Anaconda: distribuição de código aberto, alto desempenho e otimizada para Python e R.
#
# - Anaconda Cloud: repositório de pacotes hospedado na web (nuvem)
#
# - Anaconda Navigator: interface gráfica incluída na distribuição para fácil gestão de pacotes, ambientes e canais.
#
# - Canal: local dos repositórios onde o _conda_ procura por pacotes. Pode ser um repositório público, na web, ou privado, dentro da universidade, em uma empresa, na sua casa, etc.
#
# - _conda_: gerenciador de pacotes e ambientes que vem incluído na distribuição.
#
# - _conda environment_ (Ambiente): diretório que contém uma coleção específica de pacotes e dependências que pode ser administrado separadamente. Por exemplo, é possível manter um ambiente Python 2 e Python 3 totalmente isolados sem que um interfira em outro.
#
# - _conda package_ (Pacote): arquivo comprimido que contém todos os elementos necessários para o funcionamento de um software: bibliotecas, módulos, executáveis e componentes.
#
# - _conda repository_ (Repositório, ou _repo_): o repositório em nuvem mantido pela Anaconda.
#
# - Miniconda: é uma versão menor da distribuição que inclui apenas pacotes essenciais, tais como `conda`, `pip`, `zlib` e outros considerados básicos. Pode ser expandido pela instalação de pacotes adicionais.
#
# - _noarch package_ (Pacote independente de arquitetura): pacote que não contém nada específico à arquitetura de um sistema e que pode ser instalado em qualquer plataforma. `noarch` constitui um subdiretório em um canal.
#
# - Repositório: qualquer local onde ativos de software são armazenados e podem ser baixados ou recuperados para instalação e uso em computadores.
#
#
# ```{note}
# Se você é novo com Python ou conda, recomenda-se instalar a distribuição _Anaconda_ (inteira) em vez da _Miniconda_, embora mais tempo e espaço em disco sejam necessários. A distribuição padrão requer cerca de 3 Gb de espaço em disco, ao passo que a _Miniconda_ ocupa em torno de 400 Mb.
# ```
# ## Comandos fundamentais
#
# A lista a seguir não é exaustiva e contempla os comandos `conda` mais frequentes. Baseia-se na [[Conda Cheat Sheet]](https://docs.conda.io/projects/conda/en/latest/_downloads/843d9e0198f2a193a3484886fa28163c/conda-cheatsheet.pdf)
#
# Aqui, dividiremos os comandos nos seguintes grupos:
#
# 1. informação e atualização
# 2. ambientes
# 3. pacotes e canais
# 4. adicionais
# ### Comandos para manutenção e atualização
#
# |comando|o que faz?|
# |---|---|
# |`conda --version` ou `conda -V`|verifica se conda está instalado |
# |`conda info`|verifica instalação e versão do conda|
# |`conda update -n base conda`|atualiza o gerenciador para a versão atual|
# |`conda update conda`|idem|
# |`conda update anaconda`|atualiza todos os pacotes da distribuição para versões estáveis|
# ### Comandos para trabalhar com ambientes
#
# |comando|o que faz?|
# |---|---|
# |`conda create --name AMB python=3.x "PKG1>v.s" PKG2`|cria novo ambiente com nome "AMB" para funcionar com a versão Python 3.x e instala neste ambiente os pacotes PKG1 e PKG2, sendo o primeiro na versão específica "v.s" e o outro a estável mais atual|
# |`conda activate AMB`|ativa o ambiente de nome AMB|
# |`conda activate /caminho/para/amb`|ativa um ambiente dado seu local|
# |`conda deactivate`|desativa o ambiente ativo|
# |`conda list`|lista todos os pacotes do ambiente ativo|
# |`conda list --name AMB`|lista todos os pacotes do ambiente AMB|
# |`conda remove --name AMB --all`|deleta todo o ambiente AMB|
# |`conda create --clone AMB --name NAMB`|faz um clone NAMB de AMB|
# |`conda env export --name AMB > amb.yml`|exporta configurações de AMB em um arquivo YAML|
# |`conda env create --file amb.yml`|cria AMB a partir de configurações contidas em um arquivo YAML|
#
# ```{note}
# YAML (acrônimo para "YAML Ain't Markup Language") é uma linguagem de serialização de dados legível por humanos comumente usada para organizar arquivos de configuração. É utilizada em múltiplas linguagens. Veja [[YAML]](https://yaml.org).
# ```
#
# ```{warning}
# `conda activate` e `conda deactivate` somente funcionam a partir das versões 4.6 do `conda`. Para versões anteriores, no Windows usa-se `activate`/`deactivate` e no macOS, usa-se `source activate`/`source deactivate`.
# ```
# + [markdown] tags=[]
# ### Comandos para trabalhar com pacotes e canais
#
# |comando|o que faz?|
# |---|---|
# |`conda search PCT=2.8 "PCT [version='>=2.8,<3.2']"`|procura pelo pacote PCT nos canais configurados cuja versão esteja no intervalo 2.8 <= v < 3.2|
# |`conda install PCT`|instala o pacote PCT, se disponível|
# |`conda install -c CH PCT`|instala o pacote AMB a partir do canal CH|
# |`conda install PCT==4.1.0`|instala o PCT com a versão especificada (4.1.0)|
# |`conda install "PCT[version='3.1.0\|3.1.1']"`|instala pacote com uma das versões especificadas (OU)|
# |`conda install "PCT>3.1,<3.5" `|instala uma das das versões do pacote especificadas (E)|
# -
# ```{note}
# A lista de canais padrão utilizadas pela distribuição fica armazenada no arquivo oculto `.condarc`. A partir do caderno interativo, execute o comando `!cat ~/.condarc` para uma visão geral do conteúdo do arquivo `.condarc`.
# ```
# + [markdown] tags=[]
# ### Comandos adicionais
#
# |comando|o que faz?|
# |---|---|
# |`conda search AMB --info`|fornece informação detalhada sobre o pacote AMB|
# |`conda clean --all`|remove pacotes inutilizados|
# |`conda uninstall PCT --name AMB`|remove o pacote PCT do ambiente AMB|
# |`conda update --all --name AMB`|atualiza todos os pacotes do ambiente AMB|
# |`conda install --yes PCT1 PCT2`|instala pacotes sem exigir prompt do usuário|
# |`conda -h`|para obter ajuda sobre os comandos disponíveis do gerenciador|
# -
# ```{tip}
# Em muitos casos, opções de comandos que são precedidas por 2 hífens (`--`) podem ser abreviadas para apenas 1 hífen e a primeira letra da opção. Então, `--name` e `-n`, bem como `--envs` e `-e` são opções equivalentes.
# ```
# ## Exemplos
#
# - Criar ambiente chamado "dataScience" com versão Python 3.8 contendo os pacotes numpy, versão 1.19.1, e pandas, mais atual no repositório Anaconda.
#
# ```bash
# conda create --name dataScience python=3.8 numpy=1.19.1 pandas
# ```
#
# - Alternar entre ambientes
#
# Abaixo, vamos reproduzir a mudança de um ambiente para outro em um Z Shell apontando para a pasta _ICD_ e mostrar que o pacote `scipy` está instalado em um ambiente, mas não em outro.
#
#
# ```bash
# # no ambiente 'base', procuramos pelo pacote 'scipy'
# (base) gustavo@GloryCrown ICD % conda list scipy
# ```
# ```
# # packages in environment at /Users/gustavo/opt/anaconda3:
# #
# # Name Version Build Channel
# scipy 1.6.2 py38hd5f7400_1
# ```
#
# ```bash
# # ativamos um novo ambiente chamado 'lecture'
# (base) gustavo@GloryCrown ICD % conda activate lecture
# (lecture) gustavo@GloryCrown ICD %
# ```
#
# ```bash
# # dentro do ambiente 'lecture', procuramos pelo pacote 'scipy'
# (lecture) gustavo@GloryCrown ICD % conda list scipy
# ```
#
# ```
# # packages in environment at /Users/gustavo/opt/anaconda3/envs/lecture:
# #
# # Name Version Build Channel
# ```
# Nada é mostrado, significando que o pacote `scipy` está indisponível no ambiente `lecture`. Enfim, desativamos o ambiente ativo.
#
# ```bash
# # desativamos 'lecture' e voltamos para 'base'
# (lecture) gustavo@GloryCrown ICD % conda deactivate
# (base) gustavo@GloryCrown ICD %
# ```
#
# - Criar arquivo YAML para construção de ambiente personalizado
#
# - Abra seu editor de texto preferido (sugestão: no Windows, `notepad++`; no Linux, `gedit`; no macOS, `TextEdit`);
# - Salve o arquivo como `icd.yml`;
# - Personalize o seu ambiente (use o modelo a seguir);
# - Use o comando `conda env create -f icd.yml` para criar o ambiente;
# - Verifique se o ambiente foi criado corretamente com `conda env list`. Você deve ver algo como:
#
# ```
# (base) gustavo@GloryCrown ICD % conda env list
# # conda environments:
# #
# base * /Users/gustavo/opt/anaconda3
# icd /Users/gustavo/opt/anaconda3/envs/icd
# ```
# ```yaml
# # Conteúdo do arquivo "icd.yaml"
# # para construir o ambiente 'icd'
# name: icd # nome do ambiente
# channels: # lista de canais a utilizar
# - defaults # canais padrão
# - conda-forge
# dependencies: # pacotes dependentes
# - numpy
# - scipy
# - sympy
# - matplotlib
# - pandas
# - seaborn
# ```
# ```{tip}
# Quando você precisar atualizar o seu ambiente de ciência de dados, seja porque necessita de um pacote novo, ou porque encontrou um pacote melhor, basta atualizar o conteúdo do arquivo `icd.yml` e então executar o seguinte comando: `conda env update --file icd.yml --prune`. A opção `--prune` faz com que o conda remova dependências que não forem mais necessárias.
# ```
# ## Desempenho do _conda_
#
# A instalação de pacotes pode tornar-se lenta por uma variedade de motivos: processamento de metadados, velocidade de internet, verificação de dependências, etc. À medida que o número de pacotes disponíveis aumenta, a busca que o _conda_ realiza pode perdurar por mais tempo.
#
# Caso ao tentar instalar um pacote haja lentidão, há algumas questões a analisar. Verifique se há dependências instaladas via `pip`; se os canais estão disponíveis; se o pacote que você está tentando instalar está disponível.
#
# Para melhorar o desempenho consulte recomendações [aqui](https://docs.conda.io/projects/conda/en/latest/user-guide/concepts/conda-performance.html#improving-conda-performance).
#
# ```{hint}
# Trabalhar com ambientes menores e dedicados é sempre melhor e mais rápido do que ter ambientes grandes, que são bem mais difíceis de gerenciar. Para ganhar desempenho, reduza a complexidade de seus ambientes.
# ```
# ## Exercícios
#
# - Usando o `conda`, crie um novo ambiente de trabalho para o curso chamado `icd`.
# - Ative o novo ambiente e instale:
# - o pacote `numpy` em sua versão mais recente;
# - o pacote `scipy` na versão 1.6.0;
# - Execute o comando `conda list ncurses`. Qual é a resposta? Ele está instalado? Qual é a versão?
# - Use um comando para buscar informações sobre a versão mais recente do pacote `sympy`. Quantas e quais são as suas dependências?
# - Desinstale o pacote `scipy`. Quantos pacotes permaneceram em seu ambiente?
# - Desinstale o pacote `numpy`. Algum pacote ainda permaneceu em seu ambiente? Por quê?
# - Desative o ambiente `icd` e delete-o completamente.
# - Crie um novo ambiente personalizado `icd` a partir de um arquivo YAML.
| ipynb/.ipynb_checkpoints/02b-conda-checkpoint.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# language: python
# name: python3
# ---
# +
import cmocean.cm as cm
import h5py
import matplotlib as mpl
import matplotlib.colorbar as colorbar
import matplotlib.colors as mplcolours
import matplotlib.pyplot as plt
import numpy as np
import pandas as pd
import xarray as xr
# %matplotlib inline
# -
# SalishSeaCast NEMO mesh mask.  NOTE(review): `mesh` is opened here but never
# referenced later in this notebook -- possibly dead code; confirm before removing.
mesh = xr.open_dataset('~/MEOPAR/grid/mesh_mask201702.nc')
# +
def _load_sro(path):
    """Read a .sro oil-output table into a DataFrame.

    Skips the 4-line banner, drops the units row (index label 0) and the
    4 summary rows at the end of the file.  Factored out because the same
    read/drop sequence was duplicated four times below.
    """
    frame = pd.read_csv(path, sep='\s+', skiprows=4)
    frame = frame.drop([0], axis=0)
    n = len(frame)
    # After dropping label 0 the remaining labels run 1..n, so the last four
    # rows carry labels n-3 .. n.
    return frame.drop([n-3, n-2, n-1, n], axis=0)

# One table per particle-count experiment (2000 / 5000 / 10000 / 20000).
oil0d2000 = _load_sro('/data/sallen/results/MIDOSS/ParticleNoTests/SoG_2000_AKNS/resOilOutput.sro')
oil0d5000 = _load_sro('/data/sallen/results/MIDOSS/ParticleNoTests/SoG_5000_AKNS/resOilOutput.sro')
oil0d10k = _load_sro('/data/sallen/results/MIDOSS/ParticleNoTests/SoG_10000_AKNS/resOilOutput.sro')
oil0d20k = _load_sro('/data/sallen/results/MIDOSS/ParticleNoTests/SoG_20000_AKNS/resOilOutput.sro')
# -
# Spilled-oil surface area for every particle-count run, on a single axis.
fig, ax = plt.subplots(1, 1, figsize=(15,5))
runs = ((oil0d2000, "2000"), (oil0d5000, "5000"),
        (oil0d10k, "10000"), (oil0d20k, "20000"))
for frame, label in runs:
    frame.Area.plot(ax=ax, label=label)
plt.grid()
plt.legend()
# +
# Mass budget for the 2000-particle run: the individual components plus
# beached mass should sum to a constant if oil mass is conserved.
fig, ax = plt.subplots(1, 1, figsize=(15,5))
oil0d2000.MDissolved.plot();
oil0d2000.MEvaporated.plot();
oil0d2000.MassOil.plot();
oil0d2000.MDispersed.plot();
oil0d2000.MBio.plot()
# Convert beached oil volume to oil mass: scale by density and strip out the
# water fractions (volume- and mass-based water content).
massbeached2 = (oil0d2000.VolOilBeached*oil0d2000.Density/(1-oil0d2000.VWaterContent)
                *(1-oil0d2000.MWaterContent))
massbeached2.plot(label="Beached")
# Total of all components -- a flat line indicates mass conservation.
plt.plot(oil0d2000.MDissolved + oil0d2000.MEvaporated + oil0d2000.MassOil
         + massbeached2 + oil0d2000.MDispersed + oil0d2000.MBio);
# Same budget for the 20k-particle run (dashed lines with x markers).
oil0d20k.MDissolved.plot(style='--x');
oil0d20k.MEvaporated.plot(style='--x');
oil0d20k.MassOil.plot(style='--x');
oil0d20k.MDispersed.plot(style='--x');
oil0d20k.MBio.plot(style='--x')
massbeached20 = (oil0d20k.VolOilBeached*oil0d20k.Density/(1-oil0d20k.VWaterContent)
                 *(1-oil0d20k.MWaterContent))
# Beached mass for the 10k and 5000 runs is also computed here because later
# cells use it.
massbeached10 = (oil0d10k.VolOilBeached*oil0d10k.Density/(1-oil0d10k.VWaterContent)
                 *(1-oil0d10k.MWaterContent))
massbeached5 = (oil0d5000.VolOilBeached*oil0d5000.Density/(1-oil0d5000.VWaterContent)
                *(1-oil0d5000.MWaterContent))
massbeached20.plot(label="Beached", style='--x')
plt.plot(oil0d20k.MDissolved + oil0d20k.MEvaporated + oil0d20k.MassOil
         + massbeached20 + oil0d20k.MDispersed + oil0d20k.MBio);
plt.legend();
plt.grid();
# -
# Effective oil density corrected for water content, smallest vs largest
# particle count.
for run in (oil0d2000, oil0d20k):
    (run.Density / (1 - run.VWaterContent) * (1 - run.MWaterContent)).plot()

# Dispersed oil mass for all four particle counts.
for run in (oil0d2000, oil0d5000, oil0d10k, oil0d20k):
    run.MDispersed.plot()
# Total oil mass (all components + beached) for each run on one axis --
# compares conservation across particle counts.
fig, ax = plt.subplots(1, 1, figsize=(15, 3.5))
ax.plot(oil0d2000.MDissolved + oil0d2000.MEvaporated + oil0d2000.MassOil
        + massbeached2 + oil0d2000.MDispersed + oil0d2000.MBio);
ax.plot(oil0d5000.MDissolved + oil0d5000.MEvaporated + oil0d5000.MassOil
        + massbeached5 + oil0d5000.MDispersed + oil0d5000.MBio);
ax.plot(oil0d10k.MDissolved + oil0d10k.MEvaporated + oil0d10k.MassOil
        + massbeached10 + oil0d10k.MDispersed + oil0d10k.MBio);
ax.plot(oil0d20k.MDissolved + oil0d20k.MEvaporated + oil0d20k.MassOil
        + massbeached20 + oil0d20k.MDispersed + oil0d20k.MBio);
ax.grid();
# Beached mass time series for each run (computed in the mass-budget cell).
massbeached2.plot(label="Beached");
massbeached5.plot(label="Beached");
massbeached10.plot(label="Beached");
massbeached20.plot(label="Beached");
plt.grid()
# Dispersed fraction of (dispersed + surface) oil over the first 120 outputs,
# 2000 vs 20k particles.
(oil0d2000.MDispersed/(oil0d2000.MDispersed + oil0d2000.MassOil))[:120].plot()
(oil0d20k.MDispersed/(oil0d20k.MDispersed + oil0d20k.MassOil))[:120].plot()
plt.grid()
# Oil volume for the smallest and largest particle counts.
oil0d2000.VolumeOil.plot(style='+-')
oil0d20k.VolumeOil.plot()
plt.legend()
plt.grid()

# The five analyte masses: first the 2000-particle run, then the 20k run.
for run in (oil0d2000, oil0d20k):
    for analyte in range(1, 6):
        run['AnalyteMass%d' % analyte].plot()

# Slick thickness for every particle count.
for run in (oil0d2000, oil0d5000, oil0d10k, oil0d20k):
    run.Thickness.plot()
# Lagrangian (gridded) model output, one file per particle-count experiment.
oilLag2000 = xr.open_dataset('/data/sallen/results/MIDOSS/ParticleNoTests/SoG_2000_AKNS/Lagrangian_SoG_2000_AKNS_SoG_2000_AKNS.nc')
oilLag5000 = xr.open_dataset('/data/sallen/results/MIDOSS/ParticleNoTests/SoG_5000_AKNS/Lagrangian_SoG_5000_AKNS_SoG_5000_AKNS.nc')
oilLag10k = xr.open_dataset('/data/sallen/results/MIDOSS/ParticleNoTests/SoG_10000_AKNS/Lagrangian_SoG_10000_AKNS_SoG_10000_AKNS.nc')
oilLag20k = xr.open_dataset('/data/sallen/results/MIDOSS/ParticleNoTests/SoG_20000_AKNS/Lagrangian_SoG_20000_AKNS_SoG_20000_AKNS.nc')
# Model-grid index window used by the map plots below.
imin, imax = 460, 560
jmin, jmax = 180, 260
# +
# One row per particle count: left panel is the depth-summed 3-D oil-volume
# map at output step `it`; right panel is the time series of total
# water-column oil volume over the first 72 outputs.  The print shows the
# grand total inside the index window at step `it`.
fig, axs = plt.subplots(4, 2, figsize=(15, 20))
it = 20
for row, run in enumerate((oilLag2000, oilLag5000, oilLag10k, oilLag20k)):
    field = run.OilWaterColumnOilVol_3D[it]
    field[:, imin:imax, jmin:jmax].sum(axis=0).plot(ax=axs[row, 0], cmap='copper')
    print(run.OilWaterColumnOilVol_3D[it, :, imin:imax, jmin:jmax].sum(axis=0).sum(axis=0).sum(axis=0))
    axs[row, 1].plot(run.time[0:72],
                     run.OilWaterColumnOilVol_3D[0:72].sum(axis=1).sum(axis=1).sum(axis=1))
# -
# Time-summed oil volume in model level 39 over a small box near the spill,
# one panel per particle count.
fig, axs = plt.subplots(2, 2, figsize=(15, 15))
imin, imax = 465, 490
jmin, jmax = 230, 255
oilLag2000.OilWaterColumnOilVol_3D[:, 39, imin:imax, jmin:jmax].sum(axis=0).plot(ax=axs[0, 0], cmap='gist_ncar');
oilLag5000.OilWaterColumnOilVol_3D[:, 39, imin:imax, jmin:jmax].sum(axis=0).plot(ax=axs[0, 1], cmap='gist_ncar');
oilLag10k.OilWaterColumnOilVol_3D[:, 39, imin:imax, jmin:jmax].sum(axis=0).plot(ax=axs[1, 0], cmap='gist_ncar');
oilLag20k.OilWaterColumnOilVol_3D[:, 39, imin:imax, jmin:jmax].sum(axis=0).plot(ax=axs[1, 1], cmap='gist_ncar');
axs[0, 1].set_title('5000');
# Differences of each run against the 20k reference (diverging colormap);
# the bottom-right panel repeats the 20k field itself.
fig, axs = plt.subplots(2, 2, figsize=(15, 15))
(oilLag2000.OilWaterColumnOilVol_3D[:, 39, imin:imax, jmin:jmax].sum(axis=0)-
 oilLag20k.OilWaterColumnOilVol_3D[:, 39, imin:imax, jmin:jmax].sum(axis=0)).plot(ax=axs[0, 0], cmap='bwr');
(oilLag5000.OilWaterColumnOilVol_3D[:, 39, imin:imax, jmin:jmax].sum(axis=0)-
 oilLag20k.OilWaterColumnOilVol_3D[:, 39, imin:imax, jmin:jmax].sum(axis=0)).plot(ax=axs[0, 1], cmap='bwr');
(oilLag10k.OilWaterColumnOilVol_3D[:, 39, imin:imax, jmin:jmax].sum(axis=0)-
 oilLag20k.OilWaterColumnOilVol_3D[:, 39, imin:imax, jmin:jmax].sum(axis=0)).plot(ax=axs[1, 0], cmap='bwr');
oilLag20k.OilWaterColumnOilVol_3D[:, 39, imin:imax, jmin:jmax].sum(axis=0).plot(ax=axs[1, 1], cmap='gist_ncar');
# NOTE(review): panel [0, 1] shows the 5000-vs-20k difference but the title
# says '2000' -- looks like a mislabel; confirm intent before changing.
axs[0, 1].set_title('2000');
# Level-39, time-summed oil volume over the full SoG box, one panel per run.
fig, axs = plt.subplots(2, 2, figsize=(15, 15))
imin, imax = 460, 560
jmin, jmax = 180, 260
for panel, run in zip(axs.flat, (oilLag2000, oilLag5000, oilLag10k, oilLag20k)):
    run.OilWaterColumnOilVol_3D[:, 39, imin:imax, jmin:jmax].sum(axis=0).plot(ax=panel, cmap='gist_ncar')
axs[0, 1].set_title('5000')

# Beached-oil volume over the northern box for each particle count.
# (imin/imax/jmin/jmax are deliberately left at these values for the next cell.)
fig, axs = plt.subplots(2, 2, figsize=(15, 15))
imin, imax = 540, 800
jmin, jmax = 100, 220
for panel, run in zip(axs.flat, (oilLag2000, oilLag5000, oilLag10k, oilLag20k)):
    run.Beaching_Volume[imin:imax, jmin:jmax].plot(ax=panel, cmap='copper', vmax=8)
# +
# Beached-volume differences against the 20k reference over the northern box;
# bottom-right panel repeats the 20k field itself.
fig, axs = plt.subplots(2, 2, figsize=(15, 15))
(oilLag2000.Beaching_Volume[imin:imax , jmin:jmax]
 -oilLag20k.Beaching_Volume[imin:imax , jmin:jmax]).plot(ax=axs[0,0], cmap='bwr')
(oilLag5000.Beaching_Volume[imin:imax , jmin:jmax]-
 oilLag20k.Beaching_Volume[imin:imax , jmin:jmax]).plot(ax=axs[0,1], cmap='bwr')
(oilLag10k.Beaching_Volume[imin:imax , jmin:jmax]-
 oilLag20k.Beaching_Volume[imin:imax , jmin:jmax]).plot(ax=axs[1,0], cmap='bwr')
oilLag20k.Beaching_Volume[imin:imax , jmin:jmax].plot(ax=axs[1,1], cmap='copper', vmax=8);
# -
# Beaching time relative to each run's earliest beaching event.
# NOTE(review): the /1e9/3600 scaling assumes the Beaching_Time differences
# are nanosecond timedeltas (ns -> hours) -- confirm against the file metadata.
fig, axs = plt.subplots(1, 3, figsize=(15, 5))
bt2000 = np.array(oilLag2000.Beaching_Time[imin:imax, jmin:jmax] - oilLag2000.Beaching_Time[imin:imax, jmin:jmax].min())
axs[0].plot(bt2000/1e9/3600, 'ro');
bt5000 = np.array(oilLag5000.Beaching_Time[imin:imax, jmin:jmax] - oilLag5000.Beaching_Time[imin:imax, jmin:jmax].min())
axs[1].plot(bt5000/1e9/3600, 'ro');
bt10k = np.array(oilLag10k.Beaching_Time[imin:imax, jmin:jmax] - oilLag10k.Beaching_Time[imin:imax, jmin:jmax].min())
axs[2].plot(bt10k/1e9/3600, 'ro');
bt20k = np.array(oilLag20k.Beaching_Time[imin:imax, jmin:jmax] - oilLag20k.Beaching_Time[imin:imax, jmin:jmax].min())
# Overlay the 20k reference (blue x) on every panel for comparison.
for ax in axs:
    ax.plot(bt20k/1e9/3600, 'bx');
# Time to Run:
# * 2000 : 1:55
# * 5000 : 2:06
# * 10000 : 2:08
# * 20000 : 2:23
| MIDOSS/AKNS_SoG_npoints.ipynb |