code stringlengths 38 801k | repo_path stringlengths 6 263 |
|---|---|
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 2
# language: python
# name: python2
# ---
import numpy as np, scipy, matplotlib.pyplot as plt, scipy.integrate
from scipy.interpolate import interp1d
plt.style.use(('presentation'))
# ### Problem 1
# **Part (b)** - The problem is of a particle moving in potential $V(x) = \alpha x^2/2 + \beta x^4/4$
# +
def V(alpha, beta, x):
    """Quartic (Duffing) potential V(x) = alpha*x**2/2 + beta*x**4/4.

    Parameters
    ----------
    alpha, beta : float -- coefficients of the quadratic and quartic terms.
    x : array_like -- position(s) at which to evaluate the potential.

    Returns a numpy array (or scalar) of potential values.

    The 1/2 and 1/4 factors match the potential stated in the problem text
    and the equation of motion integrated below (xddot = -alpha*x - beta*x**3);
    the previous implementation omitted them, so the analytic energy contours
    disagreed with the numerical solutions by a constant factor.
    """
    x = np.asarray(x)
    return alpha*x**2/2.0 + beta*x**4/4.0
# Plot V(x) for the four (alpha, beta) combinations of part (b).
params = [[3,-1],[-3,1],[0.25,0.25],[-0.25,-0.25]]  # (alpha, beta) pairs
x = np.linspace(-2,2,1000)  # position grid, reused by later cells
#plt.figure(figsize=(4,4))
for parameter in params:
    alpha, beta = parameter
    plt.plot(x,V(alpha,beta,x),label=r'$\alpha = %s, \beta = %s$'%(alpha,beta))
plt.xlabel('$x$')
plt.ylabel('$V(x)$')
plt.legend()
plt.savefig('1.png')
# -
# As is clearly evident from the plots (and the equation for $V(x)$), the function will have an extremum at $x=0$, irrespective of the values of $\alpha$ and $\beta$.
#
# * For $(\alpha,\beta)=(3,-1)$, the potential is bounded from above.
# * For $(\alpha,\beta)=(-3,1)$, the potential is bounded from below
# * For $(\alpha,\beta)=(0.25,0.25)$, the potential is bounded from below
# * For $(\alpha,\beta)=(-0.25,-0.25)$, the potential is bounded from below
# **Part (c)** - When $\delta = 0$, the energy can be written as $E = m\dot{x}^2/2 + \alpha x^2/2 + \beta x^4/4$. In the phase space, a constant value of energy will give a closed curve for all values of $\alpha$ and $\beta$, except for the case when $\alpha < 0$ and $\beta < 0$.
# +
# (alpha, beta) pairs for which the energy contour is closed.
params = [[3, -1], [-3, 1], [0.25, 0.25]]
def xdot(E, m, alpha, beta, x):
    """Velocity on the constant-energy contour: xdot = sqrt((2/m)*(E - V(x)))."""
    positions = np.asarray(x)
    return np.sqrt((2./m)*(E - V(alpha, beta, positions)))
# Mass and total energy used for the analytic phase-space contours.
m = 1.0
E = 1.0
for parameter in params:
    alpha, beta = parameter
    xd = xdot(E,m,alpha,beta,x)
    # Plot both branches (+xdot and -xdot) to close the energy contour.
    plt.plot(np.append(x,x),np.append(xd,-xd),label=r'$\alpha = %s, \beta = %s$'%(alpha,beta))
plt.xlabel('$x$')
plt.ylabel('$\dot{x}$')
plt.legend()
# -
# We also solve the equations of motion numerically for the same set of parameter values.
params = [[3,-1],[-3,1],[0.25,0.25]]  # (alpha, beta) pairs
for alpha, beta in params:
    # State vector y = [x, xdot]; EOM (m = 1): xddot = -alpha*x - beta*x**3.
    def f(t,y):
        x = y[0]
        xd = y[1]
        return np.array([xd, -alpha*x - beta*x**3])
    ini = [0,1]  # initial condition [x(0), xdot(0)]
    sol = scipy.integrate.solve_ivp(f,[0,10],[ini[0],ini[1]],
                                    t_eval=np.linspace(0,10,100))
    # Phase-space trajectory (x vs xdot) from the numerical solution.
    plt.plot(sol.y[0],sol.y[1],label=r'$\alpha = %s, \beta = %s$'%(alpha,beta))
plt.xlabel('$x$')
plt.ylabel('$\dot{x}$')
plt.legend()
plt.show()
# **Part (e)** - When $\delta \ne 0$, there is dissipation in the system, and hence the phase space plots will differ. We now solve the resultant equation of motion $m \ddot{x} + \delta \dot{x} + \alpha x + \beta x^3 = 0$ numerically (taking $m=1$).
params = [[3,-1,0.25],[-3,1,0.25],[0.25,0.25,0.25]]  # (alpha, beta, delta)
for alpha, beta, delta in params:
    # Damped Duffing oscillator (m = 1): xddot = -delta*xdot - alpha*x - beta*x**3.
    def f(t,y):
        x = y[0]
        xd = y[1]
        return np.array([xd, -delta*xd -alpha*x - beta*x**3])
    ini = [0,1]  # initial condition [x(0), xdot(0)]
    # NOTE(review): the integration span is [0, 100] but t_eval only covers
    # [0, 50] -- presumably intentional (solver output is simply truncated),
    # but confirm.
    sol = scipy.integrate.solve_ivp(f,[0,100],[ini[0],ini[1]],
                                    t_eval=np.linspace(0,50,10000))
    plt.plot(sol.y[0],sol.y[1],label=r'$\alpha = %s, \beta = %s$'%(alpha,beta))
plt.xlabel('$x$')
plt.ylabel('$\dot{x}$')
plt.legend()
plt.show()
# **Part(h)** - Poincare Plot
params = [[-3,1]]  # single (alpha, beta) pair for the driven system
for alpha, beta in params:
    # Driven (undamped) oscillator: xddot = -alpha*x - beta*x**3 + cos(4t).
    def f(t,y):
        x = y[0]
        xd = y[1]
        return np.array([xd, -alpha*x - beta*x**3 + np.cos(4.*t)])
    ini = [0,1]  # initial condition [x(0), xdot(0)]
    sol = scipy.integrate.solve_ivp(f,[0,10000],[ini[0],ini[1]],rtol=1e-7,atol=1e-7)
    #plt.plot(sol.y[0],sol.y[1])
    # Interpolate the solver output so the trajectory can be sampled at
    # arbitrary strobe times for the Poincare section.
    q_arr = sol.y[0]
    qdot_arr = sol.y[1]
    time = sol.t
    q = interp1d(time,q_arr)
    qdot = interp1d(time,qdot_arr)
    # NOTE(review): the drive cos(4t) has period 2*pi/4 = pi/2, but the strobe
    # step here is pi/4 (half the drive period) -- confirm intended.
    tm = np.arange(1,10000,np.pi/4.)
    tm_1 = np.linspace(0,10,10000)  # dense sampling for the continuous trajectory
    plt.plot(q(tm_1),qdot(tm_1))
    plt.xlabel('$x$')
    plt.ylabel('$\dot{x}$')
    plt.figure()
    plt.scatter(q(tm),qdot(tm),s=1)
    plt.xlabel('$x$')
    plt.ylabel('$\dot{x}$')
    plt.title('Poincare Plot')
plt.show()
| sem1/cmech/assign_1/Problem 1.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 2
# language: python
# name: python2
# ---
# # Importing files
#
# The output format in this machine is different from what PlateMate originally was able to read. Thus, we will extend its reading module. To achieve this, we explore below openpyxl as an option to read directly from the xlsx files provided by the plate reader.
import numpy as np
import pylab as pl
import openpyxl as xl
import string
# %matplotlib inline
# +
# Folder holding the plate-reader exports; open the 'final' worksheet.
dataset_folder = "../datasets/"
read = xl.load_workbook(dataset_folder + "20161003 pJP22mcherry 01.xlsx")['final']
# -
# Spot-check a single cell value (Python 2 print statement).
print read['B74'].value
# All plate readings start in cells B36, B74, B106 and B144. It would be interesting to have a better documentation of what is the difference among each of these readings -- of course, it's got to be something related to the wavelengths used in the excitation and emission.
#
# Let's write a function that reads a plate given its starting point and the number of columns to be considered.
def readPlate( spsheet, InitialRow, ncols = 12, nrows = 8 ):
    """Read an nrows x ncols plate block whose top-left cell is column B of
    row InitialRow, returning the values as a float numpy array."""
    values = np.zeros( (nrows, ncols) )
    column_letters = list(string.ascii_uppercase)[1:20]  # 'B' .. 'T'
    for r, sheet_row in enumerate(range(InitialRow, InitialRow + nrows)):
        for c in range(ncols):
            cell_ref = column_letters[c] + str(sheet_row)
            values[r, c] = float( spsheet[cell_ref].value )
    return values
# Let's check the first two:
# Sanity-check the first two plate blocks (readings start at rows 36 and 74).
readPlate(read, 36)
readPlate(read, 74)
# All we need to do now is to parse it into a pandas dataframe. To do that, it is helpful to reshape this numpy array into a single line.
# +
X = readPlate(read, 74)
print X.shape
# Flatten the 8x12 plate into a single 96-element vector.
np.reshape(X, (X.shape[0]*X.shape[1]) )
# -
#
| notebooks/Importing files.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# language: python
# name: python3
# ---
# +
import os
import sys
path = '/home/seq/pelenet'
sys.path.append(path)
os.chdir(path)
# +
# Allow reload of objects
# %load_ext autoreload
# %autoreload
from pelenet.utils import Utils
from pelenet.experiments.assemblies import AssemblyExperiment
from pelenet.experiments.assembliesoutput import AssemblyOutputExperiment
# Official modules
import numpy as np
from scipy import sparse
import nxsdk.api.n2a as nx
import matplotlib
import matplotlib.pyplot as plt
from time import time
from copy import deepcopy
# -
# Overwrite default parameters (pelenet/parameters/ and pelenet/experiments/random.py)
# Experiment configuration passed to AssemblyExperiment; keys override the
# defaults in pelenet/parameters/ and pelenet/experiments/.
parameters = {
    # Experiment
    'seed': 2, # Random seed
    'trials': 20, #20, # Number of trials
    'stepsPerTrial': 50, #40, #50, # Number of simulation steps for every trial
    'isReset': True, # Activate reset after every trial
    # Network
    'reservoirExSize': 400, # Number of excitatory neurons
    'reservoirConnPerNeuron': 40, #40, #35 # Number of connections per neuron
    # Noise
    'isNoise': True,
    'onlyExcitatory': True, # Noisy input is only excitatory
    'noiseMaxWeight': 120, #120,
    # Neurons
    'refractoryDelay': 2, # Refractory period
    'voltageTau': 100, #100 # Voltage time constant
    'currentTau': 6, #5 # Current time constant
    'thresholdMant': 1200, # Spiking threshold for membrane potential
    # Plasticity
    'isLearningRule': True,
    'learningEpoch': 4,
    'learningRule': '2^-4*x1*y1*x0*y0 - 2^-3*y0*w*w',
    # Input
    'inputIsAlternating': True,
    'inputAlternatingNum': 1,
    'inputOffset': 10,
    'inputGenSpikeProb': 0.8, # Probability of spikes for the spike generators
    'inputNumTargetNeurons': 40, # Number of neurons targeted by the spike generators
    'inputSteps': 40, #20, #30, # Number of steps the input is active
    # Probes
    'isExSpikeProbe': True, # Probe excitatory spikes
    'isInSpikeProbe': True, # Probe inhibitory spikes
    'isWeightProbe': True # Probe weight matrix at the end of the simulation
}
# Initializes the experiment and utils (Utils is a singleton)
exp = AssemblyExperiment(name='assemblies', parameters=parameters)
utils = Utils.instance()
# +
# Build network
exp.build()
# Get spectral radius
utils.getSpectralRadius(exp.net.initialWeights)
# +
#exp.net.plot.initialExWeightDistribution()
# +
#exp.net.noiseWeights.toarray()[:20,:20]
# -
# Shortcut to the network's parameter object.
pars = exp.net.p
# Run network
exp.run()
# Plot spike trains of the excitatory (red) and inhibitory (blue) neurons
exp.net.plot.reservoirSpikeTrain(figsize=(12,6))
exp.net.plot.initialExWeightDistribution(figsize=(6,4), xlim=(0,100))
# +
#exp.net.plot.trainedExWeightDistribution(figsize=(6,4), xlim=(0,100))
# Histogram of the non-zero trained excitatory-excitatory weights after the
# last trial.
wgt = exp.net.trainedWeightsExex[-1].toarray().flatten()
nz = np.array(np.nonzero(wgt)[0])
plt.hist(wgt[nz], bins=np.arange(np.max(wgt[nz])))
plt.xlim((0,100))
pl = plt.show()
# -
# Weight matrix before learning
exp.net.plot.initialExWeightMatrix()
# Weight matrix after learning
exp.net.plot.trainedExWeightMatrix()
# +
# Mean trained weight in the four quadrants of the input-targeted region.
# NOTE(review): inp_rt takes rows size:, cols :size while inp_lb takes
# rows :size, cols size: -- confirm the left/right/top/bottom naming matches
# the intended orientation.
size = exp.p.inputNumTargetNeurons
input_regions = exp.net.trainedWeightsExex[-1].toarray()[:2*size,:2*size]
inp_lt = input_regions[:size,:size]
inp_rt = input_regions[size:,:size]
inp_lb = input_regions[:size,size:]
inp_rb = input_regions[size:,size:]
print(np.mean(inp_lt), np.mean(inp_rt), np.mean(inp_lb), np.mean(inp_rb))
# -
# Sorted weight matrix after learning
expSupportMask = utils.getSupportWeightsMask(exp.net.trainedWeightsExex[-1])
exp.net.plot.weightsSortedBySupport(expSupportMask, exp.net.trainedWeightsExex[-1])
nCs = exp.net.p.inputNumTargetNeurons
nEx = exp.net.p.reservoirExSize
nC = exp.net.p.inputAlternatingNum
nCs, nEx, nC
# +
#weightsExEx = exp.net.trainedWeightsExex[-1]
#np.save('weightsExEx', weightsExEx)
# -
inputs = utils.getInputLetterList(exp.net.inputTrials)
supportMasks = utils.getSupportMasksForAllTrials(exp.net.initialWeights.exex, exp.net.trainedWeightsExex)
supportMasks.shape
np.array(supportMasks)[0][1].shape
supportShare = np.array([ np.sum(supportMasks[i], axis=1) for i in range(exp.p.trials+1)]).T
# +
# Track how the mean weight inside the input cluster (first 40 neurons)
# grows relative to the overall mean across trials.
cluster_mean = []
all_mean = []
cluster_mean.append(np.mean(exp.net.initialWeights.exex[:40,:40]))
all_mean.append(np.mean(exp.net.initialWeights.exex))
for i in range(exp.p.trials):
    cluster_mean.append(np.mean(exp.net.trainedWeightsExex[i][:40,:40]))
    all_mean.append(np.mean(exp.net.trainedWeightsExex[i]))
cluster_mean = np.array(cluster_mean)
all_mean = np.array(all_mean)
x = cluster_mean/all_mean
plt.plot(x)
# -
exp.net.plot.supportShare(supportMasks)
exp.net.plot.supportTurnover(supportMasks)
# New parameters
# Second experiment: same configuration but twice the trials and two
# alternating inputs.
newParameters = {
    **parameters,
    'trials': 2*exp.p.trials,
    'inputAlternatingNum': 2
}
# Initializes a second experiment
exp2 = AssemblyExperiment(name='assemblies-2', parameters=newParameters)
# +
# Get weights from previous experiment
weights = deepcopy(exp.net.initialWeights)
weights.exex = exp.net.trainedWeightsExex[-1]
# Build experiment
exp2.build()
# Seed the second network with the first experiment's masks and its final
# trained weights.
exp2.net.initialMasks = exp.net.initialMasks
exp2.net.initialWeights = weights
# -
# Run network
exp2.run()
exp2.net.plot.reservoirSpikeTrain(figsize=(12,6))
exp2.net.plot.reservoirRates(figsize=(10,3), fr=0, to=370)
# +
# Histogram of the non-zero trained weights after the last trial.
wgt = exp2.net.trainedWeightsExex[-1].toarray().flatten()
nz = np.array(np.nonzero(wgt)[0])
plt.hist(wgt[nz], bins=np.arange(np.max(wgt[nz])))
plt.xlim((0,100))
pl = plt.show()
# -
# Weight matrix after learning
exp2.net.plot.trainedExWeightMatrix()
# +
# Mean trained weight in the four quadrants of the input-targeted region.
size = exp2.p.inputNumTargetNeurons
input_regions = exp2.net.trainedWeightsExex[-1].toarray()[:2*size,:2*size]
inp_lt = input_regions[:size,:size]
inp_rt = input_regions[size:,:size]
inp_lb = input_regions[:size,size:]
inp_rb = input_regions[size:,size:]
print(np.mean(inp_lt), np.mean(inp_rt), np.mean(inp_lb), np.mean(inp_rb))
# +
# Relative growth of the two cluster means across trials.
cluster1_mean = []
cluster2_mean = []
all_mean = []
cluster1_mean.append(np.mean(exp.net.initialWeights.exex[:size,:size]))
cluster2_mean.append(np.mean(exp.net.initialWeights.exex[size:2*size,size:2*size]))
all_mean.append(np.mean(exp.net.initialWeights.exex))
# NOTE(review): this loops over exp.p.trials, not exp2.p.trials (which is
# twice as large) -- confirm only the first half of exp2's trials is intended.
for i in range(exp.p.trials):
    cluster1_mean.append(np.mean(exp2.net.trainedWeightsExex[i][:size,:size]))
    cluster2_mean.append(np.mean(exp2.net.trainedWeightsExex[i][size:2*size,size:2*size]))
    all_mean.append(np.mean(exp2.net.trainedWeightsExex[i]))
cluster1_mean = np.array(cluster1_mean)
cluster2_mean = np.array(cluster2_mean)
all_mean = np.array(all_mean)
c1 = cluster1_mean/all_mean
c2 = cluster2_mean/all_mean
plt.plot(c1, label='A')
plt.plot(c2, label='B')
plt.legend()
pl = plt.show()
| assemblies_thesis_v1.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# name: python3
# ---
# + [markdown] colab_type="text" id="W7rEsKyWcxmu"
# ##### Copyright 2019 The TF-Agents Authors.
#
# + cellView="form" colab={} colab_type="code" id="nQnmcm0oI1Q-"
#@title Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# https://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# + [markdown] colab_type="text" id="G6aOV15Wc4HP"
# ### Checkpointer and PolicySaver
#
# <table class="tfo-notebook-buttons" align="left">
# <td>
# <a target="_blank" href="https://www.tensorflow.org/agents/tutorials/10_checkpointer_policysaver_tutorial">
# <img src="https://www.tensorflow.org/images/tf_logo_32px.png" />
# View on TensorFlow.org</a>
# </td>
# <td>
# <a target="_blank" href="https://colab.research.google.com/github/tensorflow/agents/blob/master/docs/tutorials/10_checkpointer_policysaver_tutorial.ipynb">
# <img src="https://www.tensorflow.org/images/colab_logo_32px.png" />
# Run in Google Colab</a>
# </td>
# <td>
# <a target="_blank" href="https://github.com/tensorflow/agents/blob/master/docs/tutorials/10_checkpointer_policysaver_tutorial.ipynb">
# <img src="https://www.tensorflow.org/images/GitHub-Mark-32px.png" />
# View source on GitHub</a>
# </td>
# <td>
# <a href="https://storage.googleapis.com/tensorflow_docs/agents/docs/tutorials/10_checkpointer_policysaver_tutorial.ipynb"><img src="https://www.tensorflow.org/images/download_logo_32px.png" />Download notebook</a>
# </td>
# </table>
# + [markdown] colab_type="text" id="M3HE5S3wsMEh"
# ## Introduction
#
# `tf_agents.utils.common.Checkpointer` is a utility to save/load the training state, policy state, and replay_buffer state to/from a local storage.
#
# `tf_agents.policies.policy_saver.PolicySaver` is a tool to save/load only the policy, and is lighter than `Checkpointer`. You can use `PolicySaver` to deploy the model as well without any knowledge of the code that created the policy.
#
# In this tutorial, we will use DQN to train a model, then use `Checkpointer` and `PolicySaver` to show how we can store and load the states and model in an interactive way. Note that we will use TF2.0's new saved_model tooling and format for `PolicySaver`.
#
# + [markdown] colab_type="text" id="vbTrDrX4dkP_"
# ## Setup
# + [markdown] colab_type="text" id="Opk_cVDYdgct"
# If you haven't installed the following dependencies, run:
# + colab={} colab_type="code" id="Jv668dKvZmka"
#@test {"skip": true}
# !sudo apt-get install -y xvfb ffmpeg
# !pip install 'gym==0.10.11'
# !pip install 'imageio==2.4.0'
# !pip install 'pyglet==1.3.2'
# !pip install 'xvfbwrapper==0.2.9'
# !pip install tf-agents
# + colab={} colab_type="code" id="bQMULMo1dCEn"
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import base64
import imageio
import io
import matplotlib
import matplotlib.pyplot as plt
import os
import shutil
import tempfile
import tensorflow as tf
import zipfile
import IPython
try:
from google.colab import files
except ImportError:
files = None
from tf_agents.agents.dqn import dqn_agent
from tf_agents.drivers import dynamic_step_driver
from tf_agents.environments import suite_gym
from tf_agents.environments import tf_py_environment
from tf_agents.eval import metric_utils
from tf_agents.metrics import tf_metrics
from tf_agents.networks import q_network
from tf_agents.policies import policy_saver
from tf_agents.policies import py_tf_eager_policy
from tf_agents.policies import random_tf_policy
from tf_agents.replay_buffers import tf_uniform_replay_buffer
from tf_agents.trajectories import trajectory
from tf_agents.utils import common
# Enable TF2 behavior (this tutorial targets the TF2 saved_model tooling).
tf.compat.v1.enable_v2_behavior()
# Working directory for checkpoints/policies; honors the test tmpdir if set.
tempdir = os.getenv("TEST_TMPDIR", tempfile.gettempdir())
# + colab={} colab_type="code" id="AwIqiLdDCX9Q"
#@test {"skip": true}
# Set up a virtual display for rendering OpenAI gym environments.
import xvfbwrapper
xvfbwrapper.Xvfb(1400, 900, 24).start()
# + [markdown] colab_type="text" id="AOv_kofIvWnW"
# ## DQN agent
# We are going to set up DQN agent, just like in the previous colab. The details are hidden by default as they are not core part of this colab, but you can click on 'SHOW CODE' to see the details.
# + [markdown] colab_type="text" id="cStmaxredFSW"
# ### Hyperparameters
# + cellView="both" colab={} colab_type="code" id="yxFs6QU0dGI_"
# Training / evaluation hyperparameters.
env_name = "CartPole-v1"
collect_steps_per_iteration = 100
replay_buffer_capacity = 100000
fc_layer_params = (100,)  # hidden layer sizes of the Q-network
batch_size = 64
learning_rate = 1e-3
log_interval = 5
num_eval_episodes = 10
eval_interval = 1000
# + [markdown] colab_type="text" id="w4GR7RDndIOR"
# ### Environment
# + colab={} colab_type="code" id="fZwK4d-bdI7Z"
# Separate gym environments for training and evaluation, wrapped for TF.
train_py_env = suite_gym.load(env_name)
eval_py_env = suite_gym.load(env_name)
train_env = tf_py_environment.TFPyEnvironment(train_py_env)
eval_env = tf_py_environment.TFPyEnvironment(eval_py_env)
# + [markdown] colab_type="text" id="0AvYRwfkeMvo"
# ### Agent
# + cellView="both" colab={} colab_type="code" id="cUrFl83ieOvV"
#@title
# Q-network built from the environment's observation and action specs.
q_net = q_network.QNetwork(
    train_env.observation_spec(),
    train_env.action_spec(),
    fc_layer_params=fc_layer_params)
optimizer = tf.compat.v1.train.AdamOptimizer(learning_rate=learning_rate)
# global_step doubles as the agent's train-step counter and is checkpointed.
global_step = tf.compat.v1.train.get_or_create_global_step()
agent = dqn_agent.DqnAgent(
    train_env.time_step_spec(),
    train_env.action_spec(),
    q_network=q_net,
    optimizer=optimizer,
    td_errors_loss_fn=common.element_wise_squared_loss,
    train_step_counter=global_step)
agent.initialize()
# + [markdown] colab_type="text" id="p8ganoJhdsbn"
# ### Data Collection
# + cellView="both" colab={} colab_type="code" id="XiT1p78HdtSe"
#@title
replay_buffer = tf_uniform_replay_buffer.TFUniformReplayBuffer(
    data_spec=agent.collect_data_spec,
    batch_size=train_env.batch_size,
    max_length=replay_buffer_capacity)
# Driver steps the environment with the collect policy and pushes each step
# into the replay buffer via the add_batch observer.
collect_driver = dynamic_step_driver.DynamicStepDriver(
    train_env,
    agent.collect_policy,
    observers=[replay_buffer.add_batch],
    num_steps=collect_steps_per_iteration)
# Initial data collection
collect_driver.run()
# Dataset generates trajectories with shape [BxTx...] where
# T = n_step_update + 1.
dataset = replay_buffer.as_dataset(
    num_parallel_calls=3, sample_batch_size=batch_size,
    num_steps=2).prefetch(3)
iterator = iter(dataset)
# + [markdown] colab_type="text" id="8V8bojrKdupW"
# ### Train the agent
# + cellView="both" colab={} colab_type="code" id="-rDC3leXdvm_"
#@title
# (Optional) Optimize by wrapping some of the code in a graph using TF function.
agent.train = common.function(agent.train)
def train_one_iteration():
    """Collect fresh experience, then run one gradient update on the agent."""
    # Gather a batch of environment steps with the collect policy.
    for _ in range(collect_steps_per_iteration):
        collect_driver.run()
    # Sample a batch of data from the buffer and update the agent's network.
    experience, _ = next(iterator)
    loss_info = agent.train(experience)
    step = agent.train_step_counter.numpy()
    print('iteration: {0} loss: {1}'.format(step, loss_info.loss))
# + [markdown] colab_type="text" id="vgqVaPnUeDAn"
# ### Video Generation
# + cellView="both" colab={} colab_type="code" id="ZY6w-fcieFDW"
#@title
def embed_gif(gif_buffer):
    """Return an IPython HTML element embedding *gif_buffer* as a data URI."""
    encoded = base64.b64encode(gif_buffer).decode()
    tag = '<img src="data:image/gif;base64,{0}"/>'.format(encoded)
    return IPython.display.HTML(tag)
def run_episodes_and_create_video(policy, eval_tf_env, eval_py_env):
    """Roll out *policy* for three episodes and display the frames as a gif.

    The TF environment drives the policy; the underlying py environment is
    used only for rendering frames.
    """
    frames = []
    for _ in range(3):
        step = eval_tf_env.reset()
        frames.append(eval_py_env.render())
        while not step.is_last():
            step = eval_tf_env.step(policy.action(step).action)
            frames.append(eval_py_env.render())
    buffer = io.BytesIO()
    imageio.mimsave(buffer, frames, format='gif', fps=60)
    IPython.display.display(embed_gif(buffer.getvalue()))
# + [markdown] colab_type="text" id="y-oA8VYJdFdj"
# ### Generate a video
# Check the performance of the policy by generating a video.
# + colab={} colab_type="code" id="FpmPLXWbdG70"
# Show the current training step, then render the greedy policy to video.
print ('global_step:')
print (global_step)
run_episodes_and_create_video(agent.policy, eval_env, eval_py_env)
# + [markdown] colab_type="text" id="7RPLExsxwnOm"
# ## Setup Checkpointer and PolicySaver
#
# Now we are ready to use Checkpointer and PolicySaver.
# + [markdown] colab_type="text" id="g-iyQJacfQqO"
# ### Checkpointer
#
# + colab={} colab_type="code" id="2DzCJZ-6YYbX"
checkpoint_dir = os.path.join(tempdir, 'checkpoint')
# The Checkpointer bundles everything needed to resume training:
# agent, policy, replay buffer and the global step counter.
train_checkpointer = common.Checkpointer(
    ckpt_dir=checkpoint_dir,
    max_to_keep=1,
    agent=agent,
    policy=agent.policy,
    replay_buffer=replay_buffer,
    global_step=global_step
)
# + [markdown] colab_type="text" id="MKpWNZM4WE8d"
# ### Policy Saver
# + colab={} colab_type="code" id="8mDZ_YMUWEY9"
policy_dir = os.path.join(tempdir, 'policy')
# PolicySaver exports only the policy (SavedModel format), not training state.
tf_policy_saver = policy_saver.PolicySaver(agent.policy)
# + [markdown] colab_type="text" id="1OnANb1Idx8-"
# ### Train one iteration
# + colab={} colab_type="code" id="ql_D1iq8dl0X"
#@test {"skip": true}
print('Training one iteration....')
train_one_iteration()
# + [markdown] colab_type="text" id="eSChNSQPlySb"
# ### Save to checkpoint
# + colab={} colab_type="code" id="usDm_Wpsl0bu"
train_checkpointer.save(global_step)
# + [markdown] colab_type="text" id="gTQUrKgihuic"
# ### Restore checkpoint
#
# For this to work, the whole set of objects should be recreated the same way as when the checkpoint was created.
# + colab={} colab_type="code" id="l6l3EB-Yhwmz"
train_checkpointer.initialize_or_restore()
global_step = tf.compat.v1.train.get_global_step()
# + [markdown] colab_type="text" id="Nb8_MSE2XjRp"
# Also save policy and export to a location
# + colab={} colab_type="code" id="3xHz09WCWjwA"
tf_policy_saver.save(policy_dir)
# + [markdown] colab_type="text" id="Mz-xScbuh4Vo"
# The policy can be loaded without having any knowledge of what agent or network was used to create it. This makes deployment of the policy much easier.
#
# Load the saved policy and check how it performs
# + colab={} colab_type="code" id="J6T5KLTMh9ZB"
# Load the exported SavedModel policy and evaluate it visually.
saved_policy = tf.compat.v2.saved_model.load(policy_dir)
run_episodes_and_create_video(saved_policy, eval_env, eval_py_env)
# + [markdown] colab_type="text" id="MpE0KKfqjc0c"
# ## Export and import
# The rest of the colab will help you export / import checkpointer and policy directories such that you can continue training at a later point and deploy the model without having to train again.
#
# Now you can go back to 'Train one iteration' and train a few more times such that you can understand the difference later on. Once you start to see slightly better results, continue below.
# + cellView="both" colab={} colab_type="code" id="fd5Cj7DVjfH4"
#@title Create zip file and upload zip file (double-click to see the code)
def create_zip_file(dirname, base_filename):
    """Archive *dirname* as <base_filename>.zip and return the archive path."""
    archive_path = shutil.make_archive(base_filename, 'zip', dirname)
    return archive_path
def upload_and_unzip_file_to(dirname):
    """Prompt for a zip upload (Colab only) and replace *dirname* with its contents."""
    if files is None:
        # Not running inside Google Colab; uploads are unavailable.
        return
    uploaded = files.upload()
    for name in uploaded.keys():
        payload = uploaded[name]
        print('User uploaded file "{name}" with length {length} bytes'.format(
            name=name, length=len(payload)))
        shutil.rmtree(dirname)
        archive = zipfile.ZipFile(io.BytesIO(payload), 'r')
        archive.extractall(dirname)
        archive.close()
# + [markdown] colab_type="text" id="hgyy29doHCmL"
# Create a zipped file from the checkpoint directory.
# + colab={} colab_type="code" id="nhR8NeWzF4fe"
# Persist the latest training state, then zip the checkpoint directory.
train_checkpointer.save(global_step)
checkpoint_zip_filename = create_zip_file(checkpoint_dir, os.path.join(tempdir, 'exported_cp'))
# + [markdown] colab_type="text" id="VGEpntTocd2u"
# Download the zip file.
# + colab={} colab_type="code" id="upFxb5k8b4MC"
#@test {"skip": true}
if files is not None:
    files.download(checkpoint_zip_filename) # try again if this fails: https://github.com/googlecolab/colabtools/issues/469
# + [markdown] colab_type="text" id="VRaZMrn5jLmE"
# After training for some time (10-15 times), download the checkpoint zip file,
# and go to "Runtime > Restart and run all" to reset the training,
# and come back to this cell. Now you can upload the downloaded zip file,
# and continue the training.
# + colab={} colab_type="code" id="kg-bKgMsF-H_"
#@test {"skip": true}
# Upload a previously exported checkpoint zip and restore training state.
upload_and_unzip_file_to(checkpoint_dir)
train_checkpointer.initialize_or_restore()
global_step = tf.compat.v1.train.get_global_step()
# + [markdown] colab_type="text" id="uXrNax5Zk3vF"
# Once you have uploaded the checkpoint directory, go back to 'Train one iteration' to continue training, or go back to 'Generate a video' to check the performance of the loaded policy.
# + [markdown] colab_type="text" id="OAkvVZ-NeN2j"
# Alternatively, you can save the policy (model) and restore it.
# Unlike checkpointer, you cannot continue with the training, but you can still deploy the model. Note that the downloaded file is much smaller than that of the checkpointer.
# + colab={} colab_type="code" id="s7qMn6D8eiIA"
# Save the policy as a SavedModel and zip it for download.
tf_policy_saver.save(policy_dir)
policy_zip_filename = create_zip_file(policy_dir, os.path.join(tempdir, 'exported_policy'))
# + colab={} colab_type="code" id="rrGvCEXwerJj"
#@test {"skip": true}
if files is not None:
    files.download(policy_zip_filename) # try again if this fails: https://github.com/googlecolab/colabtools/issues/469
# + [markdown] colab_type="text" id="DyC_O_gsgSi5"
# Upload the downloaded policy directory (exported_policy.zip) and check how the saved policy performs.
# + colab={} colab_type="code" id="bgWLimRlXy5z"
#@test {"skip": true}
upload_and_unzip_file_to(policy_dir)
# Reload the uploaded SavedModel policy and evaluate it.
saved_policy = tf.compat.v2.saved_model.load(policy_dir)
run_episodes_and_create_video(saved_policy, eval_env, eval_py_env)
# + [markdown] colab_type="text" id="HSehXThTm4af"
# ## SavedModelPyTFEagerPolicy
#
# If you don't want to use TF policy, then you can also use the saved_model directly with the Python env through the use of `py_tf_eager_policy.SavedModelPyTFEagerPolicy`.
#
# Note that this only works when eager mode is enabled.
# + colab={} colab_type="code" id="iUC5XuLf1jF7"
# Wrap the SavedModel as a pure-Python policy usable with the py environment
# (works only in eager mode).
eager_py_policy = py_tf_eager_policy.SavedModelPyTFEagerPolicy(
    policy_dir, eval_py_env.time_step_spec(), eval_py_env.action_spec())
# Note that we're passing eval_py_env not eval_env.
run_episodes_and_create_video(eager_py_policy, eval_py_env, eval_py_env)
| site/en-snapshot/agents/tutorials/10_checkpointer_policysaver_tutorial.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# language: python
# name: python3
# ---
# # Explore Numba - aka. Numpy for GPU
# ## Create and Run a Custom Python Function
# Note the slow execution time.
# +
import math
def hypot(x, y):
    """Compute sqrt(x**2 + y**2) without intermediate overflow.

    Factors out the larger magnitude so the squared ratio stays <= 1:
    sqrt(x**2 + y**2) = max * sqrt(1 + (min/max)**2).

    Returns 0.0 for hypot(0, 0); the original divided by max(|x|, |y|)
    unconditionally and raised ZeroDivisionError in that case.
    """
    x = abs(x)
    y = abs(y)
    t = min(x, y)
    x = max(x, y)
    if x == 0:
        return 0.0  # both inputs are zero; avoid 0/0 below
    t = t / x
    return x * math.sqrt(1+t*t)
# +
# Benchmark the pure-Python implementation for comparison with the JIT'd one.
# %%timeit
hypot(3.0, 4.0)
# -
# ## Create and Run a JIT'd Custom Python Function
# Note the faster execution time.
# +
from numba import jit
import math
@jit
def hypot_jit(x, y):
    """Numba-JIT-compiled hypot; same algorithm as `hypot` above.

    Guards the zero case so hypot_jit(0, 0) returns 0.0 instead of
    raising ZeroDivisionError (kept consistent with the fixed `hypot`).
    """
    x = abs(x)
    y = abs(y)
    t = min(x, y)
    x = max(x, y)
    if x == 0:
        return 0.0  # both inputs are zero; avoid 0/0 below
    t = t / x
    return x * math.sqrt(1+t*t)
# +
# JIT'd version: first call pays compilation cost; steady-state is faster.
# %%timeit
hypot_jit(3.0, 4.0)
# -
# ## Run the Underlying Custom Python Function
# Note the similar execution time.
# +
# .py_func is the original uncompiled Python function kept by numba.
# %%timeit
hypot_jit.py_func(3.0, 4.0)
# -
# ## Run the Python Math Library Function
# Note the fast execution time. These are already compiled into C.
# +
# %%timeit
math.hypot(3.0, 4.0)
# -
# ## Inspect JIT'd Types and Code
# By default, python floats and ints are 64-bit. For CPUs, this is fine.
#
# For GPUs, you may want to reduce the precision to 32, 16, or even 8-bit.
#
# Use `np.astype(np.float32)` in numpy.
hypot_jit.inspect_types()
# ## Making `ufuncs`
#
# Numba has the ability to create compiled ufuncs. You implement a scalar function of all the inputs, and Numba will figure out the broadcast rules for you. Generating a ufunc that uses CUDA requires giving an explicit type signature and setting the `target` attribute:
# +
from numba import vectorize
# CUDA-targeted ufunc: numba generates the broadcasting wrapper from this
# scalar int64 addition.
@vectorize(['int64(int64, int64)'], target='cuda')
def add_ufunc(x, y):
    return x + y
# -
a = 12
b = 17
b_col = 11
c = 1
# NOTE(review): the discussion below mentions 4- and 16-element inputs, but
# plain Python scalars are passed here -- confirm intended.
print('a+b:\n', add_ufunc(a, b))
print()
print('b_col + c:\n', add_ufunc(b_col, c))
# ## Why is the GPU Slower Sometimes?
#
# This is to be expected because we have (deliberately) misused the GPU in several ways in this example:
#
# * **Our inputs are too small**: the GPU achieves performance through parallelism, operating on thousands of values at once. Our test inputs have only 4 and 16 integers, respectively. We need a much larger array to even keep the GPU busy.
# * **Our calculation is too simple**: Sending a calculation to the GPU involves quite a bit of overhead compared to calling a function on the CPU. If our calculation does not involve enough math operations (often called "arithmetic intensity"), then the GPU will spend most of its time waiting for data to move around.
# * **We copy the data to and from the GPU**: While including the copy time can be realistic for a single function, often we want to run several GPU operations in sequence. In those cases, it makes sense to send data to the GPU and keep it there until all of our processing is complete.
# * **Our data types are larger than necessary**: Our example uses `int64` when we probably don't need it. Scalar code using data types that are 32 and 64-bit run basically the same speed on the CPU, but 64-bit data types have a significant performance cost on the GPU. Basic arithmetic on 64-bit floats can be anywhere from 2x (Pascal-architecture Tesla) to 24x (Maxwell-architecture GeForce) slower than 32-bit floats. NumPy defaults to 64-bit data types when creating arrays, so it is important to set the `dtype` attribute or use the `ndarray.astype()` method to pick 32-bit types when you need them.
#
#
# Given the above, let's try an example that is faster on the GPU:
# +
import numpy as np
import math # Note that for the CUDA target, we need to use the scalar functions from the math module, not NumPy
from numba import vectorize
# This gets inlined at compile time
SQRT_2PI = np.float32((2*math.pi)**0.5)
# 32-bit signature keeps GPU arithmetic fast (see discussion above).
@vectorize(['float32(float32, float32, float32)'], target='cuda')
# Probability Distribution Function
def gaussian_pdf(x, mean, sigma):
    '''Compute the value of a Gaussian probability density function at x with given mean and sigma.'''
    return math.exp(-0.5 * ((x - mean) / sigma)**2) / (sigma * SQRT_2PI)
# +
# Evaluate the Gaussian a million times!
x = np.random.uniform(-3, 3, size=1000000).astype(np.float32)
mean = np.float32(0.0)
sigma = np.float32(1.0)
# Quick test
gaussian_pdf(x[0], 0.0, 1.0)
# -
# ## Compare to SciPy
import scipy.stats # for definition of gaussian distribution
norm_pdf = scipy.stats.norm
# Benchmark SciPy's CPU implementation against the CUDA ufunc above.
# %timeit norm_pdf.pdf(x, loc=mean, scale=sigma)
# # CUDA with Numba
# `device=True` keeps the code on the GPU. A CPU-based kernel is not created.
# +
from numba import cuda
# device=True: callable only from other GPU code; no CPU entry point is built.
@cuda.jit(device=True)
def polar_to_cartesian(rho, theta):
    x = rho * math.cos(theta)
    y = rho * math.sin(theta)
    return x, y # This is Python, so let's return a tuple
# Euclidean distance between two points given in polar coordinates.
@vectorize(['float32(float32, float32, float32, float32)'], target='cuda')
def polar_distance(rho1, theta1, rho2, theta2):
    x1, y1 = polar_to_cartesian(rho1, theta1)
    x2, y2 = polar_to_cartesian(rho2, theta2)
    return ((x1 - x2)**2 + (y1 - y2)**2)**0.5
# -
# One million random polar point pairs, in float32 to match the signature.
n = 1000000
rho1 = np.random.uniform(0.5, 1.5, size=n).astype(np.float32)
theta1 = np.random.uniform(-np.pi, np.pi, size=n).astype(np.float32)
rho2 = np.random.uniform(0.5, 1.5, size=n).astype(np.float32)
theta2 = np.random.uniform(-np.pi, np.pi, size=n).astype(np.float32)
polar_distance(rho1, theta1, rho2, theta2)
# ## Managing GPU Memory (Experimental)
#
# During the benchmarking in the previous notebook, we used NumPy arrays on the CPU as inputs and outputs. If you want to reduce the impact of host-to-device/device-to-host bandwidth, it is best to copy data to the GPU explicitly and leave it there to amortize the cost over multiple function calls. In addition, allocating device memory can be relatively slow, so allocating GPU arrays once and refilling them with data from the host can also be a performance improvement.
#
# Let's create our example addition ufunc again:
# +
from numba import vectorize
import numpy as np
@vectorize(['float32(float32, float32)'], target='cuda')
def add_ufunc(x, y):
    """Element-wise float32 addition, compiled as a CUDA ufunc."""
    return x + y
# -
n = 100000
x = np.arange(n).astype(np.float32)
y = 2 * x
# %timeit add_ufunc(x, y) # Baseline performance with host arrays
# The `numba.cuda` module includes a function that will copy host data to the GPU and return a CUDA device array:
# +
from numba import cuda
x_device = cuda.to_device(x)
y_device = cuda.to_device(y)
print(x_device)
print(x_device.shape)
print(x_device.dtype)
# -
# Device arrays can be passed to CUDA functions just like NumPy arrays, but without the copy overhead:
# %timeit add_ufunc(x_device, y_device)
# That's a big performance improvement already, but we are still allocating a device array for the output of the ufunc and copying it back to the host. We can create the output buffer with the `numba.cuda.device_array()` function:
# Similar to np.empty()
## Just allocating memory buffer - not initializing data
out_device = cuda.device_array(shape=(n,), dtype=np.float32) # does not initialize the contents, like np.empty()
# And then we can use a special `out` keyword argument to the ufunc to specify the output buffer:
# %timeit add_ufunc(x_device, y_device, out=out_device)
# Now that we have removed the device allocation and copy steps, the computation runs *much* faster than before. When we want to bring the device array back to the host memory, we can use the `copy_to_host()` method:
out_host = out_device.copy_to_host()
print(out_host[:10])
# ## CUDA in Python
# That's a lot more typing than our ufunc example, and it is much more limited: only works on 1D arrays, doesn't verify input sizes match, etc. Most of the function is spent figuring out how to turn the block and grid indices and dimensions into unique offsets into the input arrays. The pattern of computing a starting index and a stride is a common way to ensure that your grid size is independent of the input size. The striding will maximize bandwidth by ensuring that threads with consecutive indices are accessing consecutive memory locations as much as possible. Thread indices beyond the length of the input (`x.shape[0]`, since `x` is a NumPy array) automatically skip over the for loop.
#
# Also note that we did not need to specify a type signature for the CUDA kernel. Unlike `@vectorize`, Numba can infer the type signature from the inputs automatically, and much more reliably.
#
# Let's create and run a function on some data:
# +
from numba import cuda
@cuda.jit
def add_kernel(x, y, out):
    """CUDA kernel: out[i] = x[i] + y[i] over 1-D arrays.

    Uses the start/stride (grid-stride) loop pattern, so every element is
    covered even when the grid has fewer threads than there are elements.
    """
    tx = cuda.threadIdx.x # this is the unique thread ID within a 1D block
    ty = cuda.blockIdx.x # Similarly, this is the unique block ID within the 1D grid
    block_size = cuda.blockDim.x # number of threads per block
    grid_size = cuda.gridDim.x # number of blocks in the grid
    start = tx + ty * block_size  # this thread's globally unique starting index
    stride = block_size * grid_size  # total number of threads in the grid
    # assuming x and y inputs are same length
    for i in range(start, x.shape[0], stride):
        out[i] = x[i] + y[i]
# +
import numpy as np
n = 100000
x = np.arange(n).astype(np.float32)
y = 2 * x
out = np.empty_like(x)
threads_per_block = 128
blocks_per_grid = 30
add_kernel[blocks_per_grid, threads_per_block](x, y, out)
print(out[:10])
# -
# The unusual syntax for calling the kernel function is designed to mimic the CUDA Runtime API in C, where the above call would look like:
# ```
# add_kernel<<<blocks_per_grid, threads_per_block>>>(x, y, out)
# ```
# The arguments within the square brackets define the size and shape of the thread grid, and the arguments with parentheses correspond to the kernel function arguments.
#
# Note that, unlike the ufunc, the arguments are passed to the kernel as full NumPy arrays. The kernel can access any element in the array it wants, regardless of its position in the thread grid. This is why CUDA kernels are significantly more powerful than ufuncs. (But with great power, comes a greater amount of typing...)
#
# Numba includes [several helper functions](http://numba.pydata.org/numba-doc/dev/cuda/kernels.html#absolute-positions) to simplify the thread offset calculations above. You can write the function much more simply as:
@cuda.jit
def add_kernel(x, y, out):
    """Same element-wise addition kernel as above, using Numba's grid()/gridsize() helpers instead of manual index arithmetic."""
    start = cuda.grid(1)      # 1 = one dimensional thread grid, returns a single value
    stride = cuda.gridsize(1) # ditto
    # assuming x and y inputs are same length
    for i in range(start, x.shape[0], stride):
        out[i] = x[i] + y[i]
# As before, using NumPy arrays forces Numba to allocate GPU memory, copy the arguments to the GPU, run the kernel, then copy the argument arrays back to the host. This is not very efficient, so you will often want to allocate device arrays:
x_device = cuda.to_device(x)
y_device = cuda.to_device(y)
out_device = cuda.device_array_like(x)
# %timeit add_kernel[blocks_per_grid, threads_per_block](x, y, out)
# %timeit add_kernel[blocks_per_grid, threads_per_block](x_device, y_device, out_device); out_device.copy_to_host()
# ## Kernel Synchronization
#
# *One extremely important caveat should be mentioned here*: CUDA kernel execution is designed to be asynchronous with respect to the host program. This means that the kernel launch (`add_kernel[blocks_per_grid, threads_per_block](x_device, y_device, out_device)`) returns immediately, allowing the CPU to continue executing while the GPU works in the background. Only host<->device memory copies or an explicit synchronization call will force the CPU to wait until previously queued CUDA kernels are complete.
#
# When you pass host NumPy arrays to a CUDA kernel, Numba has to synchronize on your behalf, but if you pass device arrays, processing will continue. If you launch multiple kernels in sequence without any synchronization in between, they will be queued up to run sequentially by the driver, which is usually what you want. If you want to run multiple kernels on the GPU in parallel (sometimes a good idea, but beware of race conditions!), take a look at [CUDA streams](http://numba.pydata.org/numba-doc/dev/cuda-reference/host.html?highlight=synchronize#stream-management).
#
# Here's some sample timings (using `%time`, which only runs the statement once to ensure our measurement isn't affected by the finite depth of the CUDA kernel queue):
# CPU input/output arrays, implied synchronization for memory copies
# %time add_kernel[blocks_per_grid, threads_per_block](x, y, out)
# GPU input/output arrays, no synchronization (but force sync before and after)
cuda.synchronize()
# %time add_kernel[blocks_per_grid, threads_per_block](x_device, y_device, out_device)
cuda.synchronize()
# GPU input/output arrays, include explicit synchronization in timing
cuda.synchronize()
# %time add_kernel[blocks_per_grid, threads_per_block](x_device, y_device, out_device); cuda.synchronize()
# **Always be sure to synchronize with the GPU when benchmarking CUDA kernels!**
# ## Atomic Operations and Avoiding Race Conditions
#
# CUDA, like many general purpose parallel execution frameworks, makes it possible to have race conditions in your code. A race condition in CUDA arises when threads read or write a memory location that might be modified by another independent thread. Generally speaking, you need to worry about:
#
# * read-after-write hazards: One thread is reading a memory location at the same time another thread might be writing to it.
# * write-after-write hazards: Two threads are writing to the same memory location, and only one write will be visible when the kernel is complete.
#
# A common strategy to avoid both of these hazards is to organize your CUDA kernel algorithm such that each thread has exclusive responsibility for unique subsets of output array elements, and/or to never use the same array for both input and output in a single kernel call. (Iterative algorithms can use a double-buffering strategy if needed, and switch input and output arrays on each iteration.)
#
# However, there are many cases where different threads need to combine results. Consider something very simple, like: "every thread increments a global counter." Implementing this in your kernel requires each thread to:
#
# 1. Read the current value of a global counter.
# 2. Compute `counter + 1`.
# 3. Write that value back to global memory.
#
# However, there is no guarantee that another thread has not changed the global counter between steps 1 and 3. To resolve this problem, CUDA provides "atomic operations" which will read, modify and update a memory location in one, indivisible step. Numba supports several of these functions, [described here](http://numba.pydata.org/numba-doc/dev/cuda/intrinsics.html#supported-atomic-operations).
#
# Let's make our thread counter kernel:
# +
@cuda.jit
def thread_counter_race_condition(global_counter):
    """Deliberately racy kernel: every thread does an unsynchronized read-modify-write of global_counter[0], so most increments are lost."""
    global_counter[0] += 1 # This is bad
@cuda.jit
def thread_counter_safe(global_counter):
    """Race-free counterpart: the atomic add makes the read-modify-write indivisible, so every thread's increment is counted."""
    cuda.atomic.add(global_counter, 0, 1) # Safely add 1 to offset 0 in global_counter array
# +
# This gets the wrong answer
global_counter = cuda.to_device(np.array([0], dtype=np.int32))
thread_counter_race_condition[64, 64](global_counter)
print('Should be %d:' % (64*64), global_counter.copy_to_host())
# +
# This works correctly
global_counter = cuda.to_device(np.array([0], dtype=np.int32))
thread_counter_safe[64, 64](global_counter)
print('Should be %d:' % (64*64), global_counter.copy_to_host())
# -
# ## CUDA Memcheck
#
# Another common error occurs when a CUDA kernel has an invalid memory access, typically caused by running off the end of an array. The full CUDA toolkit from NVIDIA (not the `cudatoolkit` conda package) contains a utility called `cuda-memcheck` that can check for a wide range of memory access mistakes in CUDA code.
#
# Let's debug the following code:
# Note the `debug=True` flag
# + language="bash"
#
# cat /root/src/main/python/numba/histogram.py
# + language="bash"
#
# cuda-memcheck python /root/src/main/python/numba/histogram.py
# -
# ## Shared Memory
#
# We briefly mention in notebook #4 that the CUDA programming model organizes threads into a two-layer structure. A grid is composed of many blocks, which are composed of many threads. Threads within the same block can communicate much more easily than threads in different blocks. The main mechanism for this communication is *shared memory*. Shared memory is discussed extensively in the CUDA C Programming Guide, as well as many other books on CUDA programming. We will only describe it very briefly here, and focus mainly on the Python syntax for using it.
#
# Shared memory is a section of memory that is visible at the block level. Different blocks cannot see each other's shared memory, and all the threads within a block see the same shared memory. It does not persist after a CUDA kernel finishes executing. Shared memory is a scarce hardware resource, so it should be used sparingly; otherwise side effects such as lower performance or even kernel launch failure (if you exceed the hardware limit of 48 kB per block) will occur.
#
# Shared memory is good for several things:
# * caching of lookup tables that will be randomly accessed
# * buffering output from threads so it can be coalesced before writing it back to device memory.
# * staging data for scatter/gather operations within a block
#
# As an example of the power of shared memory, let's write a transpose kernel that takes a 2D array in row-major order and puts it in column-major order. (This is based on Mark Harris' blog post at: https://devblogs.nvidia.com/parallelforall/efficient-matrix-transpose-cuda-cc/)
#
# First, let's do the naive approach where we let each thread read and write individual elements independently:
# +
TILE_DIM = 32  # tile edge length; also the thread-block width (launched as (TILE_DIM, BLOCK_ROWS))
BLOCK_ROWS = 8  # each thread copies TILE_DIM // BLOCK_ROWS elements, one per loop iteration
@cuda.jit
def transpose(a_in, a_out):
    """Naive 2-D transpose: each thread copies individual elements.

    Reads a_in row-wise but writes a_out column-wise (consecutive threads hit
    different rows of a_out), which is the memory-access pattern the tiled
    version below improves on.
    """
    x = cuda.blockIdx.x * TILE_DIM + cuda.threadIdx.x
    y = cuda.blockIdx.y * TILE_DIM + cuda.threadIdx.y
    for j in range(0, TILE_DIM, BLOCK_ROWS):
        a_out[x, y + j] = a_in[y + j, x]
# +
size = 1024
a_in = cuda.to_device(np.arange(size*size, dtype=np.int32).reshape((size, size)))
a_out = cuda.device_array_like(a_in)
print(a_in.copy_to_host())
# -
grid_shape = (int(size/TILE_DIM), int(size/TILE_DIM))
# %timeit transpose[grid_shape,(TILE_DIM, BLOCK_ROWS)](a_in, a_out); cuda.synchronize()
print(a_out.copy_to_host())
# Now let's use shared memory to copy a 32x32 tile at a time. We'll use a global value for the tile size so it will be known at compile time:
# +
import numba.types
# Read Mark Harris' blog post to find out why this improves performance!
# (The extra padding column presumably avoids shared-memory bank conflicts —
# see the linked post for the details.)
TILE_DIM_PADDED = TILE_DIM + 1 # Read Mark Harris' blog post to find out why this improves performance!
@cuda.jit
def tile_transpose(a_in, a_out):
    """Tiled transpose: stage a TILE_DIM x TILE_DIM tile in shared memory, then write it back transposed so both the global reads and the global writes are row-contiguous."""
    # THIS CODE ASSUMES IT IS RUNNING WITH A BLOCK DIMENSION OF (TILE_SIZE x TILE_SIZE)
    # AND INPUT IS A MULTIPLE OF TILE_SIZE DIMENSIONS
    tile = cuda.shared.array((TILE_DIM, TILE_DIM_PADDED), numba.types.int32)
    x = cuda.blockIdx.x * TILE_DIM + cuda.threadIdx.x
    y = cuda.blockIdx.y * TILE_DIM + cuda.threadIdx.y
    for j in range(0, TILE_DIM, BLOCK_ROWS):
        tile[cuda.threadIdx.y + j, cuda.threadIdx.x] = a_in[y + j, x] # transpose tile into shared memory
    cuda.syncthreads()  # wait for all threads in the block to finish updating shared memory
    # Compute transposed offsets: swap the roles of blockIdx.x and blockIdx.y
    x = cuda.blockIdx.y * TILE_DIM + cuda.threadIdx.x
    y = cuda.blockIdx.x * TILE_DIM + cuda.threadIdx.y
    for j in range(0, TILE_DIM, BLOCK_ROWS):
        a_out[y + j, x] = tile[cuda.threadIdx.x, cuda.threadIdx.y + j];
# +
a_out = cuda.device_array_like(a_in) # replace with new array
# %timeit tile_transpose[grid_shape,(TILE_DIM, BLOCK_ROWS)](a_in, a_out); cuda.synchronize()
print(a_out.copy_to_host())
# -
# That's a 30% speed up!
| jupyterhub/notebooks/gpu/01b_Explore_Numba.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# language: python
# name: python3
# ---
import json
import pandas as pd
import matplotlib.pyplot as plt
import matplotlib
import brickschema
import datetime
from relaxation_graphs.fast_relaxation_graph import *
from evaluation.similarity_score import *
import uuid
import pickle
import time
from rdflib import URIRef, Literal, BNode
from queries.mortar import queries as q1
# %matplotlib inline
# +
font = {'family' : 'sans-serif',
'weight' : 'normal',
'size' : 32}
matplotlib.rc('font', **font)
# -
df =pd.read_csv('data_figure6.csv', index_col=0)
df
import seaborn as sns
# df = sns.load_dataset('iris')
sns.set(rc={'axes.facecolor':'#F8F8F8'}, font_scale=1.8)
# Set the default figure size first, then create the figure so it picks it up.
# BUG FIX: the original called plt.Figure(), which only constructs a detached
# Figure object and never registers it with pyplot — so it was a no-op and the
# figsize setting never applied to an existing figure.
plt.rcParams['figure.figsize'] = [12, 6]
plt.figure()
linewidth = 2
markersize = 10
plt.grid(axis='y')
# One line per query, with distinct line styles / markers so they remain
# distinguishable in a monochrome print.
plt.plot(df.Q0, '-', label='Q0', marker='.', linewidth=linewidth, markersize=markersize)
plt.plot(df.Q1, '--', label='Q1', linewidth=linewidth, markersize=markersize)
plt.plot(df.Q2, '-.', label='Q2', linewidth=linewidth, markersize=markersize)
plt.plot(df.Q3, marker='^', label='Q3', linewidth=linewidth, markersize=markersize)
plt.plot(df.Q4, marker='v', label='Q4', linewidth=linewidth, markersize=markersize)
plt.plot(df.Q5, marker='H', label='Q5', linewidth=linewidth, markersize=markersize)
plt.plot(df.Q6, marker='*', label='Q6', linewidth=linewidth, markersize=markersize)
plt.legend()
plt.xlabel("Relaxation Level")
plt.ylabel("Number of buildings qualified\n(cumulative)")
plt.savefig('qualified_building_level.png')
# +
useful_queries = [
"""SELECT ?sensor WHERE {
?sensor rdf:type brick:Zone_Air_Temperature_Sensor .
?sensor brick:isPointOf ?equip
}""",
"""SELECT ?sp WHERE {
?sp rdf:type brick:Zone_Air_Temperature_Setpoint .
?sp brick:isPointOf ?equip
}""",
"""SELECT ?sensor ?sp ?equip WHERE {
?sensor rdf:type brick:Air_Flow_Sensor .
?sp rdf:type brick:Air_Flow_Setpoint .
?sensor brick:isPointOf ?equip .
?sp brick:isPointOf ?equip
}""",
"""SELECT * WHERE {
?equip rdf:type brick:VAV .
?equip brick:hasPoint ?air_flow .
?air_flow rdf:type brick:Supply_Air_Flow_Sensor
}""",
"""SELECT ?vav WHERE {
?vav rdf:type brick:VAV
}""",
"""SELECT ?floor ?room WHERE {
?floor rdf:type brick:Floor .
?room rdf:type brick:Room .
?room brick:isPartOf+ ?floor .
}""",
"""SELECT * WHERE {
?oat_damper a brick:Outside_Damper .
?pos a brick:Damper_Position_Command .
?oat_damper brick:hasPoint ?pos .
?oat a brick:Outside_Air_Temperature_Sensor
}""",
"""SELECT * WHERE {
?equip rdf:type brick:VAV .
?equip brick:isFedBy ?ahu .
?ahu brick:hasPoint ?upstream_ta .
?equip brick:hasPoint ?dnstream_ta .
?upstream_ta rdf:type brick:Supply_Air_Temperature_Sensor .
?dnstream_ta rdf:type brick:Supply_Air_Temperature_Sensor .
?equip brick:hasPoint ?vlv .
?vlv rdf:type brick:Valve_Command
}"""
]
# +
# FIX: `os` is used below but was never imported in this notebook's import cell
# (it may have leaked in via a star import; import it explicitly to be safe).
import os

files = os.listdir('Data - Brick models/')  # enumerate the raw Brick model files

# Load the pre-parsed Brick models from the pickle cache.
# (The previous `brick_models = []` initialization was dead code: the value
# was unconditionally overwritten by pickle.load.)
with open("brick_models.pkl", 'rb') as fp:
    brick_models = pickle.load(fp)
| plot_figure6_2.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# language: python
# name: python3
# ---
# +
import os
import sys
os.environ["CUDA_DEVICE_ORDER"]="PCI_BUS_ID"
os.environ["CUDA_VISIBLE_DEVICES"]="-1"
script_dir = os.getcwd()
root_dir = f"{script_dir}/../../"
sys.path.append(os.path.join(root_dir, "dpc"))
import numpy as np
import scipy.io
import imageio
import matplotlib.pyplot as plt
# %matplotlib inline
import open3d
from open3d import JVisualizer
from util.system import setup_environment
from util.euler import quaternion2euler
from util.image import preprocess_input_image
from render.render_point_cloud import render_point_cloud
# #!/usr/bin/env python
import pdb
import time
import torch
from models import model_pc_to as model_pc
from run.ShapeRecords import ShapeRecords
from util.app_config import config as app_config
from util.system import setup_environment
from util.fs import mkdir_if_missing
def get_smooth_sigma(cfg, global_step):
    """Linearly anneal the relative Gaussian sigma over training.

    Interpolates from cfg.pc_relative_sigma (at step 0) toward
    cfg.pc_relative_sigma_end (reached at cfg.max_number_of_steps).
    """
    fraction = global_step / cfg.max_number_of_steps
    span = cfg.pc_relative_sigma_end - cfg.pc_relative_sigma
    return cfg.pc_relative_sigma + fraction * span
def gauss_kernel_1d(l, sig):
    """Return a 1-D Gaussian kernel of length l with standard deviation sig.

    The kernel is a torch tensor, centered on zero and normalized so its
    entries sum to 1.
    """
    coords = torch.arange(-l // 2 + 1., l // 2 + 1)
    weights = torch.exp(-coords**2 / (2. * sig**2))
    return weights / weights.sum()
def separable_kernels(kernel):
    """Expand a 1-D kernel into three 5-D views, one oriented along each
    spatial axis, for use as a separable 3-D convolution filter."""
    n = kernel.shape[0]
    return [
        kernel.reshape((1, 1, 1, 1, n)),  # along the last (x) axis
        kernel.reshape((1, 1, 1, n, 1)),  # along the y axis
        kernel.reshape((1, 1, n, 1, 1)),  # along the z axis
    ]
def smoothing_kernel(cfg, sigma):
    """Build the separable Gaussian kernels used to smooth rasterized point clouds.

    Returns a list of three 5-D tensors [kx, ky, kz]. When cfg.vox_size_z != -1
    the z-axis kernel is rescaled so the effective sigma matches the
    anisotropic voxel grid.

    Raises:
        NotImplementedError: on the isotropic path when
            cfg.pc_separable_gauss_filter is False. (Previously this case fell
            through with `kernel` unassigned and crashed with UnboundLocalError.)
    """
    fsz = cfg.pc_gauss_kernel_size
    kernel_1d = gauss_kernel_1d(fsz, sigma)
    if cfg.vox_size_z != -1:
        # Anisotropic grid: scale both sigma and kernel length along z by the
        # voxel aspect ratio.
        vox_size_z = cfg.vox_size_z
        vox_size = cfg.vox_size
        ratio = vox_size_z / vox_size
        sigma_z = sigma * ratio
        fsz_z = int(np.floor(fsz * ratio))
        if fsz_z % 2 == 0:
            fsz_z += 1  # keep the kernel length odd so it has a center tap
        kernel_1d_z = gauss_kernel_1d(fsz_z, sigma_z)
        k1 = kernel_1d.reshape((1, 1, 1, 1, fsz))
        k2 = kernel_1d.reshape((1, 1, 1, fsz, 1))
        # BUG FIX: kernel_1d_z has length fsz_z, not fsz; the original reshape
        # to fsz raised whenever ratio != 1.
        k3 = kernel_1d_z.reshape((1, 1, fsz_z, 1, 1))
        kernel = [k1, k2, k3]
    else:
        if cfg.pc_separable_gauss_filter:
            kernel = separable_kernels(kernel_1d)
        else:
            raise NotImplementedError(
                "only separable Gaussian filters are supported on the "
                "isotropic path (set cfg.pc_separable_gauss_filter=True)")
    return kernel
# -
import sys
sys.path.append('../../')
from dpc.util.point_cloud_to import pointcloud_project_fast
# +
global_step=0
cfg = app_config
setup_environment(cfg)
sigma_rel = get_smooth_sigma(cfg, global_step)
_sigma_rel = sigma_rel
_gauss_sigma = sigma_rel / cfg.vox_size
_gauss_kernel = smoothing_kernel(cfg, sigma_rel)
device = 'cpu'
train_dir = cfg.checkpoint_dir
mkdir_if_missing(train_dir)
split_name = "train"
dataset_folder = cfg.inp_dir
dataset = ShapeRecords(dataset_folder, cfg, 'test')
dataset_loader = torch.utils.data.DataLoader(dataset,
batch_size=8, shuffle=False,
num_workers=8,drop_last=True)
log_dir = '../../dpc/run/model_run_data/'
model = model_pc.ModelPointCloud(cfg)
global_step = 100000
if global_step>0:
checkpoint_path = os.path.join(log_dir,'model.ckpt_{}.pth'.format(global_step))
print("Loading from path:",checkpoint_path)
checkpoint = torch.load(checkpoint_path, map_location=torch.device('cpu'))
global_step_val = checkpoint['global_step']
model.load_state_dict(checkpoint['model_state_dict'])
else:
global_step_val = global_step
model = model.to(device)
for i, train_data in enumerate(dataset_loader, 0):
for k in train_data.keys():
try:
train_data[k] = train_data[k].to(device)
except AttributeError:
pass
inputs = model.preprocess(train_data, cfg.step_size)
outputs = model(inputs, global_step_val, is_training=False, run_projection=True)
break
# -
inputs['images'].shape
# +
# select an input image from the validation set (splits defined in data/splits)
# the dataset has 5 different viewpoints for the same model
img_idx_1 = 4
img_idx_2 = 8
input_image_np_1 = inputs['images'].detach().cpu().numpy()[img_idx_1].transpose(1,2,0)
input_image_np_2 = inputs['images'].detach().cpu().numpy()[img_idx_2].transpose(1,2,0)
input_image_np_1.shape
# show input image
plt.figure()
plt.subplot(121)
plt.imshow(input_image_np_1)
plt.subplot(1,2,2)
plt.imshow(input_image_np_2)
plt.show()
# -
outputs['poses'].shape
# +
# image 1
all_points = outputs['all_points']#[img_idx_1].unsqueeze(0).repeat(128,1,1)
all_rgb = outputs['all_rgb']
camera_pose = outputs['poses']#[img_idx_1].unsqueeze(0).repeat(128,1)
predicted_translation = outputs["predicted_translation"]
proj_out = pointcloud_project_fast(cfg, all_points, camera_pose, predicted_translation,
all_rgb, _gauss_kernel,
scaling_factor=outputs['all_scaling_factors'],
focal_length=outputs['all_focal_length'])
proj = proj_out["proj"]
# -
all_points.shape
projs = torch.squeeze(proj.reshape(32,4,64,64,1)).detach().cpu().numpy()
proj_1 = projs[img_idx_1]
proj_2 = projs[img_idx_2]
plt.imshow(torch.squeeze(inputs['masks'])[img_idx_1].cpu().numpy())
plt.imshow(proj_1[3])
plt.imshow(proj_2[0])
def scale(scale_1=0.0, scale_2=0.2, sh='i'):
    """Render two chair models side by side at the given scales and save the frame.

    The point clouds of the two selected models are scaled, translated to
    fixed centers, concatenated, and re-projected with model 1's camera pose.
    The rendered projection is written to figs/s<sh>.png.

    Args:
        scale_1: scale factor applied to the first model's points.
        scale_2: scale factor applied to the second model's points.
        sh: suffix used in the output file name.

    Note: reads the notebook globals outputs, img_idx_1, img_idx_2, cfg and
    _gauss_kernel. (A dead `mixed_points = ...` pre-assignment that was
    immediately overwritten has been removed.)
    """
    def mix_points(points_1, points_2, scale_1, scale_2, center_1, center_2):
        # Scale each cloud, translate it to its own center, then concatenate.
        points_1 = points_1*scale_1 + center_1
        points_2 = points_2*scale_2 + center_2
        return torch.cat((points_1, points_2), dim=0)
    points_1 = outputs['all_points'][img_idx_1*4]
    points_2 = outputs['all_points'][img_idx_2*4]
    center_1 = torch.from_numpy(np.array([0, 0.35, 0.35]).reshape(1, 3))
    center_2 = torch.from_numpy(np.array([0, -0.35, -0.35]).reshape(1, 3))
    mixed_points = mix_points(points_1, points_2, scale_1, scale_2, center_1, center_2)
    # Broadcast the mixed cloud / pose across the batch size the projector expects.
    all_points = mixed_points.unsqueeze(0).repeat(128, 1, 1)
    all_rgb = outputs['all_rgb']
    camera_pose = outputs['poses'][img_idx_1*4].unsqueeze(0).repeat(128, 1)
    scaling_factors = outputs['all_scaling_factors'][img_idx_1*4].unsqueeze(0).repeat(128, 1)
    predicted_translation = outputs["predicted_translation"]
    proj_out = pointcloud_project_fast(cfg, all_points, camera_pose, predicted_translation,
                                       all_rgb, _gauss_kernel,
                                       scaling_factor=scaling_factors,
                                       focal_length=outputs['all_focal_length'])
    proj = proj_out["proj"]
    projs = torch.squeeze(proj.reshape(32, 4, 64, 64, 1)).detach().cpu().numpy()
    plt.imshow(projs[1][1])
    plt.savefig('figs/s'+sh+'.png')
i=0
for scal_ in range(1,8):
scale(0.7, scal_/10.,str(i))
i+=1
for scal_ in range(7,0,-1):
scale(scal_/10.,0.7,str(i))
i+=1
def trans(trans_x1=0.35, trans_y1=0.35, trans_x2=-0.35, trans_y2=-0.35, sh='i'):
    """Render two chair models at fixed scale (0.5) with the given per-model translations and save the frame.

    Args:
        trans_x1, trans_y1: center offset applied to the first model's points.
        trans_x2, trans_y2: center offset applied to the second model's points.
        sh: suffix used in the output file name (written to figs/s<sh>.png).

    Note: reads the notebook globals outputs, img_idx_1, img_idx_2, cfg and
    _gauss_kernel. (A dead `mixed_points = ...` pre-assignment that was
    immediately overwritten has been removed.)
    """
    def mix_points(points_1, points_2, scale_1, scale_2, center_1, center_2):
        # Scale each cloud, translate it to its own center, then concatenate.
        points_1 = points_1*scale_1 + center_1
        points_2 = points_2*scale_2 + center_2
        return torch.cat((points_1, points_2), dim=0)
    points_1 = outputs['all_points'][img_idx_1*4]
    points_2 = outputs['all_points'][img_idx_2*4]
    center_1 = torch.from_numpy(np.array([0, trans_x1, trans_y1]).reshape(1, 3))
    center_2 = torch.from_numpy(np.array([0, trans_x2, trans_y2]).reshape(1, 3))
    scale_1 = 0.5
    scale_2 = 0.5
    mixed_points = mix_points(points_1, points_2, scale_1, scale_2, center_1, center_2)
    # Broadcast the mixed cloud / pose across the batch size the projector expects.
    all_points = mixed_points.unsqueeze(0).repeat(128, 1, 1)
    all_rgb = outputs['all_rgb']
    camera_pose = outputs['poses'][img_idx_1*4].unsqueeze(0).repeat(128, 1)
    scaling_factors = outputs['all_scaling_factors'][img_idx_1*4].unsqueeze(0).repeat(128, 1)
    predicted_translation = outputs["predicted_translation"]
    proj_out = pointcloud_project_fast(cfg, all_points, camera_pose, predicted_translation,
                                       all_rgb, _gauss_kernel,
                                       scaling_factor=scaling_factors,
                                       focal_length=outputs['all_focal_length'])
    proj = proj_out["proj"]
    projs = torch.squeeze(proj.reshape(32, 4, 64, 64, 1)).detach().cpu().numpy()
    plt.imshow(projs[1][1])
    plt.savefig('figs/s'+sh+'.png')
# +
i=0
ls = np.linspace(-0.35,0.35,10)
for j in range(ls.shape[0]):
trans1_ = ls[j]
trans(0.35, trans1_, 0.35, -0.35,str(i))
i+=1
for j in range(ls.shape[0]):
trans2_ = ls[j]
trans(0.35, 0.35, -trans2_, -0.35,str(i))
i+=1
# -
| experiments/chair_unsupervised/.ipynb_checkpoints/visualise-checkpoint2.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# language: python
# name: python3
# ---
# +
# df['Currencies'].apply(', '.join)
# -
# Now that the data is cleaned and we have a backup of our df let's explore
from bokeh.plotting import figure, output_notebook, show
output_notebook()
from bokeh.models import ColumnDataSource
from bokeh.models.tools import HoverTool
from bokeh.transform import factor_cmap
from bokeh.palettes import Spectral5, Spectral3, inferno, viridis, Category20
# NOTE(review): `df` is not defined anywhere in this scratchpad section — it
# presumably comes from an earlier cell or session state; confirm before
# running this file top to bottom.
source = ColumnDataSource(df)
types = df['Currencies'].unique().tolist()
# One distinct color per currency value (palette sized at 18 — TODO confirm
# this matches the actual number of distinct currencies).
color_map = factor_cmap(field_name='Currencies', palette=viridis(18), factors=types)
# +
p = figure(x_axis_type='datetime')
p.circle(x='Date', y='positive', source=source, size=10, color=color_map)
# p.title.text = 'Pokemon Attack vs Speed'
# p.xaxis.axis_label = 'Attacking Stats'
# p.yaxis.axis_label = 'Speed Stats'
hover = HoverTool()
# Tooltip fields reference columns of the ColumnDataSource by name.
hover.tooltips=[
    ('Positive', '@positive'),
    ('Negative', '@negative'),
    ('Important', '@{important}'),
    ('Title', '@Title'),
]
p.add_tools(hover)
show(p)
# -
attribs = df.groupby('Currencies')['positive'].mean()
# NOTE(review): `pd` is used here but pandas is never imported in this
# section — it relies on earlier notebook state; add `import pandas as pd`
# if this is ever run standalone.
df = pd.concat([df.drop(['Currencies'], axis=1), df['Currencies'].apply(pd.Series)], axis=1).fillna(0)
df.describe()
| jupyter/Scratchpad.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# language: python
# name: python3
# ---
# # Example of OmnisciDB UDTF: table column normalization
#
# The aim of this notebook is to demonstrate how to implement, with the help of the [RBC](https://github.com/xnd-project/rbc/) tool, an OmnisciDB User-Defined Table Function (UDTF) in Python that normalizes a table column using the following formula:
#
# ```
# normalize(X) = (X - mean(X)) / std(X)
# ```
#
#
# ## Connecting to OmnisciDB server
#
# First, let us establish a connection to the OmnisciDB server using the tools from the RBC project:
import warnings; warnings.filterwarnings('ignore')
# NBVAL_IGNORE_OUTPUT
from rbc.omniscidb import RemoteOmnisci
omnisci = RemoteOmnisci(host='127.0.0.1', port=6274)
print(f'Connecting to OmnisciDB version {omnisci.version}, cuda={omnisci.has_cuda}')
assert omnisci.version[:3] >= (5, 4, 0), omnisci.version
assert not omnisci.has_cuda
# This notebook requires CUDA-disabled OmnisciDB version 5.4 or newer. If the above fails or one is connected to older version of OmnisciDB, please resolve the issue before continuing.
# ## Test data
#
# Next, let's create a test data that will be a sequence of 500 random numbers drawn from normal distribution with average value 10 and standard deviation 2:
from matplotlib import pyplot as plt
import numpy as np
np.random.seed(0)
from numpy.random import randn
SQL=lambda query: np.array(list(omnisci.sql_execute(query)[1]))
SQL('DROP TABLE IF EXISTS test_data');
SQL('CREATE TABLE IF NOT EXISTS test_data (X DOUBLE)')
omnisci.load_table_columnar('test_data', X=np.random.randn(500) * 2 + 10)
# Let's verify that the created table `test_data` contains the expected data by retrieving the data and using numpy methods for analyzing it locally:
X = SQL('SELECT X FROM test_data') # X will be a ndarray with shape (500, 1)
X = X[:, 0] # normalize X to 1-D array
print(np.mean(X), np.std(X, ddof=1)) # print the mean and unbiased std of X
plt.plot(X);
# # Necessity of UDF/UDTF for analyzing large data sets
#
# For very large data sets retrieving the data set to a local system for analysis may be impractical because of network bandwidth or the local system may not have enough RAM to hold the data. In such situations it becomes necessary to implement analyzing tools locally but so that the tools are executed in the server system that holds the data.
#
# OmnisciDB server v 5.4 implements support for users to define UDFs and UDTFs in runtime. UDFs can be applied to table data row-by-row, hence UDFs are data item-wise functions and UDFs return a scalar value. UDTFs, on the other hand, can be applied to table data columns and UDTFs return a table. Hence we call UDTFs also as table functions.
#
# The UDFs and table functions can be defined as Python functions using the [RBC](https://github.com/xnd-project/rbc/) tool that exploits [Numba](https://numba.pydata.org/) to convert Python functions to [LLVM](https://en.wikipedia.org/wiki/LLVM) IR strings and that are registered in the OmnisciDB server as User-Defined SQL functions so that the LLVM IR will be compiled into the SQL queries of the OmnisciDB server.
#
# ## Analyze data remotely using OmnisciDB user-defined functions
#
# ### Unbiased standard deviation
#
# As an example of UDTF, let's implement a standard deviation function in Python using Welford's online algorithm:
# +
def std_welford(x):
    """Unbiased (ddof=1) standard deviation of sequence x, computed in a
    single pass with Welford's online algorithm."""
    running_mean = 0.0
    sq_dev_sum = 0.0  # running sum of squared deviations from the mean
    for count, sample in enumerate(x, start=1):
        updated_mean = running_mean + (sample - running_mean) / count
        sq_dev_sum += (sample - running_mean) * (sample - updated_mean)
        running_mean = updated_mean
    return (sq_dev_sum / (len(x) - 1)) ** 0.5
# Compare the results of std_welford and numpy.std function:
print(f'std_welford(X), numpy.std(X, ddof=1) -> {std_welford(X), np.std(X, ddof=1)}')
# -
# The `std_welford` takes a sequence `x` as input and returns a `std` as scalar value. To run this function on the data stored in OmnisciDB server, we create an OmnisciDB table function as follows:
# +
import numba
# Compile the pure-Python implementation with Numba so it can be called
# from inside the UDTF below.
numba_std_welford = numba.njit(std_welford) # register for numba
# register a UDTF as a new SQL function std in OmnisciDB server
@omnisci('int32(Column<double>, int32|sizer=RowMultiplier, OutputColumn<double>)')
def unbiased_std(x, m, out):
    """UDTF: reduce input column x to a single-row output holding its unbiased std.

    Args:
        x: input column of doubles (bound via CURSOR in SQL queries).
        m: RowMultiplier sizer argument; determines the pre-allocated size of
           the output buffer (m * len(x)).
        out: output column; only out[0] is written.

    Returns:
        1, the actual number of rows in the output column.
    """
    out[0] = numba_std_welford(x) # store the computed std value in UDTFs output argument
    return 1 # return the size of output column
# -
# Notice that table functions are annotated differently from UDFs. The following applies to all table functions:
#
# - A `omnisci`-decorated function is considered a UDTF when it's signature contains `Column` and `OutputColumn` arguments. Otherwise, the function defines a UDF.
#
# - A table function input column arguments must have type `Column<row-type>` where `row-type` is a type specification for column entries. The currently supported row types are `float`, `double`, `int8`, `int16`, `int32`, `int64`, and `bool`. All column input arguments have equal sizes. The input column arguments must be specified in SQL queries via `CURSOR` construction.
#
# - A table function must have `sizer` argument that has type `int64` and is bar-annotated with `sizer=<sizer-type>` where currently supported `sizer-type` is `RowMultiplier`. The value of `sizer` defines the pre-allocated memory size of output arguments, see below. The sizer argument must be specified in SQL queries using a literal integer value.
#
# - A table function output column arguments must have type `OutputColumn<row-type>`. The output column arguments use pre-allocated memory that size is equal to `<sizer value> * <size of input columns>`.
#
# - A table function must return the actual size of output column arguments as `int32` value that will be used for reallocating the memory of output column arguments. Table functions are called in SQL queries using `TABLE` construction.
#
# For example, the table function `unbiased_std` takes three arguments: `x` is input column argument, `m` is sizer argument, and `out` is output column argument. For the sizes of column arguments, `len(out) == m * len(x)` holds. The table function returns `1` that indicates to OmnisciDB query engine that the size of output column `out` is equal to `1` and the engine will reallocate the `out` memory accordingly.
#
# Now let's compute the standard deviation in the server:
# CURSOR(...) wraps the input column and the literal 1 is the RowMultiplier
# sizer value required by the table-function signature above.
SQL('SELECT * FROM TABLE(unbiased_std(CURSOR(SELECT X FROM test_data), 1));')
# Notice that the retrived value matches with the result of locally computed standard deviation values.
#
# ### Normalize a table column
#
# To normalize a table column containing random data using a formula:
# ```
# normalize(X) = (X - mean(X))/std(X)
# ```
# we apply the following UDF to a table column:
# Row-wise UDF: normalize a single value given a known mean and std.
@omnisci('double(double, double, double)')
def normalize_udf(x, mean, std):
    return (x - mean)/std
# Apply the UDF server-side. NOTE(review): 10 and 2 are presumably the
# mean/std the test data was generated with in an earlier cell — confirm.
X_udf = SQL('SELECT normalize_udf(X, 10, 2) FROM test_data')
# The normalized column should have mean ~0 and unbiased std ~1.
print(f'mean(X_udf)={np.mean(X_udf):.3f}, std(X_udf)={np.std(X_udf, ddof=1):.3f}')
# As expected, the mean of a normalized column is close to 0 and the standard deviation is close to 1.
#
# In case the mean and standard deviation of the data column are unknown, we can compute the mean and std using the Welford algorithm and normalize the data in a single call to the following table function:
# Table function: compute mean/std with Welford's algorithm in one pass,
# then normalize the whole column in a second pass. Output size == input size.
@omnisci('int32(Column<double>, int32|sizer=RowMultiplier, OutputColumn<double>)')
def normalize_udtf(x, m, out):
    A = 0.0 # running mean value
    Q = 0.0 # running sum of squared deviations
    for i in range(len(x)):
        xi = x[i]
        A1 = A + (xi - A) / (i + 1)
        Q = Q + (xi - A) * (xi - A1)
        A = A1
    s = pow(Q / (len(x) - 1), 0.5) # unbiased std value
    # After the first loop, A holds the mean of the full column.
    for i in range(len(x)):
        out[i] = (x[i] - A) / s
    return len(x) # return the size of output column
X_udtf = SQL('SELECT * FROM TABLE(normalize_udtf(CURSOR(SELECT X FROM test_data), 1))')
print(f'mean(X_udtf)={np.mean(X_udtf):.3f}, std(X_udtf)={np.std(X_udtf, ddof=1):.3f}')
# As expected, the mean and std values of normalized sequence are exactly 0 and 1, respectively.
# ### Final notes
#
# - Table functions currently work only on CUDA-disabled OmnisciDB server.
# - SQL aggregate functions and likely other operations do not work on table functions, for example:
#
# Demonstrate that aggregates over a TABLE(...) expression fail; the server
# error message is printed rather than raised.
try:
    SQL('SELECT AVG(out0) FROM TABLE(normalize_udtf(CURSOR(SELECT X FROM test_data), 1))')
except Exception as msg:
    print(msg)
# As a workaround, one can save the table function result in an auxiliary table and apply SQL operations to its columns:
SQL('DROP TABLE IF EXISTS result_data');
SQL('CREATE TABLE IF NOT EXISTS result_data (Y DOUBLE)')
# Materialize the normalized column, then aggregate over the plain table.
SQL('INSERT INTO result_data (Y) SELECT out0 FROM TABLE(normalize_udtf(CURSOR(SELECT X FROM test_data), 1))')
SQL('SELECT AVG(Y) FROM result_data')
| notebooks/rbc-omnisci-udtf-normalize.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# language: python
# name: python3
# ---
import pandas as pd
# !ls
# +
#DB Structure:
# name|||||test acc|||||train acc|||||f1m train|||||f1m test|||||dataset #////k-folds ff scikit
# Empty "results database" frame with one column per recorded metric/setting.
df = pd.DataFrame(columns=['ref','type' ,'train_acc', 'test_acc', 'f1m_train', 'f1m_test', 'kfolds', 'dataset', 'parameters','aug', 'train_time', 'imsize'])
# Dummy row used to exercise the schema; values are placeholders.
test = {'train_acc':[0], 'test_acc':[0], 'f1m_train':[0], 'f1m_test':[0], 'kfolds':[0], 'dataset':[0],'parameters':[[0,1,2,2,3]] }
# -
test = pd.DataFrame.from_dict(test)
test
df
# NOTE(review): the result is not assigned, so `df` stays empty; also
# DataFrame.append was deprecated and removed in pandas 2.0 — use pd.concat.
df.append(test)
# !pwd
# NOTE(review): hard-coded absolute paths below are machine-specific.
df.to_pickle("/home/santiagovargas/dev/CancerImaging/CancerImaging/Algorithm_DB/***statistics_DONTDELETE.pkl")
df = pd.read_pickle("/home/santiagovargas/dev/CancerImaging/CancerImaging/Algorithm_DB/***statistics_DONTDELETE.pkl")
# NOTE(review): this second read targets a different directory and
# overwrites the frame just loaded — confirm which pickle is authoritative.
df = pd.read_pickle("/home/santiagovargas/dev/CancerImaging/CancerImaging/***statistics_DONTDELETE.pkl")
df[df.type.str.contains("svm")]
import os
os.getcwd()
| Data_Notebooks/006-Algorithm_Database.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# language: python
# name: python3
# ---
# # Ettoday 網路爬蟲實作練習
#
#
# * 能夠利用 Request + BeatifulSour 撰寫爬蟲,並存放到合適的資料結構
#
# ## 作業目標
#
# 根據範例 ,完成以下問題:
#
# * ① 取出今天所有的發文
# * ② 如果想要依照類別分類,怎麼儲存會比較好?
# * ③ 哪一個類別的文章最多
#
#
#
# ### ① 取出今天所有的發文
# +
import requests
import time
from bs4 import BeautifulSoup
from selenium import webdriver
import pyautogui as pag
url = 'https://www.ettoday.net/news/news-list.htm'
chrome_path = "./chromedriver"
driver = webdriver.Chrome(chrome_path) # launch Chrome via the local chromedriver
driver.maximize_window()
driver.get(url) # open the news-list page
# mark the following code because the waiting time is so long
# for i in range(1, 50):
#     driver.execute_script('window.scrollTo(0, document.body.scrollHeight);')
#     time.sleep(1)
# Parse the rendered page source so JS-inserted items are included.
soup = BeautifulSoup(driver.page_source , "html5lib")
# driver.close()
# Print date, category tag and title for every article posted on the target day.
# NOTE(review): the date is hard-coded to the day the notebook was run;
# recompute it (e.g. from time.strftime) to reuse this cell.
for d in soup.find(class_="part_list_2").find_all('h3'):
    if(d.find(class_="date").text.split( )[0]=='2020/12/27'):
        print(d.find(class_="date").text, d.find(class_="tag").text, d.find_all('a')[-1].text)
# -
# ### ② 如果想要依照類別分類,怎麼儲存會比較好?
# +
# Group the day's articles by category tag:
# res maps tag -> list of {'title', 'date'} dicts.
res = {}
for d in soup.find(class_="part_list_2").find_all('h3'):
    if(d.find(class_="date").text.split( )[0]=='2020/12/27'):
        date = d.find(class_="date").text
        title = d.find_all('a')[-1].text
        tag = d.select('.tag')[0].text
        # Create the list for a tag on first sight, then append.
        res.setdefault(tag, [])
        res[tag].append({
            'title': title,
            'date': date
        })
res
# -
# ### ③ 哪一個類別的文章最多
# +
# Count articles per tag, then print the tags ordered by article count
# (ascending), so the most-populated category appears last.
data = []
for tag_name, articles in res.items():
    article_count = len(articles)
    print(tag_name, article_count)
    data.append({'tag': tag_name, 'count': article_count})
for entry in sorted(data, key=lambda item: item['count']):
    print(entry)
# -
| Day012_HW.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# language: python
# name: python3
# ---
# Soo..
#
# The idea here is to play a little bit with the "Frozen Lake" classic gym enviroment.
#
# Here is a description from the open ai gym documentation
#
# https://github.com/openai/gym/blob/master/gym/envs/toy_text/frozen_lake.py
#
# """
# Winter is here. You and your friends were tossing around a frisbee at the park
# when you made a wild throw that left the frisbee out in the middle of the lake.
# The water is mostly frozen, but there are a few holes where the ice has melted.
# If you step into one of those holes, you'll fall into the freezing water.
# At this time, there's an international frisbee shortage, so it's absolutely imperative that
# you navigate across the lake and retrieve the disc.
# However, the ice is slippery, so you won't always move in the direction you intend.
# The surface is described using a grid like the following
#
# SFFF
# FHFH
# FFFH
# HFFG
#
# S : starting point, safe
# F : frozen surface, safe
# H : hole, fall to your doom
# G : goal, where the frisbee is located
# The episode ends when you reach the goal or fall in a hole.
# You receive a reward of 1 if you reach the goal, and zero otherwise.
# """
# Importing the libraries
import numpy as np
import gym
# The actions available are:
#
# LEFT = 0
# DOWN = 1
# RIGHT = 2
# UP = 3
# Lets create our enviroment with Frozen Lake
env = gym.make("FrozenLake-v0", is_slippery=True)
# As mentioned before, the environment's "is_slippery" flag makes transitions
# stochastic: an action may land the agent in a different state than intended.
# Define the quantities used by the value-iteration functions below.
# NOTE(review): env.nS works on classic gym toy_text envs; newer gym versions
# expose env.observation_space.n instead — confirm the installed gym version.
V = np.zeros(env.nS)  # state-value estimates, one per grid cell
S = np.arange(0,16)  # state indices of the 4x4 grid
threshold = 1e-3  # convergence tolerance for value iteration
gamma = 0.9  # discount factor
actions = {'LEFT':0,'DOWN':1,'RIGHT':2,'UP':3}  # FrozenLake action codes
# There are several ways to solve this problem. In this notebook i will use the value iteration algorithm.
# +
''' Lets first define a function that will use bellman optimality equation
to calculate the optimal state value functions for all states'''
def optimal_state_value(env,S,V):
    """Value iteration: repeatedly sweep the Bellman optimality backup over
    all states until the largest single-sweep change falls to ``threshold``.

    ``V`` is updated in place (asynchronously, within each sweep) and also
    returned. Relies on the module-level ``actions``, ``gamma`` and
    ``threshold`` defined above.
    """
    while True:
        largest_change = 0
        for state in S:
            previous_value = V[state]
            # Back up the expected return of every action; keep the best.
            candidate_values = []
            for action in actions.values():
                reward_term = 0
                value_term = 0
                for probability, next_state, reward, _ in env.P[state][action]:
                    reward_term += probability * reward
                    value_term += probability * V[next_state]
                candidate_values.append(reward_term + gamma * value_term)
            best_value = max(candidate_values)
            V[state] = best_value
            largest_change = max(largest_change, abs(previous_value - best_value))
        if largest_change <= threshold:
            return V
# +
''' Now lets use a policy imporvement function to calculate the optimal policy
given the optimal state values previously calculated'''
def optimal_policy(env,S,V):
    """Greedy policy extraction: converge the state values with
    ``optimal_state_value`` first, then pick for every state the action that
    maximizes the one-step Bellman backup.

    Ties are broken in favor of the action listed first in the module-level
    ``actions`` dict. Returns an array of action codes, one per state.
    """
    policy = np.zeros(env.nS)
    V = optimal_state_value(env,S,V)
    for state in S:
        chosen_action, chosen_value = None, float('-inf')
        for action in actions.values():
            reward_term, value_term = 0, 0
            for probability, next_state, reward, _ in env.P[state][action]:
                reward_term += probability * reward
                value_term += probability * V[next_state]
            backed_up = reward_term + gamma * value_term
            if backed_up > chosen_value:
                chosen_value, chosen_action = backed_up, action
        policy[state] = chosen_action
    return policy
# -
# Now we can run the algorithms and view the results
env.render()
# Show the greedy policy as a 4x4 grid of action codes next to the legend.
print(optimal_policy(env,S,V).reshape(4,4), actions, sep="\n")
# As one can see, the actions at each state seem kind of counter-intuitive but thats because the "is_slippery" effect.
#
# Let's calculate the results again but turning off the "is_slippery effect"
env2 = gym.make("FrozenLake-v0", is_slippery=False)
env2.render()
# NOTE(review): this call reuses the same V array already converged for the
# slippery env; optimal_state_value re-converges it in place for env2, which
# works but couples the two runs — pass a fresh np.zeros(16) to decouple.
print(optimal_policy(env2,S,V).reshape(4,4), actions, sep="\n")
# As one can see, without the "is_slippery" effect the calculated policy seems to be quite intuitive.
| value_iteration/Frozen_lake_Value_Iteration.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# language: python
# name: python3
# ---
# # Clustering - Unsupervised Machine Learning
# # Hierarchical clusternig with Python and Scikit-learn
# +
# The difference between supervised and unsupervised machine learning is whether or not we, the scientist,
# are providing the machine with labeled data.
# Unsupervised machine learning is where the scientist does not provide the machine with labeled data,
# and the machine is expected to derive structure from the data all on its own.
# +
# The main form of unsupervised machine learning is clustering.
# Within clustering, you have "flat" clustering or "hierarchical" clustering.
# Flat clustering is where the scientist tells the machine how many categories to cluster the data into.
# Hierarchical clustering is where the machine is allowed to decide how many clusters to create based on its own algorithms.
# +
# Scikit-learn (sklearn) is a popular machine learning module for the Python programming language.
# The Scikit-learn module depends on Matplotlib, SciPy, and NumPy as well.
# +
import numpy as np
from sklearn.cluster import MeanShift
import matplotlib.pyplot as plt
from matplotlib import style
style.use("ggplot")
# +
# Generate a synthetic dataset of 1000 points around three cluster centers.
# Fix: sklearn.datasets.samples_generator was deprecated in scikit-learn 0.22
# and removed in 0.24; make_blobs is importable directly from sklearn.datasets.
from sklearn.datasets import make_blobs
centers = [[1,1],[4,4],[3,9]]
X, Y = make_blobs(n_samples = 1000, centers = centers, cluster_std = 1)
# -
# Visualize the data
plt.scatter(X[:,0],X[:,1])
plt.show()
# Using Hierarchical Clustering
# NOTE(review): MeanShift is a mode-seeking (flat) clustering algorithm, not a
# hierarchical one; it does, however, infer the number of clusters on its own,
# which is presumably what the notebook means here — consider rewording.
ms = MeanShift()
ms.fit(X)
labels = ms.labels_              # cluster index assigned to each sample
centroids = ms.cluster_centers_  # one (x, y) center per discovered cluster
# +
# Visualize the result
print('Clusters formed through Hierarchical Clustering:',end=" ")
print(len(np.unique(labels)))
print('\n\nCentroid of each Cluster:')
for i in centroids:
    print('(',round(i[0],3),',',round(i[1],3),')')
# +
# Visulaize the result
# NOTE(review): only 7 marker styles are listed, so this plot raises
# IndexError if MeanShift finds more than 7 clusters.
colors = ["y.","r.","c.","m.","b.","g.","w."]
for i in range(len(X)):
    plt.plot(X[i][0], X[i][1], colors[labels[i]], markersize = 10)
plt.scatter(centroids[:, 0],centroids[:, 1], c='black', marker = "x", s=100, linewidths = 5, zorder = 10)
plt.show()
# +
# Visualizing the result, we can conclude that the clusters formed are accurate
# The centers formed also apprximates to the original centers of the dataset
| Hierarchical Clustering - Unsupervised ML - Copy.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: conda_tensorflow_p36
# language: python
# name: conda_tensorflow_p36
# ---
# # Initialization
import sagemaker as sm
import boto3
import json
from datetime import datetime
from time import strftime, gmtime
model_s3_path = 's3://sagemaker-eu-west-1-113147044314/MUSE/model.tar.gz'
local_model_path = "../../../models/MUSE/large/000003"
bucket = sm.session.Session().default_bucket()
print(f"Default bucket: {bucket}")
# # Retrieving and packaging the model for SageMaker
# We already downloaded the model when we first tried to deploy it using the SageMaker SDK support for Tensorflow. Now we just need to copy it to the proper location.
# !tar -czf /tmp/model.tar.gz -C {local_model_path} .
# !ls -la /tmp/*.tar.gz
# !aws s3 cp /tmp/model.tar.gz s3://{bucket}/MUSE/model.tar.gz
# # Common script used by local, local SM and Endpoit
# +
# %%writefile modelscript_tensorflow.py
import tensorflow as tf
import numpy as np
import tensorflow_hub as hub
import tensorflow_text
import json
# Load a TF-Hub SavedModel (here: MUSE) and return the loaded model object.
def load_model(modelpath):
    # hub.load accepts a local SavedModel directory path.
    model = hub.load(modelpath)
    return model
# return prediction based on loaded model (from the step above) and an input payload
def predict(model, payload):
    """Run ``model`` on ``payload`` and return ``{'output': ...}`` as JSON.

    ``payload`` may be ``bytes`` or ``str`` and is interpreted as either
    - a JSON object ``{"instances": [...]}``,
    - a JSON list of strings, or
    - any other payload, treated as a single plain-string instance.

    On any inference error the exception text is placed in ``output`` instead
    of raising, so the serving layer always receives a JSON response.
    """
    if not isinstance(payload, str):
        payload = payload.decode()
    try:
        try:
            decoded = json.loads(payload)  # parse once, not per isinstance check
        except json.JSONDecodeError:  # not JSON: fall through to plain-string case
            decoded = None
        if isinstance(decoded, dict):
            # If it has no 'instances' field, assume the payload is a string.
            data = decoded.get('instances', [payload])
        elif isinstance(decoded, list):
            data = decoded
        else:
            # Plain string or scalar JSON (e.g. '"text"' or '5'): wrap it as a
            # single instance. The original left `data` unbound on this path,
            # so such payloads produced a NameError reported as the output.
            data = [payload]
        result = np.asarray(model(data))
        out = result.tolist()
    except Exception as e:
        out = str(e)
    return json.dumps({'output': out})
# -
# # Testing local inference
# The first step to check if we got the correct model is testing it locally. In order to do that, we need to update the libraries the model used to the same versions used to train it. As can be seen on [Tensorflow Hub](https://tfhub.dev/google/universal-sentence-encoder-multilingual-large/3), those are:
# - Tensorflow 2: we'll use version 2.2.0
# -Tensorflow Text: we'll use version 2.2.0, under the assumption that it's the one compatible with Tensorflow 2.2
# - We'll also install Tensorflow Hub, because it provides the function to load the model.
# #!pip uninstall -y tensorflow-gpu
# !pip install --force-reinstall tensorflow>=2.2.0 tensorflow-hub>=0.8.0 tensorflow-text==2.2.0 protobuf
# +
import tensorflow as tf
import tensorflow_hub as hub
import tensorflow_text
import numpy as np
from sagemaker.tensorflow.serving import Model
# my_devices = tf.config.experimental.list_physical_devices(device_type='CPU')
# tf.config.experimental.set_visible_devices(devices= my_devices, device_type='CPU')
# print(f"Tensorflow version: {tf.__version__}")
# print(f"Tensorflow text does not provide a version object")
# print(f"Tensorflow hub version: {hub.__version__}")
# -
tf.config.list_physical_devices('CPU')
tf.debugging.set_log_device_placement(True)
tf.config.set_visible_devices([], 'GPU')
converter = tf.lite.TFLiteConverter.from_saved_model(local_model_path)
converter.optimizations = [tf.lite.Optimize.OPTIMIZE_FOR_SIZE]
converter.target_spec.supported_types = [tf.float16]
Tflite_quantized_model = converter.convert()
from modelscript_tensorflow import *
model = load_model(local_model_path)
# The model expects its input as a JSON object in one of the following formats:
# ```javascript
# {
# "instances": ["example 1", "example 2", ...]
# }
# ["example 1", "example 2", ....]
# ```
# and will return the embeddings in the following format:
# ```javascript
# {
# "output": [[<embeddings for example 1>], [<embeddings for example 2>], ...]
# }
# ```
#
# We'll try the two calls to test that the model itself is working.
inputs = ['The quick brown fox jumped over the lazy dog.', 'This is a test']
inputs_json = json.dumps({'instances': inputs})
inputs_json_list = json.dumps(inputs)
print(f"Input: {inputs_json}\n")
print(f"Result:\n{json.loads(predict(model, inputs_json))}")
print(f"Input: {inputs_json_list}\n")
print(f"Result:\n{json.loads(predict(model, inputs_json_list))}")
# The model can also be called with a simple string as input. From the example below, you can see that the result format is always the same:
json.loads(predict(model, inputs[0]))
# **You may have to restart the Kernel and run the initialization and setting of model paths before continuing.** The locally loaded model cannot be released from GPU otherwise, and the local SM won't have enough memory to proceed.
# # Deploying on SageMaker and a Custom Container Based on EZSMDeploy
# [EZSMDeploy](https://pypi.org/project/ezsmdeploy/) got us started, but it's too limited to deploy an optimized configuration. On the other hand, SageMaker's [Tensorflow Serving image](https://github.com/aws/sagemaker-tensorflow-serving-container) also doesn't work, so we'll need to create our own container. We have copied the `src` folder created by EZSMDeploy and edited the files. Let's take a look at our changes:
# ## Dockerfile
# We made the following changes to the Dockerfile:
# - Based it on Nvidia's `cuda:10.1-base-ubuntu18.04`. That should make the CUDA libraries available.
# - We also need to install several additional packages for them to work: `cuda-command-line-tools-10-1`, `cuda-cufft-10-1`, `cuda-curand-10-1`,
# `cuda-cusolver-10-1`, `cuda-cusparse-10-1`, `libcublas10=10.1.0.105-1`, `libcublas-dev=10.1.0.105-1`, `libcudnn7`, `libnccl2`, `libgomp1`.
# !pygmentize src/Dockerfile
# ## Build Script
# We made the following changes to the `build-docker.sh` script:
# - Changed `algorithm-name` to `"muse-large-000003"`
# - Added a `latest` tag to the image
# - Removed the creation of `done.txt`, since we're calling the script synchronously from the notebook
# - If you are running the script from inside a SageMaker notebook instance on a GPU-enabled AWS instance, no additional configuration is needed -
# the [nvidia-container-toolkit](https://github.com/NVIDIA/nvidia-docker/wiki) is already installed. If you are running from some other environment, please check their instructions.
# !pygmentize src/build-docker.sh
# !src/build-docker.sh
# ## Deploying Locally
# +
from sagemaker import get_execution_role
role = get_execution_role()
# +
from sagemaker.model import Model
from sagemaker.predictor import RealTimePredictor
model = Model(model_data=model_s3_path, image='113147044314.dkr.ecr.eu-west-1.amazonaws.com/muse-large-000003', role=role,
predictor_cls= RealTimePredictor, name='muse-large-000003')
# -
local_predictor = model.deploy(initial_instance_count=1, instance_type='local', endpoint_name='muse-large-000003-local', wait=True)
inputs = ['The quick brown fox jumped over the lazy dog.', 'This is a test']
inputs_json = json.dumps({'instances': inputs})
inputs_json_list = json.dumps(inputs)
out = local_predictor.predict(inputs_json_list.encode()).decode()
local_predictor.delete_endpoint()
# ## Deploying to a SageMaker Endpoint
"520713654638.dkr.ecr.eu-west-1.amazonaws.com/{}:{}".format(get_ecr_image_uri_prefix(account, region), ecr_repo, tag)
# +
from sagemaker.model import Model
from sagemaker.predictor import RealTimePredictor
model = Model(model_data=model_s3_path, image='113147044314.dkr.ecr.eu-west-1.amazonaws.com/muse-large-000003', role=role,
predictor_cls= RealTimePredictor, env={'MODEL_SERVER_WORKERS': '1'}, name='muse-large-000003')
# -
predictor = model.deploy(initial_instance_count=1, instance_type='ml.p3dn.24xlarge', endpoint_name='muse-large-000003-g4dn', wait=True)
out = predictor.predict(inputs_json_list.encode()).decode()
# # EZSMDeploy - Remove afterwards
# Then we create a local deployment (for quick testing purposes), passing it:
# - the location of the model we downloaded
# - the script we defined above with the `load_model` and `predict` functions
# - the dependencies we'll need to run the model
# - A model name that SageMaker will use to create metadata and track the model creation.
#
# We also tell it to deploy on local mode. Local mode (requested by specifying `local` as the instance type) deploys the Docker container in the machine where the call to deploy was made. It's a convenience for testing ideas fast, disconnected from the SageMaker service. It should not be used for real inference, just small tests.
ez = ezsmdeploy.Deploy(
model = local_model_path,
script = 'modelscript_tensorflow.py',
requirements = ['numpy','tensorflow-gpu==2.2.0','tensorflow_hub', 'tensorflow-text==2.2.0'], #or pass in the path to requirements.txt
instance_type = 'local',
monitor=False,
name='muse-large-000003',
wait = True
)
# From the log we can see we had some problems with GPU. This is because EZSMDeploy doesn't start from an image that has the required GPU drivers. In fact, we can check the Dockerfile used by EZSMDeploy and see it starts from standard Ubuntu 16.04:
# !pygmentize src/Dockerfile
# All the code generated by EZSMDeploy to create and serve the model is under the `src` folder. The Dockerfile is doing some interesting things:
# - It installs all the requirements from a requirements file generated by EZSMDeploy based on the parameter passed by us
# - It copies the entire contents of the folder into the image.
#
# Besides the `Dockerfile` above, you may also want to check:
# - `transformscript.py`: That's a copy of the script created by us and passed as a parameter.
# - `serve`: The base script run by the container (default SageMaker call when serving and no other entrypoint was provided). It just starts the web services:
# - nginx
# - gunicorn
# - `wsgi.py`: Used by gunicorn to start the actual workers. As you can see, it's just a simple wrapper around a flask application defined in
# - `predictor.py`: The most interesting function here is called `transformation`. Interesting things happening here:
# - It imports `transformscript`, effectively having the functions to load and generate inference from the model.
# - It adds several `print` statements that generate useful log. While useful, it could have performance and security impacts, and we recommend that these are reviewed and removed later.
#
# In general, EZSMDeploy is a quick way to generate a deployment template to get started faster when creating new models, but it has its limitations. Let's see how well it works.
inputs = ['The quick brown fox jumped over the lazy dog.', 'This is a test']
inputs_json = json.dumps({'instances': inputs})
inputs_json_list = json.dumps(inputs)
out = ez.predictor.predict(inputs_json_list.encode()).decode()
# You can see the actual input and output in the logs above. And here's the result:
json.loads(out)['output']
# So, we have generated an embedding from a deployed endpoint, and it seems to work locally. In the next section, we'll see if it also works for production deployment. But first let's remove the local endpoint and release the resources.
ez.predictor.delete_endpoint()
# # Deploying to a SageMaker Endpoint
# ## Deploying through EZSMDeploy Interfacce
# EZSMDeploy always rebuilds the image when rerun - but Docker will be smart about its caching, so the building and push should be faster. Most of the time spent here should be on starting and configuring an EC2 instance to deploy the model to.
ezonsm = ezsmdeploy.Deploy(
model = local_model_path, #Since we are loading a model from TF hub,
script = 'modelscript_tensorflow.py',
requirements = ['numpy','tensorflow-gpu==2.2.0','tensorflow_hub', 'tensorflow-text==2.2.0'],
wait = True,
instance_type = 'ml.p3.2xlarge',
monitor=False,
name='muse-large-000003'
)
# We copied a few examples from the book depository dataset to try our endpoint on.
messages = json.dumps({'instances':[
"<NAME>'s classic introduction to the world of microlight flying has endeared itself to several generations of pilots.",
"BECAUSE NOT ALL KRAV MAGA IS THE SAME(R) This book is designed for krav maga trainees, security-conscious civilians, law enforcement officers, security professionals, and military personnel alike who wish to refine their essential krav maga combatives, improve their chances of surviving a hostile attack and prevail without serious injury. Combatives are the foundation of krav maga counter-attacks. These are the combatives of the original Israeli Krav Maga Association (Grandmaster Gidon). It is irrefutable that you need only learn a few core combatives to be an effective fighter. Simple is easy. Easy is effective. Effective is what is required to end a violent encounter quickly, decisively, and on your terms. This book stresses doing the right things and doing them in the right way. Right technique + Correct execution = Maximum Effect. Contents include Key strategies for achieving maximum combative effects Krav maga's 12 most effective combatives Developing power and balance Combatives for the upper and lower body Combative combinations and retzev (continuous combat motion) Combatives for takedowns and throws Combatives for armbars, leglocks, and chokes Whatever your martial arts or defensive tactics background or if you have no self-defense background at all, this book can add defensive combatives and combinations to your defensive repertoire. Our aim is to build a strong self-defense foundation through the ability to optimally counter-attack.",
"""-AWESOME FACTS ABOUT THE RUGBY WORLD CUP: I have intentionally selected a specific range of "Rugby World Cup" facts that I feel will not only help children to learn new information but more importantly, remember it. -FUN LEARNING TOOL FOR ALL AGES: This book is designed to capture the imagination of everyone through the use of "WoW" trivia, cool photos and memory recall quiz. -COOL & COLORFUL PICTURES: Each page contains a quality image relating to the subject in question. This helps the reader to match and recall the content. -SHORT QUIZ GAME - POSITIVE REINFORCEMENT: No matter what the score is, everyone's a WINNER! The purpose of the short quiz at the end is to help check understanding, to cement the information and to provide a positive conclusion, regardless of the outcome. Your search for the best "Rugby Union" book is finally over. When you purchase from me today, here are just some of the things you can look forward to..... Amazing and extraordinary "Rugby World Cup" facts. This kind of trivia seems to be one of the few things my memory can actually recall. I'm not sure if it's to do with the shock or the "WoW" factor but for some reason my brain seems to store at least some of it for a later date. A fun way of learning. I've always been a great believer in that whatever the subject, if a good teacher can inspire you and hold your attention, then you'll learn! Now I'm not a teacher but the system I've used in previous publications on Kindle seems to work well, particularly with children. A specific selection of those "WoW" facts combined with some pretty awesome pictures, if I say so myself! Words and images combined to stimulate the brain and absorb the reader using an interactive formula. At the end there is a short "True or False" quiz to check memory recall. Don't worry though, it's a bit of fun but at the same time, it helps to check understanding. Remember, "Everyone's a Winner!" Enjoy ......... Matt."""
]})
out = ezonsm.predictor.predict(messages.encode()).decode()
#x = np.array(out['output'])
# We can see below that the result was a list of lists, with each sublist containing 512 elements. Then we check that these elements are indeed values for the vector embedding.
[len(json.loads(out)['output'][x]) for x in range(len(json.loads(out)['output']))]
print(json.loads(out)['output'][0])
# Let's delete the model to save resources.
ezonsm.predictor.delete_endpoint()
# ## Deploying from the SageMaker SDK Model Object created by EZSMDeploy
# EZSMDeploy also gives us the SageMaker SDK Model object it creates to deploy the model. We can use that to deploy the model as well.
model = ezonsm.sagemakermodel
model_name = ezonsm.sagemakermodel.name
predictor = model.deploy(initial_instance_count=1, instance_type='ml.p3.2xlarge', endpoint_name=model_name)
messages = json.dumps({'instances':[
"<NAME>'s classic introduction to the world of microlight flying has endeared itself to several generations of pilots.",
"BECAUSE NOT ALL KRAV MAGA IS THE SAME(R) This book is designed for krav maga trainees, security-conscious civilians, law enforcement officers, security professionals, and military personnel alike who wish to refine their essential krav maga combatives, improve their chances of surviving a hostile attack and prevail without serious injury. Combatives are the foundation of krav maga counter-attacks. These are the combatives of the original Israeli Krav Maga Association (Grandmaster Gidon). It is irrefutable that you need only learn a few core combatives to be an effective fighter. Simple is easy. Easy is effective. Effective is what is required to end a violent encounter quickly, decisively, and on your terms. This book stresses doing the right things and doing them in the right way. Right technique + Correct execution = Maximum Effect. Contents include Key strategies for achieving maximum combative effects Krav maga's 12 most effective combatives Developing power and balance Combatives for the upper and lower body Combative combinations and retzev (continuous combat motion) Combatives for takedowns and throws Combatives for armbars, leglocks, and chokes Whatever your martial arts or defensive tactics background or if you have no self-defense background at all, this book can add defensive combatives and combinations to your defensive repertoire. Our aim is to build a strong self-defense foundation through the ability to optimally counter-attack.",
"""-AWESOME FACTS ABOUT THE RUGBY WORLD CUP: I have intentionally selected a specific range of "Rugby World Cup" facts that I feel will not only help children to learn new information but more importantly, remember it. -FUN LEARNING TOOL FOR ALL AGES: This book is designed to capture the imagination of everyone through the use of "WoW" trivia, cool photos and memory recall quiz. -COOL & COLORFUL PICTURES: Each page contains a quality image relating to the subject in question. This helps the reader to match and recall the content. -SHORT QUIZ GAME - POSITIVE REINFORCEMENT: No matter what the score is, everyone's a WINNER! The purpose of the short quiz at the end is to help check understanding, to cement the information and to provide a positive conclusion, regardless of the outcome. Your search for the best "Rugby Union" book is finally over. When you purchase from me today, here are just some of the things you can look forward to..... Amazing and extraordinary "Rugby World Cup" facts. This kind of trivia seems to be one of the few things my memory can actually recall. I'm not sure if it's to do with the shock or the "WoW" factor but for some reason my brain seems to store at least some of it for a later date. A fun way of learning. I've always been a great believer in that whatever the subject, if a good teacher can inspire you and hold your attention, then you'll learn! Now I'm not a teacher but the system I've used in previous publications on Kindle seems to work well, particularly with children. A specific selection of those "WoW" facts combined with some pretty awesome pictures, if I say so myself! Words and images combined to stimulate the brain and absorb the reader using an interactive formula. At the end there is a short "True or False" quiz to check memory recall. Don't worry though, it's a bit of fun but at the same time, it helps to check understanding. Remember, "Everyone's a Winner!" Enjoy ......... Matt."""
]})
# Invoke the deployed SageMaker endpoint with the encoded JSON payload.
out = predictor.predict(messages.encode()).decode()
#x = np.array(out['output'])
# Length of each returned embedding vector.
# NOTE(review): json.loads(out) is re-parsed several times here — parsing
# once into a local variable would avoid the redundant work.
[len(json.loads(out)['output'][x]) for x in range(len(json.loads(out)['output']))]
print(json.loads(out)['output'][0])
# We have the same results as the deployment through EZSMDeploy. That is good, but not perfect. If we check the logs, we see that we are still not leveraging GPU, so the P3 instance is not being used to its fullest. The message that shows the problem is this:
# ```
# tensorflow/stream_executor/platform/default/dso_loader.cc:55] Could not load dynamic library 'libcuda.so.1'; dlerror: libcuda.so.1: cannot open shared object file: No such file or directory
# tensorflow/stream_executor/cuda/cuda_driver.cc:313] failed call to cuInit: UNKNOWN ERROR (303)
# tensorflow/stream_executor/cuda/cuda_diagnostics.cc:169] retrieving CUDA diagnostic information for host: model.aws.local
# tensorflow/stream_executor/cuda/cuda_diagnostics.cc:176] hostname: model.aws.local
# tensorflow/stream_executor/cuda/cuda_diagnostics.cc:200] libcuda reported version is: Not found: was unable to find libcuda.so DSO loaded into this program
# ```
# That is because the image created by EZSMDeploy is not created with GPU support.
predictor.delete_endpoint()
# # Creating a Batch Transform
# Besides starting an endpoint and using it on request, we can also tell SageMaker to apply a batch transformation to an entire dataset. Let's get the latest processed data and use that.
# Find the most recent completed 'muse-dask-processing' job via the SageMaker API.
smclient = boto3.client('sagemaker')
latest_job = smclient.list_processing_jobs(
    CreationTimeBefore=datetime.now(),
    NameContains='muse-dask-processing',
    StatusEquals='Completed',
    SortBy='CreationTime',
    SortOrder='Descending',
    MaxResults=1
)['ProcessingJobSummaries'][0]['ProcessingJobName']
print(f"Latest processing job: {latest_job}")
# Resolve the S3 prefix of that job's 'processed-dataset' output channel.
job_description = smclient.describe_processing_job(ProcessingJobName=latest_job)
s3_processed_data = next(output['S3Output']['S3Uri'] for output in job_description['ProcessingOutputConfig']['Outputs'] if output['OutputName'] == 'processed-dataset')
print(f"Location of latest processed data: {s3_processed_data}")
# Now that we have the location of the latest processed dataset, let's feed it into the transformer. First, we need to create a [Transformer](https://sagemaker.readthedocs.io/en/stable/api/inference/transformer.html#sagemaker.transformer.Transformer) based on the model we used before (more info on batch transformation [here](https://sagemaker.readthedocs.io/en/stable/overview.html#sagemaker-batch-transform)).
#
# Since we know that our container is not correctly set up for using GPU, let's use a cheaper instance for this one and leverage some parallelism.
# Timestamped S3 prefix so repeated runs do not overwrite each other.
timestamp_prefix = strftime("%Y-%m-%d-%H-%M-%S", gmtime())
s3_inference_output = f"s3://{bucket}/sagemaker/muse-inference/output/{timestamp_prefix}"
print(f"Inference results will be saved at {s3_inference_output}")
# CPU instances are used because the container lacks GPU support (see the
# log excerpt above); instance_count=5 parallelises across machines instead.
muse_transformer = model.transformer(
    instance_count=5,
    instance_type='ml.m5.xlarge',
    output_path=s3_inference_output,
    accept="application/json", # Needs to be specified when using output filter
    assemble_with="Line", # Needs to be specified when using output filter
    max_concurrent_transforms=1)
# Run the batch-transform job and block until it finishes, streaming logs.
muse_transformer.transform(
    data=s3_processed_data,
    data_type='S3Prefix',
    content_type="text/csv", # Needs to be specified to use input filter
    compression_type=None,
    split_type="Line", # Needs to be specified to use input filter
    job_name=f"muse-inference-transform-{timestamp_prefix}",
    input_filter="$[2]", # Take only Field #2 of the input (the description)
    output_filter="$.output", # Return the "output" field of the returned object
    join_source=None,
    wait=True,
    logs=True
)
| notebooks/3-sm-job/byoc/byoc_tf_2.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# language: python
# name: python3
# ---
# +
# -*- coding: utf-8 -*-
# --------------------------------------------------
#
# CRF10.ipynb
#
# Token features:
# (a) token (surface form)
# (b) is_first : is token at the beginning of the sentence?
# (c) is_last : is token at the end of the sentence?
# (d) is_capitalized : does token start with a capital letter?
# (e) is_all_capitalized : is all letters of the token capitalized?
# (f) is_capitals_inside : is there any capitalized letter inside the token?
# (g) is_numeric : is token numeric?
# (h) is_numeric_inside : is numeric characters inside the token?
# (i) is_alphanumeric : is token alphanumeric?
# (j) prefix-1 : first letter of the token
# (k) suffix-1 : last letter of the token
# (l) prefix-2 : first two letters of the token
# (m) suffix-2 : last two letters of the token
# (n) prefix-3 : first three letters of the token
# (o) suffix-3 : last three letters of the token
# (p) prefix-4 : first four letters of the token
# (q) suffix-4 : last four letters of the token
# (r) next-token : following token
# (s) prev-token : preceding token
# (t) 2-next-token : second following token
# (u) 2-prev-token : second preceding token
# (v) pos : part-of-speech tag
# (w) next-pos : part-of-speech tag of following word
# (x) prev-pos : part-of-speech tag of preceding word
#
# Written by cetinsamet -*- <EMAIL>
# May, 2019
# --------------------------------------------------
# -
from sklearn.model_selection import RandomizedSearchCV
from seqeval.metrics import classification_report
from sklearn.metrics import make_scorer
from sklearn_crfsuite import metrics
from sklearn_crfsuite import CRF
from tqdm import tqdm
import jpype as jp
import pickle
import scipy
# +
# Path to the Zemberek (Turkish NLP) fat jar used for morphological analysis.
ZEMBEREK_PATH = 'bin/zemberek-full.jar'
# Start the JVM with Zemberek on the classpath (jpype bridges Python <-> Java).
jp.startJVM(jp.getDefaultJVMPath(), '-ea', '-Djava.class.path=%s' % (ZEMBEREK_PATH))
# -
# Morphological analyzer used below to obtain POS tags for each token.
TurkishMorphology = jp.JClass('zemberek.morphology.TurkishMorphology')
morphology = TurkishMorphology.createWithDefaults()
def readFile(filepath):
    """Read a CoNLL-style file and group its lines into sentences.

    Each line holds four tab-separated fields; a line whose first field is
    '<S>' marks the start of a new sentence.  The accumulated sentence is
    flushed whenever a new '<S>' marker appears, so the first entry of the
    result is an empty list and tokens after the last marker are dropped —
    assumed to match the data format; verify against the corpus files.
    Returns a list of sentences, each a list of raw (stripped) lines.
    """
    sentences = []
    current = []
    with open(filepath, 'r') as handle:
        for raw in handle:
            stripped = raw.strip()
            first_field, _, _, _ = stripped.split('\t')
            if first_field == '<S>':
                sentences.append(current)
                current = []
            else:
                current.append(stripped)
    return sentences
# Load the train/validation/test splits (lists of sentences of raw lines).
trainText = readFile('data/train.txt')
validText = readFile('data/valid.txt')
testText = readFile('data/test.txt')
def getFeature(token, token_index, sentence, pos, next_pos, prev_pos):
    """Build the CRF feature dictionary for one token of a sentence.

    token       -- surface form of the word (assumed non-empty)
    token_index -- position of the token inside *sentence*
    sentence    -- the full sentence as a list of tokens
    pos / next_pos / prev_pos -- POS tags of this, the next and the
    previous token (empty string when there is no such neighbour)
    """
    last_index = len(sentence) - 1
    features = {
        'token': token,
        'is_first': token_index == 0,
        'is_last': token_index == last_index,
        # upper()-based check, so digits/punctuation also count as capitalized
        'is_capitalized': token[0].upper() == token[0],
        'is_all_capitalized': token.upper() == token,
        'is_capitals_inside': token[1:].lower() != token[1:],
        'is_numeric': token.isdigit(),
        'is_numeric_inside': any(ch.isdigit() for ch in token),
        'is_alphanumeric': token.isalnum(),
        'pos': pos,
        'next-pos': next_pos,
        'prev-pos': prev_pos,
    }
    # Character prefixes/suffixes; empty when the token is shorter than the size.
    features['prefix-1'] = token[0]
    features['suffix-1'] = token[-1]
    for size in (2, 3, 4):
        features['prefix-%d' % size] = '' if len(token) < size else token[:size]
        features['suffix-%d' % size] = '' if len(token) < size else token[-size:]
    # Context tokens up to two positions away (empty at sentence boundaries).
    features['prev-token'] = '' if token_index == 0 else sentence[token_index - 1]
    features['next-token'] = '' if token_index == last_index else sentence[token_index + 1]
    features['2-prev-token'] = '' if token_index <= 1 else sentence[token_index - 2]
    features['2-next-token'] = '' if token_index >= last_index - 1 else sentence[token_index + 2]
    return features
# +
trainFeatures = []
trainLabels = []
# Extract per-token feature dicts and gold labels for every training sentence.
for raw_sentence in tqdm(trainText):
    words = [tok.split('\t')[0] for tok in raw_sentence]
    labels = [tok.split('\t')[-1] for tok in raw_sentence]
    # One-shot morphological disambiguation over the whole sentence.
    analysis = morphology.analyzeAndDisambiguate(' '.join(words)).bestAnalysis()
    last = len(words) - 1
    features = []
    for i, word in enumerate(words):
        pos = analysis[i].getPos().shortForm
        next_pos = analysis[i + 1].getPos().shortForm if i < last else ''
        prev_pos = analysis[i - 1].getPos().shortForm if i > 0 else ''
        features.append(getFeature(word, i, words, pos, next_pos, prev_pos))
    trainFeatures.append(features)
    trainLabels.append(labels)
# +
validFeatures = []
validLabels = []
# Extract per-token feature dicts and gold labels for every validation sentence.
for raw_sentence in tqdm(validText):
    words = [tok.split('\t')[0] for tok in raw_sentence]
    labels = [tok.split('\t')[-1] for tok in raw_sentence]
    # One-shot morphological disambiguation over the whole sentence.
    analysis = morphology.analyzeAndDisambiguate(' '.join(words)).bestAnalysis()
    last = len(words) - 1
    features = []
    for i, word in enumerate(words):
        pos = analysis[i].getPos().shortForm
        next_pos = analysis[i + 1].getPos().shortForm if i < last else ''
        prev_pos = analysis[i - 1].getPos().shortForm if i > 0 else ''
        features.append(getFeature(word, i, words, pos, next_pos, prev_pos))
    validFeatures.append(features)
    validLabels.append(labels)
# +
testFeatures = []
testLabels = []
# Extract per-token feature dicts and gold labels for every test sentence.
for raw_sentence in tqdm(testText):
    words = [tok.split('\t')[0] for tok in raw_sentence]
    labels = [tok.split('\t')[-1] for tok in raw_sentence]
    # One-shot morphological disambiguation over the whole sentence.
    analysis = morphology.analyzeAndDisambiguate(' '.join(words)).bestAnalysis()
    last = len(words) - 1
    features = []
    for i, word in enumerate(words):
        pos = analysis[i].getPos().shortForm
        next_pos = analysis[i + 1].getPos().shortForm if i < last else ''
        prev_pos = analysis[i - 1].getPos().shortForm if i > 0 else ''
        features.append(getFeature(word, i, words, pos, next_pos, prev_pos))
    testFeatures.append(features)
    testLabels.append(labels)
# -
# Merge train and validation sets; hyper-parameters are tuned with CV below.
trainvalFeatures = trainFeatures + validFeatures
trainvalLabels = trainLabels + validLabels
# +
# define fixed parameters and parameters to search
crf = CRF( algorithm='lbfgs',
    max_iterations=100,
    all_possible_transitions=True,
    verbose=True)
# L1/L2 regularisation strengths sampled from exponential distributions.
params_space = {'c1': scipy.stats.expon(scale=0.5),
    'c2': scipy.stats.expon(scale=0.05)}
# use the same metric for evaluation
# NOTE(review): weighted F1 over all labels includes the dominant 'O' tag,
# which inflates the score; consider restricting to the entity labels.
f1_scorer = make_scorer(metrics.flat_f1_score,
    average='weighted')
# search: 30 random draws x 3-fold CV over the merged train+valid data
rs = RandomizedSearchCV(crf, params_space,
    cv=3,
    verbose=1,
    n_jobs=-1,
    n_iter=30,
    random_state=123,
    scoring=f1_scorer)
rs.fit(trainvalFeatures, trainvalLabels)
# -
# Report the best hyper-parameters found by the randomized search.
print('best params:', rs.best_params_)
print('best CV score:', rs.best_score_)
print('model size: {:0.2f}M'.format(rs.best_estimator_.size_ / 1000000))
# Keep the refitted best estimator as the working model.
crf = rs.best_estimator_
crf
# +
#crf.fit(trainvalFeatures, trainvalLabels)
# -
# SAVE CONDITIONAL RANDOM FIELDS MODEL
with open('model/crf10.pickle', 'wb') as outfile:
    pickle.dump(crf, outfile, pickle.HIGHEST_PROTOCOL)
print("model is saved.")
# LOAD CONDITIONAL RANDOM FIELDS MODEL (round-trip sanity check)
with open('model/crf10.pickle', 'rb') as infile:
    crf = pickle.load(infile)
# +
# Evaluation on the data the model was tuned on (optimistic estimate).
trainvalPredLabels = crf.predict(trainvalFeatures)
print("### TRAINVAL CLASSIFICATION REPORT ###\n")
print(classification_report(trainvalLabels, trainvalPredLabels))
# +
# Held-out test-set evaluation.
testPredLabels = crf.predict(testFeatures)
print("### TEST CLASSIFICATION REPORT ###\n")
print(classification_report(testLabels, testPredLabels))
# -
# Shutting down the JVM
jp.shutdownJVM()
| CRF10.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# language: python
# name: python3
# ---
# +
# dependencies
import os,keras
import cv2 as cv
import pandas as pd
import matplotlib.pyplot as plt
import numpy as np
import tensorflow as tf
# -
# !pip install opencv-contrib-python
# Selective Search lives in opencv-contrib (ximgproc), not in base OpenCV.
cv.__version__
ss = cv.ximgproc.segmentation.createSelectiveSearchSegmentation()
# ### Semantic Segmentation vs instance segmentation
#
# Semantic Segmentation is the process of assigning a label to every pixel in the image. This is in stark contrast to classification, where a single label is assigned to the entire picture. Semantic segmentation treats multiple objects of the same class as a single entity. On the other hand, instance segmentation treats multiple objects of the same class as distinct individual objects (or instances). Typically, instance segmentation is harder than semantic segmentation.
#
# https://www.topbots.com/wp-content/uploads/2019/05/comparison_800px_web.jpg
| R-CNN/jupyter 1/Untitled.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# language: python
# name: python3
# ---
# This example shows how to use viflow for fluid-structure interaction. The airfoil as the famous NACA0012, and we assume that it somehow behaves like a beam that is fixed in the front part up to .2c.
# To to this, we will not rely on viiflow to solve the aerodynamic problem, but just use its computed gradient and residual to couple those with our structure problem for a joint Newton step.
#
# This necessitates:
#
# * a structure model
# * the means to give the structure model the surface pressure based on the aerodynamic calculation
# * a coupling Newton loop
#
# This is why this notebook is rather long.
# +
# %matplotlib inline
# %config InlineBackend.figure_format = 'svg'
import viiflow as vf
import viiflowtools.vf_tools as vft
import viiflowtools.vf_plots as vfp
import numpy as np
import matplotlib
import matplotlib.pyplot as plt
# Settings
RE = 1e6       # Reynolds number
ncrit = 9.0    # e^N transition criterion
Mach = 0.0    # incompressible flow
alpha = 8.0    # angle of attack [deg]
# Read Airfoil Data and repanel it to 200 points
NACA0012 = vft.repanel(vft.read_selig("NACA0012STE.dat"),200)
# -
# ## Structural Model
# Let $s \in [0,L]$ be the variable along the length of the beam and $q$ be the load on this beam at these elements.
# The simple beam fixed at the s(0) is defined by
# $$ \frac{d^2}{ds^2} EI \frac{d^2}{ds^2} y = q $$
# $$ y(0) = 0, \frac{d}{ds} y(0) = 0 $$
# $$ \frac{d^2}{ds^2} y(L) = 0, \frac{d^3}{ds^3} y(L) = 0 $$
#
# To keep things sane, we will use $M = - EI \frac{d^2}{ds^2} y$ as an additional variable.
#
# $$ - \frac{d^2}{ds^2} M = q, M(L) = 0, \frac{d}{ds} M(L) = 0 \ (I) $$
# $$ - EI \frac{d^2}{ds^2} y = M, \ y(0) = 0, \frac{d}{ds} y(0) = 0 \ (II) $$
#
# Since we use the foil coordinates from the top, the actual free end is at $s=0$, while the fixed end is at the end of $s$.
#
# The load $q$ is due to the aerodynamic pressure difference between top and bottom side. From the external velocities $u_{top}$ and $u_{bot}$ on the airfoil sides it is calculated by $q=u_{top}^2-u_{bot}^2$.
#
# +
# Define Structure Model
def beam(s,q):
# s is the variable along the length of the beam
# q is the load on this beam at these elements
EI = 1 # Random scale. Too low: So much movement that you see transient wiggling. Too high: Very little change in geometry.
N = len(s) # Number of elements
# Second order differential operator
# d²/ds²
D = np.zeros((N,N))
for k in range(1,N-1):
dm = s[k]-s[k-1]
dp = s[k+1]-s[k]
D[k,k-1] = 2/dm/(dm+dp)
D[k,k] = -2/dp/(dm+dp)-2/dm/(dm+dp)
D[k,k+1] = 2/dp/(dm+dp)
# Operator (I)
O1 = -D
O1[0,0] = 1
O1[-1,[0, 1]] = [-1,1]/(s[1]-s[0])
# Operator (II)
O2 = -np.multiply(EI,D)
O2[0,N-1] = 1
O2[-1,[N-2, N-1]] = [-1,1]/(s[N-1]-s[N-2])
# Solve for displacement
E0 = np.eye(N)
E0[0,0] = 0
E0[-1,-1] = 0
M = np.linalg.solve(O1,np.matmul(E0,q))
dMdq = np.linalg.solve(O1,E0)
y = np.linalg.solve(O2,np.matmul(E0,M))
dydq = np.linalg.solve(O2,np.matmul(E0,dMdq))
return [y,dydq]
def af_root(p):
    """Return the panel index of the airfoil nose (leading edge).

    The panel x-coordinates run from the trailing edge along one side to
    the nose and back along the other side, so the nose is the first index
    where x starts increasing again.  Returns 0 if x never increases.
    """
    xs = p.foils[0].X
    for idx in range(1, p.foils[0].N):
        if xs[0, idx] > xs[0, idx - 1]:
            return idx
    return 0
# How to calculate pressure difference on this beam based on the aerodynamics
def q_beam(SB, p, dgam):
    """Interpolate the top/bottom surface velocities onto the beam nodes SB
    and return [q, dq]: the load q = u_top**2 - u_bot**2 and its gradient
    w.r.t. the variables behind *dgam* (dq is None when dgam is None)."""
    iroot = af_root(p)
    N = p.foils[0].N
    # interpolate velocities to beam SB
    # np.interp needs increasing sample points, hence the sign flips on the
    # top side where x decreases from the trailing edge to the nose.
    utop = np.interp(-SB,-p.foils[0].X[0,0:iroot],np.asarray(p.gamma_viscid[0:iroot]).flatten())
    ubot = np.interp(SB,p.foils[0].X[0,iroot::],np.asarray(p.gamma_viscid[iroot:N]).flatten())
    # Pressure difference
    q = utop**2-ubot**2
    # Gradient. Only true for linear interpolation!
    dq = None
    if not dgam is None:
        dq = np.zeros((q.shape[0],dgam.shape[1]))
        # dq = dq_utop*dutop + dq_ubot*dubot
        # = 2*utop*dutop + ...
        # NOTE(review): the top side below uses indices 0:iroot-1 while utop
        # above uses 0:iroot — this asymmetry looks like an off-by-one; confirm.
        for i in range(dgam.shape[1]):
            dq[:,i]+= 2*utop*np.interp(-SB,-p.foils[0].X[0,0:iroot-1],np.asarray(dgam[0:iroot-1,i]).flatten())
            dq[:,i]+= 2*ubot*np.interp(SB,p.foils[0].X[0,iroot::],np.asarray(dgam[iroot:N,i]).flatten())
    return [q,dq]
# -
# ## Aerodynamic problem
# First, we solve the aerodynamic problem without the structural part. The solution is used as a starting point for the subsequent calculations.
# +
# Setup for viiflow
s = vf.setup(Re=RE,Ma=Mach,Ncrit=ncrit,Alpha=alpha)
# Internal iterations
#s.VirtualGradients = True # You need to set this for FSI problems.
s.Gradients = True # You need to set this for FSI problems.
# If iterate_wakes is False, the wake does not change when moving which may lead to better convergence.
# If True, the wake changes. Because the gradient does not take movement of nodes into account,
# this may lead to poorer convergence.
s.IterateWakes = True
# Set-up and initialize based on inviscid panel solution
[p,bl,x] = vf.init([NACA0012],s)
res = None
grad = None
# Solve aerodynamic problem (rigid airfoil) as the starting point for FSI.
[x,_,_,_,_] = vf.iter(x,bl,p,s,None,None)
# Keep the rigid-airfoil pressure distribution for the comparison plot later.
cp0 = p.cp.copy();
# +
# FSI Setup
# This is our beam, the x values of the top side from TE until 0.2
# (the front part up to 0.2c is considered fixed, per the intro above).
ii = 0 # Index where the beam starts
for k in range(p.foils[0].N):
    if p.foils[0].X[0,k]<0.2:
        ii = k
        break
SB = p.foils[0].X[0,0:ii]
ND = len(SB)
# How do we get the complete foil displacement from xtop?
def vdfull(p, xtop):
    """Expand the top-side displacements *xtop* to a displacement for every
    panel node, mirroring them (negated) onto the bottom side by
    interpolation in x.  Returns [vd, dvd_xtop], where dvd_xtop is the
    Jacobian of the full displacement w.r.t. xtop."""
    n_top = xtop.shape[0]
    n_panel = p.foils[0].N
    nose = af_root(p)
    x_coords = p.foils[0].X[0, :]
    # Displacement vector: top side carries xtop, bottom side the mirror image.
    vd = np.zeros(n_panel)
    vd[0:n_top] = xtop
    vd[nose::] = -np.interp(-x_coords[nose::], -x_coords[0:nose], vd[0:nose])
    # Jacobian d(vd)/d(xtop): identity on the top side, interpolation below.
    # NOTE(review): the interpolation grid here is x[0:n_top] while above it
    # is x[0:nose] — confirm this asymmetry is intentional.
    jac = np.zeros((n_panel, n_top))
    identity = np.eye(n_top)
    jac[0:n_top, 0:n_top] = identity
    for col in range(n_top):
        jac[nose::, col] = -np.interp(-x_coords[nose::], -x_coords[0:n_top], identity[:, col])
    return [vd, jac]
# -
# ## Joint Newton Iteration
# The next section combines viiflow with the above structural model to solve for stationary conditions. That is, due to the pressure difference $q$ the airfoil is deformed, which changes its pressure variation and hence $q$. In the end, we need a structural displacement $y_{var}$ that is equivalent to the deformation due to the loads due to this deformation $y_q(q(y_{var}))=y_{var}$.
# +
# Newton Iteration: jointly solves the aerodynamic residual and the
# structural interaction condition y(q(x)) - xtop = 0.
tol = 1e-5
# Additional unknowns: The displacement on the top side
# On the bottom, we have the negative displacement at the same x coordinates
xtop = np.zeros(ii) # Initialize with 0
xj = np.r_[x,xtop] # Joined aerodynamic and structural variables
NAERO = x.shape[0]
NTOP = ii
s.Itermax = 0 # Disable internal iterations. Important!
s.Silent = True
vres_s = [] # Vector of structural residuals
vres_a = [] # Vector of aerodynamic residuals
for k in range(50):
    res = None
    grad = None
    # Current virtual displacements
    [vd, dvd_xj] = vdfull(p,xj[NAERO::])
    # Solve Aerodynamics (single residual/gradient evaluation, Itermax=0)
    [xj[0:NAERO],flag,res,grad,grad_vii] = vf.iter(xj[0:NAERO],bl,p,s,res,grad,[vd])
    # Gradient of aerodynamic problem w.r.t. xj
    dres_xj = np.c_[grad,np.matmul(grad_vii.partial.res_vd,dvd_xj)]
    # Get structural loads and their sensitivities
    [q,dq_dxaero] = q_beam(SB,p,grad_vii.partial.gam_x)
    [_,dq_dvd] = q_beam(SB,p,grad_vii.partial.gam_vd)
    # Solve structural part
    [y,dydq] = beam(SB,q)
    # Gradient of structural problem w.r.t. xj (chain rule through the load)
    dy_xaero = np.matmul(dydq,dq_dxaero)
    dy_xtop = np.matmul(dydq,np.matmul(dq_dvd,dvd_xj))
    # Interaction goal: y - xtop = 0
    # Aerodynamic goal: res = 0
    res_s = y-xj[NAERO::]
    F = np.r_[res_s,res]
    DF = np.r_[np.c_[dy_xaero,dy_xtop-np.eye(NTOP)],dres_xj]
    # Newton Step
    dxj = -np.linalg.solve(DF,F)
    # Some Newton step-size restriction (limit relative change to ~10%)
    # NOTE(review): the inner loops reuse the name `k`, shadowing the outer
    # iteration counter; harmless since the outer `for` reassigns k each
    # pass, but a distinct name would be clearer.
    lam = 1.0
    for k in range(NAERO-3):
        if abs(dxj[k])>0:
            lam = min(lam,.1/(abs(dxj[k])/abs(xj[k])))
    for k in range(NAERO,NAERO+NTOP):
        if abs(dxj[k])>0:
            lam = min(lam,.1/(abs(dxj[k])/max(abs(xj[k]),0.02)))
    xj += lam*dxj
    vres_s.append(np.sqrt(np.sum(res_s*res_s)))
    vres_a.append(np.sqrt(np.sum(res*res)))
    # Stop when either the residual or the step size is below tolerance.
    if np.sqrt(np.sum(F*F))<tol or np.sqrt(np.sum(dxj*dxj))<tol:
        print("Converged")
        break
# -
matplotlib.rcParams['figure.figsize'] = [12, 6]
# Plot current geometry
fig,ax=plt.subplots(1,1)
# Pressure curves are scaled (-0.1*cp) and offset (+0.2) to sit above the foil.
ax.plot(p.foils[0].X[0,:],-.1*cp0+.2,'-',color='tab:orange')
ax.plot(p.foils[0].X[0,:],-.1*p.cp+.2,'-',color='tab:red')
vfp.plot_geometry(ax,p,bl)
# Plot pressure in same plot
ax.legend(('Initial pressure','Final pressure','Initial Geometry','Deformed Geometry'))
ax.set_yticks([])
# Plot Residuals with iterations
fig, ax = plt.subplots()
ax.semilogy(vres_s)
ax.semilogy(vres_a)
ax.legend(('Structural Residual','Aerodynamic Residual'))
ax.set_xlabel('Iteration');
| NACA0012 Fluid-Structure Interaction/NACA0012-Fluid-Structure-Interaction.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# language: python
# name: python3
# ---
# +
# Datset source
# https://archive.ics.uci.edu/ml/datasets/Auto+MPG
# +
# Problem Statement: Fuel consumption of cars based on various factors
# +
# Python ≥3.5 is required
import sys
assert sys.version_info >= (3, 5)
# Scikit-Learn ≥0.20 is required
# NOTE(review): this is a lexicographic string comparison — it misorders
# versions like "0.100"; comparing parsed version tuples would be robust.
import sklearn
assert sklearn.__version__ >= "0.20"
# Common imports
import numpy as np
import os
# To plot pretty figures
# %matplotlib inline
import matplotlib as mpl
import matplotlib.pyplot as plt
mpl.rc('axes', labelsize=14)
mpl.rc('xtick', labelsize=12)
mpl.rc('ytick', labelsize=12)
# Ignore useless warnings (see SciPy issue #5998)
import warnings
warnings.filterwarnings(action="ignore", message="^internal gelsd")
# +
# Read the dataset
import pandas as pd
pd.options.display.max_columns = 1000
# The raw file is whitespace-separated; '?' marks missing horsepower values.
ampg_df = pd.read_csv('auto-mpg.data', sep='\s+', header=None, na_values='?',
    names=['mpg','cylinders','displacement','horsepower','weight','acceleration','model_year','origin','car_name'],)
# dtype={'horsepower':np.float64})
print(ampg_df.shape)
ampg_df.head()
# +
# Check for NAN values in the entire dataframe
ampg_df.isnull().sum()
# +
# Remove NAN values from the dataframe
ampg_df.dropna(inplace=True)
print(ampg_df.shape)
# +
# Split the dataframe into features and labels
# 'car_name' is a free-text identifier, so it is excluded from the features.
X = ampg_df.drop(['mpg', 'car_name'], axis=1).values
y = ampg_df.loc[:, 'mpg'].values
print("X shape: ", X.shape, "y shape: ", y.shape)
print("Sample X values: ", X[:5], "\n", "Sample y values: ", y[:5])
# +
# Split the dataset into train and test sets (5% held out for testing)
from sklearn.model_selection import train_test_split
X_train, X_test, y_train, y_test = train_test_split(X, y, test_size=0.05, random_state=2)
print(" X_train shape: ", X_train.shape,"\n", "y_train shape: ", y_train.shape,"\n",
    "X_test shape: ", X_test.shape,"\n", "y_test shape: ", y_test.shape,"\n")
# +
# Model 1
# Sklearn DecisionTreeRegressor model with max_depth 10
from sklearn.tree import DecisionTreeRegressor
dt_reg = DecisionTreeRegressor(max_depth=10, random_state=2)
dt_reg.fit(X_train, y_train)
# +
# R^2 values for train and test sets
# (a large train/test gap here would indicate overfitting of the deep tree)
print("Train set R^2 score: ", dt_reg.score(X_train, y_train))
print("Test set R^2 score: ", dt_reg.score(X_test, y_test))
# +
# Mean Squared Errors of train and test sets
from sklearn.metrics import mean_squared_error
print("Train set mse: ", mean_squared_error(y_train, dt_reg.predict(X_train)))
print("Test set mse: ", mean_squared_error(y_test, dt_reg.predict(X_test)))
# +
# Mean Absolute Errors of train and test sets
from sklearn.metrics import mean_absolute_error
print("Train set mae: ", mean_absolute_error(y_train, dt_reg.predict(X_train)))
print("Test set mae: ", mean_absolute_error(y_test, dt_reg.predict(X_test)))
# +
# The Decision Tree Regressor with max depth 10 achieves a good R^2 score, complex models will be developed to achieve higher R^2 score on test set
# +
# Model 2
# Sklearn DecisionTreeRegressor model with RandomizedSearchCV
import warnings
warnings.filterwarnings("ignore", category=UserWarning)
from sklearn.tree import DecisionTreeRegressor
from sklearn.model_selection import RandomizedSearchCV
# Hyper-parameter search space.
# FIX: min_samples_split previously started at 1, but scikit-learn requires
# an integer value >= 2 — candidates drawn with 1 fail to fit and are scored
# NaN, silently wasting part of the n_iter=1000 search budget.
param_distributions = {'max_depth': list(range(3, 10)), 'min_samples_split': list(range(2, 10)), 'max_leaf_nodes': list(range(2, 200))}
# 1000 random draws, 3-fold CV, parallelised over 10 workers.
dt_reg_rnd_search_cv = RandomizedSearchCV(DecisionTreeRegressor(random_state=2), param_distributions, n_iter=1000, n_jobs=10, verbose=5, cv=3, random_state=2)
dt_reg_rnd_search_cv.fit(X_train, y_train)
# -
# Inspect the best estimator found by the randomized search.
dt_reg_rnd_search_cv.best_estimator_
# +
# R^2 values for train and test sets
print("Train set R^2 score: ", dt_reg_rnd_search_cv.best_estimator_.score(X_train, y_train))
print("Test set R^2 score: ", dt_reg_rnd_search_cv.best_estimator_.score(X_test, y_test))
# +
# Mean Squared Errors of train and test sets
from sklearn.metrics import mean_squared_error
print("Train set mse: ", mean_squared_error(y_train, dt_reg_rnd_search_cv.best_estimator_.predict(X_train)))
print("Test set mse: ", mean_squared_error(y_test, dt_reg_rnd_search_cv.best_estimator_.predict(X_test)))
# +
# Mean Absolute Errors of train and test sets
from sklearn.metrics import mean_absolute_error
print("Train set mae: ", mean_absolute_error(y_train, dt_reg_rnd_search_cv.best_estimator_.predict(X_train)))
print("Test set mae: ", mean_absolute_error(y_test, dt_reg_rnd_search_cv.best_estimator_.predict(X_test)))
| Auto_MPG_Regression/AMPG_test_3.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# language: python
# name: python3
# ---
# ___
#
# <a href='http://www.pieriandata.com'><img src='../Pierian_Data_Logo.png'/></a>
# ___
# <center><em>Copyright <NAME></em></center>
# <center><em>For more information, visit us at <a href='http://www.pieriandata.com'>www.pieriandata.com</a></em></center>
# # VAR(p)
# ## Vector Autoregression
# In our previous SARIMAX example, the forecast variable $y_t$ was influenced by the exogenous predictor variable, but not vice versa. That is, the occurrence of a holiday affected restaurant patronage but not the other way around.
#
# However, there are some cases where variables affect each other. <a href='https://otexts.com/fpp2/VAR.html'>Forecasting: Principles and Practice</a> describes a case where changes in personal consumption expenditures $C_t$ were forecast based on changes in personal disposable income $I_t$.
# > However, in this case a bi-directional relationship may be more suitable: an increase in $I_t$ will lead to an increase in $C_t$ and vice versa.<br>An example of such a situation occurred in Australia during the Global Financial Crisis of 2008–2009. The Australian government issued stimulus packages that included cash payments in December 2008, just in time for Christmas spending. As a result, retailers reported strong sales and the economy was stimulated. Consequently, incomes increased.
#
# Aside from investigating multivariate time series, vector autoregression is used for
# * <a href='https://www.statsmodels.org/devel/vector_ar.html#impulse-response-analysis'>Impulse Response Analysis</a> which involves the response of one variable to a sudden but temporary change in another variable
# * <a href='https://www.statsmodels.org/devel/vector_ar.html#forecast-error-variance-decomposition-fevd'>Forecast Error Variance Decomposition (FEVD)</a> where the proportion of the forecast variance of one variable is attributed to the effect of other variables
# * <a href='https://www.statsmodels.org/devel/vector_ar.html#dynamic-vector-autoregressions'>Dynamic Vector Autoregressions</a> used for estimating a moving-window regression for the purposes of making forecasts throughout the data sample
#
# ### Formulation
# We've seen that an autoregression AR(p) model is described by the following:
#
# $y_{t} = c + \phi_{1}y_{t-1} + \phi_{2}y_{t-2} + \dots + \phi_{p}y_{t-p} + \varepsilon_{t}$
#
# where $c$ is a constant, $\phi_{1}$ and $\phi_{2}$ are lag coefficients up to order $p$, and $\varepsilon_{t}$ is white noise.
# A $K$-dimensional VAR model of order $p$, denoted <strong>VAR(p)</strong>, considers each variable $y_K$ in the system.<br>
#
# For example, The system of equations for a 2-dimensional VAR(1) model is:
#
# $y_{1,t} = c_1 + \phi_{11,1}y_{1,t-1} + \phi_{12,1}y_{2,t-1} + \varepsilon_{1,t}$<br>
# $y_{2,t} = c_2 + \phi_{21,1}y_{1,t-1} + \phi_{22,1}y_{2,t-1} + \varepsilon_{2,t}$
#
# where the coefficient $\phi_{ii,l}$ captures the influence of the $l$th lag of variable $y_i$ on itself,<br>
# the coefficient $\phi_{ij,l}$ captures the influence of the $l$th lag of variable $y_j$ on $y_i$,<br>
# and $\varepsilon_{1,t}$ and $\varepsilon_{2,t}$ are white noise processes that may be correlated.<br>
#
# Carrying this further, the system of equations for a 2-dimensional VAR(3) model is:
#
# $y_{1,t} = c_1 + \phi_{11,1}y_{1,t-1} + \phi_{12,1}y_{2,t-1} + \phi_{11,2}y_{1,t-2} + \phi_{12,2}y_{2,t-2} + \phi_{11,3}y_{1,t-3} + \phi_{12,3}y_{2,t-3} + \varepsilon_{1,t}$<br>
# $y_{2,t} = c_2 + \phi_{21,1}y_{1,t-1} + \phi_{22,1}y_{2,t-1} + \phi_{21,2}y_{1,t-2} + \phi_{22,2}y_{2,t-2} + \phi_{21,3}y_{1,t-3} + \phi_{22,3}y_{2,t-3} + \varepsilon_{2,t}$<br><br>
#
# and the system of equations for a 3-dimensional VAR(2) model is:
#
# $y_{1,t} = c_1 + \phi_{11,1}y_{1,t-1} + \phi_{12,1}y_{2,t-1} + \phi_{13,1}y_{3,t-1} + \phi_{11,2}y_{1,t-2} + \phi_{12,2}y_{2,t-2} + \phi_{13,2}y_{3,t-2} + \varepsilon_{1,t}$<br>
# $y_{2,t} = c_2 + \phi_{21,1}y_{1,t-1} + \phi_{22,1}y_{2,t-1} + \phi_{23,1}y_{3,t-1} + \phi_{21,2}y_{1,t-2} + \phi_{22,2}y_{2,t-2} + \phi_{23,2}y_{3,t-2} + \varepsilon_{2,t}$<br>
# $y_{3,t} = c_3 + \phi_{31,1}y_{1,t-1} + \phi_{32,1}y_{2,t-1} + \phi_{33,1}y_{3,t-1} + \phi_{31,2}y_{1,t-2} + \phi_{32,2}y_{2,t-2} + \phi_{33,2}y_{3,t-2} + \varepsilon_{3,t}$<br><br>
#
# The general steps involved in building a VAR model are:
# * Examine the data
# * Visualize the data
# * Test for stationarity
# * If necessary, transform the data to make it stationary
# * Select the appropriate order <em>p</em>
# * Instantiate the model and fit it to a training set
# * If necessary, invert the earlier transformation
# * Evaluate model predictions against a known test set
# * Forecast the future
#
# Recall that to fit a SARIMAX model we passed one field of data as our <em>endog</em> variable, and another for <em>exog</em>. With VAR, both fields will be passed in as <em>endog</em>.
# <div class="alert alert-info"><h3>Related Functions:</h3>
# <tt><strong>
# <a href='https://www.statsmodels.org/stable/generated/statsmodels.tsa.vector_ar.var_model.VAR.html'>vector_ar.var_model.VAR</a></strong><font color=black>(endog[, exog, …])</font> Fit VAR(p) process and do lag order selection<br>
# <strong><a href='https://www.statsmodels.org/stable/generated/statsmodels.tsa.vector_ar.var_model.VARResults.html'>vector_ar.var_model.VARResults</a></strong><font color=black>(endog, …[, …])</font> Estimate VAR(p) process with fixed number of lags<br>
# <strong><a href='https://www.statsmodels.org/stable/generated/statsmodels.tsa.vector_ar.dynamic.DynamicVAR.html'>vector_ar.dynamic.DynamicVAR</a></strong><font color=black>(data[, …])</font> Estimates time-varying vector autoregression (VAR(p)) using equation-by-equation least squares</tt>
#
# <h3>For Further Reading:</h3>
# <strong>
# <a href='https://www.statsmodels.org/stable/vector_ar.html'>Statsmodels Tutorial:</a></strong> <font color=black>Vector Autoregressions</font><br>
# <strong>
# <a href='https://otexts.com/fpp2/VAR.html'>Forecasting: Principles and Practice:</a></strong> <font color=black>Vector Autoregressions</font><br>
# <strong>
# <a href='https://en.wikipedia.org/wiki/Vector_autoregression'>Wikipedia:</a></strong> <font color=black>Vector Autoregression</font>
# </div>
# ### Perform standard imports and load dataset
# For this analysis we'll also compare money to spending. We'll look at the M2 Money Stock which is a measure of U.S. personal assets, and U.S. personal spending. Both datasets are in billions of dollars, monthly, seasonally adjusted. They span the 21 years from January 1995 to December 2015 (252 records).<br>
# Sources: https://fred.stlouisfed.org/series/M2SL https://fred.stlouisfed.org/series/PCE
# +
import numpy as np
import pandas as pd
# %matplotlib inline
# Load specific forecasting tools
# NOTE(review): DynamicVAR was deprecated and later removed from statsmodels;
# this import fails on recent versions — confirm the pinned statsmodels release.
from statsmodels.tsa.api import VAR, DynamicVAR
from statsmodels.tsa.stattools import adfuller
from statsmodels.tools.eval_measures import rmse
# Ignore harmless warnings
import warnings
warnings.filterwarnings("ignore")
# Load datasets (monthly series; 'MS' = month-start frequency)
df = pd.read_csv('../Data/M2SLMoneyStock.csv',index_col=0, parse_dates=True)
df.index.freq = 'MS'
sp = pd.read_csv('../Data/PCEPersonalSpending.csv',index_col=0, parse_dates=True)
sp.index.freq = 'MS'
# -
# ### Inspect the data
# Join personal spending onto the money-stock frame on the shared monthly DatetimeIndex.
df = df.join(sp)
df.head()
# Drop any months missing either series so both columns align row-for-row.
df = df.dropna()
df.shape
# ### Plot the source data
# +
title = 'M2 Money Stock vs. Personal Consumption Expenditures'
ylabel='Billions of dollars'
xlabel=''
# Plot Spending first to create the Axes, then overlay Money on the same Axes.
ax = df['Spending'].plot(figsize=(12,5),title=title,legend=True)
ax.autoscale(axis='x',tight=True)
ax.set(xlabel=xlabel, ylabel=ylabel)
df['Money'].plot(legend=True);
# -
# ## Test for stationarity, perform any necessary transformations
def adf_test(series,title=''):
    """
    Run an augmented Dickey-Fuller test on a series and print a report.

    series : time series to test; NaNs (e.g. from differencing) are dropped.
    title  : optional label printed at the top of the report.
    """
    print(f'Augmented Dickey-Fuller Test: {title}')
    stats = adfuller(series.dropna(), autolag='AIC')  # drop NaNs introduced by differencing
    report = pd.Series(
        stats[:4],
        index=['ADF test statistic', 'p-value', '# lags used', '# observations'],
    )
    for name, crit in stats[4].items():
        report[f'critical value ({name})'] = crit
    print(report.to_string())  # .to_string() omits the trailing "dtype: float64" line
    if stats[1] > 0.05:
        print("Weak evidence against the null hypothesis")
        print("Fail to reject the null hypothesis")
        print("Data has a unit root and is non-stationary")
    else:
        print("Strong evidence against the null hypothesis")
        print("Reject the null hypothesis")
        print("Data has no unit root and is stationary")
# Run the ADF test on each raw series; both come back non-stationary.
adf_test(df['Money'],title='Money')
adf_test(df['Spending'], title='Spending')
# Neither variable is stationary, so we'll take a first order difference of the entire DataFrame and re-run the augmented Dickey-Fuller tests. It's advisable to save transformed values in a new DataFrame, as we'll need the original when we later invert the transformations and evaluate the model.
df_transformed = df.diff()
df_transformed = df_transformed.dropna()  # .diff() leaves a NaN in the first row
adf_test(df_transformed['Money'], title='MoneyFirstDiff')
print()
adf_test(df_transformed['Spending'], title='SpendingFirstDiff')
# Since Money is not yet stationary, we'll apply second order differencing to both series so they retain the same number of observations
df_transformed = df_transformed.diff().dropna()
adf_test(df_transformed['Money'], title='MoneySecondDiff')
print()
adf_test(df_transformed['Spending'], title='SpendingSecondDiff')
df_transformed.head()
len(df_transformed)
# ### Train/test split
# It will be useful to define a number of observations variable for our test set. For this analysis, let's use 12 months.
nobs=12
# Hold out the final 12 months for testing; train on everything before that.
train, test = df_transformed[0:-nobs], df_transformed[-nobs:]
print(train.shape)
print(test.shape)
# ## VAR Model Order Selection
# We'll fit a series of models over the first seven lag orders (p = 1 through 7), and base our final selection on the model that provides the lowest AIC and BIC scores.
# Instantiate the VAR model once on the training data; model.fit(p) can then
# be called repeatedly with different lag orders without re-creating the model.
# (The original re-created VAR(train) inside the loop on every iteration and
# then repeated the identical selection loop a second time; consolidated here.
# `model` stays defined for the cells below.)
model = VAR(train)
for i in [1,2,3,4,5,6,7]:
    results = model.fit(i)
    print('Order =', i)
    print('AIC: ', results.aic)
    print('BIC: ', results.bic)
    print()
# The VAR(5) model seems to return the lowest combined scores.<br>
# Just to verify that both variables are included in the model we can run <tt>.endog_names</tt>
model.endog_names
# ## Fit the VAR(5) Model
results = model.fit(5)
results.summary()
# ## Predict the next 12 values
# Unlike the VARMAX model we'll use in upcoming sections, the VAR <tt>.forecast()</tt> function requires that we pass in a lag order number of previous observations as well. Unfortunately this forecast tool doesn't provide a DateTime index - we'll have to do that manually.
lag_order = results.k_ar  # number of lagged observations the fitted model conditions on (5 here)
lag_order
# Forecast 12 steps ahead, seeding the model with the last `lag_order` training rows.
z = results.forecast(y=train.values[-lag_order:], steps=12)
z
test
# Build a monthly DatetimeIndex covering the 12 held-out months for the forecast frame.
idx = pd.date_range('1/1/2015', periods=12, freq='MS')
df_forecast = pd.DataFrame(z, index=idx, columns=['Money2d','Spending2d'])
df_forecast
# ## Invert the Transformation
# Remember that the forecasted values represent second-order differences. To compare them to the original data we have to roll back each difference. To roll back a first-order difference we take the most recent value on the training side of the original series, and add it to a cumulative sum of forecasted values. When working with second-order differences we first must perform this operation on the most recent first-order difference.
#
# Here we'll use the <tt>nobs</tt> variable we defined during the train/test/split step.
# +
# Undo the second-order difference: cumsum of the forecasted 2nd diffs plus the
# last observed 1st diff recovers 1st diffs; cumsum again plus the last observed
# level recovers the original scale.
# Add the most recent first difference from the training side of the original dataset to the forecast cumulative sum
df_forecast['Money1d'] = (df['Money'].iloc[-nobs-1]-df['Money'].iloc[-nobs-2]) + df_forecast['Money2d'].cumsum()
# Now build the forecast values from the first difference set
df_forecast['MoneyForecast'] = df['Money'].iloc[-nobs-1] + df_forecast['Money1d'].cumsum()
# +
# Same inversion for the Spending series.
# Add the most recent first difference from the training side of the original dataset to the forecast cumulative sum
df_forecast['Spending1d'] = (df['Spending'].iloc[-nobs-1]-df['Spending'].iloc[-nobs-2]) + df_forecast['Spending2d'].cumsum()
# Now build the forecast values from the first difference set
df_forecast['SpendingForecast'] = df['Spending'].iloc[-nobs-1] + df_forecast['Spending1d'].cumsum()
# -
df_forecast
# ## Plot the results
# The VARResults object offers a couple of quick plotting tools:
results.plot();
results.plot_forecast(12);
# But for our investigation we want to plot predicted values against our test set.
# Overlay each 12-month forecast on the corresponding held-out original values.
df['Money'][-nobs:].plot(figsize=(12,5),legend=True).autoscale(axis='x',tight=True)
df_forecast['MoneyForecast'].plot(legend=True);
df['Spending'][-nobs:].plot(figsize=(12,5),legend=True).autoscale(axis='x',tight=True)
df_forecast['SpendingForecast'].plot(legend=True);
# ### Evaluate the model
#
# $RMSE = \sqrt{{\frac 1 L} \sum\limits_{l=1}^L (y_{T+l} - \hat y_{T+l})^2}$<br><br>
# where $T$ is the last observation period and $l$ is the lag.
# RMSE of each inverted forecast against the held-out original (undifferenced) data.
RMSE1 = rmse(df['Money'][-nobs:], df_forecast['MoneyForecast'])
print(f'Money VAR(5) RMSE: {RMSE1:.3f}')
RMSE2 = rmse(df['Spending'][-nobs:], df_forecast['SpendingForecast'])
print(f'Spending VAR(5) RMSE: {RMSE2:.3f}')
# ## Let's compare these results to individual AR(5) models
# NOTE(review): statsmodels' AR class is deprecated in newer releases in favor of
# statsmodels.tsa.ar_model.AutoReg - confirm against the pinned statsmodels version.
from statsmodels.tsa.ar_model import AR,ARResults
# ### Money
modelM = AR(train['Money'])
AR5fit1 = modelM.fit(maxlag=5,method='mle')
print(f'Lag: {AR5fit1.k_ar}')
print(f'Coefficients:\n{AR5fit1.params}')
# Predict the 12 held-out periods (still in second-difference space).
start=len(train)
end=len(train)+len(test)-1
z1 = pd.DataFrame(AR5fit1.predict(start=start, end=end, dynamic=False),columns=['Money'])
z1
# ### Invert the Transformation, Evaluate the Forecast
# +
# Add the most recent first difference from the training set to the forecast cumulative sum
z1['Money1d'] = (df['Money'].iloc[-nobs-1]-df['Money'].iloc[-nobs-2]) + z1['Money'].cumsum()
# Now build the forecast values from the first difference set
z1['MoneyForecast'] = df['Money'].iloc[-nobs-1] + z1['Money1d'].cumsum()
# -
z1
# +
# Compare the multivariate VAR(5) against the univariate AR(5) on the same horizon.
RMSE3 = rmse(df['Money'][-nobs:], z1['MoneyForecast'])
print(f'Money VAR(5) RMSE: {RMSE1:.3f}')
print(f'Money AR(5) RMSE: {RMSE3:.3f}')
# -
# ## Personal Spending
modelS = AR(train['Spending'])
AR5fit2 = modelS.fit(maxlag=5,method='mle')
print(f'Lag: {AR5fit2.k_ar}')
print(f'Coefficients:\n{AR5fit2.params}')
z2 = pd.DataFrame(AR5fit2.predict(start=start, end=end, dynamic=False),columns=['Spending'])
z2
# ### Invert the Transformation, Evaluate the Forecast
# +
# Add the most recent first difference from the training set to the forecast cumulative sum
z2['Spending1d'] = (df['Spending'].iloc[-nobs-1]-df['Spending'].iloc[-nobs-2]) + z2['Spending'].cumsum()
# Now build the forecast values from the first difference set
z2['SpendingForecast'] = df['Spending'].iloc[-nobs-1] + z2['Spending1d'].cumsum()
# -
z2
# +
RMSE4 = rmse(df['Spending'][-nobs:], z2['SpendingForecast'])
print(f'Spending VAR(5) RMSE: {RMSE2:.3f}')
print(f'Spending AR(5) RMSE: {RMSE4:.3f}')
# -
# <strong>CONCLUSION:</strong> It looks like the VAR(5) model did <em>not</em> do better than the individual AR(5) models. That's ok - we know more than we did before. In the next section we'll look at VARMA and see if the addition of a $q$ parameter helps. Great work!
| tsa/jose/UDEMY_TSA_FINAL (1)/06-General-Forecasting-Models/08-Vector-AutoRegression-VAR.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# language: python
# name: python3
# ---
import numpy as np
import pandas as pd
import matplotlib.pyplot as plt
import seaborn as sns
import pandas_profiling
from sklearn.linear_model import LinearRegression
# Load the data first, THEN inspect it for missing values.
# (The original called data1.isna().sum() before data1 was defined -> NameError.)
data1 = pd.read_csv("train.csv")
data2 = pd.read_csv('test.csv')
data1.isna().sum()  # count missing values per column in the training split
# Drop incomplete rows from both splits and confirm nothing is left missing.
dt1=data1.dropna()
dt2=data2.dropna()
dt1.isnull().sum()
dt2.isnull().sum()
# NOTE(review): LinearRegression's `normalize` argument was removed in
# scikit-learn 1.2; on newer versions use a StandardScaler pipeline instead.
Lr = LinearRegression(normalize=True)
x_train = dt1[['x']]
y_train = dt1['y']
x_test = dt2[['x']]
y_test = dt2['y']
# Fit on the TRAINING split only.
Lr.fit(x_train,y_train)
Train_Score = Lr.score(x_train,y_train)
Train_Score
# Evaluate the train-fit model on the held-out test split.
# (The original refit the model on the test data before scoring/predicting,
# which leaks the test set into the model and invalidates the evaluation.)
Test_score = Lr.score(x_test,y_test)
Test_score
New_Pred = Lr.predict(x_test)
New_Pred
# Side-by-side comparison of predictions and ground truth on the test split.
Data = pd.DataFrame({'New_Prediction':New_Pred,
'Actual_Data':y_test}, index=np.arange(300))
Data
# Standard regression metrics on the test split.
from sklearn.metrics import r2_score,mean_absolute_error,mean_squared_error
r2_score(y_test,New_Pred)
mean_absolute_error(y_test,New_Pred)
mean_squared_error(y_test,New_Pred)
# 20-fold cross-validation of the same model on the training split.
from sklearn.model_selection import cross_val_score
cv = cross_val_score(Lr,x_train,y_train,cv = 20)
cv
np.mean(cv)
np.max(cv)
np.min(cv)
# Visual check: predictions vs. actuals should fall on a straight line.
plt.plot(y_test,New_Pred,'r')
sns.lmplot(x='Actual_Data',y='New_Prediction',data=Data)
Data.columns
| How to do already prepared train and test.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# language: python
# name: python3
# ---
# In this kernel used github repos [efficientdet-pytorch](https://github.com/rwightman/efficientdet-pytorch) and [pytorch-image-models](https://github.com/rwightman/pytorch-image-models) by [@rwightman](https://www.kaggle.com/rwightman). Don't forget add stars ;)
# + _cell_guid="79c7e3d0-c299-4dcb-8224-4455121ee9b0" _uuid="d629ff2d2480ee46fbb7e2d37f6b5fab8052498a"
# !pip install --no-deps '../input/timm-package/timm-0.1.26-py3-none-any.whl' > /dev/null
# !pip install --no-deps '../input/pycocotools/pycocotools-2.0-cp37-cp37m-linux_x86_64.whl' > /dev/null
# +
import sys
sys.path.insert(0, "../input/timm-efficientdet-pytorch")
sys.path.insert(0, "../input/omegaconf")
sys.path.insert(0, "../input/weightedboxesfusion")
from ensemble_boxes import *
import torch
import numpy as np
import pandas as pd
from glob import glob
from torch.utils.data import Dataset,DataLoader
import albumentations as A
from albumentations.pytorch.transforms import ToTensorV2
import cv2
import gc
from matplotlib import pyplot as plt
from effdet import get_efficientdet_config, EfficientDet, DetBenchEval
from effdet.efficientdet import HeadNet
# -
def get_valid_transforms():
    """Test-time preprocessing: resize to 512x512 and convert to a CHW tensor."""
    return A.Compose([
            A.Resize(height=512, width=512, p=1.0),
            ToTensorV2(p=1.0),
        ], p=1.0)
# +
DATA_ROOT_PATH = '../input/global-wheat-detection/test'
class DatasetRetriever(Dataset):
    """Test-time dataset: loads one image per id and applies optional transforms."""
    def __init__(self, image_ids, transforms=None):
        super().__init__()
        self.image_ids = image_ids
        self.transforms = transforms
    def __getitem__(self, index: int):
        image_id = self.image_ids[index]
        # Read as BGR, convert to RGB, and scale pixel values into [0, 1].
        image = cv2.imread(f'{DATA_ROOT_PATH}/{image_id}.jpg', cv2.IMREAD_COLOR)
        image = cv2.cvtColor(image, cv2.COLOR_BGR2RGB).astype(np.float32) / 255.0
        if self.transforms:
            image = self.transforms(image=image)['image']
        return image, image_id
    def __len__(self) -> int:
        return self.image_ids.shape[0]
# +
# Build the test dataset from every .jpg under the test directory
# (image ids are the filenames without the 4-character '.jpg' extension).
dataset = DatasetRetriever(
    image_ids=np.array([path.split('/')[-1][:-4] for path in glob(f'{DATA_ROOT_PATH}/*.jpg')]),
    transforms=get_valid_transforms()
)
def collate_fn(batch):
    """Transpose a list of per-sample tuples into one tuple per field."""
    transposed = zip(*batch)
    return tuple(transposed)
# Batched loader over the test set; collate_fn keeps images/ids as tuples
# instead of stacking tensors, since stacking happens inside make_predictions.
data_loader = DataLoader(
    dataset,
    batch_size=2,
    shuffle=False,
    num_workers=4,
    drop_last=False,
    collate_fn=collate_fn
)
# +
def load_net(checkpoint_path):
config = get_efficientdet_config('tf_efficientdet_d5')
net = EfficientDet(config, pretrained_backbone=False)
config.num_classes = 1
config.image_size=512
net.class_net = HeadNet(config, num_outputs=config.num_classes, norm_kwargs=dict(eps=.001, momentum=.01))
checkpoint = torch.load(checkpoint_path)
net.load_state_dict(checkpoint['model_state_dict'])
del checkpoint
gc.collect()
net = DetBenchEval(net, config)
net.eval();
return net.cuda()
net = load_net('../input/wheat-effdet5-fold0-best-checkpoint/fold0-best-all-states.bin')
# +
def make_predictions(images, score_threshold=0.22):
    """Run the detector on a batch of images and keep detections above a threshold.

    images: sequence of CHW float tensors (already resized to 512x512).
    score_threshold: minimum confidence for a detection to be kept.

    Returns a one-element list containing, per image, a dict with 'boxes'
    (x1, y1, x2, y2 in the 512x512 frame) and 'scores'.
    """
    images = torch.stack(images).cuda().float()
    predictions = []
    with torch.no_grad():
        det = net(images, torch.tensor([1]*images.shape[0]).float().cuda())
        for i in range(images.shape[0]):
            boxes = det[i].detach().cpu().numpy()[:,:4]
            scores = det[i].detach().cpu().numpy()[:,4]
            # Filter boxes and scores by confidence ONCE.
            # (The original filtered `boxes` here and then indexed the already
            # filtered array with `indexes` again when appending, selecting the
            # wrong rows or raising IndexError.)
            indexes = np.where(scores > score_threshold)[0]
            boxes = boxes[indexes]
            scores = scores[indexes]
            # Convert (x, y, w, h) output to (x1, y1, x2, y2) corners.
            boxes[:, 2] = boxes[:, 2] + boxes[:, 0]
            boxes[:, 3] = boxes[:, 3] + boxes[:, 1]
            predictions.append({
                'boxes': boxes,
                'scores': scores,
            })
    return [predictions]
def run_wbf(predictions, image_index, image_size=512, iou_thr=0.44, skip_box_thr=0.43, weights=None):
    """Merge per-model detections for one image with Weighted Boxes Fusion.

    Boxes are normalized to [0, 1] for WBF and rescaled on return.
    `weights` (per-model fusion weights) is now forwarded to WBF - the
    original call hard-coded `weights=None`, silently ignoring the parameter.
    """
    boxes = [(prediction[image_index]['boxes']/(image_size-1)).tolist() for prediction in predictions]
    scores = [prediction[image_index]['scores'].tolist() for prediction in predictions]
    labels = [np.ones(prediction[image_index]['scores'].shape[0]).tolist() for prediction in predictions]
    boxes, scores, labels = weighted_boxes_fusion(boxes, scores, labels, weights=weights, iou_thr=iou_thr, skip_box_thr=skip_box_thr)
    boxes = boxes*(image_size-1)
    return boxes, scores, labels
# +
import matplotlib.pyplot as plt
# Grab just the first batch from the loader for a visual sanity check.
for j, (images, image_ids) in enumerate(data_loader):
    break
predictions = make_predictions(images)
i = 0
# CHW tensor -> HWC numpy image for plotting.
sample = images[i].permute(1,2,0).cpu().numpy()
boxes, scores, labels = run_wbf(predictions, image_index=i)
boxes = boxes.astype(np.int32).clip(min=0, max=511)
fig, ax = plt.subplots(1, 1, figsize=(16, 8))
# Draw each fused box on the image (red, 1px, in the 512x512 frame).
for box in boxes:
    cv2.rectangle(sample, (box[0], box[1]), (box[2], box[3]), (1, 0, 0), 1)
ax.set_axis_off()
ax.imshow(sample);
# -
def format_prediction_string(boxes, scores):
    """Render detections as space-joined 'score x y w h' groups (Kaggle format)."""
    parts = []
    for score, box in zip(scores, boxes):
        parts.append("{0:.4f} {1} {2} {3} {4}".format(score, box[0], box[1], box[2], box[3]))
    return " ".join(parts)
# +
results = []
for images, image_ids in data_loader:
    predictions = make_predictions(images)
    for i, image in enumerate(images):
        boxes, scores, labels = run_wbf(predictions, image_index=i)
        # Scale boxes from the 512x512 model frame back to the original 1024x1024 images.
        boxes = (boxes*2).astype(np.int32).clip(min=0, max=1023)
        image_id = image_ids[i]
        # Convert (x1, y1, x2, y2) back to (x, y, w, h) for the submission format.
        boxes[:, 2] = boxes[:, 2] - boxes[:, 0]
        boxes[:, 3] = boxes[:, 3] - boxes[:, 1]
        result = {
            'image_id': image_id,
            'PredictionString': format_prediction_string(boxes, scores)
        }
        results.append(result)
# -
# Write the Kaggle submission file.
test_df = pd.DataFrame(results, columns=['image_id', 'PredictionString'])
test_df.to_csv('submission.csv', index=False)
test_df.head()
| efficientdet/notebooks/inference-efficientdet.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python [default]
# language: python
# name: python3
# ---
# # Read realtime data from IOOS Sensor Map via ERDDAP tabledap
#
# Web Map Services are a great way to find data you may be looking for in a particular geographic area.
#
# Suppose you were exploring the [IOOS Sensor Map](https://via.hypothes.is/https://sensors.ioos.us/#map),
# and after selecting Significant Wave Height,
# had selected buoy 44011 on George's Bank:
#
# 
#
# You click the `ERDDAP` link and generate a URL to download the data as `CSV` .
#
# You notice that the URL that is generated
#
# [`https://erddap.axiomdatascience.com/erddap/tabledap/sensor_service.csvp?time,depth,station,parameter,unit,value&time>=2017-02-27T12:00:00Z&station="urn:ioos:station:wmo:44011"¶meter="Significant Wave Height"&unit="m"`](http://erddap.axiomdatascience.com/erddap/tabledap/sensor_service.csvp?time,depth,station,parameter,unit,value&time>=2017-02-27T12:00:00Z&station="urn:ioos:station:wmo:44011"¶meter="Significant Wave Height"&unit="m")
#
# is fairly easy to understand,
# and that a program could construct that URL fairly easily.
# Let's explore how that could work...
# +
import requests
try:
from urllib.parse import urlencode
except ImportError:
from urllib import urlencode
def encode_erddap(urlbase, fname, columns, params):
    """
    Build (and sanity-check via an HTTP GET) an ERDDAP tabledap endpoint.

    urlbase: the base string for the endpoint
             (e.g.: https://erddap.axiomdatascience.com/erddap/tabledap).
    fname:   the data source (e.g.: `sensor_service`) and the response
             (e.g.: `.csvp` for CSV).
    columns: the columns of the return table.
    params:  the parameters for the query.
    Returns a valid ERDDAP endpoint.
    """
    urlbase = urlbase.rstrip('/')
    if not urlbase.lower().startswith(('http:', 'https:')):
        raise ValueError('Expected valid URL but got {}'.format(urlbase))
    url = '{urlbase}/{fname}?{columns}&{params}'.format(
        urlbase=urlbase,
        fname=fname,
        columns=','.join(columns),
        params=urlencode(params),
    )
    # Issue a GET so a malformed query fails loudly here rather than downstream.
    r = requests.get(url)
    r.raise_for_status()
    return url
# -
# Using the function we defined above, we can now bypass the forms and get the data by generating the URL "by hand". Below we have a query for `Significant Wave Height` from buoy `44011`, a buoy on George's Bank off the coast of Cape Cod, MA, starting at the beginning of the year 2017.
#
# \* For more information on how to use tabledap, please check the [NOAA ERDDAP documentation](https://via.hypothes.is/http://coastwatch.pfeg.noaa.gov/erddap/tabledap/documentation.html) for more information on the various parameters and responses of ERDDAP.
# +
try:
    from urllib.parse import unquote  # Python 3
except ImportError:
    from urllib2 import unquote  # Python 2 fallback
urlbase = 'https://erddap.axiomdatascience.com/erddap/tabledap'
fname = 'sensor_service.csvp'
columns = ('time',
'value',
'station',
'longitude',
'latitude',
'parameter',
'unit',
'depth')
params = {
    # Inequalities do not exist in HTTP parameters,
    # so we need to hardcode the `>` in the time key to get a '>='.
    # Note that a '>' or '<' cannot be encoded with `urlencode`, only `>=` and `<=`.
    # NOTE(review): day "00" in the timestamp looks odd - ERDDAP appears to
    # accept it, but confirm against the server.
    'time>': '2017-01-00T00:00:00Z',
    'station': '"urn:ioos:station:wmo:44011"',
    'parameter': '"Significant Wave Height"',
    'unit': '"m"',
}
url = encode_erddap(urlbase, fname, columns, params)
# Print the human-readable (unquoted) form of the generated URL.
print(unquote(url))
# -
# -
# Here is a cool part about ERDDAP `tabledap` - The data `tabledap` `csvp` response can be easily read by Python's pandas `read_csv` function.
# +
from pandas import read_csv
# Read the CSV response straight from the URL, using time (column 0) as a DatetimeIndex.
df = read_csv(url, index_col=0, parse_dates=True)
# Prevent :station: from turning into an emoji in the webpage.
df['station'] = df.station.str.split(':').str.join('_')
df.head()
# -
# With the `DataFrame` we can easily plot the data.
# +
# %matplotlib inline
# Quick time-series plot of the wave-height values, titled from the data itself.
ax = df['value'].plot(figsize=(11, 2.75), title=df['parameter'][0])
# -
# You may notice that slicing the time dimension on the server side is very fast when compared with an OPeNDAP request. The downloading of the time dimension data, the slicing, and the subsequent downloading of the actual data are all much faster.
#
# ERDDAP also allows for filtering of the variable's values. For example, let's get Wave Heights that are bigger than 6 meters starting from 2016.
#
# \*\* Note how we can lazily build on top of the previous query using Python's dictionaries.
# +
params.update(
{
'value>': 6,
'time>': '2016-01-00T00:00:00Z',
}
)
url = encode_erddap(urlbase, fname, columns, params)
df = read_csv(url, index_col=0, parse_dates=True)
# Prevent :station: from turning into an emoji in the webpage.
df['station'] = df.station.str.split(':').str.join('_')
df.head()
# -
# And now we can visualize the frequency of `Significant Wave Height` greater than 6 meters by month.
# +
def key(x):
    """Group key: calendar month (1-12) of the timestamp index."""
    return x.month
grouped = df['value'].groupby(key)
# Count of >6 m significant-wave-height events per calendar month.
ax = grouped.count().plot.bar()
ax.set_ylabel('Significant Wave Height events > 6 meters')
# Fixed: August was misspelled 'Ago' in the original label list.
m = ax.set_xticklabels(['Jan', 'Feb', 'Mar', 'Apr', 'May', 'Jun',
                        'Jul', 'Aug', 'Sep', 'Oct', 'Nov', 'Dec'])
# Wow! Wintertime is pretty rough out on George's Bank!
#
# There is also a built-in relative-time functionality so you can specify the exact time frame to look at. Here we demonstrate this part of the tool by getting the last 2 hours and displaying that with the `HTML` response in an `IFrame`.
# +
from IPython.display import HTML
# Switch to the htmlTable response and ERDDAP's relative-time syntax (now-2hours .. now).
fname = 'sensor_service.htmlTable'
params = {
    'time>': 'now-2hours',
    'time<': 'now',
    'station': '"urn:ioos:station:nerrs:wqbchmet"',
    'parameter': '"Wind Speed"',
    'unit': '"m.s-1"'
}
url = encode_erddap(urlbase, fname, columns, params)
# Embed the HTML table response directly in the notebook.
iframe = '<iframe src="{src}" width="650" height="370"></iframe>'.format
HTML(iframe(src=url))
# -
# `ERDDAP` responses are very rich. There are even multiple image formats in the automated graph responses.
# Here is how to get a `.png` file for the temperature time-series. While you can specify the width and height, we chose just an arbitrary size.
# +
fname = 'sensor_service.png'
params = {
    'time>': 'now-7days',
    'station': '"urn:ioos:station:wmo:44011"',
    'parameter': '"Water Temperature"',
    'unit': '"degree_Celsius"',
}
width, height = 450, 500
# The special '.size' parameter controls the generated image's width|height.
params.update(
    {'.size': '{}|{}'.format(width, height)}
)
url = encode_erddap(urlbase, fname, columns, params)
iframe = '<iframe src="{src}" width="{width}" height="{height}"></iframe>'.format
# iframe padded by 5px, presumably to avoid clipping the image - cosmetic.
HTML(iframe(src=url, width=width+5, height=height+5))
# -
# -
# This example tells us it is rough and cold out on George's Bank!
#
# To explore more datasets, use the IOOS sensor map [website](https://sensors.ioos.us/#map)!
| notebooks/2017-03-21-ERDDAP_IOOS_Sensor_Map.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# name: python3
# ---
# + id="-UOOzCs9ukul"
project_name = "reco-tut-arr"; branch = "main"; account = "sparsh-ai"
# + colab={"base_uri": "https://localhost:8080/"} id="yjoT7OzOxK8t" executionInfo={"status": "ok", "timestamp": 1628007806152, "user_tz": -330, "elapsed": 10, "user": {"displayName": "<NAME>", "photoUrl": "", "userId": "13037694610922482904"}} outputId="b7a0d74b-3e84-416f-fbfa-6ad72bb15dea"
import os
# First run (repo not yet cloned): mount keys, clone, and configure git.
# Subsequent runs just cd into the existing checkout.
if not os.path.exists('/content/reco-tut-arr'):
    # !cp /content/drive/MyDrive/mykeys.py /content
    import mykeys
    # !rm /content/mykeys.py
    path = "/content/" + project_name;
    # !mkdir "{path}"
    # %cd "{path}"
    import sys; sys.path.append(path)
    # !git config --global user.email "<EMAIL>"
    # !git config --global user.name "reco-tut-arr"
    # !git init
    # !git remote add origin https://"{mykeys.git_token}":x-oauth-basic@github.com/"{account}"/"{project_name}".git
    # !git pull origin "{branch}"
    # !git checkout main
else:
    # %cd '/content/reco-tut-arr'
    pass  # the %cd magic above is this branch's only action; `pass` keeps the .py form valid Python
# + id="ljYLIkBI_ijb"
# !git add . && git commit -m 'commit' && git push origin main
# + id="h75rrYXdzr6r"
import sys
# Make the repo's code/ directory importable and pull in its shared helpers.
sys.path.insert(0,f'/content/{project_name}/code')
from utils import *
# + id="J6GnSXizHC08"
import os
import numpy as np
from numpy import log, sqrt
import pandas as pd
import matplotlib.pyplot as plt
from tqdm import tqdm
import torch
import torch.nn as nn
import torch.nn.functional as F
pd.set_option('display.max_columns', 100)
pd.set_option('display.max_rows', 10)
pd.set_option('display.width', 1000)
# %matplotlib inline
# + id="Z12sTqxTDvMB"
# Load the raw ("bronze") tables. Orders are NOT pre-split into train/test;
# the customer/location tables are.
vendors = pd.read_parquet('./data/bronze/vendors.parquet.gz')
orders = pd.read_parquet('./data/bronze/orders.parquet.gz')
train_customers = pd.read_parquet('./data/bronze/train_customers.parquet.gz')
train_locations = pd.read_parquet('./data/bronze/train_locations.parquet.gz')
test_customers = pd.read_parquet('./data/bronze/test_customers.parquet.gz')
test_locations = pd.read_parquet('./data/bronze/test_locations.parquet.gz')
# + [markdown] id="JU1LVY2kxw-3"
# ## Orders
# + [markdown] id="8Oc55GYj2XCl"
# ---
# + [markdown] id="gvU9Ydkixw-9"
# > Notes
# - Is **NOT** split into train/test
# - 135,233 orders
# - 131,942 made by customers in train_customers.csv
# - Other ~3k orders are ???
# - grand_total can be 0
# - vendor_discount_amount and promo_discount_percentage are mostly 0
# - vendor and driver ratings are mostly either 0 or 5
# - deliverydistance can be 0(?) and is at most ~20
# - delivery_date can be null but created_at is similar and never null
# - promo_code_discount_percentage is unreliable
#
# + [markdown] id="B0iJeFRU2X9V"
# ---
# + [markdown] id="CdNL1CLCxw-_"
# ### Check Some Values
# + id="lp6u7fUWxw_C"
# Train / Test split: partition the raw orders by matching each order's
# customer_id against the respective customer table.
train_ids = train_customers['akeed_customer_id']
test_ids = test_customers['akeed_customer_id']
train_orders = orders[orders['customer_id'].isin(train_ids)]
test_orders = orders[orders['customer_id'].isin(test_ids)]
# + id="B91O4RHExw_H"
# Remove duplicate customers and their orders: any customer id appearing more
# than once in train_customers is dropped from both tables.
x = train_customers['akeed_customer_id'].value_counts()
duplicate_train_customers = x[x > 1].index
train_customers = train_customers[~train_customers['akeed_customer_id'].isin(duplicate_train_customers)]
train_orders = train_orders[~train_orders['customer_id'].isin(duplicate_train_customers)]
# + id="HvBRz9L-xw_K" colab={"base_uri": "https://localhost:8080/"} executionInfo={"status": "ok", "timestamp": 1628008214448, "user_tz": -330, "elapsed": 10, "user": {"displayName": "<NAME>", "photoUrl": "", "userId": "13037694610922482904"}} outputId="aa970576-1cce-4ac2-c49f-1367a9432600"
num_train_orders = orders[orders['customer_id'].isin(train_customers['akeed_customer_id'])].shape[0]
num_test_orders = orders[orders['customer_id'].isin(test_customers['akeed_customer_id'])].shape[0]
print(f'Num Orders: {orders.shape[0]}\nNum Train: {num_train_orders}\nNum Test: {num_test_orders}')
# + id="L89_w49gxw_N" colab={"base_uri": "https://localhost:8080/", "height": 343} executionInfo={"status": "ok", "timestamp": 1628008217935, "user_tz": -330, "elapsed": 727, "user": {"displayName": "<NAME>", "photoUrl": "", "userId": "13037694610922482904"}} outputId="741d5ef1-bace-4c86-ee8b-7a8c46ea6886"
train_orders.head(5)
# + id="-yZSHDkXxw_O" colab={"base_uri": "https://localhost:8080/", "height": 419} executionInfo={"status": "ok", "timestamp": 1628008227472, "user_tz": -330, "elapsed": 638, "user": {"displayName": "<NAME>", "photoUrl": "", "userId": "13037694610922482904"}} outputId="a2dea60b-f766-4e02-e7c6-7a620555021e"
pd.concat([train_orders.dtypes.rename('dtype'), train_orders.isnull().sum().rename('num_null')], axis=1)
# + id="iX3Cp0SQxw_P" colab={"base_uri": "https://localhost:8080/", "height": 317} executionInfo={"status": "ok", "timestamp": 1628008233251, "user_tz": -330, "elapsed": 691, "user": {"displayName": "<NAME>", "photoUrl": "", "userId": "13037694610922482904"}} outputId="28cb544e-917e-4ccc-8beb-677834e78e97"
train_orders.describe()
# + id="W13Sb39Pxw_Q" colab={"base_uri": "https://localhost:8080/", "height": 265} executionInfo={"status": "ok", "timestamp": 1628008236930, "user_tz": -330, "elapsed": 1014, "user": {"displayName": "<NAME>", "photoUrl": "", "userId": "13037694610922482904"}} outputId="1e5e86db-593c-4017-c7ea-11f35f5b1409"
train_orders[train_orders['item_count'] < 20.5]['item_count'].hist(bins=20);
# + id="rf6amYX6xw_S" colab={"base_uri": "https://localhost:8080/", "height": 265} executionInfo={"status": "ok", "timestamp": 1628008248835, "user_tz": -330, "elapsed": 729, "user": {"displayName": "<NAME>", "photoUrl": "", "userId": "13037694610922482904"}} outputId="d042ddf7-bec1-420b-e743-425ffaf7d91d"
train_orders['vendor_rating'].hist();
# + id="rOfl9QO4xw_U" colab={"base_uri": "https://localhost:8080/", "height": 265} executionInfo={"status": "ok", "timestamp": 1628008255097, "user_tz": -330, "elapsed": 1188, "user": {"displayName": "<NAME>", "photoUrl": "", "userId": "13037694610922482904"}} outputId="22e5bf5e-aaf5-4381-a44e-81442be602cc"
train_orders['driver_rating'].hist();
# + id="gi6u_t5Axw_V" colab={"base_uri": "https://localhost:8080/"} executionInfo={"status": "ok", "timestamp": 1628008261673, "user_tz": -330, "elapsed": 582, "user": {"displayName": "<NAME>", "photoUrl": "", "userId": "13037694610922482904"}} outputId="920b4327-290d-47a5-b4b7-98976af64064"
train_orders['is_favorite'].value_counts(dropna=False)
# + id="R6gW0ssmxw_X" colab={"base_uri": "https://localhost:8080/"} executionInfo={"status": "ok", "timestamp": 1628008270260, "user_tz": -330, "elapsed": 722, "user": {"displayName": "<NAME>", "photoUrl": "", "userId": "13037694610922482904"}} outputId="036a5ef7-e3d3-4ce8-8014-f0786ac7516d"
train_orders['is_rated'].value_counts(dropna=False)
# + id="A6pMO0w-xw_Z" colab={"base_uri": "https://localhost:8080/", "height": 265} executionInfo={"status": "ok", "timestamp": 1628008272688, "user_tz": -330, "elapsed": 12, "user": {"displayName": "<NAME>", "photoUrl": "", "userId": "13037694610922482904"}} outputId="ddc67810-ccdb-404f-f1e8-a605c72f6f0f"
train_orders['deliverydistance'].hist(bins=20);
# + id="m5shydknxw_a" colab={"base_uri": "https://localhost:8080/"} executionInfo={"status": "ok", "timestamp": 1628008280499, "user_tz": -330, "elapsed": 566, "user": {"displayName": "<NAME>", "photoUrl": "", "userId": "13037694610922482904"}} outputId="53adfc60-68ea-426f-f773-7f104a485c14"
train_orders['delivery_date'].isnull().value_counts(dropna=False)
# + id="Dhhi0sOexw_b" colab={"base_uri": "https://localhost:8080/"} executionInfo={"status": "ok", "timestamp": 1628008285309, "user_tz": -330, "elapsed": 586, "user": {"displayName": "<NAME>", "photoUrl": "", "userId": "13037694610922482904"}} outputId="5f27a088-d4af-41b0-bd16-c4f113f186fb"
train_orders['created_at'].isnull().value_counts(dropna=False)
# + [markdown] id="81DHXt5Rxw_c"
# ## Customers
# + [markdown] id="cTXX1I1x2xDH"
# ---
# + [markdown] id="nIAKmIzixw_g"
# > Notes
# - 34,467 customers
# - 26,741 have made at least 1 order
# - Most customers have only 1 location
# - Outliers in num_locations, dob
# - Constant columns: language
# + [markdown] id="CJOXBf1d20Ny"
# ---
# + [markdown] id="7juy79E3xw_h"
# ### Check Some Values
# + id="phmGRM9yxw_i" colab={"base_uri": "https://localhost:8080/"} executionInfo={"status": "ok", "timestamp": 1628008315952, "user_tz": -330, "elapsed": 622, "user": {"displayName": "<NAME>", "photoUrl": "", "userId": "13037694610922482904"}} outputId="3774bd8e-dfe8-4f64-929d-d8354a44ce43"
train_customers.shape[0]
# + id="caKArvQxxw_j" colab={"base_uri": "https://localhost:8080/", "height": 204} executionInfo={"status": "ok", "timestamp": 1628008316999, "user_tz": -330, "elapsed": 15, "user": {"displayName": "<NAME>", "photoUrl": "", "userId": "13037694610922482904"}} outputId="e60f6aa8-3d75-4524-ad76-b7d1c98748dc"
train_customers.head(5)
# + id="W8Lyk-EOxw_j" colab={"base_uri": "https://localhost:8080/", "height": 297} executionInfo={"status": "ok", "timestamp": 1628008317001, "user_tz": -330, "elapsed": 13, "user": {"displayName": "<NAME>", "photoUrl": "", "userId": "13037694610922482904"}} outputId="9b614021-0ea7-4cf0-e912-629ccb5f7cb7"
pd.concat([train_customers.dtypes.rename('dtype'), train_customers.isnull().sum().rename('num_null')], axis=1)
# + id="y_v21dpKxw_k"
# Add num_locations as new column in customer table
# (left merge: customers with no registered location get NaN, not 0)
locations_customer_grp = train_locations.groupby(by=['customer_id'])
locations_per_customer = locations_customer_grp['location_number'].count().rename('num_locations')
train_customers = train_customers.merge(locations_per_customer, how='left', left_on='akeed_customer_id', right_index=True)
# + id="y5Bby0FIxw_k"
# Add num_orders as new column in customer table
# (left merge: customers with no orders get NaN num_orders)
orders_per_customer = train_orders.groupby('customer_id')['akeed_order_id'].count().rename('num_orders')
train_customers = train_customers.merge(orders_per_customer, how='left', left_on='akeed_customer_id', right_index=True)
# + id="sJeOdgaqxw_l" colab={"base_uri": "https://localhost:8080/"} executionInfo={"status": "ok", "timestamp": 1628008331129, "user_tz": -330, "elapsed": 9, "user": {"displayName": "<NAME>", "photoUrl": "", "userId": "13037694610922482904"}} outputId="ba9112f7-199d-4f9b-9187-b6574e9b9573"
train_customers[train_customers['num_orders'] < 1].shape[0]
# + id="7iQoqHoOxw_l" colab={"base_uri": "https://localhost:8080/"} executionInfo={"status": "ok", "timestamp": 1628008341964, "user_tz": -330, "elapsed": 732, "user": {"displayName": "<NAME>", "photoUrl": "", "userId": "13037694610922482904"}} outputId="276539ba-8e93-44bb-c64f-f340b2f804aa"
train_customers['num_orders'].value_counts(dropna=False).sort_index()[:5]
# + id="LbmZ3FQSxw_m" colab={"base_uri": "https://localhost:8080/"} executionInfo={"status": "ok", "timestamp": 1628008347319, "user_tz": -330, "elapsed": 572, "user": {"displayName": "<NAME>", "photoUrl": "", "userId": "13037694610922482904"}} outputId="2b1992c6-0be9-429d-b6d7-0a11035f4dc1"
train_customers['num_orders'].isna().sum()
# + id="R5Bhe_VExw_n"
# Remove customers with no orders.
# NaN num_orders (customers absent from train_orders after the left merge)
# compares False against `> 0`, so those rows are dropped here as well.
train_customers = train_customers[train_customers['num_orders'] > 0]
# + id="HJQmEgKdxw_n" colab={"base_uri": "https://localhost:8080/"} executionInfo={"status": "ok", "timestamp": 1628008348286, "user_tz": -330, "elapsed": 7, "user": {"displayName": "<NAME>", "photoUrl": "", "userId": "13037694610922482904"}} outputId="b15d2965-3150-4e7e-8908-1d1c56b45435"
train_customers.shape[0]
# + id="WV2N3djZxw_o" colab={"base_uri": "https://localhost:8080/"} executionInfo={"status": "ok", "timestamp": 1628008349726, "user_tz": -330, "elapsed": 12, "user": {"displayName": "<NAME>", "photoUrl": "", "userId": "13037694610922482904"}} outputId="7fc082de-3721-41f3-e89a-7edf9a4798ef"
train_customers['gender'].value_counts(dropna=False)
# + id="fBcFWwaexw_o" colab={"base_uri": "https://localhost:8080/"} executionInfo={"status": "ok", "timestamp": 1628008368237, "user_tz": -330, "elapsed": 611, "user": {"displayName": "<NAME>", "photoUrl": "", "userId": "13037694610922482904"}} outputId="bdbe9bb6-5168-480e-f25a-813ade47bea4"
# Normalize the gender column (trim whitespace, lowercase) and keep only the
# two clean categories; everything else — including NaN — is filtered out.
train_customers['gender'] = train_customers['gender'].str.strip().str.lower()
gender_filter = train_customers['gender'].isin(['male', 'female'])
train_customers = train_customers[gender_filter]
train_customers['gender'].value_counts(dropna=False)
# + id="9jgKB53pxw_o" colab={"base_uri": "https://localhost:8080/"} executionInfo={"status": "ok", "timestamp": 1628008382485, "user_tz": -330, "elapsed": 1378, "user": {"displayName": "<NAME>", "photoUrl": "", "userId": "13037694610922482904"}} outputId="fdab15c2-e511-49c4-de07-01464d9b2073"
train_customers['language'].value_counts(dropna=False)
# + id="XM0Ks0-yxw_q" colab={"base_uri": "https://localhost:8080/"} executionInfo={"status": "ok", "timestamp": 1628008385378, "user_tz": -330, "elapsed": 9, "user": {"displayName": "<NAME>", "photoUrl": "", "userId": "13037694610922482904"}} outputId="a75c8d22-39a7-477b-e817-cb65f17aea49"
ser = train_customers['created_at'] == train_customers['updated_at']
ser.value_counts(dropna=False)
# + [markdown] id="ZhzF9ujJxw_q"
# ## Vendors
#
# We should prioritize cleaning this table because it will likely be the most useful data for our model.
# + [markdown] id="Yeholxcq3K02"
# ---
# + [markdown] id="_MJSy8Wtxw_r"
# > Notes
# - 100 restaurants
# - Outliers present in `latitude`, `longitude`, `preparation_time`, `discount_percentage`
# - Constant columns: `commission`, `open_close_flags`, `country_id`, `city_id`, `display_orders`, `one_click_vendor`, `is_akeed_delivering`, `language`
# - Nearly constant: `discount_percentage`
# - Columns (`status`, `verified`), (`vendor_category_en`, `vendor_category_id`) are almost equal
# - Median and Max `serving_distance` is 15
# - `rank` is either 1 or 11...?
# - `vendor_rating` has small variance
# - `vendor_tag` and `vendor_tag_name` are the same: lists of food types
# - Columns recording open/close times are confusing... What is the difference between 1 and 2 and `opening_time`?
# - `delivery_charge` is actually categorical
# + [markdown] id="i5nmrc5R3MeF"
# ---
# + [markdown] id="LG26Kqh0xw_s"
# ### Checking Some Values
# + colab={"base_uri": "https://localhost:8080/"} id="eU775Wi244Ur" executionInfo={"status": "ok", "timestamp": 1628010768538, "user_tz": -330, "elapsed": 549, "user": {"displayName": "<NAME>", "photoUrl": "", "userId": "13037694610922482904"}} outputId="feaf461e-8135-42a3-ef7a-430b52bd548c"
vendors = pd.read_parquet('./data/bronze/vendors.parquet.gz')
vendors.info()
# + id="NWRrlhSlxw_t" colab={"base_uri": "https://localhost:8080/", "height": 309} executionInfo={"status": "ok", "timestamp": 1628009539901, "user_tz": -330, "elapsed": 758, "user": {"displayName": "<NAME>", "photoUrl": "", "userId": "13037694610922482904"}} outputId="7bf407ed-17ab-4221-aac7-57337ee3582d"
vendors.head(5)
# + id="yp89Yf7pxw_t" colab={"base_uri": "https://localhost:8080/", "height": 340} executionInfo={"status": "ok", "timestamp": 1628010774416, "user_tz": -330, "elapsed": 999, "user": {"displayName": "<NAME>", "photoUrl": "", "userId": "13037694610922482904"}} outputId="bb9edb31-5a49-4aba-81c4-9a5edc46db25"
# Set id column to index.
# FIX: sort_values returns a new frame; previously the result was discarded
# (`vendors.sort_values(by='id')` with no assignment was a no-op), so the
# intended id ordering never took effect. Assign it back.
vendors = vendors.sort_values(by='id')
vendors, v_id_map, v_inv_map = integer_encoding(df=vendors, cols=['id'], drop_old=True, monotone_mapping=True)
vendors.set_index('id', inplace=True)
vendors.head()
# + id="nWKcffJ7xw_u" colab={"base_uri": "https://localhost:8080/", "height": 419} executionInfo={"status": "ok", "timestamp": 1628010774418, "user_tz": -330, "elapsed": 20, "user": {"displayName": "<NAME>", "photoUrl": "", "userId": "13037694610922482904"}} outputId="0ec14153-f7ad-49e0-d391-27be64f03661"
pd.concat([vendors.dtypes.rename('dtype'), vendors.isnull().sum().rename('num_null')], axis=1)
# + id="Sr0sx-7Exw_v" colab={"base_uri": "https://localhost:8080/", "height": 317} executionInfo={"status": "ok", "timestamp": 1628010774420, "user_tz": -330, "elapsed": 19, "user": {"displayName": "<NAME>", "photoUrl": "", "userId": "13037694610922482904"}} outputId="ab413a1d-620e-40f0-d6cb-1c35ec6512d2"
vendors.describe()
# + id="ebPdclz9xw_w" colab={"base_uri": "https://localhost:8080/"} executionInfo={"status": "ok", "timestamp": 1628010777755, "user_tz": -330, "elapsed": 35, "user": {"displayName": "<NAME>", "photoUrl": "", "userId": "13037694610922482904"}} outputId="4c54899f-be79-447e-8b4b-fdf029d28123"
vendors['is_akeed_delivering'].value_counts(dropna=False)
# + id="5g5Q4xDSxw_w" colab={"base_uri": "https://localhost:8080/"} executionInfo={"status": "ok", "timestamp": 1628010777756, "user_tz": -330, "elapsed": 21, "user": {"displayName": "<NAME>", "photoUrl": "", "userId": "13037694610922482904"}} outputId="0b664c5b-c020-4213-8eab-11977845e208"
vendors['language'].value_counts(dropna=False)
# + id="GW-_FjH2xw_x" colab={"base_uri": "https://localhost:8080/"} executionInfo={"status": "ok", "timestamp": 1628010777757, "user_tz": -330, "elapsed": 17, "user": {"displayName": "<NAME>", "photoUrl": "", "userId": "13037694610922482904"}} outputId="96112977-f1e9-4a86-cbd8-7eae1d3897f8"
vendors['one_click_vendor'].value_counts(dropna=False)
# + id="NSMYiQmgxw_y" colab={"base_uri": "https://localhost:8080/", "height": 265} executionInfo={"status": "ok", "timestamp": 1628010777760, "user_tz": -330, "elapsed": 14, "user": {"displayName": "<NAME>", "photoUrl": "", "userId": "13037694610922482904"}} outputId="bdf2385e-eb2a-4a80-fba5-06d6eaba37a3"
vendors['delivery_charge'].hist();
# + id="d7uhLWhLxw_y" colab={"base_uri": "https://localhost:8080/", "height": 266} executionInfo={"status": "ok", "timestamp": 1628010779164, "user_tz": -330, "elapsed": 11, "user": {"displayName": "<NAME>", "photoUrl": "", "userId": "13037694610922482904"}} outputId="77ff2588-8546-4f78-a5a5-f5f604734f26"
vendors['serving_distance'].hist();
# + id="B-0D2rUExw_z" colab={"base_uri": "https://localhost:8080/", "height": 265} executionInfo={"status": "ok", "timestamp": 1628010780183, "user_tz": -330, "elapsed": 33, "user": {"displayName": "<NAME>", "photoUrl": "", "userId": "13037694610922482904"}} outputId="8ee759ef-06e5-4f8a-a65a-b86d93623fa2"
vendors['prepration_time'].hist();
# + id="VuLCcxBLxw_z" colab={"base_uri": "https://localhost:8080/", "height": 268} executionInfo={"status": "ok", "timestamp": 1628010780184, "user_tz": -330, "elapsed": 31, "user": {"displayName": "<NAME>", "photoUrl": "", "userId": "13037694610922482904"}} outputId="facec5f8-e1ac-4f0f-f4d1-2b41daa2fd0e"
vendors['discount_percentage'].hist();
# + id="qkLnuKqQxw_0" colab={"base_uri": "https://localhost:8080/"} executionInfo={"status": "ok", "timestamp": 1628010780186, "user_tz": -330, "elapsed": 25, "user": {"displayName": "<NAME>", "photoUrl": "", "userId": "13037694610922482904"}} outputId="fc298188-61fc-4ecf-8db4-fc8fafc33618"
vendors['rank'].value_counts(dropna=False)
# + id="fEHJbNOexw_0" colab={"base_uri": "https://localhost:8080/", "height": 265} executionInfo={"status": "ok", "timestamp": 1628010784386, "user_tz": -330, "elapsed": 851, "user": {"displayName": "<NAME>", "photoUrl": "", "userId": "13037694610922482904"}} outputId="3b3295a3-08a1-4ff1-9cfb-43ea96515db2"
vendors['vendor_rating'].hist();
# + id="ebFehzJ4xw_0" colab={"base_uri": "https://localhost:8080/"} executionInfo={"status": "ok", "timestamp": 1628010784387, "user_tz": -330, "elapsed": 10, "user": {"displayName": "<NAME>", "photoUrl": "", "userId": "13037694610922482904"}} outputId="5264ce41-2dd6-4221-bc26-2c74d043a3cc"
vendors['status'].value_counts(dropna=False)
# + id="lKo8wU65xw_1" colab={"base_uri": "https://localhost:8080/"} executionInfo={"status": "ok", "timestamp": 1628010785219, "user_tz": -330, "elapsed": 15, "user": {"displayName": "<NAME>", "photoUrl": "", "userId": "13037694610922482904"}} outputId="fb481187-fba3-414a-9d53-93353187ee9e"
vendors['verified'].value_counts(dropna=False)
# + id="2Le1DfgRxw_1" colab={"base_uri": "https://localhost:8080/"} executionInfo={"status": "ok", "timestamp": 1628010785221, "user_tz": -330, "elapsed": 13, "user": {"displayName": "<NAME>", "photoUrl": "", "userId": "13037694610922482904"}} outputId="cbaed13e-ba82-4b95-9d66-7122826513ca"
vendors[vendors['verified'] == 0]['status'].value_counts(dropna=False)
# + id="WZ8DfDH4xw_1" colab={"base_uri": "https://localhost:8080/"} executionInfo={"status": "ok", "timestamp": 1628010786076, "user_tz": -330, "elapsed": 8, "user": {"displayName": "<NAME>", "photoUrl": "", "userId": "13037694610922482904"}} outputId="99f7c4f8-3cd8-4239-b776-96a413ec398b"
vendors['device_type'].value_counts(dropna=False)
# + id="g2Laxjehxw_2" colab={"base_uri": "https://localhost:8080/", "height": 148} executionInfo={"status": "ok", "timestamp": 1628010786077, "user_tz": -330, "elapsed": 6, "user": {"displayName": "<NAME>", "photoUrl": "", "userId": "13037694610922482904"}} outputId="85ee3335-7447-40ed-f710-2bd4c3430676"
vendors[vendors.device_type == 1] # Is a location outlier...?
# + [markdown] id="B_ixru8Yxw_2"
# ### Cleaning Vendor Categories
# + id="PXRyEnjJxw_2" colab={"base_uri": "https://localhost:8080/"} executionInfo={"status": "ok", "timestamp": 1628010787746, "user_tz": -330, "elapsed": 18, "user": {"displayName": "<NAME>", "photoUrl": "", "userId": "13037694610922482904"}} outputId="b3986f0e-45b4-4afd-f3ae-5ef46c87d839"
vendors['vendor_category_en'].value_counts(dropna=False)
# + id="469HaU--xw_2" colab={"base_uri": "https://localhost:8080/"} executionInfo={"status": "ok", "timestamp": 1628010787747, "user_tz": -330, "elapsed": 14, "user": {"displayName": "<NAME>", "photoUrl": "", "userId": "13037694610922482904"}} outputId="e28ecec9-65aa-4a36-e5e8-a3b8b3f4f291"
vendors['vendor_category_id'].value_counts(dropna=False)
# + id="ntmtl_D7xw_3" colab={"base_uri": "https://localhost:8080/"} executionInfo={"status": "ok", "timestamp": 1628010787748, "user_tz": -330, "elapsed": 9, "user": {"displayName": "<NAME>", "photoUrl": "", "userId": "13037694610922482904"}} outputId="6df8672a-109c-45b3-951f-b76b0d12c6f2"
vendors[(vendors['vendor_category_en'] == "Sweets & Bakes") & (vendors['vendor_category_id'] == 3.0)].shape[0]
# + id="cwjKSc6Axw_3" colab={"base_uri": "https://localhost:8080/", "height": 165} executionInfo={"status": "ok", "timestamp": 1628010789928, "user_tz": -330, "elapsed": 11, "user": {"displayName": "<NAME>", "photoUrl": "", "userId": "13037694610922482904"}} outputId="c1b997f7-fe97-4e16-f959-2ab1033842a0"
vendors[(vendors['vendor_category_en'] == "Sweets & Bakes") & (vendors['vendor_category_id'] == 2.0)]
# + id="QXJFCFWBxw_3" executionInfo={"status": "ok", "timestamp": 1628010789929, "user_tz": -330, "elapsed": 9, "user": {"displayName": "<NAME>", "photoUrl": "", "userId": "13037694610922482904"}}
# Fix incorrect vendor_category_id.
# NOTE(review): 28 is the post-`integer_encoding` vendor index — presumably
# the "Sweets & Bakes" vendor found above carrying category_id 2.0; confirm
# this row if the encoding mapping ever changes.
vendors.loc[28, 'vendor_category_id'] = 3.0
# + [markdown] id="TK99vLG5xw_3"
# ### Cleaning Vendor Tags
# + id="SYpExicnxw_4" colab={"base_uri": "https://localhost:8080/"} executionInfo={"status": "ok", "timestamp": 1628010794528, "user_tz": -330, "elapsed": 7, "user": {"displayName": "<NAME>", "photoUrl": "", "userId": "13037694610922482904"}} outputId="42678b71-61a9-43ac-bb4d-3a5b7c5c2da6"
vendors['primary_tags'].value_counts(dropna=False)
# + id="caDUQdoqxw_4" colab={"base_uri": "https://localhost:8080/"} executionInfo={"status": "ok", "timestamp": 1628010796093, "user_tz": -330, "elapsed": 7, "user": {"displayName": "<NAME>", "photoUrl": "", "userId": "13037694610922482904"}} outputId="82a5007c-c3e7-4fa6-bc91-691c62572671"
# Replace missing primary_tags with a sentinel entry ("-1"), then extract the
# integer sitting between the 3rd and 4th double quote of the JSON-ish string
# '{"primary_tags":"<n>"}'.
filled = vendors['primary_tags'].fillna("{\"primary_tags\":\"-1\"}")
vendors['primary_tags'] = filled.apply(lambda raw: int(str(raw).split("\"")[3]))
vendors['primary_tags'].value_counts(dropna=False).head(5)
# + id="LNwA4tlexw_4" colab={"base_uri": "https://localhost:8080/", "height": 580} executionInfo={"status": "ok", "timestamp": 1628010796766, "user_tz": -330, "elapsed": 23, "user": {"displayName": "<NAME>", "photoUrl": "", "userId": "13037694610922482904"}} outputId="cf88f49e-fe23-4544-89b0-ac9b0b04f62c"
vendors[vendors['primary_tags'] == 134]
# + id="zRKPK4pDxw_7" colab={"base_uri": "https://localhost:8080/"} executionInfo={"status": "ok", "timestamp": 1628010798702, "user_tz": -330, "elapsed": 14, "user": {"displayName": "<NAME>", "photoUrl": "", "userId": "13037694610922482904"}} outputId="88d80a91-e465-4657-fea9-1bffa2a3a0b4"
# Replace missing vendor_tag with the sentinel "-1", then turn the
# comma-separated string into a list of ints in a single pass.
vendors['vendor_tag'] = vendors['vendor_tag'].fillna(str(-1)).apply(
    lambda csv_str: [int(tok) for tok in csv_str.split(",")]
)
vendors['vendor_tag'].head(10)
# + id="NuQFiZsExw_8" colab={"base_uri": "https://localhost:8080/"} executionInfo={"status": "ok", "timestamp": 1628010799539, "user_tz": -330, "elapsed": 20, "user": {"displayName": "<NAME>", "photoUrl": "", "userId": "13037694610922482904"}} outputId="d3efd459-a74d-49f7-a685-9640d47c85ad"
# Collect the distinct vendor tags and remap them onto the contiguous range
# 0..len(vendor_tags)-1, preserving ascending tag order.
vendor_tags = sorted(int(t) for t in vendors['vendor_tag'].explode().unique())
vendor_map = {tag: pos for pos, tag in enumerate(vendor_tags)}
vendors['vendor_tag'] = vendors['vendor_tag'].apply(lambda tags: [vendor_map[tag] for tag in tags])
vendors['vendor_tag'].head(10)
# + id="29J1YKvAxw_8" colab={"base_uri": "https://localhost:8080/"} executionInfo={"status": "ok", "timestamp": 1628010799540, "user_tz": -330, "elapsed": 14, "user": {"displayName": "<NAME>", "photoUrl": "", "userId": "13037694610922482904"}} outputId="94810dea-dc0d-4216-a65e-f2b55932d2d6"
# Combine status and verified into a single indicator: nonzero only when the
# vendor is both active and verified.
vendors['status_and_verified'] = vendors['status'].mul(vendors['verified'])
vendors['status_and_verified'].value_counts(dropna=False)
# + [markdown] id="--nZra3lxw_8"
# ### Creating Some Order-Based Features
# + id="RgrE-VQhxw_8" executionInfo={"status": "ok", "timestamp": 1628010801621, "user_tz": -330, "elapsed": 840, "user": {"displayName": "<NAME>", "photoUrl": "", "userId": "13037694610922482904"}}
# Add num_orders, amt_sales, and avg_sale as new columns in vendor table.
# left_on='id' resolves against the frame's index level name here ('id' was
# set as the vendors index earlier); vendors with no train orders get NaN.
orders_vendor_grp = train_orders.groupby(by=['vendor_id'])
orders_per_vendor = orders_vendor_grp['akeed_order_id'].count().rename('num_orders')
grand_total_per_vendor = orders_vendor_grp['grand_total'].sum().rename('amt_sales')
vendors = vendors.merge(orders_per_vendor, how='left', left_on='id', right_index=True)
vendors = vendors.merge(grand_total_per_vendor, how='left', left_on='id', right_index=True)
vendors['avg_sale'] = vendors['amt_sales'] / vendors['num_orders']
# + id="98Ht3BHBxw_8" colab={"base_uri": "https://localhost:8080/", "height": 268} executionInfo={"status": "ok", "timestamp": 1628010801627, "user_tz": -330, "elapsed": 12, "user": {"displayName": "<NAME>", "photoUrl": "", "userId": "13037694610922482904"}} outputId="1562a4c7-f720-4782-9c20-2dc1e4a4ae1d"
# Triple-log transform to compress the heavy right tail of order counts;
# vectorized np.log applied three times gives the same elementwise result as
# the chained .apply(log) calls. (num_orders <= 1 or NaN yields -inf/NaN.)
vendors['num_orders_log3'] = log(log(log(vendors['num_orders'])))
vendors['num_orders_log3'].hist();
# + id="sef9t0Ygxw_9" colab={"base_uri": "https://localhost:8080/", "height": 266} executionInfo={"status": "ok", "timestamp": 1628010803700, "user_tz": -330, "elapsed": 816, "user": {"displayName": "<NAME>", "photoUrl": "", "userId": "13037694610922482904"}} outputId="028b2c22-4811-4832-e58a-1c34ab284a5e"
# Same triple-log compression for total sales, vectorized instead of chained
# .apply(log) calls — identical elementwise values.
vendors['amt_sales_log3'] = log(log(log(vendors['amt_sales'])))
vendors['amt_sales_log3'].hist();
# + id="nPQ2vtjTxw_9" colab={"base_uri": "https://localhost:8080/", "height": 266} executionInfo={"status": "ok", "timestamp": 1628010803702, "user_tz": -330, "elapsed": 18, "user": {"displayName": "<NAME>", "photoUrl": "", "userId": "13037694610922482904"}} outputId="a824d8ca-ac0d-4f72-8d01-916b7dd7695d"
# Single log suffices for the average sale distribution; vectorized form of
# the original .apply(log).
vendors['avg_sale_log'] = log(vendors['avg_sale'])
vendors['avg_sale_log'].hist();
# + [markdown] id="hPOYG5UXxw_9"
# ### Transforming Location Outliers
# + id="eCdrZFt9xw_-" colab={"base_uri": "https://localhost:8080/", "height": 196} executionInfo={"status": "ok", "timestamp": 1628010806050, "user_tz": -330, "elapsed": 908, "user": {"displayName": "<NAME>", "photoUrl": "", "userId": "13037694610922482904"}} outputId="bdc127ed-38e7-45b3-97fa-b85bdbed55e6"
# Examine the location outliers
vendors[vendors['latitude'] > 3]
# + id="UyKIIQrdxw_-" colab={"base_uri": "https://localhost:8080/", "height": 148} executionInfo={"status": "ok", "timestamp": 1628010806052, "user_tz": -330, "elapsed": 17, "user": {"displayName": "<NAME>", "photoUrl": "", "userId": "13037694610922482904"}} outputId="3871a675-84de-4756-f17e-4e67bf3dee03"
vendors[vendors['longitude'] > 3]
# + id="owWOl-P-xw_-" executionInfo={"status": "ok", "timestamp": 1628010806054, "user_tz": -330, "elapsed": 17, "user": {"displayName": "<NAME>", "photoUrl": "", "userId": "13037694610922482904"}}
# Pull the orders of the two location-outlier vendors (original ids 231, 907)
# and join each order to the ordering customer's registered location.
# Note the column-name mismatch: orders use upper-case LOCATION_NUMBER,
# train_locations uses lower-case location_number.
orders_231 = train_orders[train_orders['vendor_id'] == 231]
orders_907 = train_orders[train_orders['vendor_id'] == 907]
orders_231 = orders_231.merge(train_locations, how='left', left_on=['customer_id', 'LOCATION_NUMBER'], right_on=['customer_id', 'location_number'])
orders_907 = orders_907.merge(train_locations, how='left', left_on=['customer_id', 'LOCATION_NUMBER'], right_on=['customer_id', 'location_number'])
# + colab={"base_uri": "https://localhost:8080/", "height": 694} id="WM5iBSGnAgHt" executionInfo={"status": "ok", "timestamp": 1628010871299, "user_tz": -330, "elapsed": 835, "user": {"displayName": "<NAME>", "photoUrl": "", "userId": "13037694610922482904"}} outputId="709cd5a6-177c-4374-9098-42601fe22ed0"
vendors.id
# + id="ajAREqjaxw_-" colab={"base_uri": "https://localhost:8080/"} executionInfo={"status": "ok", "timestamp": 1628011278563, "user_tz": -330, "elapsed": 944, "user": {"displayName": "<NAME>", "photoUrl": "", "userId": "13037694610922482904"}} outputId="a45fbcbd-d25e-45f7-ea2e-a35297df057a"
# Compare each outlier vendor's recorded coordinates against the median of
# its customers' locations. v_id_map['id'] translates an original vendor id
# (231/907) to the encoded index produced by integer_encoding — presumably a
# dict-of-dicts keyed by column name; confirm against utils.integer_encoding.
lat231 = vendors[vendors.index == v_id_map['id'][231]].latitude.item()
long231 = vendors[vendors.index == v_id_map['id'][231]].longitude.item()
lat907 = vendors[vendors.index == v_id_map['id'][907]].latitude.item()
long907 = vendors[vendors.index == v_id_map['id'][907]].longitude.item()
print(f'231 actual: \tLat = {lat231:.3f}, Long = {long231:.3f}')
print(f'231 estimate: \tLat = {orders_231.latitude.median():.3f}, Long = {orders_231.longitude.median():.3f}')
print(f'907 actual: \tLat = {lat907:.3f}, Long = {long907:.3f}')
print(f'907 estimate: \tLat = {orders_907.latitude.median():.3f}, Long = {orders_907.longitude.median():.3f}')
# + id="cLBjaesWxw_-" colab={"base_uri": "https://localhost:8080/", "height": 317} executionInfo={"status": "ok", "timestamp": 1628010810775, "user_tz": -330, "elapsed": 10, "user": {"displayName": "<NAME>", "photoUrl": "", "userId": "13037694610922482904"}} outputId="b4f9d496-2e21-40fd-f9b6-5f4cfaea062d"
orders_907.describe()
# + id="F4_qdkPBxw__" colab={"base_uri": "https://localhost:8080/", "height": 317} executionInfo={"status": "ok", "timestamp": 1628009621680, "user_tz": -330, "elapsed": 731, "user": {"displayName": "<NAME>", "photoUrl": "", "userId": "13037694610922482904"}} outputId="edb674e5-211b-4962-d4a9-a4bc12022759"
orders_231.describe()
# + id="D1kTU7Aqxw__" colab={"base_uri": "https://localhost:8080/", "height": 204} executionInfo={"status": "ok", "timestamp": 1628009623617, "user_tz": -330, "elapsed": 10, "user": {"displayName": "<NAME>", "photoUrl": "", "userId": "13037694610922482904"}} outputId="691f7a87-d4f4-4353-946e-fdb5fe31ce8e"
train_locations.head(5)
# + id="hDikXnA6xw__" colab={"base_uri": "https://localhost:8080/", "height": 297} executionInfo={"status": "ok", "timestamp": 1628009635573, "user_tz": -330, "elapsed": 782, "user": {"displayName": "<NAME>", "photoUrl": "", "userId": "13037694610922482904"}} outputId="695a4643-ebe7-4593-fff2-7d3502136c3b"
pd.concat([train_locations.dtypes.rename('dtype'), train_locations.isnull().sum().rename('num_null')], axis=1)
# + id="qfqQQfcpxw__"
# Aggregate # orders, $ sales, and avg spent by customer location
# (customers can have multiple locations registered to themselves).
# Orders use upper-case LOCATION_NUMBER; train_locations uses lower-case
# location_number — the merge keys bridge the two spellings.
orders_location_grp = train_orders.groupby(['customer_id', 'LOCATION_NUMBER'])
orders_per_location = orders_location_grp['akeed_order_id'].count().rename('num_orders') # multi index: [customer_id, LOCATION_NUMBER]
sales_per_location = orders_location_grp['grand_total'].sum().rename('amt_spent') # multi index: [customer_id, LOCATION_NUMBER]
train_locations = train_locations.merge(sales_per_location, how='left', left_on=['customer_id', 'location_number'], right_index=True)
train_locations = train_locations.merge(orders_per_location, how='left', left_on=['customer_id', 'location_number'], right_index=True)
train_locations['avg_spend'] = train_locations['amt_spent'] / train_locations['num_orders']
# + id="e6ICPnbbxw__"
# Filter locations which have not been ordered from.
# FIX: after the left merge above, never-ordered locations carry NaN (not 0)
# in num_orders, and `NaN != 0` evaluates True — so the old `!= 0` filter
# kept exactly the rows it was meant to drop. `> 0` is False for both NaN
# and 0, matching the customer filter used earlier.
train_locations = train_locations[train_locations['num_orders'] > 0]
# + id="I0I-aDQaxxAA" colab={"base_uri": "https://localhost:8080/", "height": 317} executionInfo={"status": "ok", "timestamp": 1628009712973, "user_tz": -330, "elapsed": 784, "user": {"displayName": "<NAME>", "photoUrl": "", "userId": "13037694610922482904"}} outputId="9743c964-1291-4e4b-c976-3382e5586fb1"
train_locations.describe()
# + id="qv2BRRizxxAA" colab={"base_uri": "https://localhost:8080/", "height": 247} executionInfo={"status": "ok", "timestamp": 1628009715177, "user_tz": -330, "elapsed": 12, "user": {"displayName": "<NAME>", "photoUrl": "", "userId": "13037694610922482904"}} outputId="461d2ed4-6dab-45fd-9c7b-51a21baab852"
# Inspect locations whose total spend is 0 despite having orders (free orders).
train_locations[train_locations['amt_spent'] == 0].merge(train_orders, left_on=['customer_id', 'location_number'], right_on=['customer_id', 'LOCATION_NUMBER']).head(3) # Free orders
# + [markdown] id="WDiJNr4PxxAA"
# **Note from VariableDefinitions.txt:**
#
# "Not true latitude and longitude - locations have been masked, but nearby locations remain nearby in the new reference frame and can thus be used for clustering. However, not all locations are useful due to GPS errors and missing data - you may want to treat outliers separately."
#
# This will make our life difficult because we have no way of knowing how the location data has been transformed, thus it's not really clear how we should define "outlier".
#
# Almost all vendors are clustered very close to each other, but we will soon see that about 1/3rd of customer locations are "far" from this cluster.
#
# + id="7GYW6qgxxxAA" colab={"base_uri": "https://localhost:8080/", "height": 893} executionInfo={"status": "ok", "timestamp": 1628009726308, "user_tz": -330, "elapsed": 1735, "user": {"displayName": "<NAME>", "photoUrl": "", "userId": "13037694610922482904"}} outputId="f5c13489-88eb-4c5b-efb1-28320b33d393"
# Map out customer locations and vendor locations
# Vendor stars are sized by popularity (num_orders) and coloured by avg sale.
plt.figure(figsize=(15, 15))
plt.scatter(x=train_locations.longitude, y=train_locations.latitude, label='Customers', marker='s', alpha=0.2)
plt.scatter(x=vendors.longitude, y=vendors.latitude, label='Vendors', marker='*', alpha=0.5, s=vendors['num_orders']/5, c=vendors['avg_sale'], cmap='plasma')
plt.xlabel('Longitude')
plt.ylabel('Latitude')
plt.legend(loc='lower right')
plt.colorbar(label='$ Avg Sale')
plt.title('Customer + Vendor Locations')
plt.show()
# Stars:
# Size: Unpopular <----------> Popular
# Heat: Cheap <----------> Expensive
# + id="WXqaK7nPxxAA" colab={"base_uri": "https://localhost:8080/", "height": 893} executionInfo={"status": "ok", "timestamp": 1628009740616, "user_tz": -330, "elapsed": 2606, "user": {"displayName": "<NAME>", "photoUrl": "", "userId": "13037694610922482904"}} outputId="7bdbbdc0-30af-40dc-e6bd-4d2d3b4abf3e"
# Outliers in location are probably a mistake (GPS error?)
# Zoom in on area close with most activity
# Marker Size = # Orders
# Color = $ Grand Total
# Keep only points whose (masked) longitude lies in (-5, 5); latitude is not
# filtered here — the zoom is on longitude only.
lo = -5
hi = 5
filt1 = (lo < train_locations['longitude']) & (train_locations['longitude'] < hi)
filt2 = (lo < vendors['longitude']) & (vendors['longitude'] < hi)
train_locations_cut = train_locations[filt1]
vendors_cut = vendors[filt2]
plt.figure(figsize=(15, 15))
plt.scatter(x=train_locations_cut.longitude, y=train_locations_cut.latitude, label='Customers', marker='s', alpha=0.1)
plt.scatter(x=vendors_cut.longitude, y=vendors_cut.latitude, label='Vendors', marker='*', alpha=0.5, s=vendors_cut['num_orders']/7, c=vendors_cut['avg_sale'], cmap='plasma')
plt.xlabel('Longitude')
plt.ylabel('Latitude')
plt.legend(loc='lower right')
plt.colorbar(label='$ Avg Sale')
plt.title('Customer + Vendor Locations (Zoomed)')
plt.show()
# Stars:
# Size: Unpopular <----------> Popular
# Heat: Cheap <----------> Expensive
# + id="7iHRPRKpxxAB" colab={"base_uri": "https://localhost:8080/", "height": 297} executionInfo={"status": "ok", "timestamp": 1628009741516, "user_tz": -330, "elapsed": 35, "user": {"displayName": "<NAME>", "photoUrl": "", "userId": "13037694610922482904"}} outputId="0c7cc5bd-4827-4444-a3b0-a93218300bae"
# Define outliers for customer locations
# There are a lot of customers that are outside the "vendor bubble"
# vendor bubble == customers that aren't outliers
# Boolean masks: True where a point falls outside the lat/long bounding box.
lat_lo, lat_hi = -25, 25
long_lo, long_hi = -5, 5
c_outliers = (train_locations['latitude'] < lat_lo) | (train_locations['latitude'] > lat_hi) | (train_locations['longitude'] < long_lo) | (train_locations['longitude'] > long_hi)
v_outliers = (vendors['latitude'] < lat_lo) | (vendors['latitude'] > lat_hi) | (vendors['longitude'] < long_lo) | (vendors['longitude'] > long_hi)
train_locations[c_outliers].describe()
# + id="_l-rFnAExxAB" colab={"base_uri": "https://localhost:8080/", "height": 927} executionInfo={"status": "ok", "timestamp": 1628009765702, "user_tz": -330, "elapsed": 24218, "user": {"displayName": "<NAME>", "photoUrl": "", "userId": "13037694610922482904"}} outputId="f9387c6f-6c05-47a9-9f34-880c6a82e2fc"
# Want to transform outliers so that they are closer to vendors, but also stay in their clusters
# Project outliers onto ellipse around bubble
# Each outlier is normalised by its Euclidean magnitude and rescaled per axis,
# i.e. mapped onto an axis-aligned ellipse with semi-axes (lat_radius, long_radius);
# the direction from the origin is preserved, so nearby outliers stay nearby.
lat_radius = lat_hi
long_radius = long_hi
# Project customer outliers
for i in tqdm(train_locations[c_outliers].index):
    lat = train_locations.loc[i, 'latitude']
    long = train_locations.loc[i, 'longitude']
    mag = sqrt(lat**2 + long**2)  # assumes no outlier sits exactly at the origin (mag > 0)
    train_locations.loc[i, 'latitude'] = lat / mag * lat_radius
    train_locations.loc[i, 'longitude'] = long / mag * long_radius
# Project vendor outliers
for i in tqdm(vendors[v_outliers].index):
    lat = vendors.loc[i, 'latitude']
    long = vendors.loc[i, 'longitude']
    mag = sqrt(lat**2 + long**2)
    vendors.loc[i, 'latitude'] = lat / mag * lat_radius
    vendors.loc[i, 'longitude'] = long / mag * long_radius
# Re-plot the full map to verify the transformed outliers hug the ellipse.
plt.figure(figsize=(15, 15))
plt.scatter(x=train_locations.longitude, y=train_locations.latitude, label='Customers', marker='s', alpha=0.2)
plt.scatter(x=vendors.longitude, y=vendors.latitude, label='Vendors', marker='*', alpha=0.5, s=vendors['num_orders']/5, c=vendors['avg_sale'], cmap='plasma')
plt.xlabel('Longitude')
plt.ylabel('Latitude')
plt.legend(loc='lower right')
plt.colorbar(label='$ Avg Sale')
plt.title('Customer + Vendor Locations (Outliers Transformed)')
plt.show()
# + [markdown] id="BOXxbhnqxxAC"
# ### Drop Columns
# + id="UUFSJiWYxxAC"
# Throw away some columns
# Keep only the continuous + categorical features used downstream for encoding.
keep_continuous = ['latitude', 'longitude', 'serving_distance', 'prepration_time', 'vendor_rating', 'num_orders_log3', 'amt_sales_log3', 'avg_sale_log']
keep_categorical = ['vendor_category_id', 'delivery_charge', 'status', 'rank', 'primary_tags', 'vendor_tag']
keep_columns = keep_continuous + keep_categorical
vendors = vendors[keep_columns]
# + id="t-bbKDqAxxAC" colab={"base_uri": "https://localhost:8080/"} executionInfo={"status": "ok", "timestamp": 1628009765708, "user_tz": -330, "elapsed": 28, "user": {"displayName": "<NAME>", "photoUrl": "", "userId": "13037694610922482904"}} outputId="b2a2a09e-726e-4341-8905-22cf61212605"
# Null counts per kept vendor column.
vendors.isnull().sum()
# + [markdown] id="a67V-d7exxAC"
# ### Encode Categorical Features
# - `vendor_category_id` -> single binary variable -> remap to [0,1]
# - `delivery_charge` -> single binary variable -> remap to [0,1]
# - `status` -> single binary variable -> remap to [0,1]
# - `rank` -> single binary variable -> remap to [0,1]
# - `primary_tags` -> single multi-class variable -> remap to [0,C] -> one-hot encode in [0,C]
# - `vendor_tag` -> multiple binary variables -> one-to-many encode in [0,1]^C
# + id="7sPUz7MoxxAC" colab={"base_uri": "https://localhost:8080/", "height": 284} executionInfo={"status": "ok", "timestamp": 1628009768723, "user_tz": -330, "elapsed": 11, "user": {"displayName": "<NAME>", "photoUrl": "", "userId": "13037694610922482904"}} outputId="af0dab94-ef34-43ba-ef14-1adca018c65f"
vendors, _, _ = integer_encoding(df=vendors, cols=['vendor_category_id', 'delivery_charge', 'status', 'rank', 'primary_tags'], drop_old=True, monotone_mapping=True)
vendors = multiclass_list_encoding(df=vendors, cols=['primary_tags', 'vendor_tag'], drop_old=True)
vendors.head(5)
# + [markdown] id="2Istf2YHxxAD"
# ## Represent Customers
# + [markdown] id="Jbdt3eiG8dNt"
# ---
# + [markdown] id="Yycg4K9dxxAD"
# > Notes
# - Need to construct training data.
# - First construct sequences of orders from users or user locations
# - Group orders by `customer_id`
# - Sort each group chronologically, by created_at
# - Collect each group into a list of ints
# + [markdown] id="j8KFM7E38gCW"
# ---
# + id="LEdTVAuAxxAD"
# Sort orders by datetime
# Chronological order is required so per-customer order sequences are time-ordered.
train_orders['created_at'] = pd.to_datetime(train_orders['created_at'])
train_orders.sort_values(by=['created_at'], inplace=True)
# + id="hvxNCLIXxxAD" colab={"base_uri": "https://localhost:8080/"} executionInfo={"status": "ok", "timestamp": 1628009797378, "user_tz": -330, "elapsed": 7, "user": {"displayName": "<NAME>", "photoUrl": "", "userId": "13037694610922482904"}} outputId="99d28bc4-d96a-4b25-fa6f-a66a7e7b1710"
# Distribution of orders-per-customer (top 5 counts as fractions).
orders_grp = train_orders.groupby(by=['customer_id'])
orders_grp['vendor_id'].count().value_counts(normalize=True).head(5)
# + id="5vPZNSR2xxAE" colab={"base_uri": "https://localhost:8080/"} executionInfo={"status": "ok", "timestamp": 1628009799042, "user_tz": -330, "elapsed": 943, "user": {"displayName": "<NAME>", "photoUrl": "", "userId": "13037694610922482904"}} outputId="bd519c7f-d624-45bc-e91c-e7f96b4ec79c"
# Map vendor ids to range(0,num_vendors)
# v_id_map / v_inv_map hold the forward and inverse id mappings.
train_orders, v_id_map, v_inv_map = integer_encoding(df=train_orders, cols=['vendor_id'], drop_old=True, monotone_mapping=True)
# Group sequences by customer_id
train_sequences = get_sequences(df=train_orders, target='vendor_id', group_by=['customer_id'])
train_sequences.head(10)
# + id="l9l0nFq5xxAE"
# Represent customers as averages of the vendors they purchased from
# (pools the vendor feature rows indexed by each customer's order sequence).
train_customer_encoded = pool_encodings_from_sequences(sequences=train_sequences, pool_from=vendors)
# + id="bIC3lbt0xxAE" colab={"base_uri": "https://localhost:8080/", "height": 439} executionInfo={"status": "ok", "timestamp": 1628010128020, "user_tz": -330, "elapsed": 50, "user": {"displayName": "<NAME>", "photoUrl": "", "userId": "13037694610922482904"}} outputId="dfe6ef58-60cd-4476-bf7d-f343fd6ed6b6"
# Peek at the pooled customer representations.
train_customer_encoded.head(10)
# + id="M7gcaYnmxxAE" colab={"base_uri": "https://localhost:8080/"} executionInfo={"status": "ok", "timestamp": 1628010128022, "user_tz": -330, "elapsed": 47, "user": {"displayName": "<NAME>", "photoUrl": "", "userId": "13037694610922482904"}} outputId="c32bbc88-4237-436b-dd3b-31dec22e78cb"
# For vendor rows containing any NaN, count the NaNs per column.
vendors[vendors.isna().sum(axis=1) > 0].isna().sum()
| _docs/nbs/reco-tut-arr-02-eda-part-2.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# language: python
# name: python3
# ---
import numpy as np
import pandas as pd
import itertools
from sklearn.model_selection import train_test_split
from sklearn.feature_extraction.text import TfidfVectorizer
from sklearn.linear_model import PassiveAggressiveClassifier
from sklearn.metrics import accuracy_score, confusion_matrix
# Load the labelled news dataset (expects columns including 'text' and 'label').
df=pd.read_csv("news.csv")
df.shape
df.head()
df.tail()
#getting labels
labels=df.label
labels.head()
# SPLITTING THE DATA FOR TESTING AND TRAINING PHASE
# 80/20 split; random_state fixed for reproducibility.
x_train,x_test,y_train,y_test=train_test_split(df['text'], labels, test_size=0.2, random_state=7)
# Initialize a TfidfVectorizer with stop words from the English language and a maximum document frequency of 0.7 (terms with a higher document frequency will be discarded). Stop words are the most common words in a language that are to be filtered out before processing the natural language data. And a TfidfVectorizer turns a collection of raw documents into a matrix of TF-IDF features.
#
# Now, fit and transform the vectorizer on the train set, and transform the vectorizer on the test set.
# +
#Initialize a TfidfVectorizer
tfid_vectorizer=TfidfVectorizer(stop_words="english",max_df=0.7)
# Fit and transform train set, transform test set
# fit_transform learns the vocabulary/IDF weights from the train set only;
# transform reuses them on the test set (no leakage).
tfid_train=tfid_vectorizer.fit_transform(x_train)
tfid_test=tfid_vectorizer.transform(x_test)
# -
# fit_transform learns the vectorizer's statistics from the data and applies them;
# transform only applies the already-learned statistics.
# Passive Aggressive algorithms are online learning algorithms. Such an algorithm remains passive for a correct classification outcome, and turns aggressive in the event of a miscalculation, updating and adjusting. Unlike most other algorithms, it does not converge. Its purpose is to make updates that correct the loss, causing very little change in the norm of the weight vector.
# **we will initialize a PassiveAggressiveClassifier.
# We will fit this on tfidf_train and y_train.
# Then, we will predict on the test set from the TfidfVectorizer and calculate the accuracy with accuracy_score() from sklearn.metrics.
#
#
# Initialize a PassiveAggressiveClassifier
classifier=PassiveAggressiveClassifier(max_iter=50)
classifier.fit(tfid_train,y_train)
#Predict on the test set and calculate accuracy
pred_y=classifier.predict(tfid_test)
score=accuracy_score(y_test,pred_y)
print(f'Accuracy: {round(score*100,2)}%')
print(pred_y)
print(y_test)
# ***** Confusion matrix to gain insight into the number of false and true negatives and positives.
# Row/column order follows labels=["FAKE","REAL"].
confusion_matrix(y_test,pred_y,labels=["FAKE","REAL"])
# So with this model, we have 589 true positives, 588 true negatives, 49 false positives, and 41 false negatives.
| fknewsdetection.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# language: python
# name: python3
# ---
import json as js
from tqdm import tqdm
from graphviz import Digraph
from IPython.display import HTML, SVG, display
# Load the scraped Mathematics Genealogy records (list of person dicts).
with open("./math_gen2.json", "r") as file:
    math_gen = js.load(file)
len(math_gen)
math_gen[0]
math_gen[1]
# Get unis
# "deg_uni_year" looks like "<degree> <university words...> <year>";
# everything between the first and last token is taken as the university name.
for person in tqdm(math_gen):
    deg_uni_year_split = person["deg_uni_year"].split(" ")
    person["uni"] = " ".join(deg_uni_year_split[1:-1])
# Get unis of students
def find_in_math_gen(idnum):
    """Return all records in ``math_gen`` whose ``id`` equals ``idnum``.

    Bug fix: the original body was ``return [math]``, which raises
    ``NameError`` when called (``math`` is never defined).  This version
    performs the lookup that the loop below otherwise does inline, so the
    helper can be reused.  Returns a (possibly empty) list of person dicts.
    """
    return [p for p in math_gen if p["id"] == idnum]
# Track students we cannot resolve to a unique math_gen record.
not_found = []
multiple_found = []
for person in tqdm(math_gen):
    for student in person["students"]:
        # try to find the student in math gen
        # The trailing "=..." part of the student's link is its genealogy id.
        # NOTE(review): linear scan over math_gen per student is O(n*m); a
        # dict keyed by id would make this O(1) per lookup.
        student_math_gens = [p for p in math_gen if p["id"] == student["link"].split("=")[-1]]
        if len(student_math_gens) == 0:
            not_found.append(student)
        elif len(student_math_gens) > 1:
            multiple_found.append(student)
        else:
            # Unique match: copy the student's university onto the student dict.
            student["uni"] = student_math_gens[0]["uni"]
len(not_found)
len(multiple_found)
math_gen[1]
# Get advisor uni to student uni numbers
# Builds weighted directed edges {from: advisor_uni, to: student_uni, num: count}.
uni_to_uni = []
uni_to_uni_mult_found = []
for person in tqdm(math_gen):
    # get all unis of students
    # set() so each (advisor_uni, student_uni) pair is counted once per advisor.
    student_unis = set([s["uni"] for s in person["students"] if "uni" in s.keys()])
    for student_uni in student_unis:
        # try to find uni pairing
        unis = [u for u in uni_to_uni if u["from"] == person["uni"] and u["to"] == student_uni]
        if len(unis) == 0:
            uni_to_uni.append({"from": person["uni"], "to": student_uni, "num": 1})
        elif len(unis) == 1:
            unis[0]["num"] += 1
        else:
            # Should be impossible (pairs are inserted at most once); kept for sanity.
            uni_to_uni_mult_found.append([person, student_uni])
len(uni_to_uni_mult_found)
uni_to_uni.sort(key = lambda x: x["num"], reverse = True)
uni_to_uni[:10]
len(uni_to_uni)
# get all unis
unis = set([p["uni"] for p in math_gen])
# make graph viz
# Render only the 50 heaviest advisor->student university edges.
dot = Digraph(engine = "neato")
dot.graph_attr["rankdir"] = "LR"
# +
# for uni in unis:
#     dot.node(uni)
# -
for edge in uni_to_uni[:50]:
    dot.edge(edge["from"], edge["to"])
dot.format = 'svg'
# Shrink the rendered SVG to half size inside the notebook.
style = "<style>svg{width:50% !important;height:50% !important;}</style>"
HTML(style)
dot
| math_genealogy/math_gen_uni_graph.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# language: python
# name: python3
# ---
# + [markdown] colab_type="text" id="view-in-github"
# <a href="https://colab.research.google.com/github/zsteve/wtf/blob/main/examples/example.ipynb" target="_parent"><img src="https://colab.research.google.com/assets/colab-badge.svg" alt="Open In Colab"/></a>
# -
# + colab={"base_uri": "https://localhost:8080/"} id="plDsg87GT-n0" outputId="d7305c80-3b1f-463a-8b49-02c486e1d0e2"
# setup git
# !git clone https://github.com/zsteve/wtf
# !pip install tensorly
# !pip install pot
# !pip install gwot
# + id="12dlvsvK-l-8"
# Base figure-cell size (inches) and output directory for manuscript figures.
PLT_CELL = 2.5
figs_path = "/home/zsteve/wtf/manuscript/figs/"
# + id="AOtU3KTFUHqD"
# Make the local wtf checkout importable (not an installed package).
import sys
sys.path.insert(0, "/home/zsteve/wtf/src")
import wtf
# + id="Zs2q-OEuTy3f"
import numpy as np
import copy
import tensorly as tl
from tensorly import tenalg, decomposition, cp_tensor
from tensorly.contrib.sparse import tensor as sptensor
import ot
import torch
import sklearn
from sklearn import datasets
import matplotlib.pyplot as plt
# Tensorly on the PyTorch backend, double precision everywhere; prefer GPU.
tl.set_backend("pytorch")
torch.set_default_tensor_type(torch.DoubleTensor)
device = torch.device("cuda:0" if torch.cuda.is_available() else "cpu")
tl_dtype = tl.float64
# +
# import matplotlib
# font = {'family' : 'normal',
# 'size' : 12}
# matplotlib.rc('font', **font)
# + id="JpOgQS6gTy3p"
def gaussian(x, h):
    """Unnormalised Gaussian bump exp(-((x/h)**2)); h sets the width."""
    scaled = x / h
    return np.exp(-scaled ** 2)
def gen_img(x0, x1, h = 0.25, r0 = 0.1, r1 = 0.1):
    """Draw a random pair of 1-D Gaussian factors on a 32-point grid over [-1, 1].

    The bump centres x0 and x1 are jittered by Gaussian noise with scales
    r0 and r1 respectively; h is the common bump width.  Returns (alpha, beta).
    """
    grid = np.linspace(-1., 1., 32)
    # The two np.random.normal() draws happen in this exact order so the
    # global RNG stream matches the original implementation.
    jitter_a = r0 * np.random.normal()
    alpha = gaussian(grid - x0 + jitter_a, h)
    jitter_b = r1 * np.random.normal()
    beta = gaussian(grid - x1 + jitter_b, h)
    return alpha, beta
# Image grid size and number of synthetic samples.
sizex, sizey = (32, 32)
N = 100
# Three populations of factor pairs with different centres/widths/jitters.
factors1 = [gen_img(0, 0, h = 0.3, r0 = 0.15, r1 = 0.05) for x in range(N)]
factors2 = [gen_img(0.5, 0.5, h = 0.2, r0 = 0.15,r1 = 0.1) for x in range(N)]
factors3 = [gen_img(-0.5, -0.5, h = 0.15, r0 = 0.15, r1 = 0.15) for x in range(N)]
def outer(x):
    """Outer product of the first two components of a factor pair x."""
    first, second = x[0], x[1]
    return np.outer(first, second)
# N normalised 32x32 images, each the sum of one rank-1 term per population.
X = tl.tensor([wtf.normalise(outer(factors1[i]) + outer(factors2[i]) + outer(factors3[i])) for i in range(N)], dtype = tl_dtype)
# + colab={"base_uri": "https://localhost:8080/", "height": 285} id="ia85mMVETy3q" outputId="ed654405-f2ae-4cb7-9a6c-5420c492231b"
# Visualise the mode-0 unfolding (one row per sample image).
plt.imshow(tl.unfold(X, 0))
plt.axis("auto")
plt.colorbar()
# + id="cbgcinqwTy3r"
# Squared-Euclidean ground cost between all pixel coordinates, normalised by
# its mean so the OT scale is comparable across runs.
xx, yy = np.meshgrid(range(sizex), range(sizey))
coords = np.vstack((xx.reshape(1, sizex*sizey), yy.reshape(1, sizex*sizey))).T
C_full = ot.utils.euclidean_distances(coords, coords, squared=True)
C_full = torch.Tensor(C_full/C_full.mean()).to(device)
# +
# save SWIFT version
# idx = np.array(np.where(X > 0)).T
# vals = np.ones(idx.shape[0])
# vals = vals/vals.sum()
#
# out = np.hstack([idx, vals.reshape(-1, 1)])
# out[:, 0] += 1
#
# C = ot.utils.euclidean_distances(xx, xx, squared = True)
# C = C/C.mean()
# np.savetxt("X_slice.csv", out)
# np.savetxt("C_slice.csv", C)
# np.savetxt("C_images.csv", C_images)
# np.save("X_slice.npy", X)
# + id="c4ktiFfvTy3s"
# now pick multilinear rank
d = 3
r = [3, ]*3
# Superdiagonal core => the Tucker model degenerates to a rank-3 CP model.
S = tl.zeros(r).to(device)
for i in range(r[0]):
    S[i, i, i] = 1
# initialise using SVD
# n_iter_max=0: take only the SVD-based initial factors, no NNCP iterations.
factor_cp = tl.decomposition.non_negative_parafac(X, rank = r[0], n_iter_max = 0, init = "svd", random_state = 0)
A = copy.deepcopy(factor_cp.factors)
A = [a.to(device) for a in A]
X0 = X.to(device)
###
# Per-block-iteration optimisation hyperparameters (one row per iteration).
n_iter = 5
lr = np.ones(n_iter)*1
lamda = np.array([np.ones(3), ]*n_iter)*25
optim_modes = [0, ]
rho = np.array([np.array([0.01, 0.01, 0.01]), ]*n_iter)
eps = np.array([np.ones(3), ]*n_iter)*0.01
# -
# Reload the local wtf module to pick up source edits without restarting.
import importlib
importlib.reload(wtf)
# Normalise factors: mode-0 rows sum to 1; modes 1 and 2 columns sum to 1.
A[0] = (A[0].T/A[0].sum(1)).T
A[1] = A[1]/A[1].sum(0)
A[2] = A[2]/A[2].sum(0)
# + colab={"base_uri": "https://localhost:8080/"} id="eUkousDNTy3t" outputId="62d24c8c-8bc6-4b51-869e-797e4961418b"
# Block-coordinate optimisation: in each outer iteration, solve for one factor
# matrix at a time (modes 0..2) while holding the others fixed, recording the
# dual objectives per mode.
dual_objs = [[], [], [], ]
max_iter, print_inter, check_iter, tol, unbal = (100, 10, 10, 1e-2, True)
mode = "lbfgs"
for i in range(n_iter):
    print("Block iteration ", i)
    print("Mode 0")
    # norm="row" for mode 0 (sample weights), norm="col" for the spatial modes.
    m0 = wtf.FactorsModel(X0, 0, [C_full, ], S, A, rho[i, :], eps[i, :], lamda[i, :], ot_mode = "slice", U_init = None, device = device, unbal = False, norm = "row")
    dual_objs[0] += [wtf.solve(m0, lr = lr[i], mode = mode, max_iter = max_iter, print_inter = print_inter, check_iter = check_iter, tol = tol), ]
    A[0] = m0.compute_primal_variable().detach()
    print("Mode 1")
    m1 = wtf.FactorsModel(X0, 1, [C_full, ], S, A, rho[i, :], eps[i, :], lamda[i, :], ot_mode = "slice", U_init = None, device = device, unbal = False, norm = "col")
    dual_objs[1] += [wtf.solve(m1, lr = lr[i], mode = mode, max_iter = max_iter, print_inter = print_inter, check_iter = check_iter, tol = tol), ]
    A[1] = m1.compute_primal_variable().detach()
    print("Mode 2")
    m2 = wtf.FactorsModel(X0, 2, [C_full, ], S, A, rho[i, :], eps[i, :], lamda[i, :], ot_mode = "slice", U_init = None, device = device, unbal = False, norm = "col")
    dual_objs[2] += [wtf.solve(m2, lr = lr[i], mode = mode, max_iter = max_iter, print_inter = print_inter, check_iter = check_iter, tol = tol), ]
    A[2] = m2.compute_primal_variable().detach()
    # Core-tensor update intentionally disabled: S stays the fixed CP core.
    # print("Core tensor")
    # m = wtf.CoreModel(X0, None, A, rho[i, :], eps[i, :], lamda[i, :], optim_modes, ot_mode = "slice", C_full = C_full, device = device, unbal = unbal)
    # wtf.solve(m, lr = lr[i], mode = mode, max_iter = max_iter, print_inter = print_inter, check_iter = check_iter, tol = tol)
    # S = m.compute_primal_variable().detach()
# + colab={"base_uri": "https://localhost:8080/", "height": 285} id="5BVry3a1Kdng" outputId="7c7b90e0-1e7c-457c-e7e5-5fa2a6b59d6e"
# Show the (fixed) core tensor and the mode-0 dual objective per iteration.
plt.imshow(tl.unfold(S, 0).cpu())
plt.axis("auto")
# -
plt.scatter(np.arange(n_iter), dual_objs[0])
# + id="wKlqaJhZTy3t"
# Reconstruct X from the OT factors, and fit a plain NNCP baseline for comparison.
X_hat = tl.tenalg.multi_mode_dot(S, A).cpu()
factor_cp = tl.decomposition.non_negative_parafac(X, rank = r[0], init = "svd", n_iter_max = 500)
X_cp = tl.cp_tensor.cp_to_tensor(factor_cp)
# + colab={"base_uri": "https://localhost:8080/", "height": 341} id="SB0qsT8ATy3u" outputId="e22c1f51-84c4-4e19-cdbc-cd8bd1824562"
# Side-by-side unfoldings: OT reconstruction, CP reconstruction, ground truth.
plt.figure(figsize = (15, 5))
plt.subplot(1, 3, 1)
plt.imshow(X_hat.reshape(-1, sizex*sizey).T, interpolation = "nearest")
plt.axis("auto")
plt.subplot(1, 3, 2)
plt.imshow(X_cp.reshape(-1, sizex*sizey).T, interpolation = "nearest")
plt.axis("auto")
plt.subplot(1, 3, 3)
plt.imshow(X.reshape(-1, sizex*sizey).T, interpolation = "nearest")
plt.axis("auto")
# + colab={"base_uri": "https://localhost:8080/", "height": 694} id="hZVzsW--Ty3u" outputId="44010ba9-de86-4fb7-98f0-055430575782"
# Heatmaps of the factor matrices: CP baseline vs the OT solution.
plt.figure(figsize = (15, 5))
plt.suptitle("CP")
for i in range(3):
    plt.subplot(1, 3, i+1)
    plt.imshow(factor_cp.factors[i], interpolation = "nearest")
    plt.axis("auto")
    plt.colorbar()
plt.figure(figsize = (15, 5))
plt.suptitle("OT")
for i in range(3):
    plt.subplot(1, 3, i+1)
    plt.imshow(A[i].cpu(), interpolation = "nearest")
    plt.axis("auto")
    plt.colorbar()
# + colab={"base_uri": "https://localhost:8080/", "height": 218} id="QqrQPZvRvqoP" outputId="09c1b069-2a30-4f0b-b8ae-2a6a92b49b22"
# Plot the 1-D atoms recovered by the plain (Frobenius) CP decomposition.
plt.figure(figsize = (4/3*PLT_CELL, PLT_CELL))
plt.suptitle("Frobenius-CP")  # fixed typo: was "Frobnenius-CP" (cf. "Frobenius-CP"/"Frobenius-NMF" below)
plt.subplot(1, 2, 1)
plt.title("$\\alpha_i$")
# One colour per rank-1 component, same palette as the raw/WTF atom plots.
for j, col in enumerate(["red", "green", "blue"]):
    plt.plot(np.linspace(-1, 1, 32), wtf.normalise(factor_cp.factors[1][:, j]), color = col)
plt.subplot(1, 2, 2)
plt.title("$\\beta_i$")
for j, col in enumerate(["red", "green", "blue"]):
    plt.plot(np.linspace(-1, 1, 32), wtf.normalise(factor_cp.factors[2][:, j]), color = col)
plt.tight_layout()
plt.savefig(figs_path + "3mode_slice_atoms_cp.pdf")
# + colab={"base_uri": "https://localhost:8080/", "height": 218} id="wNTsPBXzxuTJ" outputId="b6514126-02f0-4e8c-c8bc-fab5357aa393"
# Same atom plot for the WTF (Wasserstein tensor factorisation) solution.
plt.figure(figsize = (4/3*PLT_CELL, PLT_CELL))
plt.subplot(1, 2, 1)
plt.suptitle("WTF")
plt.title("$\\alpha_i$")
plt.plot(np.linspace(-1, 1, 32), A[1][:, 0].cpu(), color = "red")
plt.plot(np.linspace(-1, 1, 32), A[1][:, 1].cpu(), color = "green")
plt.plot(np.linspace(-1, 1, 32), A[1][:, 2].cpu(), color = "blue")
plt.subplot(1, 2, 2)
plt.title("$\\beta_i$")
plt.plot(np.linspace(-1, 1, 32), A[2][:, 0].cpu(), color = "red")
plt.plot(np.linspace(-1, 1, 32), A[2][:, 1].cpu(), color = "green")
plt.plot(np.linspace(-1, 1, 32), A[2][:, 2].cpu(), color = "blue")
plt.tight_layout()
plt.savefig(figs_path + "3mode_slice_atoms_wtf.pdf")
# + colab={"base_uri": "https://localhost:8080/", "height": 202} id="V8bsAscO7SGD" outputId="6272cc45-d0e4-4530-c437-73a9346e5fb7"
# Ground-truth atoms: all N sampled factor pairs, drawn faintly per population.
plt.figure(figsize = (4/3*PLT_CELL, PLT_CELL))
plt.suptitle("Raw")
plt.subplot(1, 2, 1)
plt.title("$\\alpha_i$")
for i in range(N):
    plt.plot(np.linspace(-1, 1, 32), wtf.normalise(factors1[i][0]), color = "red", alpha = 0.1)
    plt.plot(np.linspace(-1, 1, 32), wtf.normalise(factors2[i][0]), color = "green", alpha = 0.1)
    plt.plot(np.linspace(-1, 1, 32), wtf.normalise(factors3[i][0]), color = "blue", alpha = 0.1)
plt.subplot(1, 2, 2)
plt.title("$\\beta_i$")
for i in range(N):
    plt.plot(np.linspace(-1, 1, 32), wtf.normalise(factors1[i][1]), color = "red", alpha = 0.1)
    plt.plot(np.linspace(-1, 1, 32), wtf.normalise(factors2[i][1]), color = "green", alpha = 0.1)
    plt.plot(np.linspace(-1, 1, 32), wtf.normalise(factors3[i][1]), color = "blue", alpha = 0.1)
plt.tight_layout()
plt.savefig(figs_path + "3mode_slice_atoms_raw.pdf")
# + colab={"base_uri": "https://localhost:8080/", "height": 200} id="nNS-sp3DADFa" outputId="5b252157-1641-4f5b-c004-68d6516c028b"
# Mean image over all samples (sum along the sample mode).
plt.figure(figsize = (PLT_CELL, PLT_CELL))
plt.title("Average")
plt.imshow(X.sum(0))
plt.axis("off")
plt.savefig(figs_path + "3mode_slice_avg.pdf")
# + colab={"base_uri": "https://localhost:8080/", "height": 184} id="95Xh4Oy4WcSx" outputId="22ea561d-dc32-45ab-e8f8-206abeab48e3"
# A 3x5 grid of example observations from the synthetic dataset.
plt.figure(figsize = (2*PLT_CELL, PLT_CELL))
plt.suptitle("Sample observations")
for i in range(15):
    plt.subplot(3, 5, i+1)
    plt.axis("off")
    plt.axis("auto")
    plt.imshow(X[i, :, :])
plt.savefig(figs_path + "3mode_slice_sample.pdf")
# + id="4R5uuM2FTy3v"
# Per-sample exact Wasserstein distance between each reconstruction and the truth.
d_ot = np.array([ot.emd2(wtf.normalise(X_hat[i, :, :].reshape(-1).double()), wtf.normalise(X[i, :, :].reshape(-1).double()), C_full.cpu()) for i in range(X.shape[0])])
d_baseline = np.array([ot.emd2(wtf.normalise(X_cp[i, :, :].reshape(-1).double()), wtf.normalise(X[i, :, :].reshape(-1).double()), C_full.cpu()) for i in range(X.shape[0])])
# + colab={"base_uri": "https://localhost:8080/"} id="o8lQ43ClTy3v" outputId="e0ba27c5-8422-4636-bc75-ff265ac381ac"
# Mean W-distance: OT factorisation vs CP baseline (lower is better).
d_ot.mean(), d_baseline.mean()
# + colab={"base_uri": "https://localhost:8080/", "height": 1000} id="zEoEmUtFTy3w" outputId="c3237314-23b6-49b7-942b-13fef184bce3"
# Grids of 40 reconstructions per method on a common colour scale (99.5th
# percentile of X as vmax to suppress outlier pixels).
vmax = np.quantile(X, 0.995)
plt.figure(figsize = (10, 5))
plt.suptitle("CP")
for i in range(40):
    plt.subplot(4, 10, i+1)
    plt.imshow(X_cp[i, :].reshape(sizex, sizey), vmin = 0, vmax = vmax)
plt.figure(figsize = (10, 5))
plt.suptitle("True")
for i in range(40):
    plt.subplot(4, 10, i+1)
    plt.imshow(X[i, :].reshape(sizex, sizey), vmin = 0, vmax = vmax)
plt.figure(figsize = (10, 5))
plt.suptitle("OT")
for i in range(40):
    plt.subplot(4, 10, i+1)
    plt.imshow(X_hat[i, :].reshape(sizex, sizey), vmin = 0, vmax = vmax)
# + id="RhJVlT0__nsY"
# now try NMF with the same dataset
# Matrix (2-mode) variant: factorise the mode-0 unfolding of X.
from sklearn import decomposition
r_nmf = [3, ]*2
S_nmf = tl.zeros(r_nmf).to(device)
for i in range(r_nmf[0]):
    S_nmf[i, i] = 1
X0_nmf = tl.unfold(X, 0).to(device)
# max_iter=1: use NNDSVD essentially as an initialiser only.
nmf_model = sklearn.decomposition.NMF(n_components = r_nmf[0], init = "nndsvd", max_iter = 1)
U_nmf = torch.Tensor(nmf_model.fit_transform(X0_nmf.cpu()))
V_nmf = torch.Tensor(nmf_model.components_)
# Row-normalise both factors before the OT refinement.
U_nmf = (U_nmf.T/U_nmf.sum(1)).T
V_nmf = (V_nmf.T/V_nmf.sum(1)).T
A_nmf = copy.deepcopy([U_nmf, V_nmf.T])
A_nmf = [a.to(device) for a in A_nmf]
# + id="rvqBdyfDLG-o"
# Hyperparameters for the W-NMF block iterations (one row per iteration).
params_nmf = {"n_iter" : 10}
params_nmf['lr'] = np.ones(params_nmf['n_iter'])*1
params_nmf['lamda'] = np.array([np.ones(2), ]*params_nmf['n_iter'])*25
params_nmf['optim_modes'] = [0, ]
params_nmf['rho'] = np.array([np.ones(2), ]*params_nmf['n_iter'])*0.01
params_nmf['eps'] = np.array([np.ones(2), ]*params_nmf['n_iter'])*0.001
# -
# Rebuild the ground cost as a KeOps LazyTensor (symbolic, memory-light);
# replaces the dense C_full defined earlier. Requires CUDA (.cuda()).
import pykeops
from pykeops.torch import LazyTensor
x = torch.Tensor(coords).cuda()
x_i = LazyTensor(x.view(1, x.shape[0], x.shape[1]))
x_j = LazyTensor(x.view(x.shape[0], 1, x.shape[1]))
C_full = ((x_i - x_j)**2).sum(2)
# Normalise by the mean entry, computed via a matvec with the all-ones vector
# since LazyTensors have no direct .mean().
C_full = C_full/((C_full @ torch.ones(C_full.shape[1], device = device)).sum()/np.prod(C_full.shape))
# + colab={"base_uri": "https://localhost:8080/"} id="IcIeYVtXLK8O" outputId="16b834d2-91e3-455a-d0cd-28e0a18fb115"
# Alternate optimisation of the two NMF factors under the OT objective.
max_iter, print_inter, check_iter, unbal = (100, 10, 10, True)
tol = 1e-3
mode = "lbfgs"
for i in range(params_nmf['n_iter']):
    print("Block iteration ", i)
    print("Mode 0")
    m0 = wtf.FactorsModel(X0_nmf, 0, [C_full, ], S_nmf, A_nmf, params_nmf['rho'][i, :], params_nmf['eps'][i, :], params_nmf['lamda'][i, :],
                          ot_mode = "slice", U_init = None, device = device, unbal = unbal, norm = "row")
    wtf.solve(m0, lr = params_nmf['lr'][i], mode = mode, max_iter = max_iter, print_inter = print_inter, check_iter = check_iter, tol = tol)
    A_nmf[0] = m0.compute_primal_variable().detach()
    print("Mode 1")
    m1 = wtf.FactorsModel(X0_nmf, 1, [C_full, ], S_nmf, A_nmf, params_nmf['rho'][i, :], params_nmf['eps'][i, :], params_nmf['lamda'][i, :],
                          ot_mode = "slice", U_init = None, device = device, unbal = unbal, norm = "col")
    wtf.solve(m1, lr = params_nmf['lr'][i], mode = mode, max_iter = max_iter, print_inter = print_inter, check_iter = check_iter, tol = tol)
    A_nmf[1] = m1.compute_primal_variable().detach()
# + id="lYV-4qWGLNVG"
# W-NMF reconstruction, plus a fully-converged Frobenius NMF baseline.
X_hat_nmf = tl.tenalg.multi_mode_dot(S_nmf, A_nmf ).cpu()
nmf_model = sklearn.decomposition.NMF(n_components = r_nmf[0], init = "nndsvd")
U_nmf = torch.Tensor(nmf_model.fit_transform(X0_nmf.cpu()))
V_nmf = torch.Tensor(nmf_model.components_)
X_nmf = U_nmf @ V_nmf
# + colab={"base_uri": "https://localhost:8080/", "height": 234} id="xEX2hqWsLp-T" outputId="89891cf5-52a8-4721-cb03-715837cbdd6d"
# 2-D atoms (outer products of the 1-D factors): CP baseline vs WTF.
plt.figure(figsize = (PLT_CELL*1.5, 0.75*PLT_CELL))
plt.suptitle("Frobenius-CP")
for i in range(r[0]):
    plt.subplot(1, r[0], i+1)
    plt.imshow(np.outer(factor_cp.factors[1][:, i], factor_cp.factors[2][:, i]))
    plt.axis("off")
plt.tight_layout()
plt.savefig(figs_path + "3mode_slice_atoms2d_cp.pdf")
plt.figure(figsize = (PLT_CELL*1.5, 0.75*PLT_CELL))
plt.suptitle("WTF")
for i in range(r[0]):
    plt.subplot(1, r[0], i+1)
    plt.imshow(np.outer(A[1][:, i].cpu(), A[2][:, i].cpu()))
    plt.axis("off")
plt.tight_layout()
plt.savefig(figs_path + "3mode_slice_atoms2d_wtf.pdf")
# + colab={"base_uri": "https://localhost:8080/", "height": 234} id="LpjDKcaFM1sn" outputId="284002dd-6201-4185-9f05-37124f38994c"
# 2-D atoms for the matrix case: Frobenius NMF vs Wasserstein NMF.
plt.figure(figsize = (PLT_CELL*1.5, 0.75*PLT_CELL))
plt.suptitle("Frobenius-NMF")
for i in range(r[0]):
    plt.subplot(1, r[0], i+1)
    plt.imshow(V_nmf[i, :].reshape(sizex, sizey))
    plt.axis("off")
plt.tight_layout()
plt.savefig(figs_path + "3mode_slice_atoms2d_nmf.pdf")
plt.figure(figsize = (PLT_CELL*1.5, 0.75*PLT_CELL))
plt.suptitle("W-NMF")
for i in range(r[0]):
    plt.subplot(1, r[0], i+1)
    plt.imshow(A_nmf[1][:, i].reshape(sizex, sizey).cpu())
    plt.axis("off")
plt.tight_layout()
plt.savefig(figs_path + "3mode_slice_atoms2d_wnmf.pdf")
# + id="hLHcnbOQU1uJ"
| examples/example.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: 'Python 3.9.1 64-bit (''qiskit'': conda)'
# name: python391jvsc74a57bd01c98fc4fe92390271d15ae28cc783178c5efb4069f3af669a34a587febea39ec
# ---
import numpy as np
from qore import Mine, ASP, QAOA, VQE
from qore.utils import measure_operator, get_bitstring_probabilities
from qore.algorithms.asp import construct_default_H_B
from qiskit.aqua import aqua_globals, QuantumInstance
from qiskit import Aer
# +
# Smoothness-constraint penalty weight for the mine Hamiltonian.
penalty = 5.0
# qmine = Mine(np.array([[-2.0, 3.0, -1.0, -2.0, -1.0], [float('inf'), 1.0, -5.0, 10.0, float('inf')], [float('inf'), float('inf'), 4.0, float('inf'), float('inf')]]))
#qmine = Mine('mine_config.txt')
qmine = Mine('mine_config_large.txt')
qmine.plot_mine()
# -
H = qmine.gen_Hamiltonian(penalty=penalty)
# Fixed seed + noiseless statevector simulator backend.
aqua_globals.random_seed = 1953
backend = Aer.get_backend('statevector_simulator')
instance = QuantumInstance(backend=backend)
def analysis(circuit):
    """Sample ``circuit`` on the module-level ``instance`` and print the
    most probable bitstring with its probability."""
    probs = get_bitstring_probabilities(circuit, instance, shots=1024)
    # argmax over the sampled distribution (dict: bitstring -> probability)
    best = max(probs.items(), key=lambda kv: kv[1])
    bitstr, prob = best
    print(f"The most probable configuration and the corresponding probability: {bitstr, prob}")
    print("------------------------------------------------------------")
# +
# Adiabatic state preparation: evolve for evol_time in nsteps Trotter steps,
# reporting intermediate results via `analysis` every 40 steps.
evol_time = 20
nsteps = 100
circuit = ASP(H,
              evol_time=evol_time,
              nsteps=nsteps,
              callback=analysis,
              callback_freq=40,
              quantum_instance=instance).construct_circuit()
#analysis(circuit)
# -
# Visualise a candidate dig configuration and its bit-reversed counterpart
# (qubit ordering vs mine-cell ordering differ by endianness).
s = '010011110'
qmine.plot_mine_state(s)
fs = ''.join(list(s)[::-1])
qmine.plot_mine_state(fs)
# ## Benchmark with pseudoflow
import networkx as nx
from networkx import Graph
import matplotlib.pyplot as plt
import pseudoflow
graph, source, sink = qmine.build_pseudoflow_graph()
from qore.algorithms.pseudoflow import Pseudoflow
pf = Pseudoflow(graph, source, sink)
res = pf.run()
qmine.plot_mine_state(res['ground_state'])
# +
# plot the pseudoflow graph
colors = ['g' for node in graph.nodes()]
pos = nx.spring_layout(graph)
default_axes = plt.axes(frameon=True)
nx.draw_networkx(graph, node_color=colors, node_size=600,
alpha=.8, ax=default_axes, pos=pos)
# -
| notebooks/asp_test.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# name: python3
# ---
# + [markdown] id="view-in-github" colab_type="text"
# <a href="https://colab.research.google.com/github/s98soni/app/blob/master/Untitled0.ipynb" target="_parent"><img src="https://colab.research.google.com/assets/colab-badge.svg" alt="Open In Colab"/></a>
# + id="y5EhUvkue_uI"
# Business problem
# Build an ANN model using the handwritten-digit MNIST data set
# + id="dGukq4kcfFlS"
# Import required libraries
import tensorflow as tf
from tensorflow import keras
import matplotlib.pyplot as plt
# %matplotlib inline
import numpy as np
import seaborn as sns
# + id="fMcRcAfuhog1"
# Import the handwritten-digit dataset bundled with Keras
(x_train,y_train),(x_test,y_test) = keras.datasets.mnist.load_data()
# + colab={"base_uri": "https://localhost:8080/"} id="P4wRSISZildV" outputId="2dde976f-ef01-4d14-c1a0-bd8bcc176481"
# Check the number of samples in each split
print(len(x_train))
print(len(x_test))
print(len(y_train))
print(len(y_test))
# + colab={"base_uri": "https://localhost:8080/"} id="djYLLLEnjdTu" outputId="d304fd4d-2ae7-4eb4-c971-2ef5318a3702"
# Check the shape of a single image
x_train[1].shape
# + colab={"base_uri": "https://localhost:8080/"} id="xw6QQOB0kEJl" outputId="32548de2-0d41-4f94-c7ae-30efd615018a"
# Inspect the raw pixel values of one sample
x_train[0]
# + colab={"base_uri": "https://localhost:8080/", "height": 292} id="qnOIuMrhl6IQ" outputId="ff7e8d68-f211-49af-bc7c-967ca5747c98"
# Render one sample as an image
plt.matshow(x_train[10])
# + colab={"base_uri": "https://localhost:8080/"} id="ToQOE3t8l61M" outputId="a4a27913-dd8a-4d95-8b81-9b8609945f60"
# Look at the corresponding label
y_train[10]
# + colab={"base_uri": "https://localhost:8080/"} id="HUzPW41pl66K" outputId="09e52bbe-1b3e-4dc0-84e8-27b7a9094118"
# Look at the 5 initial y_train values
y_train[:5]
# + colab={"base_uri": "https://localhost:8080/"} id="u38O3HTNmzQU" outputId="4c1d35a8-6350-4151-d047-de8178ceae9c"
# Shape of the full training tensor
x_train.shape
# + colab={"base_uri": "https://localhost:8080/"} id="rfTQZ-hEmzXA" outputId="bd6b77f1-1fba-4e55-ff05-0afa46511f66"
x_test.shape
# + id="31sSJ2qeqmqO"
# Since the highest value of the data is 255,
# divide every data point by 255 to scale pixels into [0, 1]
x_train =x_train/255
x_test = x_test/255
# + id="oHukk0lymzdN"
# Flatten each image into a 1-D vector so it can feed a Dense layer
x_train_flatten = x_train.reshape(len(x_train),28*28)
x_test_flatten = x_test.reshape(len(x_test),28*28)
# + colab={"base_uri": "https://localhost:8080/"} id="B6nKCJtKmziV" outputId="1301a7a0-4358-4fd2-a17f-ee6dd31f77cf"
# Check the flattened shapes
print(x_train_flatten.shape)
print(x_test_flatten.shape)
# + colab={"base_uri": "https://localhost:8080/"} id="yt3Y0rGBmznU" outputId="3530c0e9-86fc-4d8f-fe75-b98727b6a3bb"
# Check the reshaped values of one image
print(x_train_flatten[1])
# + colab={"base_uri": "https://localhost:8080/"} id="4FVK283aoNM7" outputId="77975dc3-0c3a-4a62-80d8-1a423616336d"
print(x_test_flatten[1])
# + id="1ancya6poSMt"
# Create a simple ANN model
# using categorical cross-entropy loss and the 'accuracy' metric
# + id="iMhWTcOUolpr"
# Single Dense layer: 784 inputs -> 10 sigmoid outputs (one per class)
model = keras.Sequential([
    keras.layers.Dense(10, input_shape=(784,),activation='sigmoid')
])
# + id="fIDz-2PwpIIc"
# compile model (optimizer, metrics, loss)
model.compile(
    optimizer='adam',
    loss = 'sparse_categorical_crossentropy',
    metrics = ['accuracy']
)
# + colab={"base_uri": "https://localhost:8080/"} id="ixOBIKUspJT1" outputId="0da53980-5293-43ac-91b0-09603a5ad571"
# fit model on the flattened training images for 5 epochs
model.fit(x_train_flatten,y_train,epochs=5)
# + colab={"base_uri": "https://localhost:8080/"} id="OrvDOnvLrPOv" outputId="8cf0f521-bac6-499c-b5ae-12ba9bd53766"
# evaluate loss/accuracy on the held-out test data
model.evaluate(x_test_flatten,y_test)
# + id="82vJs__Trrgf"
# predict per-class scores for the test data
y_pred = model.predict(x_test_flatten)
# + colab={"base_uri": "https://localhost:8080/"} id="t7qP9BMKqYzD" outputId="3148ba81-2002-49c4-99fc-e402da5f968d"
# raw 10-way output for the first test sample
y_pred[0]
# + colab={"base_uri": "https://localhost:8080/"} id="UZ-yuItSsW3m" outputId="fd534c6a-8813-42eb-9e30-da87ab078c34"
# numpy's argmax returns the index of the highest value, i.e. the predicted class
np.argmax(y_pred[0])
# + colab={"base_uri": "https://localhost:8080/", "height": 292} id="I7TJUEBnsXFz" outputId="5e7c1cda-0dc9-4d4a-f75d-5d3ca946d9ac"
# check the actual first test image against the prediction above
plt.matshow(x_test[0])
# + id="XXnoO523tAgC"
# convert every predicted score vector into a whole-number class label
y_pred_labels = [np.argmax(i) for i in y_pred]
# + colab={"base_uri": "https://localhost:8080/"} id="W48UAhvsthDb" outputId="5ce88500-53b5-4ae4-ff6f-749de9e58b0a"
y_pred_labels[:5]
# + colab={"base_uri": "https://localhost:8080/"} id="MCwBcY9-thau" outputId="c82f7fc9-b269-44ad-a833-61f24b2250c2"
# create confusion matrix of true vs predicted labels
cm = tf.math.confusion_matrix(labels=y_test,predictions=y_pred_labels)
cm
# + colab={"base_uri": "https://localhost:8080/", "height": 296} id="mxZnoI8CuEZN" outputId="7d0d6952-7902-467b-98bc-d70a9e04bf6a"
# Visualise the confusion matrix as an annotated heat map
sns.heatmap(cm,annot=True,fmt='d')
plt.xlabel('Predicted')  # fixed typo: axis label was 'Predicter'
plt.ylabel('Actual')
# + id="OmKp41FRudOY"
from sklearn.metrics import classification_report
# + colab={"base_uri": "https://localhost:8080/"} id="-2rTUXBBurPv" outputId="1a4a47a4-f994-4658-8d92-69d34a9bafb9"
# Per-class precision/recall/F1 summary
print(classification_report(y_test,y_pred_labels))
# + id="OOrfzmKOurhX"
| Untitled0.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# language: python
# name: python3
# ---
# +
"""
Correlation between features using Seaborn
==============================================================
Plotting the correlation between all features using Seaborn.
"""
# Authors: <NAME>
import seaborn as sns;
sns.set(style="ticks", color_codes=True)
# Importing the iris dataset bundled with Seaborn
iris = sns.load_dataset("iris")
# Plotting a Seaborn pairplot with linear regression fits, coloured by species
g = sns.pairplot(iris, hue="species", kind="reg")
# -
| bivariate analysis/01_Correlation between features-iris-seaborn.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# language: python
# name: python3
# ---
# +
# Copyright 2021 NVIDIA Corporation. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
# -
# <img src="http://developer.download.nvidia.com/compute/machine-learning/frameworks/nvidia_logo.png" style="width: 90px; float: right;">
#
# # NVTabular demo on RecSys2020 Challenge
#
# ## Overview
#
# NVTabular is a feature engineering and preprocessing library for tabular data designed to quickly and easily manipulate terabyte scale datasets used to train deep learning based recommender systems. It provides a high level abstraction to simplify code and accelerates computation on the GPU using the RAPIDS cuDF library.
#
# ### RecSys2020 Challenge
#
# The [RecSys](https://recsys.acm.org/) conference is the leading data science conference for recommender systems and organizes an annual competiton in recommender systems. The [RecSys Challenge 2020](https://recsys-twitter.com/), hosted by Twitter, was about predicting interactions of ~200 mio. tweet-user pairs. NVIDIA's team scored 1st place. The team explained the solution in the [blogpost](https://medium.com/rapids-ai/winning-solution-of-recsys2020-challenge-gpu-accelerated-feature-engineering-and-training-for-cd67c5a87b1f) and published the code on [github](https://github.com/rapidsai/deeplearning/tree/main/RecSys2020).
#
# ### Downloading the dataset
#
# The dataset has to be downloaded from the original source, provided by Twitter. You need to create an account on the [RecSys Challenge 2020 website](https://recsys-twitter.com/). Twitter needs to (manually) approve your account, which can take a few days. After your account is approved, you can download the data [here](https://recsys-twitter.com/data/show-downloads). We will use only the `training.tsv` file, as we cannot make submissions anymore.
#
# ### Learning objectives
#
# This notebook covers the end-2-end pipeline, from loading the original .tsv file to training the models, with NVTabular, [cuDF](https://github.com/rapidsai/cudf), [dask](https://dask.org/) and [XGBoost](https://xgboost.readthedocs.io/). We demonstrate multi-GPU support for NVTabular and new `nvt.ops`, implemented based on the success of our RecSys2020 solution.
# 1. **NVTabular** to preprocess the original .tsv file.
# 2. **dask_cudf** to split the preprocessed data into a training and validation set.
# 3. **NVTabular** to create additional features with only **~70 lines of code**.
# 4. **dask_cudf** / **XGBoost on GPU** to train our model.
# ## Getting Started
# +
# External Dependencies
import os
import time
import glob
import gc
import cupy as cp # CuPy is an implementation of NumPy-compatible multi-dimensional array on GPU
import cudf # cuDF is an implementation of Pandas-like Dataframe on GPU
import rmm # library for pre-allocating memory on GPU
import dask # dask is an open-source library to nateively scale Python on multiple workers/nodes
import dask_cudf # dask_cudf uses dask to scale cuDF dataframes on multiple workers/nodes
import numpy as np
# NVTabular is the core library, we will use here for feature engineering/preprocessing on GPU
import nvtabular as nvt
import xgboost as xgb
# More dask / dask_cluster related libraries to scale NVTabular
from dask_cuda import LocalCUDACluster
from dask.distributed import Client
from dask.distributed import wait
from dask.utils import parse_bytes
from dask.delayed import delayed
from nvtabular.utils import device_mem_size
# -
# Let's have a short look on the libary versions and setup, we will use.
# We ran this example with 2x Quadro GV100 GPUs with each having 32GB of GPU memory. NVTabular used a single GPU whereas XGBoost trained with two GPUs.
time_total_start = time.time()  # wall-clock timer for the whole notebook
# We define our input directory containing the data, and output directory to save the processed files.
INPUT_DATA_DIR = os.environ.get('INPUT_DATA_DIR', '/dataset/')
OUTPUT_DIR = os.environ.get('OUTPUT_DIR', './')
# First, we initialize our local cuda cluster.
cluster = LocalCUDACluster(
    protocol="tcp"
)
client = Client(cluster)
client
# ## Preparing our dataset
# The original data format had multiple inefficiencies, resulting in requiring more disk space and memory:
# 1. The file format is `.tsv`, an uncompressed, text-based format. The `parquet` file format stores tabular data in a compressed, column-oriented format. This saves a significant amount of disk space which results in fewer i/o operations and faster execution.
# 2. Some categorical features, such as tweet_id, user_id are hashed to String values (e.g. `cfcd208495d565ef66e7dff9f98764da`). These long Strings require significant amount of disk space/memory. We can encode the Categories as Integer values using `Categorify`. Representing the String `cfcd208495d565ef66e7dff9f98764da` as an Integer `0` can save up 90% in memory. Although other categorical features are no long hashes, they are still Strings (e.g. tweet_type) and we will represent them as Integers, as well.
# 3. In our experiments, the text_tokens were not a significant feature and we will drop the column before we split the data.
# First, we define the column names in the original .tsv file. The .tsv file has no header and we need to specify the names.
# Column names for the header-less training.tsv, in file order.
features = [
    'text_tokens', ###############
    'hashtags', #Tweet Features
    'tweet_id', #
    'media', #
    'links', #
    'domains', #
    'tweet_type', #
    'language', #
    'timestamp', ###############
    'a_user_id', ###########################
    'a_follower_count', #Engaged With User Features
    'a_following_count', #
    'a_is_verified', #
    'a_account_creation', ###########################
    'b_user_id', #######################
    'b_follower_count', #Engaging User Features
    'b_following_count', #
    'b_is_verified', #
    'b_account_creation', #######################
    'b_follows_a', #################### Engagement Features
    'reply', #Target Reply
    'retweet', #Target Retweet
    'retweet_comment',#Target Retweet with comment
    'like', #Target Like
    ####################
]
# We define two helper function, we apply in our NVTabular workflow:
# 1. splitmedia2 splits the entries in media by `\t` and keeps only the first two values (if available),
# 2. count_token counts the number of token in a column (e.g. how many hashtags are in a tweet),
# +
def splitmedia(col):
    """Combine the first two tab-separated tokens of each entry as "tok0_tok1".

    Missing tokens are replaced by the empty string; an empty column is
    returned unchanged.
    """
    if col.shape[0] == 0:
        return col
    # Split once and reuse -- the original implementation performed the
    # expensive string split twice on the whole column.
    parts = col.str.split('\t', expand=True)
    return parts[0].fillna('') + '_' + parts[1].fillna('')
def count_token(col, token):
    """Number of *token*-separated items per entry; null entries count as 0."""
    valid = ~col.isnull()
    # N occurrences of the separator means N + 1 items.
    items = col.str.count(token) + 1
    # Multiplying by the boolean mask zeroes nothing (NaN stays NaN for nulls);
    # fillna then maps the null rows to 0.
    return (items * valid).fillna(0)
# -
# We will define our data processing pipeline with NVTabular.<br><br>
# We count the number of tokens in the columns hashtags, domains, links. We use the `count_token` helper function in a `lambda` function. Finally, we rename the column names to avoid duplicated names.
count_features = (
    nvt.ColumnGroup(['hashtags', 'domains', 'links']) >>
    (lambda col: count_token(col,'\t')) >>
    nvt.ops.Rename(postfix = '_count_t')
)
# We apply splitmedia function to split the media.
split_media = nvt.ColumnGroup(['media']) >> (lambda col: splitmedia(col))
# We encode categorical columns as a small, continuous integer to save memory. Some categorical columns contain long hashes of type String as values to preserve the privacy of the users (e.g. userId, language, etc.). Long hashes of type String requires significant amount of memory to store. We encode/map the Strings to continuous Integer to save significant memory.
# <br><br>
# Before we can apply `Categorify`, we need to fill na/missing values in the columns `hashtags`, `domains` and `links`.
multihot_filled = ['hashtags', 'domains', 'links'] >> nvt.ops.FillMissing()
cat_features = (
    split_media + multihot_filled + ['language', 'tweet_type', 'tweet_id', 'a_user_id', 'b_user_id'] >>
    nvt.ops.Categorify(out_path=OUTPUT_DIR)
)
# We want to fill na/missing values in the label columns as well.
label_name = ['reply', 'retweet', 'retweet_comment', 'like']
label_name_feature = label_name >> nvt.ops.FillMissing()
# We extract the weekday from the timestamp.
weekday = (
    nvt.ColumnGroup(['timestamp']) >>
    (lambda col: cudf.to_datetime(col, unit='s').dt.weekday) >>
    nvt.ops.Rename(postfix = '_wd')
)
# We can visualize our pipeline.
output = count_features+cat_features+label_name_feature+weekday
(output).graph
# Our calculation workflow looks correct. But we want to keep columns, which are not used in our pipeline (for `a_follower_count` or `b_follows_a`. Therefore, we include all columns in features, which are not part of our pipeline (except of `text_tokens`).
remaining_columns = [x for x in features if x not in (output.columns+['text_tokens'])]
remaining_columns
# We initialize our NVTabular workflow.
proc = nvt.Workflow(output+remaining_columns)
# We initialize a nvt.Dataset. The engine is `csv` as the `.tsv` file has a similar structure. The `.tsv` file uses the special character `\x01` to separate columns. There is no header in the file and we define column names with the parameter `names`.
trains_itrs = nvt.Dataset(INPUT_DATA_DIR + 'training.tsv',
                          header=None,
                          names=features,
                          engine='csv',
                          sep='\x01',
                          part_size='1GB')
# First, we collect the training dataset statistics.
# +
# %%time
time_preproc_start = time.time()
# fit() only gathers statistics (e.g. category mappings); nothing is written yet
proc.fit(trains_itrs)
time_preproc = time.time()-time_preproc_start
# -
# Next, we apply the transformation to the dataset and persist it to disk.<br><br>
# We define the output datatypes for continuous columns to save memory. We can define the output datatypes as a dict and parse it to the `to_parquet` function.
dict_dtypes = {}
# Downcast these columns to uint32 on write to save disk space and memory
for col in label_name + ['media', 'language', 'tweet_type', 'tweet_id',
                         'a_user_id', 'b_user_id', 'hashtags', 'domains',
                         'links', 'timestamp', 'a_follower_count',
                         'a_following_count', 'a_account_creation',
                         'b_follower_count', 'b_following_count', 'b_account_creation']:
    dict_dtypes[col] = np.uint32
# +
# %%time
time_preproc_start = time.time()
proc.transform(trains_itrs).to_parquet(output_path=OUTPUT_DIR + 'preprocess/', dtypes=dict_dtypes, out_files_per_proc=10)
time_preproc += time.time()-time_preproc_start
# -
# We can take a look in the output folder.
# ## Splitting dataset into training and test
# We split the training data by time into a train and validation set. The first 5 days are train and the last 2 days are for validation. We use the weekday for it. The first day of the dataset is a Thursday (weekday id = 3) and the last day is Wednesday (weekday id = 2). Therefore, we split the weekday ids 1 and 2 into the validation set.
# +
# %%time
time_split_start = time.time()
df = dask_cudf.read_parquet(os.path.join(OUTPUT_DIR, 'preprocess/*.parquet'))
if 'text_tokens' in list(df.columns):
    df = df.drop('text_tokens', axis=1)
# Weekday ids routed to validation (Tue=1, Wed=2); everything else is train
VALID_DOW = [1, 2]
valid = df[df['timestamp_wd'].isin(VALID_DOW)].reset_index(drop=True)
train = df[~df['timestamp_wd'].isin(VALID_DOW)].reset_index(drop=True)
# Sort by user/time so later per-user lag features see rows in order
train = train.sort_values(["b_user_id", "timestamp"]).reset_index(drop=True)
valid = valid.sort_values(["b_user_id", "timestamp"]).reset_index(drop=True)
train.to_parquet(OUTPUT_DIR + 'nv_train/')
valid.to_parquet(OUTPUT_DIR + 'nv_valid/')
time_split = time.time()-time_split_start
# Free the dask_cudf frames before the next heavy step
del train; del valid
gc.collect()
# -
# ## Feature Engineering
# Now, we can apply the actual feature engineering. We define our data pipelines.
# We count encode the columns *media*, *tweet_type*, *language*, *a_user_id*, *b_user_id*. CountEncoding is explained [here](https://github.com/rapidsai/deeplearning/blob/main/RecSys2020Tutorial/03_4_CountEncoding.ipynb).
count_encode = (
    ['media', 'tweet_type', 'language', 'a_user_id', 'b_user_id'] >> nvt.ops.Rename(postfix="_c") >>
    nvt.ops.JoinGroupby(cont_cols=['reply'],stats=["count"], out_path='./')
)
# We transform timestamp to datetime type.
datetime = nvt.ColumnGroup(['timestamp']) >> (lambda col: cudf.to_datetime(col.astype('int32'), unit='s'))
# We extract hour from datetime.<br>
# We extract minute from datetime.<br>
# We extract seconds from datetime
hour = datetime >> (lambda col: col.dt.hour) >> nvt.ops.Rename(postfix = '_hour')
minute = datetime >> (lambda col: col.dt.minute) >> nvt.ops.Rename(postfix = '_minute')
seconds = datetime >> (lambda col: col.dt.second) >> nvt.ops.Rename(postfix = '_second')
# We difference encode *b_follower_count, b_following_count, language* grouped by *b_user_id*. DifferenceEncoding is explained [here](https://github.com/rapidsai/deeplearning/blob/main/RecSys2020Tutorial/05_2_TimeSeries_Differences.ipynb). First, we need to transform the datatype to float32 to prevent overflow/underflow. After DifferenceEncoding, we want to fill NaN values with 0.
diff_lag = (
    nvt.ColumnGroup(['b_follower_count','b_following_count','language']) >>
    (lambda col: col.astype('float32')) >>
    nvt.ops.DifferenceLag(partition_cols=['b_user_id'], shift = [1, -1]) >>
    nvt.ops.FillMissing(fill_val=0)
)
# We need to transform the LABEL_COLUMNS into boolean (0/1) targets.
LABEL_COLUMNS = ['reply', 'retweet', 'retweet_comment', 'like']
labels = nvt.ColumnGroup(LABEL_COLUMNS) >> (lambda col: (col>0).astype('int8'))
# We apply TargetEncoding with kfold of 5 and smoothing of 20. TargetEncoding is explained in [here](https://medium.com/rapids-ai/target-encoding-with-rapids-cuml-do-more-with-your-categorical-data-8c762c79e784) and [here](https://github.com/rapidsai/deeplearning/blob/main/RecSys2020Tutorial/03_3_TargetEncoding.ipynb)
target_encode = (
    ['media', 'tweet_type', 'language', 'a_user_id', 'b_user_id',
     ['domains','language','b_follows_a','tweet_type','media','a_is_verified']] >>
    nvt.ops.TargetEncoding(
        labels,
        kfold=5,
        p_smooth=20,
        out_dtype="float32",
    )
)
# We visualize our NVTabular workflow.
output = count_encode+hour+minute+seconds+diff_lag+labels+target_encode
(output).graph
# We want to keep all columns of the input dataset. Therefore, we extract all column names from the first input parquet file.
df_tmp = cudf.read_parquet(OUTPUT_DIR + '/nv_train/part.0.parquet')
all_input_columns = df_tmp.columns
del df_tmp
gc.collect()
remaining_columns = [x for x in all_input_columns if x not in (output.columns+['text_tokens'])]
remaining_columns
# We initialize our NVTabular workflow and add the "remaining" columns to it.
proc = nvt.Workflow(output+remaining_columns)
# We initialize the train and valid as NVTabular datasets.
train_dataset = nvt.Dataset(glob.glob(OUTPUT_DIR + 'nv_train/*.parquet'),
                            engine='parquet',
                            part_size="2GB")
valid_dataset = nvt.Dataset(glob.glob(OUTPUT_DIR + 'nv_valid/*.parquet'),
                            engine='parquet',
                            part_size="2GB")
# We collect statistics from our train dataset.
# +
# %%time
time_fe_start = time.time()
proc.fit(train_dataset)
time_fe = time.time()-time_fe_start
# -
# The columns *a_is_verified*, *b_is_verified* and *b_follows_a* have the datatype boolean. XGBoost does not support boolean datatypes and we need convert them to int8. We can define the output datatypes as a dict and parse it to the `.to_parquet` function.
dict_dtypes = {}
for col in ['a_is_verified','b_is_verified','b_follows_a']:
    dict_dtypes[col] = np.int8
# We apply the transformation to the train and valid datasets.
# +
# %%time
time_fe_start = time.time()
proc.transform(train_dataset).to_parquet(output_path=OUTPUT_DIR + 'nv_train_fe/', dtypes=dict_dtypes)
proc.transform(valid_dataset).to_parquet(output_path=OUTPUT_DIR + 'nv_valid_fe/', dtypes=dict_dtypes)
time_fe += time.time()-time_fe_start
# -
# ## Training our model
# After the preprocessing and feature engineering is done, we can train a model to predict our targets. We load our datasets with `dask_cudf`.
train = dask_cudf.read_parquet(os.path.join(OUTPUT_DIR,'nv_train_fe/*.parquet'))
valid = dask_cudf.read_parquet(os.path.join(OUTPUT_DIR, 'nv_valid_fe/*.parquet'))
# Sanity check: the boolean columns should now be int8 after the dtype cast above
train[['a_is_verified','b_is_verified','b_follows_a']].dtypes
# Some columns are only used for feature engineering. Therefore, we define the columns we want to ignore for training.
dont_use =[
    '__null_dask_index__',
    'text_tokens',
    'timestamp',
    'a_account_creation',
    'b_account_creation',
    'hashtags',
    'tweet_id',
    'links',
    'domains',
    'a_user_id',
    'b_user_id',
    'timestamp_wd',
    'timestamp_to_datetime',
    'a_following_count_a_ff_rate',
    'b_following_count_b_ff_rate'
]
# Keep only the ignore-columns actually present in the dataframe
dont_use = [x for x in train.columns if x in dont_use]
label_names = ['reply', 'retweet', 'retweet_comment', 'like']
# We drop the columns, which are not required for training.
# Our experiments show that we require only 10% of the training dataset. Our feature engineering, such as TargetEncoding, uses the training datasets and leverage the information of the full dataset. In the competition, we trained our models with higher ratio (20% and 50%), but we could not observe an improvement in performance.<br><br>
# We sample the training dataset to 10% of the size and drop all columns, which we do not want to use.
# +
SAMPLE_RATIO = 0.1
SEED = 1
if SAMPLE_RATIO < 1.0:
    # Deterministic sampling: hash tweet_id into 10 buckets, keep the first few
    train['sample'] = train['tweet_id'].map_partitions(lambda cudf_df: cudf_df.hash_encode(stop=10))
    print(len(train))
    train = train[train['sample']<10*SAMPLE_RATIO]
    train, = dask.persist(train)
    print(len(train))
# NOTE(review): the drop of 'sample' below assumes SAMPLE_RATIO < 1.0 -- confirm.
Y_train = train[label_names]
Y_train, = dask.persist(Y_train)
train = train.drop(['sample']+label_names+dont_use,axis=1)
train, = dask.persist(train)
print('Using %i features'%(train.shape[1]))
# -
# Similar to the training dataset, our experiments show that 35% of our validation dataset is enough to get a good estimate of the performance metric. 35% of the validation dataset has a similar size as the test set of the RecSys2020 competition.
# +
SAMPLE_RATIO = 0.35
SEED = 1
if SAMPLE_RATIO < 1.0:
    print(len(valid))
    valid['sample'] = valid['tweet_id'].map_partitions(lambda cudf_df: cudf_df.hash_encode(stop=10))
    valid = valid[valid['sample']<10*SAMPLE_RATIO]
    valid, = dask.persist(valid)
    print(len(valid))
Y_valid = valid[label_names]
Y_valid, = dask.persist(Y_valid)
valid = valid.drop(['sample']+label_names+dont_use,axis=1)
valid, = dask.persist(valid)
# -
# We initialize our XGBoost parameter.
# +
print('XGB Version',xgb.__version__)
# Training on GPU: gpu_hist tree construction and gpu_predictor
xgb_parms = {
    'max_depth':8,
    'learning_rate':0.1,
    'subsample':0.8,
    'colsample_bytree':0.3,
    'eval_metric':'logloss',
    'objective':'binary:logistic',
    'tree_method':'gpu_hist',
    'predictor' : 'gpu_predictor'
}
# -
train,valid = dask.persist(train,valid)
# We train our XGBoost models. The challenge requires to predict 4 targets, does a user
# 1. like a tweet
# 2. reply a tweet
# 3. comment a tweet
# 4. comment and reply a tweet
# We train 4x XGBoost models for 300 rounds on a GPU.
# +
# %%time
time_train_start = time.time()
NROUND = 300
VERBOSE_EVAL = 50
preds = []
# One independent binary model per target label
for i in range(4):
    name = label_names[i]
    print('#'*25);print('###',name);print('#'*25)
    start = time.time(); print('Creating DMatrix...')
    dtrain = xgb.dask.DaskDMatrix(client,data=train,label=Y_train.iloc[:, i])
    print('Took %.1f seconds'%(time.time()-start))
    start = time.time(); print('Training...')
    model = xgb.dask.train(client, xgb_parms,
                           dtrain=dtrain,
                           num_boost_round=NROUND,
                           verbose_eval=VERBOSE_EVAL)
    print('Took %.1f seconds'%(time.time()-start))
    start = time.time(); print('Predicting...')
    preds.append(xgb.dask.predict(client,model,valid))
    print('Took %.1f seconds'%(time.time()-start))
    # Free GPU memory before the next target's model
    del model, dtrain
time_train = time.time()-time_train_start
# -
# Materialise labels and out-of-fold predictions as (n_samples, 4) arrays
yvalid = Y_valid[label_names].values.compute()
oof = cp.array([i.values.compute() for i in preds]).T
yvalid.shape
# The hosts of the RecSys2020 competition provide code for calculating the performance metric `PRAUC` and `RCE`. We optimized the code to speed up the calculation, as well. Using cuDF / cupy, we calculate the performance metric on the GPU.
# +
from sklearn.metrics import auc
def precision_recall_curve(y_true,y_pred):
    """Precision/recall curve for binary labels, computed on GPU with CuPy.

    Returns (precision, recall, thresholds); presumably mirrors
    sklearn.metrics.precision_recall_curve -- confirm against the
    competition's reference implementation.
    """
    y_true = y_true.astype('float32')
    # Sort samples by descending score
    ids = cp.argsort(-y_pred)
    y_true = y_true[ids]
    y_pred = y_pred[ids]
    y_pred = cp.flip(y_pred,axis=0)
    # Cumulative count of positives at each cut-off
    acc_one = cp.cumsum(y_true)
    sum_one = cp.sum(y_true)
    precision = cp.flip(acc_one/cp.cumsum(cp.ones(len(y_true))),axis=0)
    # Shift by one so each point pairs with the threshold that produced it
    precision[:-1] = precision[1:]
    precision[-1] = 1.
    recall = cp.flip(acc_one/sum_one,axis=0)
    recall[:-1] = recall[1:]
    recall[-1] = 0
    # Trim the flat recall==1 head, keeping a single boundary point
    n = (recall==1).sum()
    return precision[n-1:],recall[n-1:],y_pred[n:]
def compute_prauc(pred, gt):
    """Area under the precision/recall curve of *pred* against binary *gt*."""
    precision, recall, _ = precision_recall_curve(gt, pred)
    # sklearn's auc runs on host memory, so move the arrays off the GPU first.
    return auc(cp.asnumpy(recall), cp.asnumpy(precision))
def log_loss(y_true,y_pred,eps=1e-7, normalize=True, sample_weight=None):
    """Cross-entropy of predicted probabilities *y_pred* against labels *y_true*.

    Accepts either a 1-D vector of positive-class probabilities or an
    (n, k) matrix of per-class probabilities; rows are renormalised to sum
    to one after clipping to [eps, 1 - eps].
    """
    labels = y_true.astype('int32')
    probs = cp.clip(y_pred, eps, 1 - eps)
    # Promote a 1-D positive-class vector to a two-column [1-p, p] matrix.
    if probs.ndim == 1:
        probs = cp.expand_dims(probs, axis=1)
    if probs.shape[1] == 1:
        probs = cp.hstack([1 - probs, probs])
    probs = probs / cp.sum(probs, axis=1, keepdims=True)
    # Negative log-probability of each sample's true class.
    loss = -cp.log(probs)[cp.arange(probs.shape[0]), labels]
    return _weighted_sum(loss, sample_weight, normalize).item()
def _weighted_sum(sample_score, sample_weight, normalize):
if normalize:
return cp.average(sample_score, weights=sample_weight)
elif sample_weight is not None:
return cp.dot(sample_score, sample_weight)
else:
return sample_score.sum()
def compute_rce_fast(pred, gt):
    """Relative cross-entropy (RCE, in percent) of *pred* against binary *gt*.

    Compares the model's log loss to that of a constant predictor emitting
    the base positive rate; 0 means no better than the baseline.
    """
    model_ce = log_loss(gt, pred)
    positive_rate = cp.mean(gt).item()
    # Baseline entropy is scalar math, so plain NumPy on CPU is fast here.
    baseline_ce = -(positive_rate * np.log(positive_rate) + (1 - positive_rate) * np.log(1 - positive_rate))
    return 100.0 * (1.0 - model_ce / baseline_ce)
# -
# Finally, we calculate the performance metric PRAUC and RCE for each target.
txt = ''
for i in range(4):
    # Column i of oof/yvalid corresponds to target label_names[i]
    prauc = compute_prauc(oof[:,i], yvalid[:, i])
    rce = compute_rce_fast(oof[:,i], yvalid[:, i]).item()
    txt_ = f"{label_names[i]:20} PRAUC:{prauc:.5f} RCE:{rce:.5f}"
    print(txt_)
    txt += txt_ + '\n'
# Report wall-clock time spent in each stage of the pipeline
time_total = time.time()-time_total_start
print('Total time: {:.2f}s'.format(time_total))
print()
print('1. Preprocessing: {:.2f}s'.format(time_preproc))
print('2. Splitting: {:.2f}s'.format(time_split))
print('3. Feature engineering: {:.2f}s'.format(time_fe))
print('4. Training: {:.2f}s'.format(time_train))
| examples/winning-solution-recsys2020-twitter/01-02-04-Download-Convert-ETL-with-NVTabular-Training-with-XGBoost.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# language: python
# name: python3
# ---
import numpy as np
import torch_geometric as tg
from scipy.sparse import csr_matrix
from sklearn.model_selection import train_test_split
import torch
import matplotlib.pyplot as plt
from torch_geometric.data import Data
from torch.nn import functional as F
from torch_geometric.nn import GCNConv, ChebConv, GAE, VGAE, GATConv, AGNNConv
from torch_geometric.utils import dropout_adj
import torch_geometric.transforms as T
from sklearn.datasets import load_iris
from sklearn.linear_model import LogisticRegression
from collections import Counter
import torch.nn as nn
# Load the pre-built trade-network dataset (node attributes, two sparse
# adjacency matrices, labels) from a NumPy .npz archive.
npzfile = np.load("../data/trade_savez_files.npz", allow_pickle=True)
#_ = npzfile.seek(0)
npzfile.files
node_attributes = npzfile['attr_data']
attribute_shape = npzfile['attr_shape']
trade_adj = npzfile['sparse_adj_trade']
dist_adj = npzfile['sparse_adj_dists']
class_labels = npzfile['labels']
class_names = npzfile['class_names']
# NOTE(review): .tolist() on a 0-d object array presumably unwraps a pickled
# scipy sparse matrix -- confirm the archive layout.
dist_data_adj = dist_adj.tolist()
dist_edge_attr = dist_data_adj.data
# nonzero() gives (row, col) index arrays -> COO-style edge list
dsrc, dtar = dist_data_adj.nonzero()[0], dist_data_adj.nonzero()[1]
dist_edge_index = torch.tensor([dsrc, dtar], dtype = torch.long)
trade_data_adj = trade_adj.tolist()
trade_edge_attr = torch.tensor(trade_data_adj.data, dtype = torch.float32)
tsrc, ttar = trade_data_adj.nonzero()[0], trade_data_adj.nonzero()[1]
node_attributes =torch.tensor(node_attributes, dtype = torch.float32)
trade_edge_index = torch.tensor([tsrc, ttar], dtype = torch.long)
y = torch.tensor(class_labels, dtype = torch.long)
# +
n = len(node_attributes)
test_size = int(n * 0.3)
# 70/30 random split of node indices
train_idx, test_idx = train_test_split(range(len(node_attributes)), test_size=test_size, random_state=42)
trade_data = Data(x = node_attributes, y = y, edge_index = trade_edge_index, edge_attr = trade_edge_attr)
test_size = int(len(trade_data.x) * 0.20)
# NOTE(review): test_size is recomputed here as 20% although the index split
# above used 30% -- the masks below therefore disagree with
# train_idx/test_idx; confirm which split is intended.
trade_data.train_idx = torch.tensor(train_idx, dtype=torch.long)
trade_data.test_idx = torch.tensor(test_idx, dtype=torch.long)
# Contiguous boolean (uint8) masks: first test_size nodes excluded from train,
# last test_size nodes marked as test.
trade_data.train_mask = torch.cat((torch.zeros(test_size, dtype=torch.uint8),
                                   torch.ones(n - test_size, dtype=torch.uint8)))
# trade_data.val_mask = torch.cat((torch.zeros(train_mask_size, dtype=torch.uint8),
#                                  torch.ones(val_mask_size,dtype=torch.uint8),
#                                  torch.zeros(test_mask_size ,dtype=torch.uint8)))
trade_data.test_mask = torch.cat((torch.zeros(n - test_size, dtype=torch.uint8),
                                  torch.ones(test_size, dtype=torch.uint8)))
# Number of classes inferred from the maximum label id
trade_data.num_classes = trade_data.y.max() + 1
# -
def classifier_train_test(model_name, input_data, epochs = 1000, lr = 0.01, weight_decay = 0.0005):
    """Train a node classifier on ``input_data`` and plot its learning curves.

    Parameters
    ----------
    model_name : type
        Model *class* (e.g. ``GCNet``); instantiated as
        ``model_name(num_node_features, num_classes)``.
    input_data : torch_geometric.data.Data
        Graph with ``x``, ``y``, ``edge_index``, ``train_idx``, ``test_idx``
        and ``num_classes`` attributes (as built above).
    epochs : int
        Number of training epochs.
    lr, weight_decay : float
        Adam optimiser hyper-parameters.
    """
    device = torch.device('cuda' if torch.cuda.is_available() else 'cpu')
    print('Device: '.ljust(32), device)
    print('Model Name: '.ljust(32), str(model_name.__name__))
    print('Model params:{:19} lr: {:.4f} weight_decay: {:.4f}'.format('', lr, weight_decay))
    print('Total number of epochs to run: '.ljust(32), epochs)
    print('*' * 65)
    data = input_data.clone().to(device)
    infeat = data.num_node_features
    outfeat = data.num_classes.item()
    model = model_name(infeat, outfeat).to(device)
    optimizer = torch.optim.Adam(model.parameters(), lr=lr, weight_decay=weight_decay)
    train_losses, test_losses = [], []
    accs = []
    best_val_acc = 0.0
    best_train_loss = 0.0
    best_test_loss = 0.0
    best_epoch = 0
    # Print ~10 progress lines; guard against epochs < 10 (the original used
    # ``epochs % int(epochs/10)``, which raises ZeroDivisionError there).
    print_every = max(1, epochs // 10)
    for epoch in range(1, epochs + 1):
        # BUGFIX: training mode must be restored every epoch.  The original
        # called model.train() once before the loop and model.eval() inside
        # it, so dropout was silently disabled from epoch 2 onwards.
        model.train()
        optimizer.zero_grad()
        out = model(data)
        train_loss = F.nll_loss(out[data.train_idx], data.y[data.train_idx])
        train_losses.append(train_loss.item())
        train_loss.backward()
        optimizer.step()
        # Evaluation pass -- no gradients needed.
        model.eval()
        with torch.no_grad():
            test_out = model(data)
            test_loss = F.nll_loss(test_out[data.test_idx], data.y[data.test_idx])
        # BUGFIX: store the scalar, not the live tensor (the original kept
        # every epoch's autograd graph alive via test_losses).
        test_losses.append(test_loss.item())
        _, pred = test_out.max(dim=1)
        correct = float(pred[data.test_idx].eq(data.y[data.test_idx]).sum().item())
        acc = correct / len(data.test_idx)
        # Reports to ray-tune; assumes a tune session is active -- TODO
        # confirm behaviour when run standalone.
        tune.track.log(mean_accuracy=acc)
        accs.append(acc)
        if best_val_acc < acc:
            best_val_acc = acc
            best_epoch = epoch
            best_train_loss = train_loss.item()
            best_test_loss = test_loss.item()
        if epoch % print_every == 0:
            print('Epoch: {} Train loss: {:.4f} Test loss: {:.4f} Test Accuracy: {:.4f}'.format(epoch, train_loss, test_loss, acc))
        if epoch == epochs:
            print('-' * 65, '\nFinal epoch: {} Train loss: {:.4f} Test loss: {:.4f} Test Accuracy: {:.4f}'.format(epoch, train_loss, test_loss, acc))
    print('-' * 65)
    print('\033[1mBest Accuracy\nEpoch: {} Train loss: {:.4f} Test loss: {:.4f} Test Accuracy: {:.4f}\n'
          .format(best_epoch, best_train_loss, best_test_loss, best_val_acc))
    # Learning curves: losses (left) and test accuracy (right).
    fig = plt.figure(figsize=(12, 5))
    ax1 = fig.add_subplot(121)
    ax1.plot(range(1, epochs + 1), train_losses, label='Train loss')
    ax1.plot(range(1, epochs + 1), test_losses, label='Test loss')
    ax1.set_xlabel('Epochs')
    ax1.set_ylabel('Loss')
    ax1.set_title('Learning curve during training and testing')
    ax2 = fig.add_subplot(122)
    ax2.plot(range(1, epochs + 1), accs, label='Accuracy')
    ax2.set_xlabel('Epochs')
    ax2.set_ylabel('Accuracy')
    ax2.set_title('A plot of accuracy per epoch')
    plt.show()
def run_GAE(input_data, epochs = 1000, lr = 0.01, weight_decay = 0.0005):
    """Train a (non-variational) Graph Autoencoder on ``input_data`` for link
    prediction and plot reconstruction losses plus AUC/AP curves.

    Parameters mirror :func:`classifier_train_test`.
    """
    device = torch.device('cuda' if torch.cuda.is_available() else 'cpu')
    print('Device: '.ljust(32), device)
    print('Model Name: '.ljust(32), 'GAE')
    print('Model params:{:19} lr: {:.4f} weight_decay: {:.4f}'.format('', lr, weight_decay))
    print('Total number of epochs to run: '.ljust(32), epochs)
    print('*' * 70)
    data = input_data.clone().to(device)
    in_channels = data.num_features
    out_channels = data.num_classes.item()
    model = GAE(GAEncoder(in_channels, out_channels)).to(device)
    # BUGFIX: the original re-cloned the *global* ``trade_data`` at this
    # point, silently ignoring whatever was passed as ``input_data``.
    split_data = model.split_edges(data)
    x = split_data.x.to(device)
    train_pos_edge_index = split_data.train_pos_edge_index.to(device)
    # Node-classification attributes are irrelevant for link prediction.
    split_data.train_idx = split_data.test_idx = split_data.y = None
    optimizer = torch.optim.Adam(model.parameters(), lr=lr, weight_decay=weight_decay)
    train_losses, test_losses = [], []
    aucs = []
    aps = []
    # Guard against epochs < 10 (the original divided by int(epochs/10)).
    print_every = max(1, epochs // 10)
    for epoch in range(1, epochs + 1):
        model.train()
        optimizer.zero_grad()
        z = model.encode(x, train_pos_edge_index)
        train_loss = model.recon_loss(z, train_pos_edge_index)
        # BUGFIX: store the scalar, not the live tensor.
        train_losses.append(train_loss.item())
        train_loss.backward()
        optimizer.step()
        model.eval()
        with torch.no_grad():
            z = model.encode(x, train_pos_edge_index)
            auc, ap = model.test(z, split_data.test_pos_edge_index, split_data.test_neg_edge_index)
            # BUGFIX: evaluate on the held-out edges of ``split_data``; the
            # original read test_pos_edge_index from the re-cloned copy.
            test_loss = model.recon_loss(z, split_data.test_pos_edge_index)
        test_losses.append(test_loss.item())
        aucs.append(auc)
        aps.append(ap)
        if epoch % print_every == 0:
            print('Epoch: {} Train loss: {:.4f} Test loss: {:.4f} AUC: {:.4f} AP: {:.4f}'.format(epoch, train_loss, test_loss, auc, ap))
        if epoch == epochs:
            print('-' * 65, '\nFinal epoch: {} Train loss: {:.4f} Test loss: {:.4f} AUC: {:.4f} AP: {:.4f}'.format(epoch, train_loss, test_loss, auc, ap))
    print('-' * 65)
    # Left: reconstruction losses; right: AUC and average precision.
    fig = plt.figure(figsize=(12, 5))
    ax1 = fig.add_subplot(121)
    ax1.plot(range(1, epochs + 1), train_losses, label='Train loss')
    ax1.plot(range(1, epochs + 1), test_losses, label='Test loss')
    ax1.set_xlabel('Epochs')
    ax1.set_ylabel('Reconstruction loss on train and test')
    ax1.set_title('Learning curve for the Graph autoencoder')
    ax2 = fig.add_subplot(122)
    ax2.plot(range(1, epochs + 1), aucs, label='AUC')
    ax2.plot(range(1, epochs + 1), aps, label='AP')
    ax2.set_xlabel('Epochs')
    ax2.set_ylabel('AUC / AP')
    ax2.set_title('AUCs and APs on test sets')
    plt.show()
    return
def run_baseline_GAE():
    """Placeholder for a baseline (non-graph) autoencoder run; not implemented yet."""
    return None
def run_VGAE(input_data, epochs = 1000, lr = 0.01, weight_decay = 0.0005):
    """Train a Variational Graph Autoencoder on ``input_data`` for link
    prediction and plot losses plus AUC / average-precision curves.

    Parameters mirror :func:`classifier_train_test`.
    """
    device = torch.device('cuda' if torch.cuda.is_available() else 'cpu')
    print('Device: '.ljust(32), device)
    print('Model Name: '.ljust(32), 'VGAE')
    print('Model params:{:19} lr: {:.4f} weight_decay: {:.4f}'.format('', lr, weight_decay))
    print('Total number of epochs to run: '.ljust(32), epochs)
    print('*' * 70)
    data = input_data.clone().to(device)
    model = VGAE(VGAEncoder(data.num_features, data.num_classes.item())).to(device)
    data = model.split_edges(data)
    x = data.x.to(device)
    train_pos_edge_index = data.train_pos_edge_index.to(device)
    # Node-classification attributes are irrelevant for link prediction.
    data.train_idx = data.test_idx = data.y = None
    # BUGFIX: honour the ``lr`` and ``weight_decay`` arguments; the original
    # hard-coded Adam(lr=0.01) and silently ignored both parameters.
    optimizer = torch.optim.Adam(model.parameters(), lr=lr, weight_decay=weight_decay)
    train_losses = []
    test_losses = []
    aucs = []
    aps = []
    # Guard against epochs < 10 (the original divided by int(epochs/10)).
    print_every = max(1, epochs // 10)
    for epoch in range(1, epochs + 1):
        model.train()
        optimizer.zero_grad()
        z = model.encode(x, train_pos_edge_index)
        # Reconstruction loss plus the node-averaged KL regulariser.
        train_loss = model.recon_loss(z, train_pos_edge_index) + (1 / data.num_nodes) * model.kl_loss()
        train_losses.append(train_loss.item())
        train_loss.backward()
        optimizer.step()
        model.eval()
        with torch.no_grad():
            z = model.encode(x, train_pos_edge_index)
            auc, ap = model.test(z, data.test_pos_edge_index, data.test_neg_edge_index)
            test_loss = model.recon_loss(z, data.test_pos_edge_index) + (1 / data.num_nodes) * model.kl_loss()
        test_losses.append(test_loss.item())
        aucs.append(auc)
        aps.append(ap)
        if epoch % print_every == 0:
            print('Epoch: {} Train loss: {:.4f} Test loss: {:.4f} AUC: {:.4f} AP: {:.4f}'.format(epoch, train_loss, test_loss, auc, ap))
        if epoch == epochs:
            print('-' * 65, '\nFinal epoch: {} Train loss: {:.4f} Test loss: {:.4f} AUC: {:.4f} AP: {:.4f}'.format(epoch, train_loss, test_loss, auc, ap))
    print('-' * 65)
    # Left: reconstruction losses; right: AUC and average precision.
    fig = plt.figure(figsize=(12, 5))
    ax1 = fig.add_subplot(121)
    ax1.plot(range(1, epochs + 1), train_losses, label='Train loss')
    ax1.plot(range(1, epochs + 1), test_losses, label='Test loss')
    ax1.set_xlabel('Epochs')
    ax1.set_ylabel('Reconstruction loss')
    ax1.set_title('Learning curve for the Variational Graph autoencoder')
    ax2 = fig.add_subplot(122)
    ax2.plot(range(1, epochs + 1), aucs, label='AUC')
    ax2.plot(range(1, epochs + 1), aps, label='Average Precision score')
    ax2.set_xlabel('Epochs')
    ax2.set_ylabel('AUC / AP')
    ax2.set_title('AUCs and Average Precision scores on test sets')
    plt.show()
# ## Define Classification models for training
class GCNet(nn.Module):
    """Two-layer graph convolutional node classifier (in -> 2*in -> out)."""

    def __init__(self, in_channels, out_channels):
        super(GCNet, self).__init__()
        hidden = 2 * in_channels
        self.conv1 = GCNConv(in_channels, hidden)
        self.conv2 = GCNConv(hidden, out_channels)

    def forward(self, data):
        # Edge weights are deliberately not passed to the convolutions.
        h = F.relu(self.conv1(data.x, data.edge_index))
        h = F.dropout(h, training=self.training)
        h = self.conv2(h, data.edge_index)
        return F.log_softmax(h, dim=1)
# +
class ChebyNet(nn.Module):
    """Two-layer Chebyshev spectral graph classifier (K=2, hidden width 64)."""

    def __init__(self, in_channels, out_channels):
        super(ChebyNet, self).__init__()
        self.conv1 = ChebConv(in_channels, 64, K=2)
        self.conv2 = ChebConv(64, out_channels, K=2)

    def forward(self, data, use_edge_weight = False):
        # Passing None as the edge weight is equivalent to omitting it.
        weights = data.edge_attr if use_edge_weight else None
        h = F.relu(self.conv1(data.x, data.edge_index, weights))
        h = F.dropout(h, training=self.training)
        h = self.conv2(h, data.edge_index, weights)
        return F.log_softmax(h, dim=1)
# -
class GATNet(nn.Module):
    """Two-layer graph attention classifier: 8 heads of width 8, then 1 head."""

    def __init__(self, in_channels, out_channels):
        super(GATNet, self).__init__()
        self.conv1 = GATConv(in_channels, 8, heads=8, dropout=0.6)
        self.conv2 = GATConv(8 * 8, out_channels, heads=1, concat=True, dropout=0.6)

    def forward(self, data):
        h = F.dropout(data.x, p=0.6, training=self.training)
        h = F.elu(self.conv1(h, data.edge_index))
        h = F.dropout(h, p=0.6, training=self.training)
        h = self.conv2(h, data.edge_index)
        return F.log_softmax(h, dim=1)
class AGNNet(nn.Module):
    """Attention-based GNN: linear embed -> two AGNN propagations -> linear."""

    def __init__(self, in_channels, out_channels):
        super(AGNNet, self).__init__()
        self.lin1 = torch.nn.Linear(in_channels, 64)
        self.prop1 = AGNNConv(requires_grad=False)  # fixed propagation
        self.prop2 = AGNNConv(requires_grad=True)   # learnable propagation
        self.lin2 = torch.nn.Linear(64, out_channels)

    def forward(self, data):
        h = F.dropout(data.x, training=self.training)
        h = F.relu(self.lin1(h))
        h = self.prop1(h, data.edge_index)
        h = self.prop2(h, data.edge_index)
        h = F.dropout(h, training=self.training)
        return F.log_softmax(self.lin2(h), dim=1)
class GAEncoder(nn.Module):
    """GCN encoder for the (non-variational) graph autoencoder."""

    def __init__(self, in_channels, out_channels):
        super(GAEncoder, self).__init__()
        hidden = 2 * out_channels
        self.conv1 = GCNConv(in_channels, hidden, cached=True)
        self.conv2 = GCNConv(hidden, out_channels, cached=True)

    def forward(self, x, edge_index):
        hidden = F.relu(self.conv1(x, edge_index))
        return self.conv2(hidden, edge_index)
class baseline_GAEncoder(nn.Module):
    """MLP baseline encoder matching GAEncoder's interface.

    The graph structure (``edge_index``) is accepted but ignored, so this
    isolates how much the graph convolutions contribute over plain features.
    """

    def __init__(self, in_channels, out_channels):
        # BUGFIX: the original called super(GAEncoder, ...) (wrong class,
        # TypeError at construction) and passed cached=True to nn.Linear,
        # which is not a Linear keyword (TypeError).
        super(baseline_GAEncoder, self).__init__()
        # BUGFIX: layers now chain in -> 2*out -> out, mirroring GAEncoder;
        # the original mixed in/out widths and returned in_channels features.
        self.fc1 = nn.Linear(in_channels, 2 * out_channels)
        self.fc2 = nn.Linear(2 * out_channels, out_channels)

    def forward(self, x, edge_index):
        # edge_index kept only for interface parity with GAEncoder; unused.
        return self.fc2(F.relu(self.fc1(x)))
class VGAEncoder(nn.Module):
    """GCN encoder for the variational graph autoencoder.

    A shared first layer feeds two separate heads producing the latent mean
    and log-variance.
    """

    def __init__(self, in_channels, out_channels):
        super(VGAEncoder, self).__init__()
        hidden = 2 * out_channels
        self.conv1 = GCNConv(in_channels, hidden, cached=True)
        self.conv_mu = GCNConv(hidden, out_channels, cached=True)
        self.conv_logvar = GCNConv(hidden, out_channels, cached=True)

    def forward(self, x, edge_index):
        hidden = F.relu(self.conv1(x, edge_index))
        return self.conv_mu(hidden, edge_index), self.conv_logvar(hidden, edge_index)
class baseline_VGAEncoder(nn.Module):
    """MLP baseline encoder for the VGAE.

    Returns a ``(mu, logvar)`` pair like :class:`VGAEncoder`, but ignores
    the graph structure entirely.
    """

    def __init__(self, in_channels, out_channels):
        super(baseline_VGAEncoder, self).__init__()
        self.lin = nn.Linear(in_channels, 2 * in_channels)
        self.lin_mu = nn.Linear(2 * in_channels, out_channels)
        self.lin_logvar = nn.Linear(2 * in_channels, out_channels)

    def forward(self, x):
        h = F.relu(self.lin(x))
        # BUGFIX: the original passed an undefined ``edge_index`` to
        # lin_logvar (a Linear takes one argument) and then returned
        # ``self.lin2(x)``, an attribute that does not exist.  Return the
        # (mu, logvar) pair the VGAE expects.
        return self.lin_mu(h), self.lin_logvar(h)
class linear_baseline(nn.Module):
    """Graph-agnostic two-layer MLP baseline for node classification.

    Uses only ``data.x``; edges are ignored.
    """

    def __init__(self, in_channel, out_channel):
        super(linear_baseline, self).__init__()
        self.linear1 = nn.Linear(in_channel, 64)
        self.linear2 = nn.Linear(64, out_channel)

    def forward(self, data):
        h = F.relu(self.linear1(data.x))
        # BUGFIX: make the softmax dimension explicit; the implicit dim is
        # deprecated and warns at runtime (dim=1 is what PyTorch resolves
        # for 2-D input, so behaviour is unchanged).
        return F.log_softmax(self.linear2(h), dim=1)
# Shared training hyper-parameters for all classifier runs below.
params = {'epochs': 10, #Model parameters
          'lr' : 0.001,
          'weight_decay': 0.0005}
# Relative number of observations in train data
print(Counter(trade_data.y[trade_data.train_idx].numpy()))
# Relative number of observations in test data
print(Counter(trade_data.y[trade_data.test_idx].numpy()))
# Train with baseline logistic regression for node classification
# (features only; the graph structure is ignored).
# NOTE(review): this rebinds the module-level tensor ``y`` to a numpy array.
X, y = trade_data.x.clone().numpy(), trade_data.y.clone().numpy()
clf = LogisticRegression(random_state=0, solver='lbfgs',
                         multi_class='multinomial').fit(X[trade_data.train_idx.numpy()], y[trade_data.train_idx.numpy()])
#clf.predict(X[:2, :])
print('Classification score: ', clf.score(X[trade_data.test_idx.numpy()], y[trade_data.test_idx.numpy()]))
print('Predicted targets for test x: ', clf.predict(X[trade_data.test_idx.numpy()]))
# +
# Train node classifier with Graph Convolutional network
classifier_train_test(GCNet, trade_data, **params)
# +
# Classification with ChebyNet
classifier_train_test(ChebyNet, trade_data, **params)
# +
# Classification with Graph Attention Network
classifier_train_test(GATNet, trade_data, **params)
# +
# Classification with Attention-based Graph Neural Network
classifier_train_test(AGNNet, trade_data, **params)
# -
# Run Graph Autoencoder (link prediction)
run_GAE(trade_data, ** params)
# Run Variational Graph Autoencoder (link prediction)
run_VGAE(trade_data, ** params)
# +
# Baseline (non-graph) autoencoder runs -- not implemented yet.
#run_baseline_GAE(trade_data, **params)
# +
#run_baseline_VGAE(trade_data, **params)
# -
# Linear MLP baseline trained through the same harness.
classifier_train_test(linear_baseline, trade_data, **params)
from ray import tune
def search_params():
    """Grid-search lr / weight_decay combinations with scikit-learn.

    NOTE(review): GridSearchCV expects a scikit-learn estimator, not the
    ``classifier_train_test`` function, and GridSearchCV is not imported in
    this notebook -- this helper looks unfinished; confirm before use (the
    ray-tune path below is the one actually exercised).
    """
    params = {'lr': (0.5, 0.3, 0.1), 'weight_decay' : (0.2, 0.5)}
    clf = GridSearchCV(classifier_train_test, params, cv=5)
    clf.fit(trade_data)
    # BUGFIX: the original returned the undefined name ``best_params``
    # (NameError); the fitted search exposes the result as ``best_params_``.
    return clf.best_params_
def tune_params(config):
    """Ray-tune trial entry point: train GATNet with hyper-params from ``config``."""
    trial_params = {'lr': config['lr'],
                    'weight_decay': config['weight_decay'],
                    'epochs': 10}
    classifier_train_test(GATNet, trade_data, **trial_params)
# Grid-search lr / weight_decay with ray-tune; each trial calls tune_params.
analysis = tune.run(tune_params,
                    config={"lr": tune.grid_search([0.001, 0.01]),
                            "weight_decay": tune.grid_search([0.005, 0.0005])
                            })
# "mean_accuracy" is the metric reported via tune.track.log inside
# classifier_train_test.
print("Best config: ", analysis.get_best_config(metric="mean_accuracy"))
# !tensorboard --logdir ~/ray_results
| notebooks/training_nb.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# language: python
# name: python3
# ---
# # Econophysics I
# ## Exercise 03 - H07
#
# ### <NAME>
# ### Universität Duisburg-Essen
# 05.05.2020
# +
# Modules
import numpy as np
from matplotlib import pyplot as plt
from scipy import signal
# -
# ## Exercise 03. Homework 07. Point 01
#
# Sketch the distributions
# $$q_{1}\left(\varepsilon\right)=b_{1}\Theta\left(a_{1}-\left|\varepsilon\right|\right)$$
# and
# $$q_{2}\left(\varepsilon\right)=b_{2}\left(\delta\left(\varepsilon+a_{2}\right)+\delta\left(\varepsilon-a_{2}\right)\right)$$
# +
# Constant values.  With a1 = sqrt(3), b1 = 1/(2*a1) the box distribution is
# normalised; b2 = 1/2 splits unit mass between the two deltas at +/- a2.
a1 = 3. ** 0.5
b1 = 1. / (2. * a1)
a2 = 1.
b2 = 0.5
# -
# Epsilon is a deterministic grid (not random samples) so the full shape of
# the distribution is visible in the plot.
epsilon = np.arange(-5, 5, 0.01)
# Uniform (box) distribution: height b1 where |epsilon| < a1, zero outside.
q1 = b1 * np.heaviside(a1 - np.abs(epsilon), 1)
# Figure
fig = plt.figure(figsize=(16,9))
plt.plot(epsilon, q1, linewidth=10,
         label=r'$q_{1}\left( \epsilon \right)=b_{1}\Theta \left( a_{1}- \left|\epsilon\right|\right)$')
plt.legend(fontsize=30)
plt.ylim(-0.05, 0.4)
plt.xlim(-5, 5)
plt.xticks([-a1, 0, a1], ['$-a_{1}$', '0', '$a_{1}$'], fontsize=30)
plt.yticks([0, b1], ['0', '$b_{1}$'], fontsize=30)
plt.xlabel(r'$\epsilon$', fontsize=40)
plt.ylabel(r'$q_{1} \left( \epsilon \right)$', fontsize=40)
plt.grid(True)
plt.tight_layout()
# Two-delta distribution, sketched with discrete unit impulses on a
# 100-sample axis: index 30 stands for -a2 and index -30 (i.e. 70) for +a2;
# the x-tick labels map those indices back to -a2, 0, a2.
# NOTE(review): this epsilon grid is not used by the q2 plot below (q2 is
# plotted against sample index) -- the reassignment appears redundant.
epsilon = np.arange(-5, 5, 0.01)
q2 = b2 * (signal.unit_impulse(100, 30) + signal.unit_impulse(100, -30))
fig = plt.figure(figsize=(16,9))
plt.plot(q2, linewidth=10,
         label=r'$q_{2}\left( \epsilon \right) = b_{2} \left( \delta \left( \epsilon + a_{2}\right)+\delta \left( \epsilon-a_{2} \right) \right)$')
plt.legend(fontsize=30)
plt.ylim(-0.05, 0.6)
plt.xlim(0, 100)
plt.xticks([30, 50, 70], ['$-a_{2}$', '0', '$a_{2}$'], fontsize=30)
plt.yticks([0, b2], ['0', '$b_{2}$'], fontsize=30)
plt.xlabel(r'$\epsilon$', fontsize=40)
plt.ylabel(r'$q_{2} \left( \epsilon \right)$', fontsize=40)
plt.grid(True)
plt.tight_layout()
| week_3/Exercise03_H07_01.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Django Shell-Plus
# language: python
# name: django_extensions
# ---
import pandas as pd
from django.utils.text import slugify
# Source spreadsheet: one row per vocabulary path; the first column names the
# concept scheme, the remaining columns hold concepts (broad to narrow).
file="data/IAD_Vocabulary_Import.xlsx"
df = pd.read_excel(file)
cols = df.shape[1]
concepts = []
# NOTE(review): SkosConceptScheme / SkosConcept are presumably Django models
# pre-loaded by the shell-plus kernel (see kernelspec) -- confirm.
for i, row in df.iterrows():
    # The scheme title is a slug of the row's first cell.
    temp_scheme, _ = SkosConceptScheme.objects.get_or_create(
        dc_title=slugify(row[0])
    )
    related_concepts = []
    # Remaining cells that contain strings become concepts in that scheme;
    # non-string cells (e.g. NaN for empty cells) are skipped.
    for x in range(1,cols):
        if isinstance(row[x], str):
            concept, _ = SkosConcept.objects.get_or_create(
                pref_label=row[x]
            )
            concept.scheme.set([temp_scheme])
            related_concepts.append(concept)
    concepts.append(related_concepts)
# Link each consecutive pair of concepts within a row as broader -> narrower.
# BUGFIX: the original walked the list with a manual index, a bare ``except:``
# (which can mask real errors) and a ``False`` sentinel just to detect the end
# of the row; pairing with zip() removes all three.
for row in concepts:
    for broader, narrower in zip(row, row[1:]):
        narrower.broader_concept = broader
        narrower.save()
| csv2vocabs.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# language: python
# name: python3
# ---
# ## Dataset Normalization.
# Make the project root importable when the notebook runs from notebooks/.
from importlib.util import find_spec
if find_spec("vgg") is None:
    import sys
    sys.path.append('..')
import numpy as np
from torchvision import datasets, transforms
# ToTensor only -- we want statistics of the raw pixels, not normalized ones.
train_transform = transforms.Compose([transforms.ToTensor()])
# ### CIFAR10
# Per-channel mean/std of the raw uint8 training images, scaled to [0, 1];
# these are the values to plug into transforms.Normalize.
train_set = datasets.CIFAR10('../data', train=True, download=True, transform=train_transform)
print(f"shape of train data: {train_set.data.shape}")
print(f"mean of train data: {train_set.data.mean(axis=(0, 1, 2)) / 255}")
print(f"std of train data: {train_set.data.std(axis=(0, 1, 2)) / 255}")
# ### CIFAR100
# Same statistics for CIFAR-100.
train_set = datasets.CIFAR100('../data', train=True, download=True, transform=train_transform)
print(f"shape of train data: {train_set.data.shape}")
print(f"mean of train data: {train_set.data.mean(axis=(0, 1, 2)) / 255}")
print(f"std of train data: {train_set.data.std(axis=(0, 1, 2)) / 255}")
| notebooks/08_data_normalization.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 2
# language: python
# name: python2
# ---
# Benchmark: Moresi and Solomatov 1995
# =======
#
# Stagnant lid convection model
# ------
#
# **This example covers the concepts of:**
# 1. Changing rheologies, specifically temperature dependent viscosity function.
# 2. Stokes solver options.
# 3. Saving and loading FE variables.
#
# **Keywords:** particle swarms, Stokes system, advective diffusive systems
#
# **References**
#
# Moresi, <NAME>., and <NAME> (1995), Numerical investigation of 2d convection with extremely large viscosity variations, Phys. Fluids, 7, 2154–2162.
#
# <!--
# 1. <NAME> and <NAME>, Mantle convection with a brittle lithosphere: thoughts on the global tectonic styles of the Earth and Venus. Geophys. J. Int. (1998) 133 (3): 669-682.
# http://gji.oxfordjournals.org/content/133/3/669.short
# -->
#
# %matplotlib inline
import matplotlib.pyplot as pyplot
import matplotlib.pylab as pylab
import numpy as np
import underworld as uw
import math
from underworld import function as fn
import glucifer
import time
# Setup parameters
# -----
#
# Non-dimensional model parameters (unit box, unit temperature drop).
dim = 2
boxLength = 1.0
boxHeight = 1.0
tempMin = 0.0
tempMax = 1.0
# Set the resolution.
res = 32
# Set the Rayleigh number (redefined with the same value in the rheology
# section below).
Ra=1.e6
# **Input/output paths**
#
# Set input and output file directory paths. For this example the input directory contains near steady state snapshots of the velocity, pressure and temperature fields. It also contains a summary of the Nusselt and $v_{RMS}$ values against time from the simulation used to make these snapshots.
inputPath = 'MandS_Input/'
outputPath = 'MandS_Output/'
# Make output directory if necessary
import os
if not os.path.exists(outputPath):
    os.makedirs(outputPath)
# Create mesh and finite element variables
# ------
#
# In this case the mesh type used is different. For more information on the different mesh types see the user guide.
# +
# "Q2/dPc1": quadratic velocity elements with discontinuous linear pressure.
mesh = uw.mesh.FeMesh_Cartesian( elementType = ("Q2/dPc1"),
                                 elementRes = (res, res),
                                 minCoord = (0., 0.),
                                 maxCoord = (boxLength, boxHeight) )
# Unknown fields: velocity (dim dofs/node), pressure on the sub-mesh,
# temperature, and the temperature time-derivative used by the AD solver.
velocityField = uw.mesh.MeshVariable( mesh=mesh, nodeDofCount=dim )
pressureField = uw.mesh.MeshVariable( mesh=mesh.subMesh, nodeDofCount=1 )
temperatureField = uw.mesh.MeshVariable( mesh=mesh, nodeDofCount=1 )
tempDotField = uw.mesh.MeshVariable( mesh=mesh, nodeDofCount=1 )
# -
# Set initial conditions and boundary conditions
# ----------
#
# **Initial and boundary conditions**
#
# Either set by perturbation function or load data from file.
# Toggle: load a near-steady-state 32x32 snapshot, or start from a perturbed
# conductive profile (next cell).
LoadFromFile = False
# **If loading from file**
#
# Read 32\*32 resolution data for $P$, $v$ and $T$ fields as well as existing summary statistics data. These are converted into lists so that the main time loop below will append with new values.
#
if(LoadFromFile == True):
    # set up mesh for 32*32 data file
    mesh32 = uw.mesh.FeMesh_Cartesian( elementType = ("Q2/dPc1"),
                                       elementRes = (32, 32),
                                       minCoord = (0., 0.),
                                       maxCoord = (boxLength, boxHeight) )
    temperatureField32 = uw.mesh.MeshVariable( mesh=mesh32, nodeDofCount=1 )
    pressureField32 = uw.mesh.MeshVariable( mesh=mesh32.subMesh, nodeDofCount=1 )
    velocityField32 = uw.mesh.MeshVariable( mesh=mesh32, nodeDofCount=dim )
    temperatureField32.load(inputPath+'Arrhenius_32_T.inp')
    velocityField32.load(inputPath+'Arrhenius_32_v.inp')
    pressureField32.load(inputPath+'Arrhenius_32_P.inp')
    # Interpolate the 32x32 snapshot onto the current-resolution mesh.
    temperatureField.data[:] = temperatureField32.evaluate(mesh)
    pressureField.data[:] = pressureField32.evaluate(mesh.subMesh)
    velocityField.data[:] = velocityField32.evaluate(mesh)
    # load summary statistics into arrays (lists, so the time loop can append)
    data = np.loadtxt(inputPath+'ArrSumary.txt', unpack=True )
    timeVal, vrmsVal, nuVal = data[0].tolist(), data[1].tolist(), data[2].tolist()
# **If _not_ loading from file: Initialise data**
#
# Start with a perturbed temperature gradient to speed up the convergence to the benchmark steady state solution.
if(LoadFromFile == False):
    # Start from rest with a zero pressure and temperature field.
    velocityField.data[:] = [0.,0.]
    pressureField.data[:] = 0.
    temperatureField.data[:] = 0.
    pertStrength = 0.1
    deltaTemp = tempMax - tempMin
    # Linear conductive profile plus a small sinusoidal perturbation,
    # clamped to [tempMin, tempMax].
    for index, coord in enumerate(mesh.data):
        pertCoeff = math.cos( math.pi * coord[0]/boxLength ) * math.sin( math.pi * coord[1]/boxLength )
        temperatureField.data[index] = tempMin + deltaTemp*(boxHeight - coord[1]) + pertStrength * pertCoeff
        temperatureField.data[index] = max(tempMin, min(tempMax, temperatureField.data[index]))
    # initialise summary statistics arrays
    timeVal = []
    vrmsVal = []
    nuVal = []
# **Boundary conditions**
#
# This step is to ensure that the temperature boundary conditions are satisfied, as the initial conditions above may have been set to different values on the boundaries.
# Hot (tempMax) bottom wall, cold (tempMin) top wall.
for index in mesh.specialSets["MinJ_VertexSet"]:
    temperatureField.data[index] = tempMax
for index in mesh.specialSets["MaxJ_VertexSet"]:
    temperatureField.data[index] = tempMin
# **Conditions on the boundaries**
#
# Construct sets for the both horizontal and vertical walls. Combine the sets of vertices to make the I (left and right side walls) and J (top and bottom walls) sets. Note that both sets contain the corners of the box.
# +
# I sets: left+right side walls; J sets: top+bottom walls (corners included).
iWalls = mesh.specialSets["MinI_VertexSet"] + mesh.specialSets["MaxI_VertexSet"]
jWalls = mesh.specialSets["MinJ_VertexSet"] + mesh.specialSets["MaxJ_VertexSet"]
allWalls = iWalls + jWalls
# Velocity Dirichlet condition: dof 0 (vx) fixed on all walls, dof 1 (vy) on
# top/bottom.  NOTE(review): pinning vx on the top/bottom walls too goes
# beyond a pure free-slip condition -- confirm this is intended.
freeslipBC = uw.conditions.DirichletCondition( variable = velocityField,
                                               indexSetsPerDof = (allWalls,jWalls) )
# Temperature fixed only on the top and bottom walls.
tempBC = uw.conditions.DirichletCondition( variable = temperatureField,
                                           indexSetsPerDof = (jWalls,) )
# -
# **Plot initial conditions**
#
# Automatically scale the size of the vector arrows for the velocity field maximum.
velmax = np.amax(velocityField.data[:])
if(velmax==0.0): velmax = 1.0
figtemp = glucifer.Figure()
tempminmax = fn.view.min_max(temperatureField)
figtemp.append( glucifer.objects.Surface(mesh, tempminmax) )
figtemp.append( glucifer.objects.VectorArrows(mesh, velocityField, scaling=0.1/velmax, arrowHead=0.2 ) )
figtemp.show()
# Set up material parameters and functions
# -----
# +
# Rheology: exponential temperature-dependent viscosity.  With
# activationEnergy = ln(delta_Eta), eta ranges from delta_Eta at T = 0 down
# to 1 at T = 1, i.e. a viscosity contrast of delta_Eta across the layer.
delta_Eta = 1.0e6
activationEnergy = np.log(delta_Eta)
Ra = 1.e6
fn_viscosity = delta_Eta * fn.math.exp( - activationEnergy * temperatureField )
# Thermal buoyancy magnitude, Ra * T.
densityFn = Ra*temperatureField
# Define our vertical unit vector using a python tuple (this will be automatically converted to a function)
z_hat = ( 0.0, 1.0 )
# Now create a buoyancy force fn using the density (acts vertically).
buoyancyFn = z_hat*densityFn
# -
# **Plot the viscosity**
#
figEta = glucifer.Figure()
figEta.append( glucifer.objects.Surface(mesh, fn_viscosity) )
figEta.show()
# System setup
# -----
#
# **Setup a Stokes system: advanced solver settings**
#
# Set up parameters for the Stokes system solver. For PIC-style integration, include a swarm, within which a PIC integration swarm is generated. For Gauss integration, simply do not include the swarm. Nearest neighbour is used where required.
# +
# SafeMaths wraps the viscosity function so numerical exceptions during the
# solve are caught rather than propagating silently.
stokes = uw.systems.Stokes(velocityField=velocityField,
                           pressureField=pressureField,
                           conditions=[freeslipBC,],
                           fn_viscosity=fn.exception.SafeMaths(fn_viscosity),
                           fn_bodyforce=buoyancyFn )
solver=uw.systems.Solver(stokes)
# Direct (MUMPS) inner solve with a large penalty term.
solver.set_inner_method("mumps")
solver.set_penalty(1.0e7)
# -
# **Create an advective diffusive system**
#
advDiff = uw.systems.AdvectionDiffusion( temperatureField, tempDotField, velocityField, fn_diffusivity=1., conditions=[tempBC,], _allow_non_q1=True )
# Analysis tools
# -----
#
# **RMS velocity**
#
# Set up integrals used to calculate the RMS velocity.
# Integrals of |v|^2 and of the domain volume; vrms = sqrt(<v.v> / V).
v2sum_integral = uw.utils.Integral( mesh=mesh, fn=fn.math.dot(velocityField, velocityField) )
volume_integral = uw.utils.Integral( mesh=mesh, fn=1. )
# **Nusselt number**
#
def FindNusseltNumber(temperatureField, linearMesh, xmax, zmax):
    """Estimate the Nusselt number.

    Ratio of the summed vertical temperature gradient on the top wall to the
    summed temperature on the bottom wall, scaled by -zmax (box height).
    ``xmax`` is kept for interface compatibility but is not used.
    """
    # BUGFIX: use the mesh passed as an argument; the original read the
    # global ``mesh`` and silently ignored ``linearMesh``.  Also dropped two
    # unused locals (tempgradField / vertGradField were computed and then
    # the gradient was re-derived anyway).
    vertGradFn = temperatureField.fn_gradient[1]
    GradValues = vertGradFn.evaluate(linearMesh.specialSets["MaxJ_VertexSet"])
    TopInt = sum(GradValues)
    BottomInt = 0.0
    for index in linearMesh.specialSets["MinJ_VertexSet"]:
        BottomInt += temperatureField.data[index]
    Nu = -zmax*TopInt/BottomInt
    return Nu[0]
# Main simulation loop
# -----
#
# The main time stepping loop begins here. Before this the time and timestep are initialised to zero and the output statistics arrays are set up. Since this may be a continuation of the saved data with associated summary statistics then check if there are existing time values first. If there is existing data then add new simulation statistics after the existing data.
# +
steps_prev = len(timeVal)
steps = 0
steps_end = 10000
step_out = 25
# Set time to zero, unless we are loading from file.
try:
time_start = timeVal[-1]
except:
time_start = 0.0
print 'Begining at t = ',time_start,' after having completed ',steps_prev,' steps'
simtime = time_start
# +
# Setup clock to calculate simulation CPU time.
start = time.clock()
# Perform steps_end steps: Stokes solve, then advect/diffuse temperature.
rms_v, nu_no = 0.0, 0.0
while steps<steps_end:
    # Get solution for initial configuration.
    solver.solve()
    # Retrieve the maximum possible timestep for the AD system.
    dt = advDiff.get_max_dt()
    if steps == 0:
        dt = 0.
    # Advect using this timestep size.
    advDiff.integrate(dt)
    # Calculate the RMS velocity.
    rms_v = math.sqrt(v2sum_integral.evaluate()[0]/volume_integral.evaluate()[0])
    nu_no = FindNusseltNumber(temperatureField, mesh, boxLength, boxHeight)
    # Increment time and store results.
    simtime += dt
    steps += 1
    vrmsVal.append( rms_v )
    timeVal.append( simtime )
    nuVal.append(nu_no)
    # Periodic progress report.
    # NOTE(review): the ``steps ==0`` clause is dead code -- steps has just
    # been incremented above, so it is at least 1 here.
    if steps ==0 or steps % step_out == 0:
        print "steps = {:04d}; time = {:.6e}; vrms = {:6.2f}; nusselt = {:6.4f}; CPU = {:4.1f}s".format(
            steps, simtime, rms_v, nu_no, time.clock()-start)
# Final diagnostics after the loop.
v2sum = v2sum_integral.evaluate()
# NOTE(review): duplicated assignment ``rms_v = rms_v = ...`` -- harmless,
# but likely a typo.
rms_v = rms_v = math.sqrt(v2sum_integral.evaluate()[0]/volume_integral.evaluate()[0])
# nu_no = -surface_Tgradient.integrate()[0] / basalT.integrate()[0]
nu_no = FindNusseltNumber(temperatureField, mesh, boxLength, boxHeight)
print "steps = {:04d}; time = {:.6e}; vrms = {:6.2f}; nusselt = {:6.4f}; CPU = {:4.1f}s".format(
    steps, simtime, rms_v, nu_no, time.clock()-start)
# -
# Post simulation analysis
# -----
#
# **Check CPU timing**
#
# Output timing for calculation per simulation time step
# +
# Average wall-clock time per timestep.
# NOTE(review): divides by the *total* step count rather than the steps run
# in this session (steps - steps_prev) -- confirm intent when resuming from
# a loaded snapshot.
if(steps!=steps_prev):
    avtime = (time.clock() - start)/float(steps)
else:
    avtime = 0.0
print "Average time per timestep = ",avtime," seconds over ", steps, " steps"
# -
# Pre-run model took about $1.74$ seconds per timestep and was run for $13000$ timesteps for $Ra = 10^6$ for a $64\times64$ grid.
# **Save data to files**
#
# Save system summary data; $v_{rms}$ and the Nusselt number against time.
#
# Save the summary statistics (time, vrms, Nusselt number) as plain text.
np.savetxt( outputPath+'MandS_Summary.out', np.c_[timeVal, vrmsVal, nuVal], header="Time, VRMS, Nusselt" )
# Save final temperature, velocity and pressure fields.
temperatureField.save(outputPath+'MandS_T.out')
velocityField.save(outputPath+'MandS_v.out')
pressureField.save(outputPath+'MandS_p.out')
# **Plot temperature field with velocity vectors overlaid**
# Arrow lengths scaled by the current maximum velocity component.
# NOTE(review): unlike the initial-conditions plot, there is no guard against
# velmax == 0 here (division by zero for a motionless field) -- confirm.
velmax = np.amax(velocityField.data[:])
figVT = glucifer.Figure()
figVT.append( glucifer.objects.Surface(mesh, temperatureField) )
figVT.append( glucifer.objects.VectorArrows(mesh, velocityField, scaling=0.1/velmax, arrowHead=0.2) )
figVT.show()
# **Plot system summary information**
#
# If the initial conditions were loaded up from a data file then these arrays will contain the saved data as well as results from the simulation just run.
# +
pylab.rcParams[ 'figure.figsize'] = 14, 8
pyplot.figure(1)
# Nusselt number (top panel) and vrms (bottom panel) versus model time.
pyplot.subplot(211)
pyplot.plot(timeVal, nuVal)
pyplot.ylabel('Nusselt Number')
pyplot.subplot(212)
pyplot.plot(timeVal,vrmsVal)
pyplot.xlabel('Time')
pyplot.ylabel('Vrms')
pyplot.show()
# -
| notebooks/publications/MoresiSolomatov-1995/MoresiAndSolomatov1995.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# language: python
# name: python3
# ---
# +
# This notebook separates images into two folders based on having or not having an anatomical landmark on them.
# -
import csv
import cv2
import numpy
import os
import pandas
import shutil
# +
# Root folder is the only input for this notebook.
# Set RootFolder as full path of the folder that includes annotations and images subfolders.
# Use a raw string so backslashes in the Windows path are never treated as
# escape sequences ("\T" happens to be harmless today, but non-raw Windows
# paths emit a SyntaxWarning on Python 3.12+ and break on sequences like \t).
RootFolder = r"m:\Temp"

images_folder = os.path.join(RootFolder, "images")
annotations_folder = os.path.join(RootFolder, "annotations")

# Enumerate the inputs up front so the counts below reflect what will be
# processed by the later cells.
image_file_list = os.listdir(images_folder)
annotation_file_list = os.listdir(annotations_folder)
num_images = len(image_file_list)
print("Found {} images".format(num_images))
print("Found {} annotation files".format(len(annotation_file_list)))
# +
# Read annotation data from csv files and concatenate them into a single
# DataFrame.  Collect the per-file frames first and concatenate once:
# calling pandas.concat inside the loop re-copies the accumulated rows on
# every iteration (quadratic in the total number of rows).
frames = [
    pandas.read_csv(os.path.join(annotations_folder, name))
    for name in annotation_file_list
]
# pandas.concat raises on an empty list, so fall back to an empty frame
# (matching the original behavior when there are no annotation files).
df = pandas.concat(frames) if frames else pandas.DataFrame()
print("Number of positive images found: {}".format(df.shape[0]))
# +
# Create the per-class output folders if they are not already present.
positive_folder = os.path.join(images_folder, 'positive')
negative_folder = os.path.join(images_folder, 'negative')
for class_folder in (positive_folder, negative_folder):
    if not os.path.exists(class_folder):
        os.makedirs(class_folder)
        print("Created folder: " + class_folder)
# +
# Sort every image into 'positive' or 'negative' depending on whether any
# annotation row references its filename.
for file_name in image_file_list:
    source_path = os.path.join(images_folder, file_name)
    if (df.filename == file_name).any():
        destination_path = os.path.join(positive_folder, file_name)
    else:
        # no annotation rows mention this image -> negative class
        destination_path = os.path.join(negative_folder, file_name)
    shutil.copy2(source_path, destination_path)
# -
| Notebooks/ClassesToFolders.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# language: python
# name: python3
# ---
# # Convolutional Neural Networks
#
# ## Project: Write an Algorithm for a Dog Identification App
#
# ---
#
# In this notebook, some template code has already been provided for you, and you will need to implement additional functionality to successfully complete this project. You will not need to modify the included code beyond what is requested. Sections that begin with **'(IMPLEMENTATION)'** in the header indicate that the following block of code will require additional functionality which you must provide. Instructions will be provided for each section, and the specifics of the implementation are marked in the code block with a 'TODO' statement. Please be sure to read the instructions carefully!
#
# > **Note**: Once you have completed all of the code implementations, you need to finalize your work by exporting the Jupyter Notebook as an HTML document. Before exporting the notebook to html, all of the code cells need to have been run so that reviewers can see the final implementation and output. You can then export the notebook by using the menu above and navigating to **File -> Download as -> HTML (.html)**. Include the finished document along with this notebook as your submission.
#
# In addition to implementing code, there will be questions that you must answer which relate to the project and your implementation. Each section where you will answer a question is preceded by a **'Question X'** header. Carefully read each question and provide thorough answers in the following text boxes that begin with **'Answer:'**. Your project submission will be evaluated based on your answers to each of the questions and the implementation you provide.
#
# >**Note:** Code and Markdown cells can be executed using the **Shift + Enter** keyboard shortcut. Markdown cells can be edited by double-clicking the cell to enter edit mode.
#
# The rubric contains _optional_ "Stand Out Suggestions" for enhancing the project beyond the minimum requirements. If you decide to pursue the "Stand Out Suggestions", you should include the code in this Jupyter notebook.
#
#
#
# ---
# ### Why We're Here
#
# In this notebook, you will make the first steps towards developing an algorithm that could be used as part of a mobile or web app. At the end of this project, your code will accept any user-supplied image as input. If a dog is detected in the image, it will provide an estimate of the dog's breed. If a human is detected, it will provide an estimate of the dog breed that is most resembling. The image below displays potential sample output of your finished project (... but we expect that each student's algorithm will behave differently!).
#
# 
#
# In this real-world setting, you will need to piece together a series of models to perform different tasks; for instance, the algorithm that detects humans in an image will be different from the CNN that infers dog breed. There are many points of possible failure, and no perfect algorithm exists. Your imperfect solution will nonetheless create a fun user experience!
#
# ### The Road Ahead
#
# We break the notebook into separate steps. Feel free to use the links below to navigate the notebook.
#
# * [Step 0](#step0): Import Datasets
# * [Step 1](#step1): Detect Humans
# * [Step 2](#step2): Detect Dogs
# * [Step 3](#step3): Create a CNN to Classify Dog Breeds (from Scratch)
# * [Step 4](#step4): Create a CNN to Classify Dog Breeds (using Transfer Learning)
# * [Step 5](#step5): Write your Algorithm
# * [Step 6](#step6): Test Your Algorithm
#
# ---
# <a id='step0'></a>
# ## Step 0: Import Datasets
#
# Make sure that you've downloaded the required human and dog datasets:
# * Download the [dog dataset](https://s3-us-west-1.amazonaws.com/udacity-aind/dog-project/dogImages.zip). Unzip the folder and place it in this project's home directory, at the location `/dogImages`.
#
# * Download the [human dataset](https://s3-us-west-1.amazonaws.com/udacity-aind/dog-project/lfw.zip). Unzip the folder and place it in the home directory, at location `/lfw`.
#
# *Note: If you are using a Windows machine, you are encouraged to use [7zip](http://www.7-zip.org/) to extract the folder.*
#
# In the code cell below, we save the file paths for both the human (LFW) dataset and dog dataset in the numpy arrays `human_files` and `dog_files`.
# +
import numpy as np
from glob import glob
# load filenames for human and dog images
# NOTE(review): these are machine-specific absolute paths -- point them at
# your local copies of the LFW and dogImages datasets.
human_files = np.array(glob("/home/en/Git/deep-learning-datasets/lfw/*/*"))
dog_files = np.array(glob("/home/en/Git/deep-learning-datasets/dogImages/*/*/*"))
# print number of images in each dataset
print('There are %d total human images.' % len(human_files))
print('There are %d total dog images.' % len(dog_files))
# -
# <a id='step1'></a>
# ## Step 1: Detect Humans
#
# In this section, we use OpenCV's implementation of [Haar feature-based cascade classifiers](http://docs.opencv.org/trunk/d7/d8b/tutorial_py_face_detection.html) to detect human faces in images.
#
# OpenCV provides many pre-trained face detectors, stored as XML files on [github](https://github.com/opencv/opencv/tree/master/data/haarcascades). We have downloaded one of these detectors and stored it in the `haarcascades` directory. In the next code cell, we demonstrate how to use this detector to find human faces in a sample image.
# +
import cv2
import matplotlib.pyplot as plt
# %matplotlib inline
# extract pre-trained face detector
face_cascade = cv2.CascadeClassifier('haarcascades/haarcascade_frontalface_alt.xml')
# load color (BGR) image -- cv2.imread returns None for an unreadable path
img = cv2.imread(human_files[0])
# convert BGR image to grayscale (the Haar cascade operates on grayscale)
gray = cv2.cvtColor(img, cv2.COLOR_BGR2GRAY)
# find faces in image
faces = face_cascade.detectMultiScale(gray)
# print number of faces detected in the image
print('Number of faces detected:', len(faces))
# get bounding box for each detected face; each row is (x, y, width, height)
for (x,y,w,h) in faces:
    # add bounding box to color image
    cv2.rectangle(img,(x,y),(x+w,y+h),(255,0,0),2)
# convert BGR image to RGB for plotting (matplotlib expects RGB)
cv_rgb = cv2.cvtColor(img, cv2.COLOR_BGR2RGB)
# display the image, along with bounding box
plt.imshow(cv_rgb)
plt.show()
# -
# Before using any of the face detectors, it is standard procedure to convert the images to grayscale. The `detectMultiScale` function executes the classifier stored in `face_cascade` and takes the grayscale image as a parameter.
#
# In the above code, `faces` is a numpy array of detected faces, where each row corresponds to a detected face. Each detected face is a 1D array with four entries that specifies the bounding box of the detected face. The first two entries in the array (extracted in the above code as `x` and `y`) specify the horizontal and vertical positions of the top left corner of the bounding box. The last two entries in the array (extracted here as `w` and `h`) specify the width and height of the box.
#
# ### Write a Human Face Detector
#
# We can use this procedure to write a function that returns `True` if a human face is detected in an image and `False` otherwise. This function, aptly named `face_detector`, takes a string-valued file path to an image as input and appears in the code block below.
# returns "True" if face is detected in image stored at img_path
def face_detector(img_path):
    """Report whether the Haar-cascade detector finds at least one
    human face in the image stored at *img_path*."""
    bgr_image = cv2.imread(img_path)
    grayscale = cv2.cvtColor(bgr_image, cv2.COLOR_BGR2GRAY)
    detections = face_cascade.detectMultiScale(grayscale)
    return len(detections) > 0
# ### (IMPLEMENTATION) Assess the Human Face Detector
#
# __Question 1:__ Use the code cell below to test the performance of the `face_detector` function.
# - What percentage of the first 100 images in `human_files` have a detected human face?
# - What percentage of the first 100 images in `dog_files` have a detected human face?
#
# Ideally, we would like 100% of human images with a detected face and 0% of dog images with a detected face. You will see that our algorithm falls short of this goal, but still gives acceptable performance. We extract the file paths for the first 100 images from each of the datasets and store them in the numpy arrays `human_files_short` and `dog_files_short`.
# __Answer:__
# (You can print out your results and/or write your percentages in this cell)
human_files_short = human_files[:100]
dog_files_short = dog_files[:100]
# +
#-#-# Do NOT modify the code above this line. #-#-#
## TODO: Test the performance of the face_detector algorithm
## on the images in human_files_short and dog_files_short.
faces_detected = 0
dog_faces_detected = 0
# face_detector returns a bool; True adds 1, so these sums are detection counts
for f in human_files_short:
    faces_detected += face_detector(f)
for f in dog_files_short:
    dog_faces_detected += face_detector(f)
# -
print("Human faces detected: {:.2f}, {:d}/{:d}".format(faces_detected/len(human_files_short), faces_detected, len(human_files_short)))
print("Dog faces detected: {:.2f}, {:d}/{:d}".format(dog_faces_detected/len(dog_files_short), dog_faces_detected, len(dog_files_short)))
# We suggest the face detector from OpenCV as a potential way to detect human images in your algorithm, but you are free to explore other approaches, especially approaches that make use of deep learning :). Please use the code cell below to design and test your own face detection algorithm. If you decide to pursue this _optional_ task, report performance on `human_files_short` and `dog_files_short`.
# ---
# <a id='step2'></a>
# ## Step 2: Detect Dogs
#
# In this section, we use a [pre-trained model](http://pytorch.org/docs/master/torchvision/models.html) to detect dogs in images.
#
# ### Obtain Pre-trained VGG-16 Model
#
# The code cell below downloads the VGG-16 model, along with weights that have been trained on [ImageNet](http://www.image-net.org/), a very large, very popular dataset used for image classification and other vision tasks. ImageNet contains over 10 million URLs, each linking to an image containing an object from one of [1000 categories](https://gist.github.com/yrevar/942d3a0ac09ec9e5eb3a).
# +
import torch
import torchvision.models as models
# define VGG16 model (ImageNet-pretrained weights; downloaded on first use)
VGG16 = models.vgg16(pretrained=True)
# check if CUDA is available and move the model to the GPU when it is
device = torch.device("cuda:0" if torch.cuda.is_available() else "cpu")
VGG16.to(device)
# -
# Given an image, this pre-trained VGG-16 model returns a prediction (derived from the 1000 possible categories in ImageNet) for the object that is contained in the image.
# ### (IMPLEMENTATION) Making Predictions with a Pre-trained Model
#
# In the next code cell, you will write a function that accepts a path to an image (such as `'dogImages/train/001.Affenpinscher/Affenpinscher_00001.jpg'`) as input and returns the index corresponding to the ImageNet class that is predicted by the pre-trained VGG-16 model. The output should always be an integer between 0 and 999, inclusive.
#
# Before writing the function, make sure that you take the time to learn how to appropriately pre-process tensors for pre-trained models in the [PyTorch documentation](http://pytorch.org/docs/stable/torchvision/models.html).
from PIL import Image
import torchvision.transforms as transforms
# same device selection as above so inputs land where the model lives
device = torch.device("cuda:0" if torch.cuda.is_available() else "cpu")
def VGG16_predict(img_path):
    '''
    Use pre-trained VGG-16 model to obtain index corresponding to
    predicted ImageNet class for image at specified path.

    Args:
        img_path: path to an image

    Returns:
        int: index (0-999) of the ImageNet class predicted by VGG-16

    NOTE(review): pixels are only scaled to [0, 1]; torchvision's
    pre-trained models also expect ImageNet mean/std normalization --
    confirm whether that omission is intentional before relying on
    absolute accuracy.
    '''
    # inference only: eval mode disables dropout; no_grad skips autograd state
    VGG16.eval()
    # force 3 channels so grayscale/RGBA files don't break the transpose below
    image = Image.open(img_path).convert('RGB')
    # resize to the 224 x 224 input expected by VGG-16.  Image.LANCZOS is the
    # current name of the ANTIALIAS filter (ANTIALIAS was removed in Pillow 10).
    image = image.resize((224, 224), Image.LANCZOS)
    # HWC uint8 -> CHW float in [0, 1]
    array = np.transpose(np.array(image) / 255.0, (2, 0, 1))
    # add a leading batch dimension, convert to a float tensor on the device
    batch = torch.tensor(np.expand_dims(array, 0)).float().to(device)
    with torch.no_grad():
        # softmax normalizes the logits (the argmax is unaffected either way)
        probabilities = torch.softmax(VGG16.forward(batch), 1)
    # index of the highest-probability class
    _, index = probabilities.max(1)
    return int(index)  # predicted class index as a plain Python int
# ### (IMPLEMENTATION) Write a Dog Detector
#
# While looking at the [dictionary](https://gist.github.com/yrevar/942d3a0ac09ec9e5eb3a), you will notice that the categories corresponding to dogs appear in an uninterrupted sequence and correspond to dictionary keys 151-268, inclusive, to include all categories from `'Chihuahua'` to `'Mexican hairless'`. Thus, in order to check to see if an image is predicted to contain a dog by the pre-trained VGG-16 model, we need only check if the pre-trained model predicts an index between 151 and 268 (inclusive).
#
# Use these ideas to complete the `dog_detector` function below, which returns `True` if a dog is detected in an image (and `False` if not).
### returns "True" if a dog is detected in the image stored at img_path
def dog_detector(img_path):
    """Return True when VGG-16 predicts any dog class for the image.

    ImageNet class indices 151-268 (inclusive) are exactly the dog breeds,
    'Chihuahua' through 'Mexican hairless', so a prediction in that range
    means a dog was detected.
    """
    # return the comparison directly instead of an if/else returning True/False
    return 151 <= VGG16_predict(img_path) <= 268
# ### (IMPLEMENTATION) Assess the Dog Detector
#
# __Question 2:__ Use the code cell below to test the performance of your `dog_detector` function.
# - What percentage of the images in `human_files_short` have a detected dog?
# - What percentage of the images in `dog_files_short` have a detected dog?
# __Answer:__
#
# +
faces_detected_vgg16 = 0
dog_faces_detected_vgg16 = 0
# dog_detector returns a bool; summing booleans counts the positive detections
for f in human_files_short:
    faces_detected_vgg16 += dog_detector(f)
for f in dog_files_short:
    dog_faces_detected_vgg16 += dog_detector(f)
# -
# typo fix in the report string: "sing VGG16" -> "using VGG16"
print("Human faces detected using VGG16: {:.2f}, {:d}/{:d}".format(faces_detected_vgg16/len(human_files_short), faces_detected_vgg16, len(human_files_short)))
print("Dog faces detected using VGG16: {:.2f}, {:d}/{:d}".format(dog_faces_detected_vgg16/len(dog_files_short), dog_faces_detected_vgg16, len(dog_files_short)))
# We suggest VGG-16 as a potential network to detect dog images in your algorithm, but you are free to explore other pre-trained networks (such as [Inception-v3](http://pytorch.org/docs/master/torchvision/models.html#inception-v3), [ResNet-50](http://pytorch.org/docs/master/torchvision/models.html#id3), etc). Please use the code cell below to test other pre-trained PyTorch models. If you decide to pursue this _optional_ task, report performance on `human_files_short` and `dog_files_short`.
# ---
# <a id='step3'></a>
# ## Step 3: Create a CNN to Classify Dog Breeds (from Scratch)
#
# Now that we have functions for detecting humans and dogs in images, we need a way to predict breed from images. In this step, you will create a CNN that classifies dog breeds. You must create your CNN _from scratch_ (so, you can't use transfer learning _yet_!), and you must attain a test accuracy of at least 10%. In Step 4 of this notebook, you will have the opportunity to use transfer learning to create a CNN that attains greatly improved accuracy.
#
# We mention that the task of assigning breed to dogs from images is considered exceptionally challenging. To see why, consider that *even a human* would have trouble distinguishing between a Brittany and a Welsh Springer Spaniel.
#
# Brittany | Welsh Springer Spaniel
# - | -
# <img src="images/Brittany_02625.jpg" width="100"> | <img src="images/Welsh_springer_spaniel_08203.jpg" width="200">
#
# It is not difficult to find other dog breed pairs with minimal inter-class variation (for instance, Curly-Coated Retrievers and American Water Spaniels).
#
# Curly-Coated Retriever | American Water Spaniel
# - | -
# <img src="images/Curly-coated_retriever_03896.jpg" width="200"> | <img src="images/American_water_spaniel_00648.jpg" width="200">
#
#
# Likewise, recall that labradors come in yellow, chocolate, and black. Your vision-based algorithm will have to conquer this high intra-class variation to determine how to classify all of these different shades as the same breed.
#
# Yellow Labrador | Chocolate Labrador | Black Labrador
# - | -
# <img src="images/Labrador_retriever_06457.jpg" width="150"> | <img src="images/Labrador_retriever_06455.jpg" width="240"> | <img src="images/Labrador_retriever_06449.jpg" width="220">
#
# We also mention that random chance presents an exceptionally low bar: setting aside the fact that the classes are slightly imbalanced, a random guess will provide a correct answer roughly 1 in 133 times, which corresponds to an accuracy of less than 1%.
#
# Remember that the practice is far ahead of the theory in deep learning. Experiment with many different architectures, and trust your intuition. And, of course, have fun!
#
# ### (IMPLEMENTATION) Specify Data Loaders for the Dog Dataset
#
# Use the code cell below to write three separate [data loaders](http://pytorch.org/docs/stable/data.html#torch.utils.data.DataLoader) for the training, validation, and test datasets of dog images (located at `dogImages/train`, `dogImages/valid`, and `dogImages/test`, respectively). You may find [this documentation on custom datasets](http://pytorch.org/docs/stable/torchvision/datasets.html) to be a useful resource. If you are interested in augmenting your training and/or validation data, check out the wide variety of [transforms](http://pytorch.org/docs/stable/torchvision/transforms.html?highlight=transform)!
# +
import os
import numpy as np
import torch
from torchvision import datasets, transforms
### TODO: Write data loaders for training, validation, and test sets
## Specify appropriate transforms, and batch_sizes
data_dir = '/home/en/Git/deep-learning-datasets/dogImages'
batch_size = 32
num_workers = 6
image_size = 224
# TODO: Define transforms for the training data and testing data
# Training: random rotation/crop/flip augmentation, then ImageNet
# mean/std normalization.
train_transforms = transforms.Compose([transforms.RandomRotation(30),
                                       transforms.RandomResizedCrop(image_size),
                                       transforms.RandomHorizontalFlip(),
                                       transforms.ToTensor(),
                                       transforms.Normalize([0.485, 0.456, 0.406],
                                                            [0.229, 0.224, 0.225])])
# Validation/test: deterministic resize + center crop, same normalization.
test_transforms = transforms.Compose([transforms.Resize(255),
                                      transforms.CenterCrop(image_size),
                                      transforms.ToTensor(),
                                      transforms.Normalize([0.485, 0.456, 0.406],
                                                           [0.229, 0.224, 0.225])])
# Pass transforms in here, then run the next cell to see how the transforms look
train_data = datasets.ImageFolder(data_dir + '/train', transform=train_transforms)
valid_data = datasets.ImageFolder(data_dir + '/valid', transform=test_transforms)
test_data = datasets.ImageFolder(data_dir + '/test', transform=test_transforms)
# Only the training loader shuffles; order does not matter for evaluation.
trainloader = torch.utils.data.DataLoader(train_data, batch_size=batch_size, shuffle=True, num_workers=num_workers)
validloader = torch.utils.data.DataLoader(valid_data, batch_size=batch_size, num_workers=num_workers)
testloader = torch.utils.data.DataLoader(test_data, batch_size=batch_size, num_workers=num_workers)
loaders_scratch = {'train':trainloader, 'valid':validloader,'test':testloader}
numClasses = len(train_data.classes)
# -
# **Question 3:** Describe your chosen procedure for preprocessing the data.
# - How does your code resize the images (by cropping, stretching, etc)? What size did you pick for the input tensor, and why?
# - Did you decide to augment the dataset? If so, how (through translations, flips, rotations, etc)? If not, why not?
#
# **Answer**:
# 1. The images are resized to 224 x 224, matching the input size expected by the VGG network.
# 2. The input tensor is of size [32, 3, 224, 224] which gives a batch of 32 images with 3 color channels, resized to a square of 224x224, to mirror the architectures of VGG and others
# 3. The dataset was augmented to increase the dataset size and allow the network to better handle variations in input images. The augmentations used include a random rotation, random crop, and random horizontal flip. The input data was also normalized across the color channels.
#
# ### (IMPLEMENTATION) Model Architecture
#
# Create a CNN to classify dog breed. Use the template in the code cell below.
# +
import torch.nn as nn
import torch.nn.functional as F
# define the CNN architecture
class Net(nn.Module):
    """Three conv/pool stages followed by a two-layer classifier.

    For the 224x224 inputs produced by the data loaders, each 2x2 max-pool
    halves the spatial size: 224 -> 112 -> 56 -> 28, so the classifier sees
    a 64 x 28 x 28 feature map.  (The original comments described a 32x32
    CIFAR-style template and did not match this code.)
    """

    def __init__(self):
        super(Net, self).__init__()
        ## Define layers of a CNN
        # convolutional layer (sees the 224x224x3 image tensor)
        self.conv1 = nn.Conv2d(3, 16, 3, padding=1)
        # convolutional layer (sees a 112x112x16 tensor after pooling)
        self.conv2 = nn.Conv2d(self.conv1.out_channels, 32, 3, padding=1)
        # convolutional layer (sees a 56x56x32 tensor after pooling)
        self.conv3 = nn.Conv2d(self.conv2.out_channels, 64, 3, padding=1)
        # max pooling layer (halves height and width)
        self.pool = nn.MaxPool2d(2, 2)
        # linear layer (64 * 28 * 28 -> 1000)
        self.fc1 = nn.Linear(64 * 28 * 28, 1000)
        # linear layer (1000 -> number of dog breeds)
        self.fc2 = nn.Linear(1000, numClasses)
        # dropout layer (p=0.25) to reduce overfitting in the classifier
        self.dropout = nn.Dropout(0.25)

    def forward(self, x):
        ## Define forward behavior
        x = self.pool(F.relu(self.conv1(x)))
        x = self.pool(F.relu(self.conv2(x)))
        x = self.pool(F.relu(self.conv3(x)))
        # flatten: keep the batch dimension, infer the rest (safer than
        # view(-1, 64*28*28), which silently mis-batches on a shape mismatch)
        x = x.view(x.size(0), -1)
        # add dropout layer
        x = self.dropout(x)
        # add 1st hidden layer, with relu activation function
        x = F.relu(self.fc1(x))
        # add dropout layer
        x = self.dropout(x)
        # final layer emits raw class scores (CrossEntropyLoss applies softmax)
        x = self.fc2(x)
        return x
# +
#-#-# You do NOT have to modify the code below this line. #-#-#
device = torch.device("cuda:0" if torch.cuda.is_available() else "cpu")
# instantiate the CNN
model_scratch = Net()
# move tensors to GPU if CUDA is available
model_scratch.to(device)
print(model_scratch)
# -
# __Question 4:__ Outline the steps you took to get to your final CNN architecture and your reasoning at each step.
# **__Answer:__**
#
# The architecture is based on the one provided in the course material, with three banks of 2D conv filters, each with increasing number of filters joined with a maxpool 2x2 operation. This results in three banks of filters that are meant to train kernels for different sized features, with the first bank on smallest features, and the last bank on largers features.
#
# Following the conv layers, the net is flattened and pass through two reduction layers, one reduces all the outputs from the last conv layer to 1000 features, and the final layer further reduces the 1000 to the number of classes.
#
# Two dropout layers were added -- one between the conv and linear layers and one between the two linear layers -- to improve the net's robustness.
#
# ### (IMPLEMENTATION) Specify Loss Function and Optimizer
#
# Use the next code cell to specify a [loss function](http://pytorch.org/docs/stable/nn.html#loss-functions) and [optimizer](http://pytorch.org/docs/stable/optim.html). Save the chosen loss function as `criterion_scratch`, and the optimizer as `optimizer_scratch` below.
# +
import torch.optim as optim
### TODO: select loss function
# cross-entropy combines log-softmax and NLL, so the network emits raw logits
criterion_scratch = nn.CrossEntropyLoss()
### TODO: select optimizer
# plain SGD over all model parameters with a fixed learning rate
optimizer_scratch = optim.SGD(model_scratch.parameters(), lr=0.05)
# -
# ### (IMPLEMENTATION) Train and Validate the Model
#
# Train and validate your model in the code cell below. [Save the final model parameters](http://pytorch.org/docs/master/notes/serialization.html) at filepath `'model_scratch.pt'`.
from PIL import ImageFile
# let PIL load partially-written image files instead of raising mid-epoch
ImageFile.LOAD_TRUNCATED_IMAGES = True
device = torch.device("cuda:0" if torch.cuda.is_available() else "cpu")
def train(n_epochs, loaders, model, optimizer, criterion, device, save_path):
    """Train *model* for *n_epochs*, checkpointing on validation improvement.

    Args:
        n_epochs: number of epochs to run.
        loaders: dict with 'train' and 'valid' DataLoaders.
        model: network to optimize (already moved to *device* by the caller).
        optimizer: optimizer over model.parameters().
        criterion: loss function.
        device: torch.device on which to place each batch.
        save_path: file path for the best (lowest validation loss) weights.

    Returns:
        The trained model (weights as of the final epoch; the best epoch's
        weights are the ones on disk at *save_path*).
    """
    # initialize tracker for minimum validation loss
    # (np.inf, not np.Inf -- the capitalized alias was removed in NumPy 2.0)
    valid_loss_min = np.inf

    for epoch in range(1, n_epochs + 1):
        # per-epoch accumulators for loss and validation accuracy
        train_loss = 0.0
        valid_loss = 0.0
        correct = 0.
        total = 0.

        ###################
        # train the model #
        ###################
        model.train()
        for batch_idx, (data, target) in enumerate(loaders['train']):
            # move to GPU (no-op on CPU)
            data, target = data.to(device), target.to(device)
            # clear the gradients of all optimized variables
            optimizer.zero_grad()
            # forward pass: compute predicted outputs by passing inputs to the model
            output = model(data)
            # calculate the batch loss
            loss = criterion(output, target)
            # backward pass: compute gradient of the loss w.r.t. model parameters
            loss.backward()
            # perform a single optimization step (parameter update)
            optimizer.step()
            # accumulate the loss weighted by batch size (averaged below)
            train_loss += loss.item() * data.size(0)

        ######################
        # validate the model #
        ######################
        model.eval()
        with torch.no_grad():  # no gradients needed during validation
            for batch_idx, (data, target) in enumerate(loaders['valid']):
                data, target = data.to(device), target.to(device)
                output = model(data)
                loss = criterion(output, target)
                valid_loss += loss.item() * data.size(0)
                # predicted class = arg-max over the class dimension
                pred = output.data.max(1, keepdim=True)[1]
                # compare predictions to true label
                correct += np.sum(np.squeeze(pred.eq(target.data.view_as(pred))).cpu().numpy())
                total += data.size(0)

        # convert the accumulated sums into per-sample averages
        train_loss = train_loss / len(loaders['train'].dataset)
        valid_loss = valid_loss / len(loaders['valid'].dataset)

        # print training/validation statistics
        print('Epoch: {} \tTraining Loss: {:.6f} \tValidation Loss: {:.6f}'.format(
            epoch,
            train_loss,
            valid_loss
        ))
        # fixed escape: the original '\V' printed a literal backslash-V
        print('\nValidation Accuracy: %2d%% (%2d/%2d)' % (
            100. * correct / total, correct, total))

        # checkpoint whenever the validation loss improves
        if valid_loss <= valid_loss_min:
            print('Validation loss decreased ({:.6f} --> {:.6f}). Saving model ...'.format(
                valid_loss_min,
                valid_loss))
            torch.save(model.state_dict(), save_path)
            valid_loss_min = valid_loss
    # return trained model
    return model
# +
device = torch.device("cuda:0" if torch.cuda.is_available() else "cpu")
# train the model (30 epochs; best checkpoint written to 'model_scratch.pt')
model_scratch = train(30, loaders_scratch, model_scratch, optimizer_scratch,
                      criterion_scratch, device, 'model_scratch.pt')
# load the model that got the best validation accuracy
model_scratch.load_state_dict(torch.load('model_scratch.pt'))
# -
# ### (IMPLEMENTATION) Test the Model
#
# Try out your model on the test dataset of dog images. Use the code cell below to calculate and print the test loss and accuracy. Ensure that your test accuracy is greater than 10%.
def test(loaders, model, criterion, device):
# monitor test loss and accuracy
test_loss = 0.
correct = 0.
total = 0.
model.eval()
for batch_idx, (data, target) in enumerate(loaders['test']):
# move to GPU
data, target = data.to(device), target.to(device)
# forward pass: compute predicted outputs by passing inputs to the model
output = model(data)
# calculate the loss
loss = criterion(output, target)
# update average test loss
test_loss = test_loss + ((1 / (batch_idx + 1)) * (loss.data - test_loss))
# convert output probabilities to predicted class
pred = output.data.max(1, keepdim=True)[1]
# compare predictions to true label
correct += np.sum(np.squeeze(pred.eq(target.data.view_as(pred))).cpu().numpy())
total += data.size(0)
print('Test Loss: {:.6f}\n'.format(test_loss))
print('\nTest Accuracy: %2d%% (%2d/%2d)' % (
100. * correct / total, correct, total))
device = torch.device("cuda:0" if torch.cuda.is_available() else "cpu")
# load the model that got the best validation accuracy
# (the checkpoint written by train() whenever validation loss improved)
model_scratch.load_state_dict(torch.load('model_scratch.pt'))
# call test function
test(loaders_scratch, model_scratch, criterion_scratch, device)
# ---
# <a id='step4'></a>
# ## Step 4: Create a CNN to Classify Dog Breeds (using Transfer Learning)
#
# You will now use transfer learning to create a CNN that can identify dog breed from images. Your CNN must attain at least 60% accuracy on the test set.
#
# ### (IMPLEMENTATION) Specify Data Loaders for the Dog Dataset
#
# Use the code cell below to write three separate [data loaders](http://pytorch.org/docs/master/data.html#torch.utils.data.DataLoader) for the training, validation, and test datasets of dog images (located at `dogImages/train`, `dogImages/valid`, and `dogImages/test`, respectively).
#
# If you like, **you are welcome to use the same data loaders from the previous step**, when you created a CNN from scratch.
# +
## TODO: Specify data loaders
# NOTE(review): this cell duplicates the scratch-model loaders above;
# reusing loaders_scratch would avoid the repetition.
data_dir = '/home/en/Git/deep-learning-datasets/dogImages'
batch_size = 32
num_workers = 6
image_size = 224
# TODO: Define transforms for the training data and testing data
# Training: random augmentation plus ImageNet mean/std normalization.
train_transforms = transforms.Compose([transforms.RandomRotation(30),
                                       transforms.RandomResizedCrop(image_size),
                                       transforms.RandomHorizontalFlip(),
                                       transforms.ToTensor(),
                                       transforms.Normalize([0.485, 0.456, 0.406],
                                                            [0.229, 0.224, 0.225])])
# Validation/test: deterministic resize + center crop, same normalization.
test_transforms = transforms.Compose([transforms.Resize(255),
                                      transforms.CenterCrop(image_size),
                                      transforms.ToTensor(),
                                      transforms.Normalize([0.485, 0.456, 0.406],
                                                           [0.229, 0.224, 0.225])])
# Pass transforms in here, then run the next cell to see how the transforms look
train_data = datasets.ImageFolder(data_dir + '/train', transform=train_transforms)
valid_data = datasets.ImageFolder(data_dir + '/valid', transform=test_transforms)
test_data = datasets.ImageFolder(data_dir + '/test', transform=test_transforms)
# datasets (not loaders) keyed by split -- used later for class names
data_transfer = {'train':train_data,'valid':valid_data,'test':test_data}
trainloader = torch.utils.data.DataLoader(train_data, batch_size=batch_size, shuffle=True, num_workers=num_workers)
validloader = torch.utils.data.DataLoader(valid_data, batch_size=batch_size, num_workers=num_workers)
testloader = torch.utils.data.DataLoader(test_data, batch_size=batch_size, num_workers=num_workers)
loaders_transfer = {'train':trainloader, 'valid':validloader,'test':testloader}
numClasses = len(train_data.classes)
# -
# ### (IMPLEMENTATION) Model Architecture
#
# Use transfer learning to create a CNN to classify dog breed. Use the code cell below, and save your initialized model as the variable `model_transfer`.
# +
import torch
import torchvision.models as models
import torch.nn as nn
## TODO: Specify model architecture
# define VGG16 model
# Load a VGG16 network pre-trained on ImageNet as the transfer-learning base.
model_transfer = models.vgg16(pretrained=True)
device = torch.device("cuda:0" if torch.cuda.is_available() else "cpu")
# cuda
model_transfer.to(device)
# Freeze the convolutional feature extractor so only the classifier head is
# trained.  Bug fix: the original set `param.require_grad` (a typo), which
# silently created a new attribute and left every parameter trainable; the
# correct autograd flag is `requires_grad`.
for param in model_transfer.features.parameters():
    param.requires_grad = False
# -
# Peek at the classifier head with its final layer removed.
features = list(model_transfer.classifier.children())[:-1]
features
# Newly created modules have require_grad=True by default
# Rebuild the classifier: keep every layer except the last, then append a
# fresh Linear layer sized for the dog-breed classes.
num_features = model_transfer.classifier[6].in_features
features = list(model_transfer.classifier.children())[:-1] # Remove last layer
features.extend([nn.Linear(num_features, numClasses)]) # Add our layer with numClasses outputs
model_transfer.classifier = nn.Sequential(*features) # Replace the model classifier
print(model_transfer)
# __Question 5:__ Outline the steps you took to get to your final CNN architecture and your reasoning at each step. Describe why you think the architecture is suitable for the current problem.
# __Answer:__
#
# The VGG16 network was chosen for the transfer learning task due to its inherent dog-classification abilities (as per part 1).
#
# The model was loaded and all of its parameters were held constant. The last layer was replaced with a new layer for dog breed classification. Since the VGG16 network already encodes many image features, it was a logical first step to simply modify the last output classification layer, re-train it for dog-breeds, and see if the performance meets the requirements. In this case, the technique was applicable and produced results above the test criterion.
#
# ### (IMPLEMENTATION) Specify Loss Function and Optimizer
#
# Use the next code cell to specify a [loss function](http://pytorch.org/docs/master/nn.html#loss-functions) and [optimizer](http://pytorch.org/docs/master/optim.html). Save the chosen loss function as `criterion_transfer`, and the optimizer as `optimizer_transfer` below.
# +
import torch.optim as optim
# Cross-entropy loss for multi-class breed classification.
criterion_transfer = nn.CrossEntropyLoss()
# Plain SGD over the model parameters with a fixed learning rate.
optimizer_transfer = optim.SGD(model_transfer.parameters(), lr=0.05)
# -
# ### (IMPLEMENTATION) Train and Validate the Model
#
# Train and validate your model in the code cell below. [Save the final model parameters](http://pytorch.org/docs/master/notes/serialization.html) at filepath `'model_transfer.pt'`.
device = torch.device("cuda:0" if torch.cuda.is_available() else "cpu")
model_transfer.to(device)
# train the model
# `train` is defined earlier in the notebook; presumably it runs 30 epochs and
# saves the best-validation weights to 'model_transfer.pt' -- verify against
# the cell that defines train().
model_transfer = train(30, loaders_transfer, model_transfer, optimizer_transfer, criterion_transfer, device, 'model_transfer.pt')
# ### (IMPLEMENTATION) Test the Model
#
# Try out your model on the test dataset of dog images. Use the code cell below to calculate and print the test loss and accuracy. Ensure that your test accuracy is greater than 60%.
# load the model that got the best validation accuracy (uncomment the line below)
model_transfer.load_state_dict(torch.load('model_transfer.pt'))
# Evaluate on the held-out test set with the notebook's `test` helper.
test(loaders_transfer, model_transfer, criterion_transfer, device)
# ### (IMPLEMENTATION) Predict Dog Breed with the Model
#
# Write a function that takes an image path as input and returns the dog breed (`Affenpinscher`, `Afghan hound`, etc) that is predicted by your model.
# +
### TODO: Write a function that takes a path to an image as input
### and returns the dog breed that is predicted by the model.
# list of class names by index, i.e. a name can be accessed like class_names[0]
class_names = [item[4:].replace("_", " ") for item in data_transfer['train'].classes]
def predict_breed_transfer(img_path):
# load the image and return the predicted breed
# set model to eval:
model_scratch.eval()
# load image
I = Image.open(img_path)
# resize to 224 x 224
I = I.resize((224, 224), Image.ANTIALIAS)
# convert to np array:
im = np.array(I)/255.0
# transpose fo 3 x 224 x 224
im = np.transpose(im,(2,0,1))
# add dimension to simulate batch for vgg model:
im = np.expand_dims(im,0)
# convert im data to tensor:
t = torch.tensor(im)
t = t.to(device)
# conver to float
t = t.float()
# run model with softmax output to normalize output weights
ps = torch.softmax(model_scratch.forward(t),1)
# get index of max weight:
_, i = ps.max(1)
# Try with transfer learning as well:
ps = torch.softmax(model_transfer.forward(t),1)
# get index of max weight:
_, j = ps.max(1)
return class_names[i], class_names[j] # predicted class from scratch & transfer models
# -
# ---
# <a id='step5'></a>
# ## Step 5: Write your Algorithm
#
# Write an algorithm that accepts a file path to an image and first determines whether the image contains a human, dog, or neither. Then,
# - if a __dog__ is detected in the image, return the predicted breed.
# - if a __human__ is detected in the image, return the resembling dog breed.
# - if __neither__ is detected in the image, provide output that indicates an error.
#
# You are welcome to write your own functions for detecting humans and dogs in images, but feel free to use the `face_detector` and `human_detector` functions developed above. You are __required__ to use your CNN from Step 4 to predict dog breed.
#
# Some sample output for our algorithm is provided below, but feel free to design your own user experience!
#
# 
#
#
# ### (IMPLEMENTATION) Write your Algorithm
# +
### TODO: Write your algorithm.
### Feel free to use as many code cells as needed.
# load models...
# load the model that got the best validation accuracy
# Restore the best checkpoints for both models before classification.
device = torch.device("cuda:0" if torch.cuda.is_available() else "cpu")
model_scratch.load_state_dict(torch.load('model_scratch.pt'))
model_scratch.to(device)
model_transfer.load_state_dict(torch.load('model_transfer.pt'))
model_transfer.to(device)
def run_app(img_path):
    """Announce whether the image contains a human/dog/neither, display it,
    and print the breed predicted by both CNNs."""
    ## handle cases for a human face, dog, and neither
    # step 1. check if human using human detector:
    if(face_detector(img_path)):
        print("I think you are human!")
    elif(dog_detector(img_path)):
        print("Looks like a dog to me!")
    else:
        print("I don't know what this is, but I don't think its a dog or a human!")
    # load image locally:
    img = cv2.imread(img_path)
    # print image:
    # convert BGR image to RGB for plotting
    cv_rgb = cv2.cvtColor(img, cv2.COLOR_BGR2RGB)
    # display the image, along with bounding box
    plt.imshow(cv_rgb)
    plt.show()
    # run classification:
    # NOTE(review): breeds are printed even when neither a dog nor a human
    # was detected -- confirm that is the intended user experience.
    pred_scratch, pred_transfer = predict_breed_transfer(img_path)
    print("Simple model thinks you are a " + pred_scratch)
    print("Transfer model thinks you are a " + pred_transfer)
    print("\n-------------\n")
# -
# ---
# <a id='step6'></a>
# ## Step 6: Test Your Algorithm
#
# In this section, you will take your new algorithm for a spin! What kind of dog does the algorithm think that _you_ look like? If you have a dog, does it predict your dog's breed accurately? If you have a cat, does it mistakenly think that your cat is a dog?
#
# ### (IMPLEMENTATION) Test Your Algorithm on Sample Images!
#
# Test your algorithm at least six images on your computer. Feel free to use any images you like. Use at least two human and two dog images.
#
# __Question 6:__ Is the output better than you expected :) ? Or worse :( ? Provide at least three possible points of improvement for your algorithm.
# __Answer:__
#
# The human detector worked well as expected, but the dog-breed classifier trained with the scratch model did not work as well.
#
# The breed classification with the scratch model worked almost as expected - which was not too well. The test accuracy of 14% meant that only roughly 1 in 7 images from the classes trained would be correctly identified. Further, the simplicity of the model, combined with the large similarity between certain breeds would make it difficult for the scratch model to perform very well.
#
# Therefore the following could be done to improve the scratch model:
# 1. Increase the number of filters and convolutional layers - this would produce more filters to learn the finer detailed differences between similar breeds.
# 2. Add batch normalization to keep weights controlled in between layers - this was one step that the VGG network included which the scratch network did not. This should make the weights more manageable as the gradient is back-propagated without it being squashed.
# 3. Increase the dataset with more augmentation. The PyTorch image augmentation (according to forum & documentation) creates one augmented image per transform during run-time training. Therefore, using 4 individual transforms and a compose would result in 4 new images, one for each transform. This approach can be replaced with a pre-computed augmented data set that further increases the number of transforms by not only creating individually transformed images, but combinations of transforms. This should create a more diverse input dataset for training.
#
# +
## TODO: Execute your algorithm from Step 6 on
## at least 6 images on your computer.
## Feel free to use as many code cells as needed.
## suggested code, below
# Run the full pipeline on the first three human and first three dog images.
for file in np.hstack((human_files[:3], dog_files[:3])):
    run_app(file)
| project-dog-classification/dog_app.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3 (ipykernel)
# language: python
# name: python3
# ---
import pymysql.cursors
# +
# DEFINE FUNCTION TO WRITE DATA TO DBASE
# -
def write_to_db( query:str):
    """
    This function takes a query and then sends it to the database.

    Opens a fresh connection to the local `loans` MySQL database, executes
    `query`, and commits.  The `with` block closes the connection.
    NOTE(review): the password is a placeholder ('<PASSWORD>&&'); supply real
    credentials via configuration, not source code.
    """
    connection = pymysql.connect(host='localhost',
                                 user='root',
                                 password='<PASSWORD>&&',
                                 database= "loans",
                                 cursorclass=pymysql.cursors.DictCursor)
    with connection:
        with connection.cursor() as cursor:
            cursor.execute(query)
        # Commit is required: the connection is not autocommit by default.
        connection.commit()
# +
# DEFINE FUNCTION TO INSERT RECORDS INTO TABLE
# -
def insert_to_db(colnames:list,records:list,table_name:str):
    """
    Insert `records` into `table_name` in the local `loans` database.

    Each record supplies values for the first five columns named in
    `colnames`.  Values are passed as query parameters instead of being
    interpolated into the SQL string (as the original did), so quotes or
    SQL fragments inside CSV fields can no longer break the statement or
    inject SQL.
    """
    # Connect to the database
    connection = pymysql.connect(host='localhost',
                                 user='root',
                                 password='<PASSWORD>&&',
                                 database='loans',
                                 cursorclass=pymysql.cursors.DictCursor)
    with connection:
        with connection.cursor() as cursor:
            # Column names cannot be parameterized; backtick-quote them so
            # names containing spaces remain valid identifiers.
            cols = ",".join(f"`{c}`" for c in colnames[:5])
            sql = f"INSERT INTO `{table_name}`({cols}) VALUES (%s,%s,%s,%s,%s)"
            # executemany batches all rows through one prepared statement.
            cursor.executemany(sql, [tuple(record[:5]) for record in records])
        # connection is not autocommit by default. So you must commit to save
        # your changes.
        connection.commit()
# connection is not autocommit by default. So you must commit to save
# your changes.
# +
# COLLECT DICTIONARY KEYS
# -
import csv
def process_file()->dict:
    """This function gets a file name(csv format),
    reads it and returns the column names,
    also the records and filename, in a dictionary

    Keys of the returned dict:
      colnames -- first CSV row, truncated to the first five columns
                  (None if the file is empty)
      records  -- remaining rows, each truncated to five columns
      filename -- input path without extension, with spaces/dashes removed
                  (suitable for use as an SQL table name)
    """
    header = None  # COLUMN NAMES
    records = []   # CORRESPONDING TABLE RECORDS FROM CSV
    path = input("Please enter file name or path\n:> ")
    with open(path) as csv_file:
        csv_reader = csv.reader(csv_file, delimiter=',')
        line_count = 0
        for row in csv_reader:
            if line_count == 0:
                header = row[:5]  # First row is the header.
            else:
                records.append(row[:5])  # Every later row is a data record.
            # Single shared counter (the original duplicated this increment
            # in both branches and kept an unused path variable).
            line_count += 1
    print(f'Processed {line_count} lines.')
    de_extensioned_filename = path.split(".")[0].replace(" ", "").replace("-", "")
    return dict(colnames = header, records = records, filename = de_extensioned_filename)
def create_table_from_cols(colnames:list, use_name:str):
    """
    Build and execute a CREATE TABLE statement for `use_name` with one
    VARCHAR(30) column per entry in `colnames`, plus an auto-increment
    `main_id` primary key.

    Returns the table name (`use_name`) so callers can chain it into an
    insert step.  (The original docstring claimed a boolean return, which
    was never what the code did.)
    """
    # Base query shared by all created tables.
    sql_query = f"CREATE TABLE {use_name} (main_id INT(4) PRIMARY KEY AUTO_INCREMENT NOT NULL"
    for colname in colnames:
        if colname == "":  # Deal with column names that are empty
            colname = "unknown_col"
        colname = colname.replace(" ", "")
        sql_query += f", {str(colname).strip()} VARCHAR(30)"  # Table info for each column name.
    sql_query += ")"  # Add closing tokens to query
    # Log the query once (the original printed it both before and after
    # execution), then run it.
    print(sql_query)
    write_to_db(sql_query)
    return use_name
# +
def write_records_to_db(colnames:list,records:list, table_name:str,dbase)->bool:
    """
    Insert each record in `records` into `table_name`, using the first four
    entries of `colnames` as the target columns.

    Bug fixes relative to the original:
      * the VALUES clause used f-string text '{0}'..'{3}', which inserted the
        literal digits 0-3 instead of the record's values;
      * all INSERT statements were concatenated into one string (invalid
        SQL) and passed to insert_to_db with the wrong number of arguments.
    Each statement is now built per record and executed via write_to_db.

    NOTE(review): values are still interpolated into the SQL text, so this
    is only safe for trusted input.  `dbase` is kept for interface
    compatibility but unused, as in the original.

    Returns True when every record has been written.
    """
    for record in records:
        sql_query = (
            f"INSERT INTO {str(table_name).strip()} "
            f"({colnames[0]}, {colnames[1]},{colnames[2]}, {colnames[3]})"
            f" VALUES ('{record[0]}', '{record[1]}','{record[2]}','{record[3]}')"
        )
        print(sql_query)
        write_to_db(sql_query)
    return True
# -
# +
# End-to-end driver: parse the user-chosen CSV, create a matching table,
# then bulk-insert the parsed records into it.
mycsv_data= process_file()
mycolnames = mycsv_data["colnames"]
myrecnames = mycsv_data["records"]
mytable = create_table_from_cols(mycolnames, "newloans")
insert_to_db(mycolnames,myrecnames,mytable)
# -
# +
# DEFINE FUNCTION TO RETRIEVE DATA FROM DATABASE AND CONVERT TO .CSV FILE
# +
import pymysql.cursors
def retrieve_data(tablename:str, qty:int)->list:
    """This function retrieves data from a database table and
    reads it and returns the data as a csv file.

    Returns a list of rows: the first element is the list of column names,
    followed by up to `qty` data rows from `tablename` in the `univelcity`
    database.  Returns an empty list when the table has no rows.
    NOTE(review): `tablename` is interpolated directly into the SQL string;
    only call this with trusted table names.
    """
    # Connect to the database
    connection = pymysql.connect(host='localhost',
                                 user='root',
                                 password='',
                                 database="univelcity",
                                 cursorclass=pymysql.cursors.DictCursor)
    with connection:
        with connection.cursor() as cursor:
            # Create a new record
            sql = f"SELECT * FROM {tablename}"
            cursor.execute(sql)
            results = cursor.fetchmany(qty)
            line_count = 0
            data_list = []
            for row in results:
                if line_count == 0:
                    # First row also yields the header from the dict keys.
                    col_names = list(row.keys()) # Get headings from first row
                    data = list(row.values()) # get data for first row
                    data_list.append(col_names)
                    data_list.append(data)
                    line_count += 1
                else:
                    data = list(row.values()) # get data for this row
                    data_list.append(data)
            return (data_list)
raw_data = retrieve_data("laptops", 4)
print(raw_data)
# +
def write_to_csv(data:list)->bool:
    """Write `data` (a list of rows) to 'out.csv' in the current directory.

    The file is opened with newline="" as the csv module requires, so that
    csv.writer controls line endings itself; without it every row is
    followed by a blank line on Windows.

    Returns True once all rows have been written.
    """
    import csv
    with open("out.csv", "w", newline="") as file:
        writer = csv.writer(file)
        writer.writerows(data)
    return True
write_to_csv(raw_data)
# -
| Charles_Assignment_1.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# language: python
# name: python3
# ---
# # Compare Lagrangian Results Files Between MIDOSS-MOHID Runs
#
# This notebook is for quick comparison of results from `Lagrangian*.nc` files
# from 2 MIDOSS-MOHID runs.
#
# This is a template version of the notebook that compares one dataset to itself.
# %matplotlib inline
# +
from pathlib import Path
import matplotlib.pyplot as plt
import xarray
# -
# Map run labels to their Lagrangian results files.
results = {
    "01-07jun17": Path(
        "/media/doug/warehouse/MIDOSS/results/01jun17-08jun17/forcing-float32_no-comp_no-chunk//Lagrangian_AKNS_crude_SOG_01jun2017_08jun2017_AKNS.nc"),
    "15-21jun17": Path(
        "/media/doug/warehouse/MIDOSS/results/15jun17-21jun17/AKNS-spatial-0-231/Lagrangian_AKNS_crude_15jun17_22jun17_AKNS.nc"),
}
# Open every results file once; entries are closed at the end of the notebook.
datasets = {
    name: xarray.open_dataset(path)
    for name, path in results.items()
}
# Display the first run's dataset.  Bug fix: the original indexed
# datasets["baseline forcing"], a key that does not exist in `results`
# and therefore raised KeyError.
datasets["01-07jun17"]
# Print a summary of each dataset.
for name in datasets:
    print(name)
    print(f"{datasets[name]}\n")
def viz_field(datasets, var, selections, robust=False):
    """Plot variable `var` from the two datasets side by side.

    `selections` is a list of one or two dicts of xarray .sel() keyword
    selections.  With one selection it is applied to both datasets and a
    third difference panel (dataset0 - dataset1) is drawn; with two, each
    dataset gets its own selection and no difference panel is shown.
    `robust` is forwarded to xarray's plot to clip color limits to the
    2nd-98th percentiles.
    """
    if len(selections) == 1:
        fig, (ax0, ax1, ax_diff) = plt.subplots(1, 3, figsize=(20, 5))
    else:
        fig, (ax0, ax1) = plt.subplots(1, 2, figsize=(20, 5))
    ds_names = list(datasets.keys())
    ds0 = datasets[ds_names[0]].get(var).sel(**selections[0])
    try:
        ds1 = datasets[ds_names[1]].get(var).sel(**selections[1])
    except IndexError:
        # Only one selection was supplied; reuse it for the second dataset.
        ds1 = datasets[ds_names[1]].get(var).sel(**selections[0])
    ds0.plot(ax=ax0, robust=robust)
    ax0.set_title(f"{ds_names[0]}\n{ax0.get_title()}")
    ds1.plot(ax=ax1, robust=robust)
    ax1.set_title(f"{ds_names[1]}\n{ax1.get_title()}")
    if len(selections) == 1:
        # Difference panel on a diverging colormap centred on zero.
        (ds0 - ds1).plot(ax=ax_diff, cmap=plt.cm.RdBu_r, robust=robust)
        ax_diff.set_title(f"{ds_names[0]} - {ds_names[1]}\n{ax_diff.get_title()}")
    plt.tight_layout()
# One selection per dataset: a surface-layer subregion at a single timestamp,
# with the y-window shifted between runs to follow the feature of interest.
selections = [
    dict(
        time="2017-06-04 08:30:00",
        grid_y=slice(600, 700),
        grid_x=slice(100, 200),
        # depth axis is inverted, so that surface has index 39
        grid_z=39,
    ),
    dict(
        time="2017-06-18 08:30:00",
        grid_y=slice(650, 750),
        grid_x=slice(100, 200),
        # depth axis is inverted, so that surface has index 39
        grid_z=39,
    ),
]
viz_field(datasets, "OilConcentration_3D", selections, robust=False)
viz_field(datasets, "Dissolution_3D", selections)
# Release the netCDF file handles.
for ds in datasets.values():
    ds.close()
| notebooks/CompareLagrangians.ipynb |
# -*- coding: utf-8 -*-
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .r
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: default:R
# language: R
# name: conda-env-default-r
# ---
# # Sample R notebook - Example Financial Analysis
#
# In this notebook, we illustrate the use of R with several code examples showing how to gather and use financial data, as well as fit financial models, eventually with deep learning.
#
# The code below is R code. Please make sure to execute this only after you've installed the R kernel as in the previous notebook titled `1._InstallR.ipynb`
#
# This notebook uses the R kernel. You will find this in the launcher.
# ## Check that plotting works
#
# It's good to start by checking that plots work in the notebook so that we know the graphical support in R is working. Here we generate 1 million standard normal random variates and plot the histogram, which shows that it is normally distributed. We also overlay a grid on the plot.
# Sanity-check notebook graphics: histogram of one million N(0,1) draws.
x = rnorm(1000000) # normal random numbers
hist(x,100) # histogram with 100 bins
grid(lwd=2) # a grid with adjusted line width
# ## Installing R packages
#
# You can install multiple R packages in this notebook as follows. The `install.packages` function in R can take a list of packages, concatenated together using the `c()` function. We install below two packages: (i) `quantmod` (https://www.quantmod.com/) a financial data package, and `magrittr` (https://cran.r-project.org/web/packages/magrittr/vignettes/magrittr.html) used for piping.
#
# You can install these once and not every time you use this notebook. The first time you use `install.packages` you will also be required to choose a [CRAN](https://cran.r-project.org/) mirror for the download.
install.packages(c("quantmod", "magrittr"))
# We now use the quantmod package to download stock prices as a basic example.
#
# We use the S&P 500 index here.
ticker = '^GSPC'
# Next, we will invoke the quantmod package. Then we use the `getSymbols` function to get the chosen ticker (or tickers in a concatenated list), we then convert the table into a dataframe.
#
# We can examine the dataframe and also plot the adjusted prices of the ticker.
library(quantmod)
# getSymbols creates an xts object named after the ticker (here: GSPC).
getSymbols(ticker)
GSPC = as.data.frame(GSPC)
head(GSPC)
# Adjusted close accounts for splits and dividends.
plot(GSPC$GSPC.Adjusted, type='l', col='blue', lwd=1.5)
grid()
# We can invoke the same functionality in R using pipes with the `magrittr` package. Can you see why the package is so named?
library(magrittr)
# The same plot, written with the %>% pipe.
GSPC$GSPC.Adjusted %>% plot(type='l'); grid()
# ## Regression Analysis
#
# We can examine a simple regression of a chosen stock's return on the market index returns so that we can see how it is related to moves in the broad stock market. We choose Tesla here.
#
# Let's collect the two time series first for both the index and the stock. Note that we already downloaded the S&P 500 index data above. Below, we add a column of dates to that dataframe. We also download Tesla stock prices and add a date column as well.
# +
# Keep the row-name dates as an explicit column for the later merge,
# then download Tesla prices and do the same.
GSPC$dt = rownames(GSPC)
getSymbols('TSLA')
TSLA = as.data.frame(TSLA)
TSLA$dt = rownames(TSLA)
# -
head(TSLA)
# ## Merging two dataframes
#
# We have two separate dataframes, one for GSPC and the other for TSLA. They cover different date ranges, and we wish to line up the data for the two tickers. One way to do this is to join the two dataframes on date using an intersection (inner) join, so that the rows that are retained are the ones for which the two tickers have common dates. It is really easy to do so in R using the `merge` function. The key for the join is the date column.
# Inner join on the date column keeps only dates both tickers share.
df = merge(GSPC,TSLA,by="dt")
head(df)
# ## Preparing the dataframe needed for regression analysis
#
# We only need the adjusted closing prices for each ticker, and the dates, so we take a subset of the merged dataframe. This is shown next.
# Keep only the date and the two adjusted closes, with short names.
df = df[c("dt","GSPC.Adjusted","TSLA.Adjusted")]
names(df) = c("DATE","GSPC","TSLA")
head(df)
# DESCRIPTIVE STATISTICS
summary(df)
# ## Convert prices to returns
#
# We want to use regression analysis to examine the relationship of returns on the two tickers. So we convert the prices into percentage returns from one date to the next. This is easy to using the twp columns of ticker prices. Note that we create two vectors of returns.
# Simple (arithmetic) daily returns: (P_t - P_{t-1}) / P_{t-1}.
n = dim(df)[1]
gspc_ret = (df$GSPC[2:n] - df$GSPC[1:(n-1)])/df$GSPC[1:(n-1)]
tsla_ret = (df$TSLA[2:n] - df$TSLA[1:(n-1)])/df$TSLA[1:(n-1)]
# + [markdown] tags=[]
# ## Fitting the regression
#
# In R, ordinary least squares (OLS) regression is fitted using the function `lm` (for linear model). We want to fit the model by regressing Tesla's return on the return of the S&P 500 index. The single line of code below runs the model and the `summary` function prints out all the details of the regression.
# -
# OLS regression of TSLA returns on market (GSPC) returns; the slope
# approximates the stock's beta.
res = lm(tsla_ret ~ gspc_ret)
summary(res)
# Notice that the intercept is very small (0.0017) and the coefficient on the GSPC return is 1.29, which approximates what is known as the "beta" of the stock. What this means is that when the market moves by 1%, Tesla stock changes by 1.29% (up or down). So TSLA is relatively more risky than GSPC.
#
# Note that the t-statistics show that GSPC is statistically significant in being able to explain the return of TSLA.
#
# Also, see that the F-statistic has a p-value = 0, which means that the regression model provides a good fit to the data. We see that the R-square is 15%.
#
# It is useful to plot one series against the other to see this relationship and also plot the regression line using the output of the regression model. This is done next.
plot(gspc_ret, tsla_ret)
cat("Correlation of stock and market return:"); print(cor(gspc_ret, tsla_ret))
# Overlay the fitted regression line on the scatter plot.
abline(res, col="red")
# Next, let's explore a different topic to see how functions are written in R, and to implement some deep learning as well.
# ## Pricing Equity Options
#
# [Options](https://www.investopedia.com/terms/o/option.asp) are traded in large numbers. The basic model for pricing options is the famous Black-Scholes equation. The price of a call option in this model is given by the following formula (see: https://www.investopedia.com/terms/b/blackscholes.asp)
#
# $$
# C=Se^{-qT} N(d_1) - Ke^{-rT}⋅N(d_2)
# $$
#
# where
#
# $$
# d_1=\frac{\ln(S/K)+(r-q+v^2/2)T]}{v\sqrt{T}}
# $$
#
# and $d_2=d_1-v\sqrt{T}$.
#
# Here $S$ is the stock price, $K$ is the strike price, $T$ is option maturity in years, $v$ is the annualized volatility of the stock, and $r$ is the continuous risk free rate of interest for maturity $T$. Finally, $q$
# is the annual dividend rate, assuming it is paid continuously.
#
# Likewise, the formula for a put option is
#
# $$
# P=Ke^{-rT}N(-d_2) - Se^{-qT}N(-d_1)
# $$
#
# and $d_1$ and $d_2$ are the same as for the call option.
#
# We build a simple R function to price options. It returns both the price of a call and a put option for the input parameters.
#Generate Black-Scholes values
# Price a European call and put under Black-Scholes with a continuous
# dividend yield.
#   S  spot price      K  strike             T  maturity (years)
#   v  annualized vol  rf continuous risk-free rate  dv dividend yield
# Returns c(call, put).
BS = function(S,K,T,v,rf,dv) {
  d1 = (log(S/K) + (rf-dv+0.5*v^2)*T)/(v*sqrt(T))
  d2 = d1 - v*sqrt(T)
  bscall = S*exp(-dv*T)*pnorm(d1) - K*exp(-rf*T)*pnorm(d2)
  bsput = -S*exp(-dv*T)*pnorm(-d1) + K*exp(-rf*T)*pnorm(-d2)
  # Return the pair explicitly.  The original ended with `res = c(...)`:
  # an assignment returns its value invisibly, which worked but obscured
  # the function's result.
  c(bscall, bsput)
}
# We install `ggplot` which is a popular package in R. See: https://ggplot2.tidyverse.org/
#
# This will be used to plot the option prices generated from this function.
# ggplot2 is used below to plot the option-price curves.
install.packages("ggplot2")
library(ggplot2)
# +
# Add in the parameter values for pricing options
S = 100       # spot price
K = 100       # reference strike
T = 1         # maturity in years
v = 0.20      # annualized volatility
rf = 0.05     # risk-free rate
dv = 0.01     # dividend yield
# Create null vectors to hold the option prices
vcall = NULL; vput = NULL
strikes = seq(K-30,K+30)
# Loop to compute all option prices
for (k in strikes) {
  vcall = c(vcall,BS(S,k,T,v,rf,dv)[1])
  vput = c(vput,BS(S,k,T,v,rf,dv)[2])
}
# Construct a dataframe with the option values
df = data.frame(strikes,vcall,vput)
# NOTE(review): `color=strikes` sits outside aes(); to map strike values to
# a color scale, aes(colour = strikes) is the usual form -- confirm intent.
ggplot(df,aes(x=strikes,y=vput)) + geom_point(color=strikes)
# -
# This shows the expected relationship of put prices to the strike price of the option, i.e., as the strike increases, ceteris paribus, the price of puts increases.
# ## Create a synthetic dataset of option prices for a deep learning example
#
# We now create a synthetic dataset of option prices and inputs to fit a deep learning model to "learn" the Black-Scholes option pricing model from synthetically generated data. We generate 100,000 option prices. The six input parameters are chosen randomly and then the call prices are generated from the random input parameters.
# Number of synthetic option-price samples to generate.
n = 100000
S = runif(n, 80, 120) # Uniform random numbers between 80 and 120
K = runif(n, 80, 120)
T = runif(n, 0.25, 2)
v = runif(n, 0.05, 0.25)
rf = runif(n, 0.01, 0.05)
dv = runif(n, 0.005, 0.02)
# Pre-allocate the result vector: growing it with c() inside the loop
# copies the whole vector each iteration (quadratic time for n = 100,000).
callPrices = numeric(n)
for (i in 1:n) {
  callPrices[i] = BS(S[i],K[i],T[i],v[i],rf[i],dv[i])[1]
}
df = data.frame(S, K, T, v, rf, dv, callPrices)
head(df)
# Above, we see the dataframe of input values and in the last column, the option price generated by the Black-Scholes model.
| use-r-in-studio-lab/2.SampleR_NB.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 2
# language: python
# name: python2
# ---
# Skeleton (TF1): Supervisor-managed training loop.  managed_session()
# initializes or restores the model from checkpoints in logdir and shuts
# services down cleanly on exit.  The `...` lines are placeholders.
...create graph...
my_train_op = ...
sv = tf.train.Supervisor(logdir="/my/training/directory")
with sv.managed_session() as sess:
    for step in range(100000):
        # should_stop() becomes True when a service or exception requests a stop.
        if sv.should_stop():
            break
        sess.run(my_train_op)
...create graph...
my_train_op = ...
my_summary_op = tf.summary.merge_all()
sv = tf.train.Supervisor(logdir="/my/training/directory",
summary_op=None) # Do not run the summary service
with sv.managed_session() as sess:
for step in range(100000):
if sv.should_stop():
break
if step % 100 == 0:
_, summ = session.run([my_train_op, my_summary_op])
sv.summary_computed(sess, summ)
else:
session.run(my_train_op)
# Skeleton (TF1): warm-start a Supervisor-managed run from a pre-trained
# checkpoint.  The `...` lines are placeholders.
...create graph...
# Create a saver that restores only the pre-trained variables.
pre_train_saver = tf.train.Saver([pre_train_var1, pre_train_var2])
# Define an init function that loads the pretrained checkpoint.
def load_pretrain(sess):
    pre_train_saver.restore(sess, "<path to pre-trained-checkpoint>")
# Pass the init function to the supervisor.
#
# The init function is called _after_ the variables have been initialized
# by running the init_op.
sv = tf.train.Supervisor(logdir="/my/training/directory",
                         init_fn=load_pretrain)
with sv.managed_session() as sess:
    # Here sess was either initialized from the pre-trained-checkpoint or
    # recovered from a checkpoint saved in a previous run of this code.
    ...
# +
# Skeleton (TF1): run a custom summary function on a timer via sv.loop().
def my_additional_sumaries(sv, sess):
    ...fetch and write summaries, see below...
...
sv = tf.train.Supervisor(logdir="/my/training/directory")
with sv.managed_session() as sess:
    # Call my_additional_sumaries() every 1200s, or 20mn,
    # passing (sv, sess) as arguments.
    sv.loop(1200, my_additional_sumaries, args=(sv, sess))
    ...main training loop...
# -
def my_additional_sumaries(sv, sess):
    # Evaluate the extra summary op and hand the result to the Supervisor,
    # which writes it to the event file with the correct global step.
    summaries = sess.run(my_additional_summary_op)
    sv.summary_computed(sess, summaries)
# Use a custom Saver and checkpoint every 30 seconds.
# The `...` lines and <...> token are placeholders.
...create graph...
my_saver = tf.train.Saver(<only some variables>)
# save_model_secs controls the automatic checkpoint interval.
sv = tf.train.Supervisor(logdir="/my/training/directory",
                         saver=my_saver,
                         save_model_secs=30)
with sv.managed_session() as sess:
    ...training loop...
| crackingcode/day7/cc_tf_day7_5.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# name: python3
# ---
# + [markdown] id="rA5Mubike7OJ"
# ##### Copyright 2020 The TensorFlow Authors.
# + cellView="form" id="fY0a3LRYfHUl"
#@title Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# https://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# + [markdown] id="iNz7xXMSsAQa"
# # Parameter Server Training
# + [markdown] id="jHyqRIqxsJuc"
# <table class="tfo-notebook-buttons" align="left">
# <td>
# <a target="_blank" href="https://www.tensorflow.org/tutorials/distribute/parameter_server_training"><img src="https://www.tensorflow.org/images/tf_logo_32px.png" />View on TensorFlow.org</a>
# </td>
# <td>
# <a target="_blank" href="https://colab.research.google.com/github/tensorflow/docs/blob/master/site/en/tutorials/distribute/parameter_server_training.ipynb"><img src="https://www.tensorflow.org/images/colab_logo_32px.png" />Run in Google Colab</a>
# </td>
# <td>
# <a target="_blank" href="https://github.com/tensorflow/docs/blob/master/site/en/tutorials/distribute/parameter_server_training.ipynb"><img src="https://www.tensorflow.org/images/GitHub-Mark-32px.png" />View source on GitHub</a>
# </td>
# <td>
# <a href="https://storage.googleapis.com/tensorflow_docs/docs/site/en/tutorials/distribute/parameter_server_training.ipynb"><img src="https://www.tensorflow.org/images/download_logo_32px.png" />Download notebook</a>
# </td>
# </table>
# + [markdown] id="6v4D6QfcfTrm"
# ## Overview
#
# [Parameter server training](https://www.usenix.org/system/files/conference/osdi14/osdi14-paper-li_mu.pdf)
# is a common data-parallel method to scale up model training on multiple
# machines. A parameter server training cluster consists of workers and parameter
# servers. Variables are created on parameter servers and they are read and updated by workers in each step. By default, workers read and update these variables independently without synchronizing with each other. This is why sometimes parameter server-style training is called asynchronous training.
#
# In TF2, parameter server training is powered by the
# `tf.distribute.experimental.ParameterServerStrategy` class, which distributes
# the training steps to a cluster that scales up to thousands of workers
# (accompanied by parameter servers). There are two main supported training APIs:
# Keras Training API, also known as `Model.fit`, and Custom Training Loop (CTL).
# `Model.fit` is recommended when users prefer a high-level abstraction
# and handling of training, while CTL is recommended when users prefer to define the details of their training
# loop.
#
# Regardless of the API of choice, distributed training in TF2 involves a
# "cluster" with several "jobs", and each of the jobs may have one or more
# "tasks". When using parameter server training, it is recommended to have one
# coordinator job (which has the job name `chief`), multiple worker jobs (job name
# `worker`), and multiple parameter server jobs (job name `ps`).
#
# While the coordinator creates resources, dispatches training tasks, writes
# checkpoints, and deals with task failures, workers and parameter servers run `tf.distribute.Server` that listen for requests from the coordinator.
# + [markdown] id="oLV1FbpLtqtB"
# ### Parameter server training with `Model.fit` API
#
# Parameter server training with `Model.fit` API requires the coordinator to use a
# `tf.distribute.experimental.ParameterServerStrategy` object, and a
# `tf.keras.utils.experimental.DatasetCreator` as the input. Similar to
# `Model.fit` usage with no strategy, or with other strategies, the workflow
# involves creating and compiling the model, preparing the callbacks, followed by
# a `Model.fit` call.
#
# ### Parameter server training with custom training loop (CTL) API
#
# With CTLs, the `tf.distribute.experimental.coordinator.ClusterCoordinator`
# class is the key component used for the coordinator. The `ClusterCoordinator`
# class needs to work in conjunction with a `tf.distribute.Strategy` object. This
# `tf.distribute.Strategy` object is needed to provide the information of the cluster and is used to define a training step as we have seen in [custom training with `MirroredStrategy`](https://www.tensorflow.org/tutorials/distribute/custom_training#training_loop). The `ClusterCoordinator` object then dispatches the execution of these training
# steps to remote workers. For parameter server training, the `ClusterCoordinator`
# needs to work with a `tf.distribute.experimental.ParameterServerStrategy`.
#
# The most important API provided by the `ClusterCoordinator` object is `schedule`. The `schedule` API enqueues a `tf.function` and returns a future-like `RemoteValue` immediately. The queued functions will be dispatched to remote workers in background threads and their `RemoteValue`s will be filled asynchronously. Since `schedule` doesn’t require worker assignment, the `tf.function` passed in can be executed on any available worker. If the worker it is executed on becomes unavailable before its completion, the function will be retried on another available worker. Because of this fact and the fact that function execution is not atomic, a function may be executed more than once.
#
# In addition to dispatching remote functions, the `ClusterCoordinator` also helps
# to create datasets on all the workers and rebuild these datasets when a worker recovers from failure.
# + [markdown] id="MyDnWjmOje5-"
# ## Tutorial Setup
#
# The tutorial will branch into CTL or `Model.fit` paths, and you can choose the
# one that fits your need. Sections other than "Training with X" are applicable to
# both paths.
# + id="0-V3LUcIs4a-"
# !pip install portpicker
# !pip install tf-nightly
# + id="GlI_NAVFae3J"
#@title
import multiprocessing
import os
import random
import portpicker
import tensorflow as tf
import tensorflow.keras as keras
import tensorflow.keras.layers.experimental.preprocessing as kpl
# + [markdown] id="uvwgM2rzgzIC"
# ## Cluster Setup
#
# As mentioned above, a parameter server training cluster requires a coordinator task that runs your training program, one or several workers and parameter server tasks that run TensorFlow servers, i.e. `tf.distribute.Server`, and possibly an additional evaluation task that runs side-car evaluation (see the side-car evaluation section below). The
# requirements to set them up are:
#
# * The coordinator task needs to know the addresses and ports of all other TensorFlow servers except the evaluator.
# * The workers and parameter servers need to know which port they need to listen to. For the sake of simplicity, we usually pass in the complete cluster information when we create TensorFlow servers on these tasks.
# * The evaluator task doesn’t have to know the setup of the training cluster. If it does, it should not attempt to connect to the training cluster.
# * Workers and parameter servers should have task types as “worker” and “ps” respectively. The coordinator should use “chief” as the task type for legacy reasons.
#
# In this tutorial, we will create an in-process cluster so that the whole parameter server training can be run in colab. We will introduce how to set up [real clusters](#real_clusters) in a later section.
# + [markdown] id="7UNs7Lm2g19n"
# ### In-process cluster
#
# In this tutorial, we will start a bunch of TensorFlow servers in advance and
# connect to them later. Note that this is only for the purpose of this tutorial's
# demonstration, and in real training the servers will be started on worker and ps
# machines.
# + id="FbrP5pXuaoVH"
def create_in_process_cluster(num_workers, num_ps):
  """Create and start local TensorFlow servers; return a cluster resolver.

  One `tf.distribute.Server` is launched per worker and per parameter
  server, all inside this process, each listening on a dynamically picked
  localhost port.
  """
  cluster_dict = {
      "worker": [
          "localhost:%s" % portpicker.pick_unused_port()
          for _ in range(num_workers)
      ]
  }
  if num_ps > 0:
    cluster_dict["ps"] = [
        "localhost:%s" % portpicker.pick_unused_port()
        for _ in range(num_ps)
    ]
  cluster_spec = tf.train.ClusterSpec(cluster_dict)

  # Workers need some inter_ops threads to work properly; raise the thread
  # count when the machine has fewer cores than tasks.
  worker_config = tf.compat.v1.ConfigProto()
  if multiprocessing.cpu_count() < num_workers + 1:
    worker_config.inter_op_parallelism_threads = num_workers + 1

  for task_index in range(num_workers):
    tf.distribute.Server(
        cluster_spec, job_name="worker", task_index=task_index,
        config=worker_config, protocol="grpc")
  for task_index in range(num_ps):
    tf.distribute.Server(
        cluster_spec, job_name="ps", task_index=task_index, protocol="grpc")

  return tf.distribute.cluster_resolver.SimpleClusterResolver(
      cluster_spec, rpc_layer="grpc")
# Set the environment variable to allow reporting worker and ps failure to the
# coordinator. This is a workaround and won't be necessary in the future.
os.environ["GRPC_FAIL_FAST"] = "use_caller"
# Cluster size for the in-process demo: 3 workers and 2 parameter servers.
NUM_WORKERS = 3
NUM_PS = 2
# Start all servers and capture a resolver describing the local cluster.
cluster_resolver = create_in_process_cluster(NUM_WORKERS, NUM_PS)
# + [markdown] id="pX_91OByt0J2"
# The in-process cluster setup is frequently used in our unit testing. Here is
# [one example](https://github.com/tensorflow/tensorflow/blob/7621d31921c2ed979f212da066631ddfda37adf5/tensorflow/python/distribute/coordinator/cluster_coordinator_test.py#L437).
# + [markdown] id="zyby6M2Jqg6J"
# ## Instantiate a `ParameterServerStrategy`
#
# Before we dive into the training code, let's instantiate a `ParameterServerStrategy` object. Note that this is needed regardless of whether you are proceeding with a custom training loop or `Model.fit`. `variable_partitioner` argument will be explained in the [next section](#variable-sharding).
# + id="_YyEPgisrC35"
# Shard every variable into NUM_PS fixed pieces so each parameter server
# holds one shard (see the "Variable sharding" section below).
variable_partitioner = (
    tf.distribute.experimental.partitioners.FixedShardsPartitioner(
        num_shards=NUM_PS))
# The strategy needs the cluster layout (via the resolver) and, optionally,
# a partitioner that is invoked for every variable created under its scope.
strategy = tf.distribute.experimental.ParameterServerStrategy(
    cluster_resolver,
    variable_partitioner=variable_partitioner)
# + [markdown] id="WlAQxuMDJ3k9"
# In order to use GPUs for training, allocate GPUs visible to each worker.
# `ParameterServerStrategy` will use all the available GPUs on each worker,
# with the restriction that all workers should have the same number of GPUs
# available.
# + [markdown] id="QMmBLsf6sEXh"
# ### Variable sharding
#
# Variable sharding refers to splitting a variable into multiple smaller
# variables. We call these smaller variables *shard*s. Variable sharding may be
# useful to distribute the network load when accessing these shards. It is also
# useful to distribute computation and storage of a normal variable across
# multiple parameter servers.
#
# To enable variable sharding, you can pass in a `variable_partitioner` when
# constructing a `ParameterServerStrategy` object. The `variable_partitioner` will
# be invoked every time when a variable is created and it is expected to return
# the number of shards along each dimension of the variable. Some out-of-box
# `variable_partitioner`s are provided such as
# `tf.distribute.experimental.partitioners.FixedShardsPartitioner`.
# + [markdown] id="1--SxlxtsOb7"
# When a `variable_partitioner` is passed in and if you create a variable directly
# under `strategy.scope()`, it will become a container type with a `variables`
# property which provides access to the list of shards. In most cases, this
# container will be automatically converted to a Tensor by concatenating all the
# shards. As a result, it can be used as a normal variable. On the other hand,
# some TensorFlow methods such as `tf.nn.embedding_lookup` provide efficient
# implementation for this container type and in these methods automatic
# concatenation will be avoided.
#
# Please see the API docstring of `ParameterServerStrategy` for more details.
# + [markdown] id="jlOq-O-26O1d"
# ## Training with `Model.fit`
# <a id="training_with_modelfit"></a>
#
# Keras provides an easy-to-use training API via `Model.fit` that handles the
# training loop under the hood, with the flexibility of overridable `train_step`,
# and callbacks which provide functionalities such as checkpoint saving, or
# summary saving for TensorBoard. With `Model.fit`, the same training code can be
# used for other strategies with a simple swap of the strategy object.
# + [markdown] id="oMZ9Cu5J6ZGi"
# ### Input data
#
# `Model.fit` with parameter server training requires that the input data be
# provided in a callable that takes a single argument of type
# `tf.distribute.InputContext`, and returns a `tf.data.Dataset`. Then, create a
# `tf.keras.utils.experimental.DatasetCreator` object that takes such `callable`,
# and an optional `tf.distribute.InputOptions` object via `input_options`
# argument. Note that it is recommended to shuffle and repeat the data with
# parameter server training, and specify `steps_per_epoch` in `fit` call so the library knows the
# epoch boundaries.
#
# Please see
# [Distributed Input](https://www.tensorflow.org/tutorials/distribute/input#usage_2)
# guide for more information about the `InputContext` argument.
# + id="shAo1CCS7wU1"
def dataset_fn(input_context):
  """Return a sharded, shuffled, indefinitely repeated toy dataset.

  `input_context` identifies which of the worker input pipelines is asking,
  so each pipeline reads only its own shard at the per-replica batch size.
  """
  global_batch_size = 64
  per_replica_batch = input_context.get_per_replica_batch_size(
      global_batch_size)

  # Tiny random regression problem: 10 examples of 10 features each.
  features = tf.random.uniform((10, 10))
  targets = tf.random.uniform((10,))

  return (
      tf.data.Dataset.from_tensor_slices((features, targets))
      .shuffle(10)
      .repeat()
      .shard(input_context.num_input_pipelines,
             input_context.input_pipeline_id)
      .batch(per_replica_batch)
      .prefetch(2))

dc = tf.keras.utils.experimental.DatasetCreator(dataset_fn)
# + [markdown] id="v_jhF70K7zON"
# The code in `dataset_fn` will be invoked on the input device, which is usually
# the CPU, on each of the worker machines.
#
# ### Model construction and compiling
#
# Now, you will create a `tf.keras.Model` with the APIs of choice (a trivial
# `tf.keras.models.Sequential` model is being used as a demonstration here),
# followed by a `Model.compile` call to incorporate components such as optimizer,
# metrics, or parameters such as `steps_per_execution`:
# + id="PhTHUYaD74vT"
# Variables created under `strategy.scope()` are placed on the parameter
# servers; `steps_per_execution=10` packs 10 steps into each tf.function
# call to cut coordinator/worker round trips.
with strategy.scope():
  model = tf.keras.models.Sequential([tf.keras.layers.Dense(10)])
  model.compile(tf.keras.optimizers.SGD(), loss='mse', steps_per_execution=10)
# + [markdown] id="nWb_Ekm377YX"
# ### Callbacks and training
#
# <a id="callbacks-and-training"> </a>
#
# Before you call `model.fit` for the actual training, let's prepare the needed
# callbacks for common tasks such as:
#
# * `ModelCheckpoint` - to save the model weights.
#
# * `BackupAndRestore` - to make sure the training progress is automatically
# backed up, and recovered if the cluster experiences unavailability (such as
# abort or preemption), or
#
# * `TensorBoard` - to save the progress reports into summary files which get
# visualized in TensorBoard tool.
#
# Note that due to performance consideration, custom callbacks cannot have batch
# level callbacks overridden when used with `ParameterServerStrategy`. Please
# modify your custom callbacks to make them epoch level calls, and adjust
# `steps_per_epoch` to a suitable value. In addition, `steps_per_epoch` is a
# required argument for `Model.fit` when used with `ParameterServerStrategy`.
# + id="3ddUvUZk7_wm"
# Working directories for TensorBoard summaries, checkpoints, and
# fault-tolerance backups.
working_dir = '/tmp/my_working_dir'
log_dir = os.path.join(working_dir, 'log')
ckpt_filepath = os.path.join(working_dir, 'ckpt')
backup_dir = os.path.join(working_dir, 'backup')
callbacks = [
    tf.keras.callbacks.TensorBoard(log_dir=log_dir),
    tf.keras.callbacks.ModelCheckpoint(filepath=ckpt_filepath),
    tf.keras.callbacks.experimental.BackupAndRestore(backup_dir=backup_dir),
]
# `steps_per_epoch` is required with ParameterServerStrategy: the
# DatasetCreator input repeats forever and has no inherent epoch boundary.
model.fit(dc, epochs=5, steps_per_epoch=20, callbacks=callbacks)
# + [markdown] id="uWgP1h2z8B3j"
# ### Direct usage with `ClusterCoordinator` (optional)
#
# Even if you choose `Model.fit` training path, you can optionally instantiate a
# `ClusterCoordinator` object to schedule other functions you would like to be
# executed on the workers. See below
# [Training with Custom Training Loop](#training_with_custom_training_loop)
# section for more details and examples.
# + [markdown] id="GxypEyIthR0z"
# ## Training with Custom Training Loop
#
# <a id="training_with_custom_training_loop"> </a>
#
# Custom training loop with `tf.distribute.Strategy`
# provides great flexibility to define training loops. With the `ParameterServerStrategy` defined above, you will use a
# `ClusterCoordinator` to dispatch the execution of training steps to remote
# workers.
#
# + [markdown] id="xAwLWSliiDRb"
# Then, you will create a model, define a dataset and a step function as we have
# seen in the training loop with other `tf.distribute.Strategy`s. You can find
# more details in this
# [tutorial](https://www.tensorflow.org/tutorials/distribute/custom_training).
#
# To ensure efficient dataset prefetching, use the recommended
# distributed dataset creation APIs mentioned in the
# [Dispatch Training steps to remote workers](https://www.tensorflow.org/tutorials/distribute/parameter_server_training#dispatch_training_steps_to_remote_workers)
# section below. Also, make sure to call `strategy.run` inside worker_fn
# to take full advantage of GPUs allocated on workers. Rest of the steps
# are the same for training with or without GPUs.
#
# Let’s create these components in following steps:
# + [markdown] id="4QNkCtV8VivM"
# ### Setup the data
# First, write a function that creates a dataset that includes preprocessing logic implemented by Keras preprocessing layers. We will create these layers outside the `dataset_fn` but apply the transformation inside the `dataset_fn` since you will wrap the `dataset_fn` into a `tf.function` which doesn't allow variables to be created inside it.
#
# Note: there is a known performance implication when using lookup table resources, which layers such as `tf.keras.layers.experimental.preprocessing.StringLookup` employ. Please see [Known Limitations](#known_limitations) section for more information.
# + id="2GUwATssauus"
feature_vocab = [
    "avenger", "ironman", "batman", "hulk", "spiderman", "kingkong",
    "wonder_woman"
]
label_vocab = ["yes", "no"]
# The lookup layers own variables (lookup tables), so they must be created
# under `strategy.scope()` like every other model variable.
with strategy.scope():
  # Maps each feature string to an integer id (default config adds one OOV
  # bucket — TODO confirm against the KPL version in use).
  feature_lookup_layer = kpl.StringLookup(vocabulary=feature_vocab,
                                          mask_token=None)
  # Labels get no OOV bucket: "yes" -> 0, "no" -> 1 by vocabulary order.
  label_lookup_layer = kpl.StringLookup(vocabulary=label_vocab,
                                        num_oov_indices=0,
                                        mask_token=None)
  # Small functional models that wrap the lookups so they can be applied
  # inside a tf.data map without creating variables there.
  raw_feature_input = keras.layers.Input(
      shape=(3,), dtype=tf.string, name="feature")
  feature_id_input = feature_lookup_layer(raw_feature_input)
  feature_preprocess_stage = keras.Model(
      {"features": raw_feature_input}, feature_id_input)
  raw_label_input = keras.layers.Input(
      shape=(1,), dtype=tf.string, name="label")
  label_id_input = label_lookup_layer(raw_label_input)
  label_preprocess_stage = keras.Model({"label": raw_label_input}, label_id_input)
# + [markdown] id="Jgp8MX_7OR_A"
# Generate toy examples in a dataset:
# + id="chIY4fFANaFH"
def feature_and_label_gen(num_examples=200):
  """Generate `num_examples` toy rows.

  Each row draws 3 distinct heroes from `feature_vocab`; the label is
  "yes" exactly when "avenger" is among them, "no" otherwise.
  """
  feature_rows = []
  label_rows = []
  for _ in range(num_examples):
    picked = random.sample(feature_vocab, 3)
    feature_rows.append(picked)
    label_rows.append(["yes" if "avenger" in picked else "no"])
  return {"features": feature_rows, "label": label_rows}

examples = feature_and_label_gen()
# + [markdown] id="2AtZBya7OeyZ"
# Then we create the training dataset wrapped in a dataset_fn:
# + id="Gs0QYRZoNbvw"
def dataset_fn(_):
  """Build the training dataset.

  Applies the Keras preprocessing stages to the raw `examples`, then
  shuffles, batches, and repeats indefinitely. The unused argument is the
  `InputContext` supplied by `distribute_datasets_from_function`.
  """
  def _preprocess(example):
    return ({"features": feature_preprocess_stage(example["features"])},
            label_preprocess_stage(example["label"]))

  return (tf.data.Dataset.from_tensor_slices(examples)
          .map(_preprocess)
          .shuffle(200)
          .batch(32)
          .repeat())
# + [markdown] id="IT9PQexJiFtB"
# ### Build the model
# Second, we create the model and other objects. Make sure to create all variables
# under `strategy.scope`.
# + id="Quxud1uEazeo"
# These variables created under the `strategy.scope` will be placed on parameter
# servers in a round-robin fashion.
with strategy.scope():
  # Create the model. The input needs to be compatible with KPLs.
  model_input = keras.layers.Input(
      shape=(3,), dtype=tf.int64, name="model_input")
  # Embed each of the 3 feature ids, then average into one 20-dim vector.
  emb_layer = keras.layers.Embedding(
      input_dim=len(feature_lookup_layer.get_vocabulary()), output_dim=20)
  emb_output = tf.reduce_mean(emb_layer(model_input), axis=1)
  # Single sigmoid unit for the binary "contains avenger" label.
  dense_output = keras.layers.Dense(units=1, activation="sigmoid")(emb_output)
  model = keras.Model({"features": model_input}, dense_output)
  # Optimizer and metric are variables too, so they also live under scope.
  optimizer = keras.optimizers.RMSprop(learning_rate=0.1)
  accuracy = keras.metrics.Accuracy()
# + [markdown] id="iyuxiqCQU50m"
# Let's confirm that the use of `FixedShardsPartitioner` split all variables into two shards and each shard was assigned to different parameter servers:
# + id="04r1nO4WVDO1"
# The embedding table has 8 rows (7 vocab tokens plus, presumably, one OOV
# slot — verify against the StringLookup config above); FixedShardsPartitioner
# with NUM_PS=2 splits it into two (4, 20) shards, one per parameter server.
assert len(emb_layer.weights) == 2
assert emb_layer.weights[0].shape == (4, 20)
assert emb_layer.weights[1].shape == (4, 20)
assert emb_layer.weights[0].device == "/job:ps/replica:0/task:0/device:CPU:0"
assert emb_layer.weights[1].device == "/job:ps/replica:0/task:1/device:CPU:0"
# + [markdown] id="lWhfXZLRiHyM"
# ### Define the training step
# Third, create the training step wrapped into a `tf.function`:
# + id="aNNVo0bFa1K9"
@tf.function
def step_fn(iterator):
  """Run one training step on every local replica; return the summed loss."""
  def replica_fn(batch_data, labels):
    # Per-replica forward/backward pass.
    with tf.GradientTape() as tape:
      pred = model(batch_data, training=True)
      # Per-example losses (no reduction), then scale by global batch size.
      per_example_loss = keras.losses.BinaryCrossentropy(
              reduction=tf.keras.losses.Reduction.NONE)(labels, pred)
      loss = tf.nn.compute_average_loss(per_example_loss)
    gradients = tape.gradient(loss, model.trainable_variables)
    optimizer.apply_gradients(zip(gradients, model.trainable_variables))
    # Threshold the sigmoid output at 0.5 for the accuracy metric.
    actual_pred = tf.cast(tf.greater(pred, 0.5), tf.int64)
    accuracy.update_state(labels, actual_pred)
    return loss
  batch_data, labels = next(iterator)
  # Run on each replica of this worker, then sum the per-replica losses.
  losses = strategy.run(replica_fn, args=(batch_data, labels))
  return strategy.reduce(tf.distribute.ReduceOp.SUM, losses, axis=None)
# + [markdown] id="rvrYQUeYiLNy"
# In the above step function, calling `strategy.run` and `strategy.reduce` in the
# `step_fn` can support multiple GPUs per worker. If the workers have GPUs
# allocated, `strategy.run` will distribute the datasets on multiple replicas.
#
# + [markdown] id="GPJ3PV_L2zAY"
# ### Dispatch training steps to remote workers
# <a id="dispatch_training_steps_to_remote_workers"> </a>
#
# After all the computations are defined by `ParameterServerStrategy`, we will use
# the `ClusterCoordinator` class to create resources and distribute the training
# steps to remote workers.
#
# Let’s first create a `ClusterCoordinator` object and pass in the strategy
# object:
# + id="DpcMlH7Pa3DB"
coordinator = tf.distribute.experimental.coordinator.ClusterCoordinator(strategy)
# + [markdown] id="-xRIgKxciOSe"
# Then we create a per-worker dataset and an iterator. In the `per_worker_dataset_fn` below, wrapping the `dataset_fn` into
# `strategy.distribute_datasets_from_function` is recommended to allow efficient
# prefetching to GPUs seamlessly.
# + id="h9DCvTJTa4Q2"
# Wrapping `dataset_fn` in `distribute_datasets_from_function` lets the
# runtime prefetch data to worker devices (e.g. GPUs) efficiently.
@tf.function
def per_worker_dataset_fn():
  return strategy.distribute_datasets_from_function(dataset_fn)
# One dataset (and one iterator) is created on each worker from this function.
per_worker_dataset = coordinator.create_per_worker_dataset(per_worker_dataset_fn)
per_worker_iterator = iter(per_worker_dataset)
# + [markdown] id="i2pnOx78iRwW"
# The final step is to distribute the computation to remote workers using `schedule`. The `schedule` method enqueues a `tf.function` and returns a future-like `RemoteValue` immediately. The queued functions will be dispatched to remote workers in background threads and the `RemoteValue` will be filled asynchronously. The `join` method can be used to wait until all scheduled functions are executed.
# + id="gmPvactfa6Eh"
# Each `schedule` call enqueues a step asynchronously; `join` blocks until
# every scheduled function has finished, giving per-epoch synchronization.
num_epoches = 4
steps_per_epoch = 5
for i in range(num_epoches):
  # Reset the metric so the reported accuracy covers only this epoch.
  accuracy.reset_states()
  for _ in range(steps_per_epoch):
    coordinator.schedule(step_fn, args=(per_worker_iterator,))
  # Wait at epoch boundaries.
  coordinator.join()
  print ("Finished epoch %d, accuracy is %f." % (i, accuracy.result().numpy()))
# + [markdown] id="WBn-gn-OP3DR"
# Here is how you can fetch the result of a `RemoteValue`:
# + id="-15a2I_lQDO1"
# `schedule` returns a RemoteValue immediately; `fetch` blocks until the
# function has executed and its result is copied back to the coordinator.
loss = coordinator.schedule(step_fn, args=(per_worker_iterator,))
print ("Final loss is %f" % loss.fetch())
# + [markdown] id="htY4QKc9iXg9"
# Alternatively, you can launch all steps and do something while waiting for
# completion:
#
# ```Python
# for _ in range(total_steps):
# coordinator.schedule(step_fn, args=(per_worker_iterator,))
# while not coordinator.done():
# time.sleep(10)
# # Do something like logging metrics or writing checkpoints.
# ```
#
# For the complete training and serving workflow for this particular example,
# please check out this
# [test](https://github.com/tensorflow/tensorflow/blob/master/tensorflow/python/keras/distribute/parameter_server_training_test.py).
#
# + [markdown] id="kzNsj2GR3BGs"
# ### More about dataset creation
#
# The dataset in the above code is created using the `create_per_worker_dataset`
# API. It creates one dataset per worker and returns a container object. You can
# call `iter` method on it to create a per-worker iterator. The per-worker
# iterator contains one iterator per worker and the corresponding slice of a
# worker will be substituted in the input argument of the function passed to the
# `schedule` method before the function is executed on a particular worker.
#
# Currently the `schedule` method assumes workers are equivalent and thus assumes
# the datasets on different workers are the same except they may be shuffled
# differently if they contain a
# [dataset.shuffle](https://www.tensorflow.org/api_docs/python/tf/data/Dataset#shuffle)
# operation. Because of this, we also recommend the datasets to be repeated
# indefinitely and schedule a finite number of steps instead of relying on the
# `OutOfRangeError` from a dataset.
#
# Another important note is that `tf.data` datasets don’t support implicit
# serialization and deserialization across task boundaries. So it is important to
# create the whole dataset inside the function passed to
# `create_per_worker_dataset`.
# + [markdown] id="LcfdI_M83lAM"
# ## Evaluation
#
# There are more than one way to define and run an evaluation loop in distributed training. Each has its own pros and cons as described below. The inline evaluation method is recommended if you don't have a preference.
# + [markdown] id="oiG8EhcY3gA1"
# ### Inline evaluation
#
# In this method the coordinator alternates between training and evaluation and thus we call it inline evaluation. There are several benefits of inline evaluation. For example, it can support large evaluation models and evaluation datasets that a single task cannot hold. For another example, the evaluation results can be used to make decisions for training next epoch.
#
# There are two ways to implement inline evaluation:
#
# - **Direct evaluation** - For small models and evaluation datasets the coordinator can run evaluation directly on the distributed model with the evaluation dataset on the coordinator:
# + id="WakiAakoaHVn"
# Direct (inline) evaluation: the coordinator itself runs the model over a
# small preprocessed dataset — feasible only for small models/datasets.
eval_dataset = tf.data.Dataset.from_tensor_slices(
      feature_and_label_gen(num_examples=16)).map(
          lambda x: (
              {"features": feature_preprocess_stage(x["features"])},
              label_preprocess_stage(x["label"])
          )).batch(8)
eval_accuracy = keras.metrics.Accuracy()
for batch_data, labels in eval_dataset:
  pred = model(batch_data, training=False)
  # Same 0.5 threshold as used during training.
  actual_pred = tf.cast(tf.greater(pred, 0.5), tf.int64)
  eval_accuracy.update_state(labels, actual_pred)
print ("Evaluation accuracy: %f" % eval_accuracy.result())
# + [markdown] id="MKGHbdI7aGoJ"
# - **Distributed evaluation** - For large models or datasets that are infeasible to run directly on the coordinator, the coordinator task can distribute evaluation tasks to the workers via the `schedule`/`join` methods:
# + id="XcHNHJpDgEvK"
with strategy.scope():
  # Define the eval metric on parameter servers.
  eval_accuracy = keras.metrics.Accuracy()
@tf.function
def eval_step(iterator):
  """One distributed eval step: update `eval_accuracy` on the next batch."""
  def replica_fn(batch_data, labels):
    pred = model(batch_data, training=False)
    actual_pred = tf.cast(tf.greater(pred, 0.5), tf.int64)
    eval_accuracy.update_state(labels, actual_pred)
  batch_data, labels = next(iterator)
  strategy.run(replica_fn, args=(batch_data, labels))
def eval_dataset_fn():
  # Same preprocessing as training; repeated so a fixed number of eval
  # steps can be scheduled without exhausting the dataset.
  return tf.data.Dataset.from_tensor_slices(
      feature_and_label_gen(num_examples=16)).map(
          lambda x: (
              {"features": feature_preprocess_stage(x["features"])},
              label_preprocess_stage(x["label"])
          )).shuffle(16).repeat().batch(8)
per_worker_eval_dataset = coordinator.create_per_worker_dataset(eval_dataset_fn)
per_worker_eval_iterator = iter(per_worker_eval_dataset)
# Fan the eval steps out to workers, then wait for all of them to finish.
eval_steps_per_epoch = 2
for _ in range(eval_steps_per_epoch):
  coordinator.schedule(eval_step, args=(per_worker_eval_iterator,))
coordinator.join()
print ("Evaluation accuracy: %f" % eval_accuracy.result())
# + [markdown] id="cKrQktZX5z7a"
# Note: currently the `schedule`/`join` methods don’t support visitation guarantee or exactly-once semantics. In other words, there is no guarantee that all evaluation examples in a dataset will be evaluated exactly once; some may not be visited and some may be evaluated multiple times. Visitation guarantee on evaluation dataset is being worked on.
# + [markdown] id="H40X-9Gs3i7_"
# ### Side-car evaluation
#
# Another method is called side-car evaluation which is to create a dedicated evaluator task that repeatedly reads checkpoints and runs evaluation on a latest checkpoint. It allows your training program to finish early if you don't need to change your training loop based on evaluation results. However, it requires an additional evaluator task and periodic checkpointing to trigger evaluation. Following is a possible side-car evaluation loop:
#
# ```Python
# checkpoint_dir = ...
# eval_model = ...
# eval_data = ...
# checkpoint = tf.train.Checkpoint(model=eval_model)
#
# for latest_checkpoint in tf.train.checkpoints_iterator(
# checkpoint_dir):
# try:
# checkpoint.restore(latest_checkpoint).expect_partial()
# except (tf.errors.OpError,) as e:
# # checkpoint may be deleted by training when it is about to read it.
# continue
#
# # Optionally add callbacks to write summaries.
# eval_model.evaluate(eval_data)
#
# # Evaluation finishes when it has evaluated the last epoch.
# if latest_checkpoint.endswith('-{}'.format(train_epoches)):
# break
# ```
# + [markdown] id="9TkNbtpPhFRQ"
# ## Clusters in Real-world
# <a id="real_clusters"></a>
#
# Note: this section is not necessary for running the tutorial code in this page.
#
# In a real production environment, you will run all tasks in different processes
# on different machines. The simplest way to configure cluster information on each
# task is to set "TF_CONFIG" environment variables and use a
# `tf.distribute.cluster_resolver.TFConfigClusterResolver` to parse "TF_CONFIG".
# For a general description about "TF_CONFIG" environment variables, please see
# the [distributed training guide](https://www.tensorflow.org/guide/distributed_training#setting_up_tf_config_environment_variable).
#
# If you start your training tasks using Kubernetes or other configuration templates, it is very likely that these templates have already set “TF_CONFIG” for you.
# + [markdown] id="n7AK9SJGt3tQ"
# ### Set “TF_CONFIG” environment variable
#
# Suppose you have 3 workers and 2 parameter servers, the “TF_CONFIG” of worker 1
# can be:
#
# ```Python
# os.environ["TF_CONFIG"] = json.dumps({
# "cluster": {
# "worker": ["host1:port", "host2:port", "host3:port"],
# "ps": ["host4:port", "host5:port"],
# "chief": ["host6:port"]
# },
# "task": {"type": "worker", "index": 1}
# })
# ```
#
# The “TF_CONFIG” of the evaluator can be:
#
# ```Python
# os.environ["TF_CONFIG"] = json.dumps({
# "cluster": {
# "evaluator": ["host7:port"]
# },
# "task": {"type": "evaluator", "index": 0}
# })
# ```
#
# The “cluster” part in the above “TF_CONFIG” string for the evaluator is
# optional.
# + [markdown] id="fZRjMS0pt1LM"
# ### If you use the same binary for all tasks
#
# If you prefer to run all these tasks using a single binary, you will need to let
# your program branch into different roles at the very beginning:
#
# ```Python
# cluster_resolver = tf.distribute.cluster_resolver.TFConfigClusterResolver()
# if cluster_resolver.task_type in ("worker", "ps"):
# # start a TensorFlow server and wait.
# elif cluster_resolver.task_type == "evaluator":
# # run side-car evaluation
# else:
# # run the coordinator.
# ```
#
# The following code starts a TensorFlow server and waits:
#
# ```Python
# # Set the environment variable to allow reporting worker and ps failure to the
# # coordinator. This is a workaround and won't be necessary in the future.
# os.environ["GRPC_FAIL_FAST"] = "use_caller"
#
# server = tf.distribute.Server(
# cluster_resolver.cluster_spec(),
# job_name=cluster_resolver.task_type,
# task_index=cluster_resolver.task_id,
# protocol=cluster_resolver.rpc_layer or "grpc",
# start=True)
# server.join()
# ```
# + [markdown] id="ZWdYfK593eOL"
# ## Handling Task Failure
# + [markdown] id="Bl9eK5r13cOv"
# ### Worker failure
#
# `ClusterCoordinator` or `Model.fit` provides built-in fault tolerance for worker
# failure. Upon worker recovery, the previously provided dataset function (either
# to `create_per_worker_dataset` for CTL, or `DatasetCreator` for `Model.fit`)
# will be invoked on the workers to re-create the datasets.
# + [markdown] id="aP0OHZ1-Ne-B"
# ### Parameter server or coordinator failure
#
# However, when the coordinator sees a parameter server error, it will raise an `UnavailableError` or `AbortedError` immediately. You can restart the coordinator in this case. The coordinator itself can also become unavailable. Therefore, certain tooling is recommended in order to not lose the training progress:
# + [markdown] id="f7m7Itoz8lsI"
# * For `Model.fit`, you should use a `BackupAndRestore` callback, which handles
# the progress saving and restoration automatically. See
# [Callbacks and training](#callbacks-and-training) section above for an
# example.
# + [markdown] id="-XlLyJp53Z8A"
# * For CTLs, you should checkpoint the model variables periodically and load
# model variables from a checkpoint, if any, before training starts. The
# training progress can be inferred approximately from `optimizer.iterations`
# if an optimizer is checkpointed:
#
# ```Python
# checkpoint_manager = tf.train.CheckpointManager(
# tf.train.Checkpoint(model=model, optimizer=optimizer),
# checkpoint_dir,
# max_to_keep=3)
# if checkpoint_manager.latest_checkpoint:
# checkpoint = checkpoint_manager.checkpoint
# checkpoint.restore(
# checkpoint_manager.latest_checkpoint).assert_existing_objects_matched()
#
# global_steps = int(optimizer.iterations.numpy())
# starting_epoch = global_steps // steps_per_epoch
#
# for _ in range(starting_epoch, num_epoches):
# for _ in range(steps_per_epoch):
# coordinator.schedule(step_fn, args=(per_worker_iterator,))
# coordinator.join()
# checkpoint_manager.save()
# ```
# + [markdown] id="PlN1P7C53XK9"
# ### Fetching a `RemoteValue`
#
# Fetching a `RemoteValue` is guaranteed to succeed if a function is executed
# successfully. This is because currently the return value is immediately copied
# to the coordinator after a function is executed. If there is any worker failure
# during the copy, the function will be retried on another available worker.
# Therefore, if you want to optimize for performance, you can schedule functions
# without a return value.
# + [markdown] id="iZcR_xNZ3UdU"
# ## Error Reporting
#
# Once the coordinator sees an error such as `UnavailableError` from parameter
# servers or other application errors such as an `InvalidArgument` from
# `tf.debugging.check_numerics`, it will cancel all pending and queued functions
# before raising the error. Fetching their corresponding `RemoteValue`s will raise
# a `CancelledError`.
#
# After an error is raised, the coordinator will not raise the same error or any
# error from cancelled functions.
# + [markdown] id="QfhbXH-j3NVw"
# ## Performance Improvement
#
# There are several possible reasons if you see performance issues when you train
# with `ParameterServerStrategy` and `ClusterCoordinator`.
#
# One common reason is parameter servers have unbalanced load and some
# heavily-loaded parameter servers have reached capacity. There can also be
# multiple root causes. Some simple methods to mitigate this issue are to
#
# 1. shard your large model variables via specifying a `variable_partitioner`
# when constructing a `ParameterServerStrategy`.
# 2. avoid creating a hotspot variable that is required by all parameter servers
# in a single step if possible. For example, use a constant learning rate
# or subclass `tf.keras.optimizers.schedules.LearningRateSchedule` in
# optimizers since the default behavior is that the learning rate will become
# a variable placed on a particular parameter server and requested by all
# other parameter servers in each step.
# 3. shuffle your large vocabularies before passing them to Keras preprocessing
# layers.
#
# Another possible reason for performance issues is the coordinator. Our first
# implementation of `schedule`/`join` is Python-based and thus may have threading
# overhead. Also the latency between the coordinator and the workers can be large.
# If this is the case,
#
# * For `Model.fit`, you can set `steps_per_execution` argument provided at
# `Model.compile` to a value larger than 1.
#
# * For CTLs, you can pack multiple steps into a single `tf.function`:
#
# ```
# steps_per_invocation = 10
# @tf.function
# def step_fn(iterator):
# for _ in range(steps_per_invocation):
# features, labels = next(iterator)
# def replica_fn(features, labels):
# ...
#
# strategy.run(replica_fn, args=(features, labels))
# ```
#
# As we continue to optimize the library, we hope most users don’t have to
# manually pack steps in the future.
#
# In addition, a small trick for performance improvement is to schedule functions
# without a return value as explained in the handling task failure section above.
# + [markdown] id="chu5F7M_JmVk"
# ## Known Limitations
#
# <a id="known_limitations"> </a>
#
# Most of the known limitations are covered in above sections. This section
# provides a summary.
#
# ### `ParameterServerStrategy` general
#
# * `os.environ["GRPC_FAIL_FAST"] = "use_caller"` is needed on every task, including the coordinator, to make fault tolerance work properly.
# * Synchronous parameter server training is not supported.
# * It is usually necessary to pack multiple steps into a single function to achieve optimal performance.
# * It is not supported to load a saved_model via `tf.saved_model.load` containing sharded variables. Note loading such a saved_model using TensorFlow Serving is expected to work.
# * It is not supported to load a checkpoint containing sharded optimizer slot variables into a different number of shards.
# * It is not supported to recover from parameter server failure without restarting the coordinator task.
# * Usage of `tf.lookup.StaticHashTable` (which is commonly employed by some `tf.keras.layers.experimental.preprocessing` layers, such as `IntegerLookup`, `StringLookup`, and `TextVectorization`) results in resources placed on the coordinator at this time with PS training. This has performance implication of lookup RPCs from workers to the coordinator. This is a current high priority to address.
#
# ### `Model.fit` specifics
#
# * `steps_per_epoch` argument is required in `Model.fit`. You can select a
# value that provides appropriate intervals in an epoch.
# * `ParameterServerStrategy` does not have support for custom callbacks that
# have batch-level calls for performance reason. You should convert those
# calls into epoch-level calls with suitably picked `steps_per_epoch`, so that
# they are called every `steps_per_epoch` number of steps. Built-in callbacks
# are not affected: their batch-level calls have been modified to be
# performant. Supporting batch-level calls for `ParameterServerStrategy` is
# being planned.
# * For the same reason, unlike other strategies, progress bar and metrics are
# logged only at epoch boundaries.
# * Input for `Model.fit` only takes the type `DatasetCreator`.
# * `run_eagerly` is not supported.
# * Evaluation in `Model.fit` is not yet supported. This is one of the
# priorities.
# * `Model.evaluate` and `Model.predict` are not yet supported.
#
# ### Custom Training Loop specifics
#
# * `ClusterCoordinator.schedule` doesn't support visitation guarantees for a dataset.
# * When `ClusterCoordinator.create_per_worker_dataset` is used, the whole dataset must be created inside the function passed to it.
# * `tf.data.Options` is ignored in dataset created by `ClusterCoordinator.create_per_worker_dataset`.
| site/en-snapshot/tutorials/distribute/parameter_server_training.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# language: python
# name: python3
# ---
# # LoRAS "car_eval_4" Tutorial
# ## Prepare dataset
import pandas as pd
import numpy as np
from collections import Counter
from imblearn.datasets import fetch_datasets
# Fetch the imbalanced "car_eval_4" benchmark dataset (minority class label 1,
# majority class label -1) and split each class roughly in half into
# train/test partitions.
data = fetch_datasets()['car_eval_4']
data.data.shape
labels=data.target
print(labels.shape)
features=data.data
print(features.shape)
# Minority class (label == 1): 65 samples -> 32 train / 33 test.
label_1=np.where(labels == 1)[0]
label_1=list(label_1)
features_1=features[label_1]
features_1_trn=features_1[list(range(0,32))]
features_1_tst=features_1[list(range(32,65))]
# Majority class (label == -1): 1663 samples -> 831 train / 832 test.
label_0=np.where(labels == -1)[0]
label_0=list(label_0)
features_0=features[label_0]
features_0_trn=features_0[list(range(0,831))]
features_0_tst=features_0[list(range(831,1663))]
# Stack minority first, then majority; labels are 1 for minority, 0 for majority.
training_data=np.concatenate((features_1_trn,features_0_trn))
test_data=np.concatenate((features_1_tst,features_0_tst))
training_labels=np.concatenate((np.zeros(len(features_1_trn))+1,
                                np.zeros(len(features_0_trn))))
test_labels=np.concatenate((np.zeros(len(features_1_tst))+1,
                            np.zeros(len(features_0_tst))))
# ## LoRAS oversampling
import loras
# Oversample the minority training class with LoRAS.
min_class_points = features_1_trn
maj_class_points = features_0_trn
k = 20                   # nearest neighbours considered per minority point
num_shadow_points = 40   # shadow samples drawn per neighbourhood
# Generate enough synthetic points per minority point to roughly balance classes.
num_generated_points=(len(features_0)-len(features_1))//len(features_1)
num_aff_comb = 21        # shadow points combined per affine combination
loras_min_class_points = loras.fit_resample(maj_class_points,
                                            min_class_points, k=k,
                                            num_shadow_points=num_shadow_points,
                                            num_generated_points=num_generated_points,
                                            num_aff_comb=num_aff_comb)
print(loras_min_class_points.shape)
# Assemble the LoRAS-augmented training set (minority first, labelled 1).
LoRAS_feat = np.concatenate((loras_min_class_points, maj_class_points))
LoRAS_labels = np.concatenate((np.zeros(len(loras_min_class_points))+1,
                               np.zeros(len(maj_class_points))))
print(LoRAS_feat.shape)
print(LoRAS_labels.shape)
# ## SMOTE and its extensions oversampling
from imblearn.over_sampling import SMOTE, BorderlineSMOTE, SVMSMOTE, ADASYN
# Baseline oversamplers to compare against LoRAS.
# `ratio` was deprecated in imbalanced-learn 0.4 in favour of
# `sampling_strategy`; 1.0 means "resample minority up to parity".
sm = SMOTE(random_state=42, k_neighbors=30, sampling_strategy=1.0)
SMOTE_feat, SMOTE_labels = sm.fit_resample(training_data, training_labels)
print(SMOTE_feat.shape)
print(SMOTE_labels.shape)
smb = BorderlineSMOTE(random_state=42, k_neighbors=30, kind='borderline-1')
SMOTEb_feat, SMOTEb_labels = smb.fit_resample(training_data, training_labels)
print(SMOTEb_feat.shape)
print(SMOTEb_labels.shape)
smbt = BorderlineSMOTE(random_state=42, k_neighbors=30, kind='borderline-2')
# Bug fix: this previously called `smb.fit_resample`, so the borderline-2
# sampler (`smbt`) was constructed but never used.
SMOTEbt_feat, SMOTEbt_labels = smbt.fit_resample(training_data, training_labels)
print(SMOTEbt_feat.shape)
print(SMOTEbt_labels.shape)
sms = SVMSMOTE(random_state=42, k_neighbors=30)
SMOTEs_feat, SMOTEs_labels = sms.fit_resample(training_data, training_labels)
print(SMOTEs_feat.shape)
print(SMOTEs_labels.shape)
ada = ADASYN(random_state=42, n_neighbors=30)
ADA_feat, ADA_labels = ada.fit_resample(training_data, training_labels)
print(ADA_feat.shape)
print(ADA_labels.shape)
# ## Defining ML models and metrics
from sklearn.metrics import f1_score, balanced_accuracy_score, average_precision_score
from sklearn.neighbors import KNeighborsClassifier
def get_metrics(y_test, y_pred, y_prob):
    """Return [F1, balanced accuracy, average precision] for one run.

    `y_prob` is the (n_samples, 2) class-probability matrix; column 1
    (positive-class probability) feeds average_precision_score.
    """
    return [
        f1_score(y_test, y_pred),
        balanced_accuracy_score(y_test, y_pred),
        average_precision_score(y_test, y_prob[:, 1]),
    ]
def knn(X_train, y_train, X_test, y_test):
    """Fit a 30-nearest-neighbour classifier and return its test metrics."""
    classifier = KNeighborsClassifier(n_neighbors=30)
    classifier.fit(X_train, y_train)
    predictions = classifier.predict(X_test)
    probabilities = classifier.predict_proba(X_test)
    return get_metrics(y_test, predictions, probabilities)
# ## Training
# +
# Train/evaluate the same KNN classifier on the raw training set and on each
# oversampled variant; every entry is [f1, balanced accuracy, avg precision].
results_normal_knn = knn(training_data, training_labels, test_data, test_labels)
results_loras_knn = knn(LoRAS_feat, LoRAS_labels, test_data, test_labels)
results_sm_knn = knn(SMOTE_feat, SMOTE_labels, test_data, test_labels)
results_sms_knn = knn(SMOTEs_feat, SMOTEs_labels, test_data, test_labels)
results_smb_knn = knn(SMOTEb_feat, SMOTEb_labels, test_data, test_labels)
results_smbt_knn = knn(SMOTEbt_feat, SMOTEbt_labels, test_data, test_labels)
results_ada_knn = knn(ADA_feat, ADA_labels, test_data, test_labels)
# Ordering here must match `res_names` defined below.
results = [results_normal_knn, results_loras_knn, results_sm_knn, results_sms_knn, results_smb_knn, results_smbt_knn, results_ada_knn]
# -
res_names = ['Normal KNN', 'LoRAS KNN', 'SMOTE KNN', 'SMOTE SVM KNN', 'SMOTE BORDELINE-1 KNN', 'SMOTE BORDELINE-2 KNN', 'ADASYN KNN']
met_names = ['f1_score', 'balanced_accuracy_score', 'average_precision_score']
# Print each classifier's metrics as "name : value". The loop variables were
# previously named the wrong way round (`met` held the value, `r` the name),
# which printed correctly but read as a bug; names fixed, output unchanged.
for res, r_name in zip(results, res_names):
    print(r_name, " : [")
    for value, m_name in zip(res, met_names):
        print(m_name, " : ", value)
    print("]")
| Tutorials/LoRAS "car_eval_4" Tutorial.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# language: python
# name: python3
# ---
import pandas as pd
# ### Download the data and load it to Pandas.
#
# You can find them [here](https://drive.google.com/file/d/1NY6cmF9Shjw-dD7BD6bNmfcIVz-kQcFR/view?usp=sharing).
# Cast list: one row per (movie, actor) credit.
cast = pd.read_csv('data/cast.csv', index_col=None)
cast.head()
# Release dates per (title, year, country); parse `date` as datetime so the
# `.dt` accessor works below.
release_dates = pd.read_csv('data/release_dates.csv', index_col=None,
                            parse_dates=['date'], infer_datetime_format=True)
release_dates.head()
# ### Count the number of months in which movies with "Christmas" in their title tend to be released in the USA.
# Small demo of the `.dt` accessor on a datetime Series.
datetime_series = pd.Series(
    pd.date_range("2000-01-01", periods=3, freq="M")
)
datetime_series
datetime_series.dt.month
# Releases of "Christmas" titles by month — two equivalent approaches:
# value_counts on the extracted month ...
filter_xmas = release_dates['title'].str.contains('Christmas')
release_dates[filter_xmas]['date'].dt.month.value_counts()
# ... or groupby month and count group sizes.
filter_xmas = release_dates['title'].str.contains('Christmas')
release_dates[filter_xmas].groupby(release_dates[filter_xmas].date.dt.month).agg('size')
# ### Count the number of months in which movies with "Christmas" in their title tend to be released in Canada.
filter_xmas = (release_dates['title'].str.contains('Christmas')) & (release_dates['country'] == 'Canada')
release_dates[filter_xmas].groupby(release_dates[filter_xmas].date.dt.month).agg('size')
# ### Count the number of months in which movies whose titles start with "The Hobbit" are released in the USA.
filter_hobbit = (release_dates['title'].str.startswith('The Hobbit')) & (release_dates['country'] == 'USA')
release_dates[filter_hobbit].groupby(release_dates[filter_hobbit].date.dt.month).agg('size')
# + jupyter={"outputs_hidden": true}
# -
# ### Count day of the week on which movies with "Romance" in their title tend to be released in the USA.
filter_rom = (release_dates['title'].str.contains('Romance')) & (release_dates['country'] == 'USA')
release_dates[filter_rom].groupby(release_dates[filter_rom].date.dt.dayofweek).agg('count')
# Same count via value_counts. (This cell was left unfinished in the original
# notebook — the expression ended with a dangling `[`, a syntax error.)
filter_rom = (release_dates['title'].str.contains('Romance')) & (release_dates['country'] == 'USA')
release_dates[filter_rom]['date'].dt.dayofweek.value_counts()
# ### Count day of the week on which movies with "Action" in their title tend to be released in the USA.
# Bug fix: both "Action" filters previously searched for 'Romance' (copy-paste error).
filter_action = (release_dates['title'].str.contains('Action')) & (release_dates['country'] == 'USA')
release_dates[filter_action].groupby(release_dates[filter_action].date.dt.dayofweek).agg('size')
filter_action = (release_dates['title'].str.contains('Action')) & (release_dates['country'] == 'USA')
release_dates['day_of_week'] = release_dates['date'].apply(lambda x: "%d" % (x.dayofweek))
release_dates[filter_action].groupby('day_of_week').agg('size')
# ### On which date was each Judi Dench movie from the 1990s released in the USA?
# NOTE(review): '<NAME>' is an anonymisation placeholder in this copy of the
# notebook — substitute the actual actor name (e.g. 'Judi Dench') before running.
filter_dench = (cast['name'] == '<NAME>')
filter_year = (release_dates['year'].between(1990,1999)) & (release_dates['country'] == 'USA')
# Inner-join the actor's credits with matching US releases on (title, year).
dench_merge = pd.merge(cast[filter_dench], release_dates[filter_year], on=['title','year'])
#release_dates[filter_xmas].groupby(release_dates[filter_xmas].date.dt.month).agg('size')
dench_merge['date']
# ### In which months do films with <NAME> tend to be released in the USA?
filter_dench = (cast['name'] == '<NAME>')
filter_usa = (release_dates['country'] == 'USA')
dench_merge = pd.merge(cast[filter_dench], release_dates[filter_usa], on=['title','year'])
dench_merge.groupby(dench_merge.date.dt.month).agg('size')
# + jupyter={"outputs_hidden": true}
# -
# ### In which months do films with <NAME> tend to be released in the USA?
filter_tom = (cast['name'] == '<NAME>')
filter_usa = (release_dates['country'] == 'USA')
dench_merge = pd.merge(cast[filter_tom], release_dates[filter_usa], on=['title','year'])
dench_merge.groupby(dench_merge.date.dt.month).agg('size')
# + jupyter={"outputs_hidden": true}
| Pandas/6.2 Exercises-4.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# language: python
# name: python3
# ---
# +
#2.6 Probability
#2.6.1 Basic Probability Theory
import torch
from torch.distributions import multinomial
from d2l import torch as d2l
fair_probs = torch.ones([6]) / 6  # uniform probability for each of the 6 faces
pick_sum=11  # number of draws per sample
data_num=10  # number of samples to generate
# One sample of 11 draws, then a batch of 10 such samples.
print(multinomial.Multinomial(pick_sum, fair_probs).sample())
print(multinomial.Multinomial(pick_sum, fair_probs).sample((data_num,)))
# -
# Store the results as 32-bit floats for division
counts = multinomial.Multinomial(1000, fair_probs).sample()
counts / 1000  # Relative frequency as the estimate
# +
counts = multinomial.Multinomial(10, fair_probs).sample((500,))  # generate 500 groups of 10 draws
# Running totals per face, converted to running relative-frequency estimates.
cum_counts = counts.cumsum(dim=0)
estimates = cum_counts / cum_counts.sum(dim=1, keepdims=True)
d2l.set_figsize((6, 4.5))
for i in range(6):
    d2l.plt.plot(estimates[:, i].numpy(), label=("P(die=" + str(i + 1) + ")"))
# Dashed reference line at the true probability 1/6 ≈ 0.167.
d2l.plt.axhline(y=0.167, color='black', linestyle='dashed')
d2l.plt.gca().set_xlabel('Groups of experiments')
d2l.plt.gca().set_ylabel('Estimated probability')
d2l.plt.legend();
# -
| coookie89/Week1/ch2/2.6/example.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 2
# language: python
# name: python2
# ---
# # KNN概述
#
# k-近邻(kNN, k-NearestNeighbor)算法是一种基本分类与回归方法,我们这里只讨论分类问题中的k-近邻算法。
#
# ## KNN工作原理
#
# > 工作原理
#
# 1 假设有一个带有标签的样本数据集(训练样本集),其中包含每条数据与所属分类的对应关系。
# 2 输入没有标签的新数据后,将新数据的每个特征与样本集中数据对应的特征进行比较。
#
# * 计算新数据与样本数据集中每条数据的距离。
# * 对求得的所有距离进行排序(从小到大,越小表示越相似)。
# * 取前 k (k 一般小于等于 20 )个样本数据对应的分类标签。
#
# 3 求 k 个数据中出现次数最多的分类标签作为新数据的分类。
#
# > 通俗理解
#
# 给定一个训练数据集,对新的预测实例,在训练数据集中找到与该实例最邻近的 k 个实例,这 k 个实例的多数属于某个类,就把该预测实例分为这个类。
#
# > 算法流程
#
# 收集数据:任何方法
# 准备数据:距离计算所需要的数值,最好是结构化的数据格式
# 分析数据:任何方法
# 训练算法:此步骤不适用于 k-近邻算法
# 测试算法:计算错误率
# 使用算法:输入样本数据和结构化的输出结果,然后运行 k-近邻算法判断输入数据分类属于哪个分类,最后对计算出的分类执行后续处理
#
# > 算法特点
#
# 优点:精度高、对异常值不敏感、无数据输入假定
# 缺点:计算复杂度高、空间复杂度高
# 适用数据范围:数值型和标称型
# ## KNN实例
#
# 项目背景参考此处:https://github.com/apachecn/AiLearning/blob/master/docs/ml/2.k-%E8%BF%91%E9%82%BB%E7%AE%97%E6%B3%95.md
# +
# 首先导入依赖包
# %matplotlib inline
import numpy as np
import pandas as pd
import matplotlib.pyplot as plt
# -
# ### 准备数据
#
# 使用pandas直接从文件中读取数据
# +
def file2matrix(filename):
    """Read a whitespace-delimited, headerless data file into a DataFrame."""
    return pd.read_table(filename, header=None, delim_whitespace=True)
# Load the dating data file (whitespace-delimited, no header) into a DataFrame.
df = file2matrix('data/datingTestSet2.txt')
# -
# ### 分析数据
#
# 我们使用 matplotlib 来把数据进行可视化,通过2维散点图来显示不同样本的分布区域。
fig = plt.figure()
ax = fig.add_subplot(111)
# x and y must be sequences of equal length
# s is the marker area/size, default 20
# c is the per-sample colour value, default 'b'
# marker is the point shape, default 'o'
ax.scatter(x=df.iloc[:,0], y=df.iloc[:,1],s=10, c=df.iloc[:,3])
plt.show()
# ### 数据归一化
#
# 基于离散图来看,x轴的坐标数值和y轴的数值不在一个数量级且相差较大。
# ### 使用sklearn实现knn算法
#
from sklearn.neighbors import KNeighborsClassifier
from sklearn.model_selection import train_test_split
# Split the data into training and test sets.
# Use the first three columns as features.
x = df.iloc[:,0:3]
# Use the last column as the label.
y = df.iloc[:,3:]
x_train, x_test, y_train, y_test = train_test_split(x, y, test_size = 0.3)
# Fit a default KNN classifier and score it on the held-out 30%.
knn = KNeighborsClassifier()
knn.fit(x_train,y_train)
prediction = knn.predict(x_test)
score = knn.score(x_test,y_test)
# NOTE: this notebook runs on a Python 2 kernel, hence the print statements.
# print '真实分类标签:'+str(y_test)
print '模型分类结果:'+str(prediction)+'\n算法准确度:'+str(score)
| ml/07-apachecn/01-KNN.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# language: python
# name: python3
# ---
# # Regression Plots
#
# Seaborn has many built-in capabilities for regression plots, however we won't really discuss regression until the machine learning section of the course, so we will only cover the **lmplot()** function for now.
#
# **lmplot** allows you to display linear models, but it also conveniently allows you to split up those plots based off of features, as well as coloring the hue based off of features.
#
# Let's explore how this works:
import seaborn as sns
# %matplotlib inline
# Built-in example dataset: one row per restaurant bill.
tips = sns.load_dataset('tips')
tips.head()
# ## lmplot()
# Scatter plus fitted linear regression; `hue` splits (and colours) by category.
sns.lmplot(x='total_bill',y='tip',data=tips)
sns.lmplot(x='total_bill',y='tip',data=tips,hue='sex')
sns.lmplot(x='total_bill',y='tip',data=tips,hue='sex',palette='coolwarm')
# ### Working with Markers
#
# lmplot kwargs get passed through to **regplot** which is a more general form of lmplot(). regplot has a scatter_kws parameter that gets passed to plt.scatter. So you want to set the s parameter in that dictionary, which corresponds (a bit confusingly) to the squared markersize. In other words you end up passing a dictionary with the base matplotlib arguments, in this case, s for size of a scatter plot. In general, you probably won't remember this off the top of your head, but instead reference the documentation.
# http://matplotlib.org/api/markers_api.html
# `scatter_kws` is forwarded to plt.scatter; `s` is the marker area.
sns.lmplot(x='total_bill',y='tip',data=tips,hue='sex',palette='coolwarm',
           markers=['o','v'],scatter_kws={'s':100})
# ## Using a Grid
#
# We can add more variable separation through columns and rows with the use of a grid. Just indicate this with the col or row arguments:
sns.lmplot(x='total_bill',y='tip',data=tips,col='sex')
sns.lmplot(x="total_bill", y="tip", row="sex", col="time",data=tips)
sns.lmplot(x='total_bill',y='tip',data=tips,col='day',hue='sex',palette='coolwarm')
# ## Aspect and Size
#
# Seaborn figures can have their size and aspect ratio adjusted with the **height** (called **size** in seaborn versions before 0.9) and **aspect** parameters:
# The `size` parameter was renamed to `height` in seaborn 0.9 and later
# removed; `aspect` is the width/height ratio of each facet.
sns.lmplot(x='total_bill', y='tip', data=tips, col='day', hue='sex',
           palette='coolwarm', aspect=0.6, height=8)
# You're probably wondering how to change the font size or control the aesthetics even more, check out the Style and Color Lecture and Notebook for more info on that!
#
# # Great Job!
| data-visualization/seaborn/4_regression_plots.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# language: python
# name: python3
# ---
from IPython.display import HTML
HTML('''<script>
code_show=true;
function code_toggle() {
if (code_show){
$('div.input').hide();
} else {
$('div.input').show();
}
code_show = !code_show
}
$( document ).ready(code_toggle);
</script>
<form action="javascript:code_toggle()">
<input type="submit" value="Click here to toggle on/off the raw code."></form>''')
# !echo 'Title: Test Simulation 1 -- repFAM + feature selection'
# !echo 'Author: <NAME>'
# !echo 'Last modified:' `date`
from wand.image import Image as WImage
import matplotlib.pyplot as plt
# Images for this study are located [here][1].
#
# [1]: https://drive.google.com/drive/u/1/folders/1gxp28ctA59WtSEN2buuF76cRU7PCmgEh
# ## Simulation Truth
#
# - $(\mu_0^\star, \mu_1^\star) = (-2, 2)$
# - $N = (300, 300)$, i.e. $I = 2$
# - $J=7$
# - $K = 3$
# - $\mathbf{w}_1 = (0.7,~0.0,~0.3)$
# - $\mathbf{w}_2 = (0.6,~0.1,~0.3)$
# - $\sigma^2_i = 0.1$
# - $\mathbf{Z}^{\text{TR}_1}$:
# Render the simulation-truth feature allocation matrices from pre-generated PDFs.
WImage(filename="../results/test-sims/KMCMC2/z1/scale0/img/yz/Z_true.pdf")
# - $\mathbf Z^{\text{TR}_2}$:
WImage(filename="../results/test-sims/KMCMC2/z2/scale0/img/yz/Z_true.pdf")
# ## Simulation Study
# There are two simulated datasets. One that uses $\mathbf Z_1$, another that uses $\mathbf Z_2$.
# In $\mathbf Z_2$, two of the columns differ by only one bit.
#
# For each dataset, after simulating the observations $y_{i,n,j}$, the following models were fit
# for $K \in \{2, 3, 4, 5\}$:
# - IBP + feature selection
# - repFAM + feature selection, with different parameter settings for the repulsive penalty
# - The penalty term is $\prod_{k_1=1}^{K-1} \prod_{k_2=k_1}^K 1 - C_\phi(\mathbf z_{k_1}, \mathbf z_{k_2})$,
# where $C_\phi(\mathbf z_{k_1}, \mathbf z_{k_2}) = \exp(-||\mathbf z_{k_1} - \mathbf z_{k_2}||_1 / \phi)$,
# and $\phi > 0$. We will define $C_\phi(\cdot) = 1$ for $\phi=0$. Hence, when $\phi=0$, we recover the IBP.
# And as $\phi$ increases, the penalty for similar columns in $\mathbf Z$ is greater.
# - The repFAM was fit for $\phi \in \{0.01, 0.1, 1, 10\}$.
#
# So, in total, 40 models were fit.
#
# ## MCMC Settings
# - A burn-in period of 3000 iterations was used.
# - 1000 samples, thinned by every other sample were obtained.
# - Both $L_0$ and $L_1$ were set to 2.
#
#
# ## Results
# When $K \ge K^\text{TR}$, $\mathbf Z^\text{TR}$ is recovered (for both datasets).
#
# The following images summarize the LPML for each model. `scale` in the legends refer to $\phi$.
# The DIC and calibration metric are in the Google drive directory referenced above under `metrics/`.
# They show similar trends.
# ### LPML for $\mathbf Z_1$
# LPML summary figures for each simulated dataset (pre-generated PDFs).
WImage(filename="../results/test-sims/metrics/z1/LPML.pdf")
# ### LPML for $\mathbf Z_2$
WImage(filename="../results/test-sims/metrics/z2/LPML.pdf")
# First, for $\phi=0$, the sampling method for the regular IBP was used. LPML increases
# if $K$ approaches $K^\text{TR}$ and stays at the same level after. For $\phi=10$,
# LPML seems to start decreasing slightly after $K^\text{TR}$. The decreasing trend
# seems clearer for $\mathbf Z_2$.
#
# For $\phi = (0.01, 0.1, 1.0)$, $\sigma_i^2$ is estimated to be lower (0.05 instead of 0.1).
# I'm not certain why this could be the case. But it could be due to the small number of
# observations and relatively high proportion (about 20%) of missing values.
#
# ## Next steps
# A couple things we could try next are
# - running the experiment with larger number of observations. Perhaps $N=(1000, 1000)$.
# - using higher $\phi$, e.g. 100, 1000. To see the effects on LPML when $K > K^\text{TR}$.
| sims/repfam_fs/test/notebooks/test-sims-1.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: python3.6
# language: python
# name: python3.6
# ---
# + deletable=true editable=true
import cobrame
import cobra
import pickle
import math
import json
from glob import glob
from tqdm import tqdm
from os.path import exists
import seaborn as sns
from matplotlib import pyplot as plt
import palettable
import numpy as np
from collections import OrderedDict
from scipy.stats import ttest_ind, ks_2samp, ranksums, mannwhitneyu, spearmanr, pearsonr
from sklearn import preprocessing
from sklearn.decomposition import PCA
from sklearn.cluster import AgglomerativeClustering
from me_biomass.general_info import met_to_name, aas_to_name
from me_biomass.me_solve_functions import solve_me_model
from me_biomass.load_model import load_me_model
from me_biomass.characterize_solution import *
from me_biomass.load_model import currency_met_to_synthesis_rxn
from me_biomass.update_mpl_rcparams import update_rcparams
from cobrame.util.dogma import amino_acids
# %matplotlib inline
def to_drop(x):
    """Return True if id `x` starts with one of the excluded reaction
    prefixes (charging/formation/translation/transcription/DM_), i.e.
    ME-model machinery reactions to drop from downstream comparisons."""
    # str.startswith accepts a tuple of prefixes, so the original
    # five-branch if/elif chain collapses to a single check.
    return x.startswith(
        ('charging', 'formation', 'translation', 'transcription', 'DM_'))
# -
# Directory where all figures and tables produced below are written.
out_loc = './output/'
# + [markdown] deletable=true editable=true
# # Part 1
# Run the following to reproduce figures in
# - Section 1: Benchmarking ME-model predictions of biomass composition
# - Section 2: Growth condition-dependent biomass composition
# + deletable=true editable=true
source_to_name = {'C': 'Carbon', 'P': 'Phosphorus', 'S': 'Sulfur', 'N': 'Nitrogen'}
# filter metal ions and metabolites whose primary role is not as a prosthetic group
prosthetic_group_mets_to_filter = [
    'lys__L_c','nad_c', 'nadp_c', 'ca2_c', 'cl_c', 'cobalt2_c', 'cu2_c', 'fe2_c',
    'fe3_c', 'k_c', 'mg2_c', 'nadh_c', 'nadph_c', 'ni2_c', 'zn2_c', 'mn2_c', 'fad_c', 'nh4_c']
# Amino-acid ids with the trailing compartment suffix ('_c') stripped.
aas = [i[:-2] for i in amino_acids.values()]
# + deletable=true editable=true
# Load the full ME-model (slow); used by all analyses below.
me = load_me_model(json=False)
# + [markdown] deletable=true editable=true
# # (1) Benchmarking ME-model predictions of biomass composition
# + deletable=true editable=true
# run aerobic and anaerobic ME-model simulations
# Results are cached as JSON; delete ./simulations/aerobic.json to force a re-solve.
if not exists('./simulations/aerobic.json'):
    # Aerobic: allow unlimited O2 uptake.
    me.reactions.EX_o2_e.lower_bound = -1000
    solve_me_model(me, 2.5, using_soplex=False)
    sol1 = me.solution
    # Anaerobic: block O2 uptake and re-solve.
    me.reactions.EX_o2_e.lower_bound = 0
    solve_me_model(me, 2.5, using_soplex=False)
    sol2 = me.solution
    with open('./simulations/aerobic.json', 'w') as f:
        json.dump(sol1.x_dict, f)
    with open('./simulations/anaerobic.json', 'w') as f:
        json.dump(sol2.x_dict, f)
else:
    # Rehydrate cached flux dicts into cobra Solution objects.
    with open('./simulations/aerobic.json', 'r') as f:
        x_dict = json.load(f)
    sol1 = cobra.core.Solution(1, x_dict=x_dict, status='optimal')
    with open('./simulations/anaerobic.json', 'r') as f:
        x_dict = json.load(f)
    sol2 = cobra.core.Solution(1, x_dict=x_dict, status='optimal')
# + deletable=true editable=true
# Plot comparison of ME-model-predicted biomass demands (aerobic sol1 and
# anaerobic sol2) against the static iJO1366 biomass objective function,
# one panel each for amino acids, prosthetic groups, and coenzymes.
fig, axes = plt.subplots(3, 1, figsize=(10, 11))
update_rcparams()
met_list = []  # accumulates every metabolite shown across the three panels
for i, kind in enumerate(['amino_acid', 'cofactors', 'coenzymes']):
    if kind in ['amino_acid', 'cofactors']:
        comp1 = compare_to_ijo_biomass(me, solution=sol1, kind=kind)
        comp2 = compare_to_ijo_biomass(me, solution=sol2, kind=kind)
        if kind == 'cofactors':
            # Drop metals / metabolites whose primary role is not prosthetic.
            for met in prosthetic_group_mets_to_filter:
                comp1.drop([met], inplace=True, errors='ignore')
                comp2.drop([met], inplace=True, errors='ignore')
    else:
        comp1 = compare_cofactor_to_ijo_biomass(me, currency_met_to_synthesis_rxn=currency_met_to_synthesis_rxn,
                                                solution=sol1)
        comp2 = compare_cofactor_to_ijo_biomass(me, currency_met_to_synthesis_rxn=currency_met_to_synthesis_rxn,
                                                solution=sol2)
    logy = True
    # Join aerobic and anaerobic results; '_ana' suffixes the anaerobic columns.
    joined = comp1.join(comp2, rsuffix='_ana')
    joined = joined.drop('gthox', errors='ignore')
    joined.rename(lambda x: x.replace("_c", ''), inplace=True)
    joined.drop('Measured_ana', axis=1, inplace=True)
    # NOTE(review): 'ME-Anaerboic' is a typo, but it is a displayed legend
    # label (runtime string), so it is intentionally left unchanged here.
    joined.columns = ['ME-Aerobic', 'iJO1366 BOF', 'ME-Anaerboic']
    ax = axes[i]
    legend=False
    if kind == 'amino_acid':
        title = 'Amino Acids'
        legend = True  # only the first panel carries the legend
    elif kind == 'cofactors':
        title = 'Prosthetic Groups'
    elif kind == 'coenzymes':
        title = 'Coenzymes'
    joined.rename(met_to_name, inplace=True)
    plt.rcParams['legend.facecolor'] = 'w'
    joined[['ME-Aerobic', 'ME-Anaerboic', 'iJO1366 BOF']].plot(kind='bar', logy=logy, ax=ax,
                                                               color=['#0099E6', '#F23814', '#000000'],
                                                               legend=legend)
    ax.set_xticklabels(joined.index, fontdict={'horizontalalignment': 'right', 'rotation':45})
    ax.set_title(title)
    ax.set_facecolor('w')
    met_list.extend([i for i in joined.index])
axes[0].set_ylim([.01, 10])
axes[1].set_ylabel(r'Growth Normalized Synthesis ($\mathrm{\frac{mmol}{gDW}}$)', size=20)
ax.figure.tight_layout()
ax.figure.subplots_adjust(hspace=1)
ax.figure.savefig('%s/figure_2.png' % out_loc)
ax.figure.savefig('%s/Fig2.svg' % out_loc)
# + [markdown] deletable=true editable=true
# # (2) Growth condition-dependent biomass composition
# + deletable=true editable=true
def rename_columns(df):
    """Rename `df`'s metabolite-id columns to short display names, in place.

    Looks each column id up in the global ME-model `me` (first with a '_c'
    compartment suffix appended, then as-is). Mutates both `met.name` on the
    model's metabolite objects and `df.columns`, and returns the same `df`.
    """
    # `full` is a hard-wired switch: since it is always False, the final
    # `if not full` branch overrides the name with the bare id (sans '_c')
    # for every column, making the nadp/nad/fad special cases above moot.
    full = False
    col_names = []
    for i in df.columns:
        # Collapse any iron-related column to the single label 'iron'.
        if 'iron' in i:
            col_names.append('iron')
            continue
        try:
            met = me.metabolites.get_by_id(i + '_c')
        except:
            met = me.metabolites.get_by_id(i)
        if not met.name:
            met.name = met.id.replace("_c", '')
        elif met.id == 'nadp_c':
            met.name = 'nadp'
        elif met.id == 'nad_c':
            met.name = 'nad'
        elif met.id == 'fad_c':
            met.name = 'fad'
        if not full:
            met.name = met.id.replace("_c", "")
        col_names.append(met.name)
    df.columns = col_names
    return df
def get_cofactor_demand(df, growth_norm):
    """Compute per-condition biomass component demands from raw flux solutions.

    `df` has one column per simulated condition and one row per ME-model
    variable; each column is wrapped into a cobra Solution and summarized
    into amino-acid, coenzyme, and prosthetic-group demands. Returns a
    DataFrame with conditions as rows and components as columns.
    `growth_norm` is passed through to the comparison helpers.
    """
    out_dict = {}
    for c in tqdm(df.columns):
        x_dict = df[c].to_dict()
        sol = cobra.core.Solution(1, x_dict=x_dict, status='optimal')
        out_dict[c] = {}
        # get coenzyme, prosthetic group, and amino acid demands
        prosthetic = compare_to_ijo_biomass(me, solution=sol, kind='cofactors',
                                            growth_norm=growth_norm)
        coenzymes = compare_cofactor_to_ijo_biomass(
            me, currency_met_to_synthesis_rxn=currency_met_to_synthesis_rxn,
            solution=sol, growth_norm=growth_norm)
        # NOTE: this local `aas` shadows the module-level `aas` list.
        aas = compare_to_ijo_biomass(me, solution=sol, kind='amino_acid', growth_norm=growth_norm)
        # update output with biomass component demands; popitem()[1] extracts
        # the single remaining column's {metabolite: value} mapping
        out_dict[c].update(aas.drop('Measured', axis=1).to_dict().popitem()[1])
        out_dict[c].update(coenzymes.drop('Measured', axis=1).drop(
            prosthetic_group_mets_to_filter, errors='ignore').to_dict().popitem()[1])
        out_dict[c].update(prosthetic.drop('Measured', axis=1).drop(
            prosthetic_group_mets_to_filter, errors='ignore').to_dict().popitem()[1])
    return pd.DataFrame(out_dict).T
def get_color_for_conditions(cluster_df):
    """Build a one-column DataFrame mapping each condition in
    `cluster_df.index` to a plot colour under 'Nutrient Source'.

    The nutrient source is the leading token of the condition name
    (C/N/P/S); anaerobic conditions get a lighter shade of the same hue.
    """
    aerobic_palette = {'N': 'g', 'C': 'k', 'P': 'r', 'S': 'y'}
    anaerobic_palette = {'N': '#90ee90', 'C': '#D3D3D3', 'P': '#ff6961', 'S': '#ffffe0'}
    result = pd.DataFrame()
    for condition in cluster_df.index:
        palette = anaerobic_palette if 'anaerobic' in condition else aerobic_palette
        result.loc[condition, 'Nutrient Source'] = palette[condition.split('_')[0]]
    return result
def summarize_conditions(df, growth_norm=True, split=False):
    """Summarize raw flux solutions into a per-condition demand table with
    human-readable column names. `split` is accepted for interface
    compatibility but unused."""
    # NOTE: an earlier revision dropped the glutathione-substrate conditions
    # here (e.g. 'C_gthrd_e'), since glutathione doubles as a cofactor; that
    # filtering is currently disabled.
    demand_table = get_cofactor_demand(df, growth_norm)
    return rename_columns(demand_table)
# + deletable=true editable=true
# Load every media-condition simulation result into one DataFrame
# (columns = conditions, rows = ME-model variables).
full_dict = {}
for fi in glob('./simulations/media_sims/*'):
    with open(fi, 'r') as f:
        x_dict = json.load(f)
    # Normalize file names into condition names like 'C_glc_e' /
    # 'C_anaerobic_glc_e' (nutrient source letter, optional 'anaerobic',
    # exchange metabolite).
    fi = fi.split('/')[-1].replace('_sol.json', '')
    if 'anaerobic' in fi:
        name = fi.replace('anaerobic_', '').replace('_EX_', '_anaerobic_')
    else:
        name = fi.replace('aerobic_', '').replace('_EX_', '_')
    full_dict[name] = x_dict
full_df = pd.DataFrame(full_dict)
# Keep only conditions with complete solutions for downstream analysis.
df = full_df.dropna(axis=1).copy()
df.to_csv('%s/raw_solutions.csv' % out_loc)
# + deletable=true editable=true
full_df = full_df.fillna(0)
# Pivot growth rates into a (nutrient source x metabolite) table for export.
reorg_df = pd.DataFrame()
for i in full_df.columns:
    if 'anaerobic' in i:
        met = i.split('aerobic_')[-1][:-2]
        source = source_to_name[i[0]] + ' anaerobic'
    else:
        met = '_'.join(i.split('_')[1:-1])
        source = source_to_name[i[0]] + ' aerobic'
    # biomass_dilution is the growth-rate variable of the ME-model solution.
    reorg_df.loc[source, met] = full_df.loc['biomass_dilution', i]
reorg_df.T.fillna('').to_excel('%s/S1_data.xlsx' % out_loc)
# + deletable=true editable=true
# Count simulated conditions; clip(1, 1) turns every present entry into 1
# so the double sum counts cells rather than summing growth rates.
print('total number of conditions', reorg_df.clip(lower=1, upper=1).sum().sum())
print('total number of aerobic conditions', reorg_df.loc[[i for i in reorg_df.index if 'anaerobic' not in i]].clip(lower=1, upper=1).sum().sum())
# A condition is called growth-supporting if its growth rate exceeds 0.01.
growth_support_df = reorg_df.copy()
growth_support_df[growth_support_df>0.01] =1
print('total number of growth-supporting aerobic conditions', growth_support_df.loc[[i for i in reorg_df.index if 'anaerobic' not in i]].sum().sum())
print('total number of growth-supporting conditions', growth_support_df.sum().sum())
# + deletable=true editable=true
# plot distribution of growth rates by nutrient conditions
fig, axes = plt.subplots(3, 3, figsize=(15, 10), sharex=True)
iter_axes = iter(axes.flatten())
hist_df = reorg_df.copy()
hist_df[hist_df == 0] = np.nan
for index in hist_df.sort_index().index:
if 'Carbon' not in index and 'Nitrogen' not in index:
bins = 2
else:
bins = 10
ax = next(iter_axes)
_ = hist_df.loc[index].hist(ax=ax, bins=bins)
ax.set_title(index)
axes.flatten()[-1].remove()
fig.savefig('%s/growth_rates.png' % out_loc)
# + deletable=true editable=true
# Quick check: protein biomass vs growth rate across conditions.
plt.scatter(df.loc['biomass_dilution'], df.loc['protein_biomass_to_biomass'])
plt.ylabel('Protein Biomass')
plt.xlabel('Growth Rate')
# + deletable=true editable=true
# Summarize demands per condition (cached to CSV; delete the file to recompute).
condition_df_file = '%s/condition_df_no_gr_norm.csv' % out_loc
if not exists(condition_df_file):
    condition_df = summarize_conditions(df, growth_norm=False)
    condition_df.to_csv(condition_df_file)
else:
    condition_df = pd.read_csv(condition_df_file, index_col=0)
color_df = get_color_for_conditions(condition_df)
condition_df = condition_df.dropna(how='all')
# + deletable=true editable=true
# Exclude conditions with negligible growth (< 0.1) from both tables.
no_growth = df.loc['biomass_dilution'][df.loc['biomass_dilution'] < .1].sort_values().index
print('Dropping due to low growth', no_growth)
condition_df = condition_df.drop(no_growth)
df = df.drop(no_growth, axis=1)
# + [markdown] deletable=true editable=true
# ### (Figure 4) PCA showing how micronutrient use differentiates conditions
# + deletable=true editable=true
# Drop oxidized glutathione, then normalize each condition's demands by its
# protein biomass fraction (per-gram-protein demands).
condition_df = condition_df.drop('gthox', axis=1)
condition_df = (condition_df.T / df.loc['protein_biomass_to_biomass']).T.dropna()
# + deletable=true editable=true
# run PCA on standardized per-condition demands (Figure 4)
pca_df = condition_df.copy()
expression_values = preprocessing.StandardScaler().fit_transform(pca_df)
pca = PCA(n_components=3)
a = pca.fit(expression_values)
X_r = a.transform(expression_values)
fig = plt.figure(figsize=(10, 10))
gs = fig.add_gridspec(3, 3)
ax = fig.add_subplot(gs[:-1, :-1])
# Paired palette: each nutrient source gets an aerobic/anaerobic colour pair.
source_to_color = dict(zip(['C_anaerobic', 'C', 'S_anaerobic', 'S', 'P_anaerobic', 'P', 'N_anaerobic', 'N'],
                           palettable.colorbrewer.qualitative.Paired_8.hex_colors))
# plot and format points on PCA plot
for z in range(len(X_r[:, 0])):
    index = pca_df.index[z]
    source = index.split('_')[0] if 'anaerobic' not in index else '_'.join(index.split('_')[:2])
    if 'anaerobic' in source:
        # Anaerobic conditions: open (white) markers edged in the source colour.
        edgecolor = source_to_color[source.split('_')[0]]
        color = 'white'
        label=None
    else:
        edgecolor = None
        label=source_to_name[source]
        color = source_to_color[source.split('_')[0]]
    ax.scatter(X_r[z, 0], X_r[z, 1], c=color, edgecolors=edgecolor, s=150, label=label, linewidths=1.5, alpha=.7)
ax.set_xlabel('PC 1 (%.2f)' % a.explained_variance_ratio_[0])
ax.set_ylabel('PC 2 (%.2f)' % a.explained_variance_ratio_[1])
ax.set_facecolor('w')
# Deduplicate legend entries via an ordered dict keyed on label.
handles, labels = ax.get_legend_handles_labels()
by_label = OrderedDict(zip(labels[:-1], handles[:-1]))
ax.legend(by_label.values(), by_label.keys(), loc='upper left', facecolor='w', ncol=1, title='Nutrient Sources',
          title_fontsize=13)
# plot PCA weightings for PC 1 (bottom, vertical bars) and PC 2 (right, horizontal)
all_weights=pd.DataFrame()
for component in [0, 1]:
    if component == 0:
        ax = fig.add_subplot(gs[-1, :-1])
        bar_kind = 'bar'
    else:
        ax = fig.add_subplot(gs[:-1, -1])
        bar_kind = 'barh'
    ax.set_facecolor('w')
    weight_dfs = pd.DataFrame(a.components_[component], index=pca_df.columns).T
    all_weights = all_weights.join(weight_dfs.T, how='outer', rsuffix='new')
    # from https://www.pnas.org/content/110/6/2135
    # inspired by https://www.ncbi.nlm.nih.gov/pmc/articles/PMC4906156/
    prebiotic_aas = ['ala__L', 'asp__L', 'glu__L', 'gly', 'ile__L',
                     'leu__L', 'pro__L', 'ser__L', 'thr__L', 'val__L']
    # NOTE: the prebiotic list is immediately cleared, which disables the
    # green "Prebiotic AA" highlighting below (presumably intentional).
    prebiotic_aas = []
    peroyl_scavanging = ['his__L', 'cys__L', 'met__L', 'tyr__L', 'trp__L']
    weight_dfs.sort_values(0, axis=1).loc[0].plot(kind=bar_kind, ax=ax)
    # Recolour/relabel bars by metabolite category.
    for i, p in enumerate(ax.patches):
        if weight_dfs.sort_values(0, axis=1).columns[i] in prebiotic_aas:
            p.set_color('g')
            p.set_label('Prebiotic AA')
        elif weight_dfs.sort_values(0, axis=1).columns[i] in peroyl_scavanging:
            p.set_color('r')
            p.set_label('Peroxyl scavanging AA')
        elif '__L' not in weight_dfs.sort_values(0, axis=1).columns[i]:
            p.set_color('k')
            p.set_label('Cofactor')
        else:
            p.set_label('Non-prebiotic AA')
    ax.set_title('PC %i weighting' % (component+1))
    if component == 0:
        ax.set_xticklabels(['']* len(weight_dfs.sort_values(0, axis=1).columns))
        # remove duplicate legend entries
        handles, labels = ax.get_legend_handles_labels()
        by_label = OrderedDict(zip(labels[:-1], handles[:-1]))
        ax.legend(by_label.values(), by_label.keys(), ncol=1, facecolor='w')
    else:
        ax.set_yticklabels(['']* len(weight_dfs.sort_values(0, axis=1).columns))
fig.tight_layout()
fig.savefig('%s/figure_4.svg' % out_loc)
all_weights.columns = ['PC 1', 'PC 2']
all_weights.sort_values('PC 1', ascending=False).applymap(lambda x: '%.3f' % x).to_csv(
    '%s/figure_4_pc_weightings.csv' % out_loc)
# + deletable=true editable=true
all_weights.sort_values('PC 1', ascending=True).head(5)
# + deletable=true editable=true
# Correlate PC2 scores with growth rate (biomass dilution) across conditions.
# BUG FIX: this cell imported `pearsonr` but only ever calls `spearmanr`;
# import the function actually used so the cell is self-contained.
from scipy.stats import spearmanr
print(spearmanr(X_r[:, 1], df.loc['biomass_dilution']))
plt.scatter(df.loc['biomass_dilution'], X_r[:, 1])
# + deletable=true editable=true
# Spearman correlation of every retained (non-zero, non-dropped) row of `df`
# against the PC2 scores; results collected as (rho, p) per reaction.
temp = pd.DataFrame()
for i in df[df!=0].dropna().index:
    if to_drop(i):
        continue
    rho, p = spearmanr(X_r[:, 1], df.loc[i])
    temp.loc[i, 'rho'] = rho
    temp.loc[i, 'p'] = p
# + deletable=true editable=true
# plot histograms of specific metabolites
hist_df = condition_df.copy()
to_plot = list(reversed(['gly', 'nad', 'his__L', 'btn']))
fig, axes = plt.subplots(1, 4, figsize=(10, 1), sharey=True)
for i, col in enumerate(to_plot):
    if col not in to_plot:
        # NOTE(review): this guard can never trigger while iterating to_plot
        # itself — likely leftover from an earlier version.
        continue
    ax=axes[i]
    ax.set_facecolor('w')
    if col != 'nad':
        # Use the model's human-readable name; wrap long 'amide' names.
        ax.set_title(ijo.metabolites.get_by_id(col+'_c').name.replace('amide', 'amide \n'))
    else:
        ax.set_title('NAD')
    # Overlay aerobic vs anaerobic condition distributions for this metabolite.
    ana = [i for i in hist_df.index if 'anaerobic' in i]
    aer = [i for i in hist_df.index if 'anaerobic' not in i]
    ax.hist([hist_df.loc[aer, col], hist_df.loc[ana, col]], bins=20)
    ax.set_xticklabels(['' for i in ax.get_xticks()],
                       fontdict={'horizontalalignment': 'right', 'rotation':45})
# Single shared legend anchored below the panel row (last axis used as host).
ax.legend(['aerobic', 'anaerobic'], bbox_to_anchor=(-1.25, -.5), loc='upper center', fontsize=15, ncol=2,
          facecolor='w')
fig.savefig('%s/hist_for_pca_figure_4.svg' % out_loc)
# + [markdown] deletable=true editable=true
# ## (Figure 3) Standard deviation analysis of nutrient sources
# + deletable=true editable=true
# Figure 3 panel A: range (min..max) of normalized biosynthesis demand per
# metabolite, drawn by overplotting the min bars in white on top of the max bars.
fig = plt.figure(figsize=(18,7))
gs = fig.add_gridspec(2, 5)
ax = fig.add_subplot(gs[0,:-1])
#condition_df = condition_df.drop(['C_ethso3_e', 'C_pro__L_e', 'C_for_e', 'N_pro__L_e', 'C_butso3_e'])
plot_df = condition_df.copy()
c2, c1 = palettable.wesanderson.Zissou_5.hex_colors[0:2]
ax = plot_df.max().sort_values().plot(kind='bar', logy=True,
                                      label='max-min', ax=ax, color=c1)
# NOTE(review): max() and min() are each sorted independently, so the two bar
# series may be in different x-orders — confirm the orderings coincide.
ax = plot_df.min().sort_values().plot(kind='bar', logy=True, ax=ax,
                                      color='w')
ax.set_facecolor('w')
ax.set_ylabel('Normalized biosynthesis \n demand' + r' ($mmol \cdot g_{protein}^{-1}$)', fontsize=18)
ax.set_title('Range of biosynthesis demands', fontsize=18)
ax.set_xticklabels(ax.get_xticklabels(),
                   fontdict={'fontsize': 18})
# Per-nutrient-source standard deviation of normalized demand, split by
# aerobicity. Produces {column label -> {metabolite -> std}} for panel B.
aer = [i for i in condition_df.index if 'anaerob' not in i]
anaer = [i for i in condition_df.index if 'anaerob' in i]
all_values ={}
for kind, lis in zip(['aerobic', 'anaerobic'], [aer, anaer]):
    # BUG FIX: both iterations previously sliced condition_df with `aer`, so the
    # "anaerobic" series silently re-used the aerobic conditions; use the loop's
    # own condition list `lis` instead.
    q = condition_df.loc[lis] / condition_df.loc[lis].T.max(axis=1)
    sources = set([i.split('_')[0] for i in q.index])
    sorted_all = q.T.std(axis=1).sort_values().index
    for s in sources:
        source_rxns = [i for i in q.index if i.startswith(s)]
        # (also fixed the ' anearobic' typo in the column label)
        key = source_to_name[s] if kind == 'aerobic' else source_to_name[s]+' anaerobic'
        all_values[key] = dict(zip(sorted_all, list(q.loc[source_rxns, sorted_all].T.std(axis=1).values)))
# Figure 3 panel B: stacked bars of per-source standard deviation per metabolite.
ax = fig.add_subplot(gs[1,:-1])
all_val_df = pd.DataFrame(all_values)
# Order metabolites by total std across sources; 'sum' is only a sort key.
all_val_df['sum'] = all_val_df.sum(axis=1)
all_val_df = all_val_df.sort_values('sum')
all_val_df[sorted(all_val_df.columns)].drop(['sum'], axis=1).plot.bar(
    stacked=True, colors=palettable.tableau.Tableau_20.hex_colors, ax=ax)
ax.set_facecolor('w')
ax.legend(facecolor='w', ncol=2, fontsize=13)#, title='Nutrient Sources', title_fontsize=12)
ax.set_ylabel('Standard deviation', fontsize=18)
ax.set_title('Standard deviation by nutrient source and aerobicity', fontsize=18)
ax.set_xticklabels(ax.get_xticklabels(),
                   fontdict={'fontsize': 18})
fig.subplots_adjust(hspace=1)
# Panel letters for the two rows.
fig.text(.03, .95, 'A', size=35)
fig.text(.03, .45, 'B', size=35)
# Build the "core biomass" table: the minimum demand of each metabolite across
# all conditions (i.e. demand present in every condition), formatted for display.
for i in plot_df.index:
    plot_df.loc[i, 'aerobicity'] = 'anaerobic' if 'anaerobic' in i else 'aerobic'
plot_df = plot_df.drop('atp', axis=1, errors='ignore')
core_biomass = pd.DataFrame(plot_df.drop('aerobicity', axis=1).min())
core_biomass = core_biomass[core_biomass>0].dropna().sort_values(0, ascending=False)
core_biomass.index.name = 'Metabolite'
core_biomass.columns = ['Demand']
# Fixed precision for readable values, scientific notation for tiny ones.
core_biomass = core_biomass.applymap(lambda x: '%.3f' % x if x>1e-3 else '{:.1e}'.format(float(x)))
core_biomass = core_biomass.reset_index()
# Fold the single list into a 2-column (name, demand) x 2 table layout:
# first 20 rows on the left, the remainder on the right.
# NOTE(review): the right half assumes at most 10 overflow rows — confirm.
core_out = pd.DataFrame(index=list(range(20)), columns=list(range(4)))
core_out.iloc[:20, 0] = core_biomass.iloc[:20, 0]
core_out.iloc[:20, 1] = core_biomass.iloc[:20, 1]
core_out.iloc[:10, 2] = core_biomass.iloc[20:, 0].values
core_out.iloc[:10, 3] = core_biomass.iloc[20:, 1].values
# Duplicate 'Demand' headers are intentional — this frame is only rendered as a table.
core_out.columns = ['AA', 'Demand', 'Cofactor', 'Demand']
skip_c = True
if not skip_c:
    # Optional panel C: render the core biomass table inside the figure.
    ax = fig.add_subplot(gs[:, -1])
    ax.axis('off')
    ax.axis('tight')
    t =ax.table(cellText=core_out.fillna('').values, colLabels=core_out.columns, loc='lower center')
    t.auto_set_font_size(False)
    t.set_fontsize(13)
    t.scale(1.3,1.4)
    fig.text(.72, .95, 'C', size=35)
    fig.text(.835, .85, 'Core biomass function', size=20, ha='center')
fig.savefig('%s/figure_3.png' % out_loc)
fig.savefig('%s/Fig3.svg' % out_loc)
# + [markdown] deletable=true editable=true
# ## Cluster aerobic growth conditions (Figure 5)
# + deletable=true editable=true
# Flag (metabolite, condition) pairs whose demand z-score is beyond +/-3.
p_cutoff = 1e-5              # significance threshold used by later cluster tests
small_cluster_cutoff= 3      # clusters at or below this size are skipped in plots
to_skip = ['bmocogdp', 'adocbl', '2dmmq8', 'mqn8', 'sheme']
# Keep aerobic conditions only, and drop atp plus the skip-listed cofactors.
filt_df = condition_df.loc[[i for i in condition_df.index if 'anaerobic' not in i]]
filt_df = filt_df[[i for i in filt_df if i != 'atp' and i not in to_skip]]
filt_df = filt_df.astype(float)
z_df = (filt_df - filt_df.mean()) / filt_df.std()
outliers = pd.DataFrame()
for i in z_df.columns:
    if '__L' in i:
        # Amino acid columns are excluded from the outlier scan.
        continue
    up = z_df[i][z_df[i] > 3]
    down = z_df[i][z_df[i] < -3]
    #print(i, up, down)
    for cond, val in up.items():
        outliers.loc[i, cond] = val
    for cond, val in down.items():
        outliers.loc[i, cond] = val
# + deletable=true editable=true
# Log2 fold change from the mean for each outlier pair, shown as a heatmap.
fc_df =filt_df.applymap(np.log2) - filt_df.applymap(np.log2).mean()
fc_df = fc_df.T.loc[outliers.index, outliers.columns][~outliers.isna()]
# Prefix each condition with its nutrient-source letter: 'C_xyz' -> '(C) xyz'.
fc_df = fc_df.rename(lambda x: '(%s) ' % x[0] + x[2:], axis=1)
# Cluster once (Ward) to get a sensible ordering, then re-draw the reordered
# matrix as a plain heatmap with cell borders.
z = sns.clustermap(fc_df.fillna(0), cmap='coolwarm', center=0, figsize=(20,5), method='ward')
fig, ax = plt.subplots(figsize=(20, 4))
g = sns.heatmap(z.data2d[z.data2d !=0], cmap='coolwarm', center=0, ax=ax, linecolor='k',
                linewidths=.5)
ax.set_facecolor('w')
g.collections[0].colorbar.set_label('Log$_2$ fold change from average', rotation=270,
                                    labelpad=20,fontsize=20)
ax.set_title('Biomass biosynthetic demand outliers', fontsize=20)
fig.savefig('%s/outliers.svg' % out_loc)
# + deletable=true editable=true
# Conditions where THF demand is an outlier.
thf_outlier = outliers.loc['thf'].dropna().index
# + deletable=true editable=true
# Conditions where pyridoxal-5'-phosphate demand is an outlier, plus the ratio
# of mean pydx5p demand in outlier vs non-outlier conditions.
pydx5_outlier = outliers.loc['pydx5p'].dropna().index
non_pydx5_outlier = [i for i in condition_df.index if i not in pydx5_outlier]
for i in pydx5_outlier:
    print(i, ijo.metabolites.get_by_id(i[2:]).name)
print(condition_df.loc[pydx5_outlier, 'pydx5p'].mean() / condition_df.loc[
    non_pydx5_outlier, 'pydx5p'].mean())
# + deletable=true editable=true
# Collect fluxes of reactions using pydx5p-modified enzymes in the outlier
# conditions, to see which reactions drive the elevated demand.
pydx_df = pd.DataFrame()
for i in me.reactions.query('mod_pydx5p'):
    # Strip the directionality suffix so FWD/REV variants share one row.
    r_name = i.id.split('_FWD')[0].split('_REV')[0]
    if not isinstance(i, cobrame.MetabolicReaction):
        continue
    for j in pydx5_outlier:#condition_df.index:
        pydx_df.loc[r_name, j] = df.loc[i.id, j]
# Drop all-zero rows; 'sum' is actually the mean flux across outlier conditions.
pydx_df = pydx_df[pydx_df != 0].dropna(how='all')
pydx_df['sum'] = pydx_df[pydx5_outlier].mean(axis=1, skipna=True)
for i in pydx_df.index:
    pydx_df.loc[i, 'name'] = ijo.reactions.get_by_id(i).name
#(pydx_df[pydx5_outlier].mean(axis=1, skipna=True) / pydx_df[non_pydx5_outlier].mean(axis=1, skipna=True)).sort_values()#
# + deletable=true editable=true
# Normalize demands to [0, 1] per metabolite, drop outlier conditions, then
# Ward-cluster the conditions and test each cluster's metabolite demands
# against the rest (rank-sum), recording per-cluster log2 fold changes.
norm_df = filt_df / filt_df.max()
norm_df = norm_df.drop(outliers.columns)
no_aas = False
if no_aas:
    # Alternative run with amino acid columns removed (different cluster count).
    clus_num=8
    aas = [i[:-2] for i in amino_acids.values()]
    norm_df = norm_df[[i for i in norm_df.columns if i not in aas]]
    norm_df.to_csv("%s/normalized_condition_no_aas_df.csv" % out_loc)
else:
    clus_num = 6
    norm_df.to_csv("%s/normalized_condition_df.csv" % out_loc)
#z_df = pd.DataFrame(z_df, index=filt_df.index, columns=filt_df.columns)
summary=pd.DataFrame()
# Loop retained from a cluster-count sweep; currently runs for a single value.
for n_clus in [clus_num]:
    fit = AgglomerativeClustering(n_clusters=int(n_clus), linkage='ward',
                                  affinity='euclidean').fit(norm_df)
    out_df = pd.DataFrame([norm_df.index, fit.labels_], index=['met', 'cluster']).T
    drop_indexes = []  # NOTE(review): unused
    p_df = pd.DataFrame()
    new_df = pd.DataFrame()
    sizes = []
    for clust, temp_df in out_df.groupby('cluster'):
        mets = [i for i in temp_df.met]
        non_clust_mets = [i for i in out_df.met if i not in mets]
        sizes.append(len(mets))
        new_df.loc[clust, 'mets'] = ', '.join(mets)
        for cofactor in norm_df.columns:
            clust_values = norm_df.loc[mets, cofactor].values
            non_clust_values = norm_df.loc[non_clust_mets, cofactor].values
            #p_df.loc[clust, cofactor] = ks_2samp(clust_values, non_clust_values)[1]
            # Wilcoxon rank-sum p-value: in-cluster vs out-of-cluster demands.
            p_df.loc[clust, cofactor] = ranksums(clust_values, non_clust_values)[1]
            #p_df.loc[clust, cofactor] = ttest_1samp(clust_values, norm_df.loc['C_glc__D_e', cofactor])[1]
            new_df.loc[clust, cofactor] = np.log2(clust_values.mean() / non_clust_values.mean())# -1 #non_clust_values.mean()
    # Binarize: 1 = significant AND |log2 FC| >= 0.15, else 0.
    binary_p_df = p_df.copy()
    binary_p_df[binary_p_df > p_cutoff] = 0
    binary_p_df[(binary_p_df <= p_cutoff) & ( binary_p_df >0)] = 1
    clust_cols = [i for i in new_df.columns if i!='mets']
    binary_p_df[(new_df[clust_cols]>-.15) & (new_df[clust_cols] <.15)] = 0
    summary.loc[n_clus, 'num_sig'] = binary_p_df.sum(axis=1).sum()
    summary.loc[n_clus, 'num_clus'] = len(p_df.index)
    summary.loc[n_clus, 'avg_num_sig'] = binary_p_df.sum(axis=1).mean()
    summary.loc[n_clus, 'avg_size'] = np.array(sizes).mean()
    summary.loc[n_clus, 'num_sig_clus'] = (binary_p_df.sum(axis=1) > 0).sum()
# Clusters at or below the size cutoff are excluded from downstream plots.
clus_to_skip = []
for i in new_df.index:
    if len(new_df.loc[i, 'mets'].split(', ')) <= small_cluster_cutoff:
        clus_to_skip.append(i)
print(len(new_df.index))
summary
# + deletable=true editable=true
# cluster 2 vs 5 differences (0-based cluster labels 1 and 4)
# Normalize fluxes per unit protein, then rank reactions by the log2 ratio of
# their mean flux between the two clusters' condition sets.
t_df = (df / df.loc['protein_biomass_to_biomass'])
q = (t_df[new_df.loc[1, 'mets'].split(', ')].mean(axis=1).map(np.log2) -
     t_df[new_df.loc[4, 'mets'].split(', ')].mean(axis=1).map(np.log2)).dropna().sort_values()
# Discard +/-inf ratios arising from zero fluxes on one side.
q = q[q.abs() != np.inf]
q.loc[[i for i in q.index if not to_drop(i)]].sort_values(ascending=False).head(10)
# + deletable=true editable=true
# Mean biosynthesis demand profile per cluster, exported as supplementary data.
biomass_compositions = pd.DataFrame()
for cluster_num in new_df.index:
    mets = new_df.loc[cluster_num, 'mets'].split(', ')
    column_name = 'Cluster ' + str(cluster_num + 1)  # 1-based label for readers
    biomass_compositions[column_name] = condition_df.drop(to_skip,axis=1).loc[mets].mean()
    biomass_compositions.loc['Conditions in cluster', column_name] = new_df.loc[cluster_num, 'mets']
biomass_compositions.T.to_excel('%s/S2_data.xlsx' % out_loc)
# + deletable=true editable=true
# For each cluster, summarize its significant metabolites as a readable string
# "met (log2FC), met (log2FC), ..." in a new 'changes' column.
for i in new_df.index:
    changes = binary_p_df.loc[i][binary_p_df.loc[i] > 0].index
    add_str = ''
    for change in changes:
        add_str += ', %s (%.2f)' % (change, new_df.loc[i, change])
    new_df.loc[i, 'changes'] = add_str.strip(', ')
save_df = new_df[['mets', 'changes']]
save_df.index.name = 'Cluster'
# Shift to 1-based cluster numbering for the exported table.
save_df.index = save_df.index.map(lambda x: x+1)
save_df['n_mets'] = save_df.mets.map(lambda x: len(x.split(', ')))
def convert_for_table(x):
    """Expand a comma-separated 'S_metid' string into '(S) Metabolite Name, ...'.

    Each entry looks like '<source letter>_<metabolite id>'; the id is resolved
    to its human-readable name via the iJO model. The result intentionally
    keeps a trailing ', ' separator, matching the historical CSV output.
    """
    pieces = ['(%s) %s, ' % (entry[0], ijo.metabolites.get_by_id(entry[2:]).name)
              for entry in x.split(', ')]
    return ''.join(pieces)
# Replace raw metabolite ids with readable '(source) name' strings and export.
save_df.mets = save_df.mets.map(convert_for_table)
save_df.to_csv('%s/cluster_df.csv' % out_loc)
# + deletable=true editable=true
save_df
# + deletable=true editable=true
# One horizontal bar panel per (non-small) cluster showing the log2 fold change
# of each significantly different metabolite vs the non-cluster conditions.
n_big_clus = len(new_df.index) - len(clus_to_skip)
fig, axes = plt.subplots( 1,n_big_clus, figsize=(10, 4), sharey=True)
axes = axes.flatten()
count=0
# Union of every metabolite flagged as significantly changed in any cluster;
# fixes the shared y-axis across panels.
all_changed = []
for q in new_df.index:
    all_changed.extend([i.split(' (')[0] for i in new_df.loc[q, 'changes'].split(', ')] )
all_changed = [i for i in all_changed if i != '']
plot_df = pd.DataFrame(index=set(all_changed))
for i in new_df.index:
    if i in clus_to_skip:
        continue
    cluster = i
    clusts = new_df.loc[cluster, 'mets'].split(', ')
    changed = [i.split(' (')[0] for i in new_df.loc[cluster, 'changes'].split(', ')]
    log_df = norm_df.copy()
    plot_df.iloc[:, :] = 0
    if changed ==['']:
        # Nothing significant for this cluster; leave its axis empty.
        print(clusts)
        count+=1
        continue
    nonclus_mets = [i for i in log_df.index if i not in clusts]
    # BUG FIX: removed a dead assignment to plot_df['mean_clus'] here that
    # referenced `non_clust_mets` (a stale variable from an earlier cell) and
    # was unconditionally overwritten two lines below anyway.
    temp_values = log_df.loc[clusts, changed] / log_df.loc[nonclus_mets, changed].mean() #-1
    plot_df['mean_clus'] = temp_values.applymap(np.log2).mean()
    plot_df['std_clus'] = temp_values.applymap(np.log2).std()
    plot_df['mean_nonclus'] = log_df.loc[nonclus_mets, changed].mean()#log_df.loc['C_glc__D_e', changed]
    plot_df['std_nonclus'] = log_df.loc[nonclus_mets, changed].std()
    #plot_df.plot(kind='barh', y=['mean_clus', 'mean_nonclus'],
    #             xerr=[plot_df['std_clus'], plot_df['std_nonclus']], ax=axes[count])
    plot_df.plot(kind='barh', y=['mean_clus'], xerr=plot_df['std_clus'], ax=axes[count])
    axes[count].yaxis.set_tick_params(labelsize=12)
    axes[count].legend_ = None
    axes[count].set_title('Cluster %i' % (cluster+1), fontsize=14)
    # Zero-change reference line.
    axes[count].plot([0,0], [-1, len(all_changed)], 'k--')
    #axes[count].set_xlim([0, 1])
    count+=1
#axes[count-1].legend(['In Cluster', 'Not In Cluster'], fontsize=10, ncol=1, bbox_to_anchor=(1,.5), loc='center left',
#                     title='Growth Conditions')
#axes[count-1].s
fig.tight_layout()
fig.text(.43, -.05, 'Log$_2$ fold change from average', ha='center', fontsize=15)
fig.savefig('%s/cluster_bar_graph.svg' % out_loc)
# + deletable=true editable=true
# Binary heatmap of conditionally essential cofactors: columns that are zero in
# at least one condition; 1 = required (non-zero demand), 0 = not required.
binary_columns = condition_df[condition_df==0].dropna(axis=1, how='all').columns
binary_heat_df = condition_df[binary_columns].copy()
binary_heat_df[binary_heat_df>0] = 1
binary_heat_df.columns = [met_to_name[i.replace('_c', '')]
                          for i in binary_heat_df.columns]
g = sns.clustermap(binary_heat_df, row_colors=color_df, yticklabels=False,
                   cmap=palettable.cartocolors.diverging.Geyser_3.get_mpl_colormap(),
                   method='ward')
g.ax_col_dendrogram.set_visible(False)
g.ax_row_dendrogram.set_visible(False)
# Re-purpose the colorbar axis as a two-entry text legend.
g.cax.clear()
g.cax.axis('off')
g.cax.text(18, -1, 'Cofactor required', fontdict={'fontsize': 18})
g.cax.text(7, -1, 'Cofactor not required', fontdict={'fontsize': 18})
q = plt.setp(g.ax_heatmap.get_xticklabels(), rotation=45, ha='right')
q = plt.setp(g.ax_row_colors.get_xticklabels(), rotation=45, ha='right')
plt.savefig('%s/conditionally_essential_cofactors.png' % out_loc)
# + deletable=true editable=true
# Summary bar charts of the binary significance matrix:
# top: per metabolite, how many clusters differ significantly;
# bottom: per cluster, how many metabolites differ significantly.
fig, axes = plt.subplots(2, 1, figsize=(10, 8))
binary_p_df.sum().sort_values().plot(kind='bar', ax=axes[0])
# BUG FIX: corrected 'signficant' -> 'significant' in the displayed title.
axes[0].set_title('Number of clusters with significant differences in metabolite')
binary_p_df.rename(lambda x: 'Cluster ' + str(x+1)).T.sum().sort_values().plot(kind='bar', ax=axes[1])
axes[1].set_title('Number of significant metabolite differences per cluster')
fig.tight_layout()
| me_biomass/part_1v2.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 2
# language: python
# name: python2
# ---
# You can press *shift + enter* to quickly advance through each line of a notebook. Try it!
# Check that you have a recent version of TensorFlow installed, v1.3 or higher.
import tensorflow as tf
# Print the installed version so the workshop user can verify their setup.
print("You have version %s" % tf.__version__)
# Check if Matplotlib is working. After running this cell, you should see a plot appear below.
# +
# %matplotlib inline
import pylab
import numpy as np
# create some data using numpy. y = x * 0.1 + 0.3 + noise
x = np.random.rand(100).astype(np.float32)
noise = np.random.normal(scale=0.01, size=len(x))
y = x * 0.1 + 0.3 + noise
# plot it
# A roughly diagonal scatter confirms Matplotlib rendering works.
pylab.plot(x, y, '.')
# -
# Check if Numpy and Pillow are working. After running this cell, you should see a random image appear below.
# +
import PIL.Image as Image
import numpy as np
from matplotlib.pyplot import imshow
# Random RGB noise (values 0-255) round-tripped through Pillow, then displayed.
image_array = np.random.rand(200,200,3) * 255
img = Image.fromarray(image_array.astype('uint8')).convert('RGBA')
imshow(np.asarray(img))
# -
# Check if Pandas is working. After running this cell, you should see a table appear below.
import pandas as pd
# Pair each name with its birth count and render the result as a table;
# seeing the DataFrame below confirms pandas is installed correctly.
names = ['Bob', 'Jessica', 'Mary', 'John', 'Mel']
births = [968, 155, 77, 578, 973]
BabyDataSet = [pair for pair in zip(names, births)]
pd.DataFrame(BabyDataSet, columns=['Names', 'Births'])
# That's it! You're ready to start the workshop.
| notebooks/00_test_install.ipynb |
% ---
% jupyter:
% jupytext:
% text_representation:
% extension: .m
% format_name: light
% format_version: '1.5'
% jupytext_version: 1.14.4
% kernelspec:
% display_name: Octave
% language: octave
% name: octave
% ---
graphics_toolkit("gnuplot"); % use if "plot" does not work
% ## Finite element method (FEM) in 1d
% Implement the material input in the form of a function, e.g. k=@(x)1+x.^2; <br>
% for constant functions, the solution can again be compared to the analytical one.
%
% + magic_args="boundary value problem:"
% -k*u''(x)+k0*u(x)=g
% u(0)=U
% -k*u'(L)=T
k=@(x)+x.*0+1;   % material coefficient k(x); constant 1 here, any vectorized fn works
k0=1;            % reaction coefficient
L=10;            % domain length
f=1; Uhat=0; g=f+k0*Uhat;   % right-hand side g = f + k0*Uhat
U=2;             % Dirichlet value at x=0
T=-2;            % Neumann flux value at x=L
%% network
N=10;                        % number of elements
POINTS=linspace(0,L,N+1)';   % node coordinates
n_POINTS=size(POINTS,1);
ELEMENTS=[(1:n_POINTS-1)' (2:n_POINTS)'];   % element-to-node connectivity
n_ELEMENTS=size(ELEMENTS,1);
DBOUNDARY=1;                 % Dirichlet node index (left end)
DVALUE=U;
FREENODE=true(n_POINTS,1); FREENODE(DBOUNDARY)=false;   % mask of unknowns
NBOUNDARY=n_POINTS;          % Neumann node index (right end)
NVALUE=T;
LENGTHS=POINTS(ELEMENTS(:,2))-POINTS(ELEMENTS(:,1));    % element lengths h
CENTERS=sum(POINTS(ELEMENTS),2)/2;                      % element midpoints
MATERIALS=k(CENTERS);        % k evaluated per element (midpoint rule)
F=g*ones(n_ELEMENTS,1);      % element-wise load
MATERIALS0=k0*ones(n_ELEMENTS,1);
%% local matrices and rhs
A_local=[1 -1; -1 1];        % P1 reference stiffness (scaled by k/h)
M_local=[1/3 1/6; 1/6 1/3];  % P1 reference mass matrix (scaled by k0*h)
b_local=[1/2; 1/2];          % P1 reference load vector (scaled by F*h)
%% global matrix and rhs
A=zeros(n_POINTS);
M=zeros(n_POINTS);
b=zeros(n_POINTS,1);
for i=1:n_ELEMENTS
  % scatter-add each element contribution into the global system
  A(ELEMENTS(i,:),ELEMENTS(i,:))=A(ELEMENTS(i,:),ELEMENTS(i,:))+A_local*MATERIALS(i)/LENGTHS(i);
  M(ELEMENTS(i,:),ELEMENTS(i,:))=M(ELEMENTS(i,:),ELEMENTS(i,:))+M_local*MATERIALS0(i)*LENGTHS(i);
  b(ELEMENTS(i,:))=b(ELEMENTS(i,:))+b_local*F(i)*LENGTHS(i);
end
AM=A+M;
%% boundary value conditions and solution
u=zeros(n_POINTS,1);
u(~FREENODE)=DVALUE;                  % impose Dirichlet value
b=b-AM*u;                             % move known Dirichlet part to the RHS
b(NBOUNDARY)=b(NBOUNDARY)-NVALUE;     % natural (Neumann) boundary term
u(FREENODE)=AM(FREENODE,FREENODE)\b(FREENODE);   % solve the reduced system
figure;
plot(POINTS,u); hold on
%% analytical solution
% NOTE(review): from here on `k` and `u` are REDEFINED (scalar k, and u becomes
% an analytic function handle), clobbering the FEM solution vector; any later
% cell using `u` sees the analytic function, not the nodal values — confirm.
k=1;
K=sqrt(k0/k);
C1=(T/k+(U-g/k0)*K*exp(K*L))/K/(exp(-K*L)+exp(K*L));
C2=U-C1-g/k0;
u=@(x)C1*exp(-K*x)+C2*exp(K*x)+g/k0;
x=linspace(0,L,100); % points of visualization
hold on; plot(x,u(x)) % plot of the analytical solution
legend('FEM','analytical');
% -
% ## Tasks:
% 1) Modify the previous code and solve Dirichlet boundary problem (i.e. both boundary conditions od Dirichlet type).<br>
% 2) Plot the approximation of the flow function $-k(x)\cdot u'(x)$ using finite differences.
% + magic_args="flow function"
% Element-wise flux approximation t = -k * du/dx via finite differences.
% NOTE(review): `u` was redefined above as the ANALYTIC function handle, so
% u(ELEMENTS(:,2)) evaluates it at node *indices* rather than using the FEM
% nodal values — this likely should use the FEM solution vector; confirm.
t=-(u(ELEMENTS(:,2))-u(ELEMENTS(:,1))).*MATERIALS./LENGTHS;
plot(POINTS(ELEMENTS(:,1))+LENGTHS/2,t)
grid on
% -
| solutions/Exercise11_eng_FEM_1d_reaction.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# language: python
# name: python3
# ---
# <a href='https://ai.meng.duke.edu'> = <img align="left" style="padding-top:10px;" src=https://storage.googleapis.com/aipi_datasets/Duke-AIPI-Logo.png>
# # Text Classification using Word Counts / TFIDF
# In this notebook we will be performing text classification by using word counts and frequency to create numerical feature vectors representing each text document and then using these features to train a simple classifier. Although simple, we will see that this approach can work very well for classifying text, even compared to more modern document embedding approaches. Our goal will be to classify the articles in the AgNews dataset into their correct category: "World", "Sports", "Business", or "Sci/Tec".
#
# **Notes:**
# - This does not need to be run on GPU, but will take ~5 minutes to run
#
#
# +
import os
import numpy as np
import pandas as pd
import string
import time
from sklearn.feature_extraction.text import TfidfVectorizer, CountVectorizer
from tqdm import tqdm
from sklearn.linear_model import LogisticRegression
import urllib.request
import zipfile
import spacy
from spacy.lang.en.stop_words import STOP_WORDS
from spacy.lang.en import English
# #!python -m spacy download en_core_web_md
nlp = spacy.load('en_core_web_sm')
import nltk
from nltk.stem import WordNetLemmatizer
nltk.download('omw-1.4')
import warnings
warnings.filterwarnings('ignore')
# -
# ## Download and prepare data
# +
# Download the data
# Fetch and unpack the AgNews zip once; subsequent runs reuse the local copy.
if not os.path.exists('../data'):
    os.mkdir('../data')
if not os.path.exists('../data/agnews'):
    url = 'https://storage.googleapis.com/aipi540-datasets/agnews.zip'
    urllib.request.urlretrieve(url,filename='../data/agnews.zip')
    zip_ref = zipfile.ZipFile('../data/agnews.zip', 'r')
    zip_ref.extractall('../data/agnews')
    zip_ref.close()
train_df = pd.read_csv('../data/agnews/train.csv')
test_df = pd.read_csv('../data/agnews/test.csv')
# Combine title and description of article to use as input documents for model
train_df['full_text'] = train_df.apply(lambda x: ' '.join([x['Title'],x['Description']]),axis=1)
test_df['full_text'] = test_df.apply(lambda x: ' '.join([x['Title'],x['Description']]),axis=1)
# Create dictionary to store mapping of labels
ag_news_label = {1: "World",
                 2: "Sports",
                 3: "Business",
                 4: "Sci/Tec"}
train_df.head()
# -
# View a couple of the documents
for i in range(5):
    print(train_df.iloc[i]['full_text'])
    print()
# ## Pre-process text
# Before we create our features, we first need to pre-process our text. There are several methods to pre-process text; in this example we will perform the following operations on our raw text to prepare it for creating features:
# - Tokenize our raw text to break it into a list of substrings. This step primarily splits our text on white space and punctuation. As an example from the [NLTK](https://www.nltk.org/api/nltk.tokenize.html) website:
#
# ```
# >>> s = "Good muffins cost $3.88\nin New York. Please buy me two of them.\n\nThanks."
# >>> word_tokenize(s)
# ['Good', 'muffins', 'cost', '$', '3.88', 'in', 'New', 'York', '.',
# 'Please', 'buy', 'me', 'two', 'of', 'them', '.', 'Thanks', '.']
# ```
#
# - Remove punctuation and stopwords. Stopwords are extremely commonly used words (e.g. "a", "and", "are", "be", "from" ...) that do not provide any useful information to us to assist in modeling the text.
#
# - Lemmatize the words in each document. Lemmatization uses a morphological analysis of words to remove inflectional endings and return the base or dictionary form of words, called the "lemma". Among other things, this helps by replacing plurals with singular form e.g. "dogs" becomes "dog" and "geese" becomes "goose". This is particularly important when we are using word counts or freqency because we want to count the occurences of "dog" and "dogs" as the same word.
#
# There are several libraries available in Python to process text. Below we have shown how to perform the above operations using two of the most popular: [NLTK](https://www.nltk.org) and [Spacy](https://spacy.io).
def tokenize(sentence, method='spacy'):
    """Tokenize and lemmatize ``sentence``, removing stopwords and punctuation.

    Parameters
    ----------
    sentence : str
        Raw document text.
    method : str, default 'spacy'
        'nltk' uses NLTK word tokenization + WordNet lemmatization; any other
        value uses the module-level spaCy pipeline ``nlp``.

    Returns
    -------
    str
        Processed tokens re-joined into a single space-separated string.
    """
    punctuations = string.punctuation
    # PERF: membership tests against a set are O(1); the original plain list
    # made every stopword lookup a linear scan. Results are identical since
    # lookups use exact equality either way.
    stopwords = set(STOP_WORDS)
    if method == 'nltk':
        # Tokenize
        tokens = nltk.word_tokenize(sentence, preserve_line=True)
        # Remove stopwords and punctuation. (Note: `word not in punctuations`
        # is a substring test on string.punctuation — original behavior kept.)
        tokens = [word for word in tokens if word not in stopwords and word not in punctuations]
        # Lemmatize
        wordnet_lemmatizer = WordNetLemmatizer()
        tokens = [wordnet_lemmatizer.lemmatize(word) for word in tokens]
        tokens = " ".join([i for i in tokens])
    else:
        # Tokenize with only the tokenizer/lemmatizer pipes enabled for speed.
        with nlp.select_pipes(enable=['tokenizer', 'lemmatizer']):
            tokens = nlp(sentence)
        # Lemmatize
        tokens = [word.lemma_.lower().strip() for word in tokens]
        # Remove stopwords and punctuation
        tokens = [word for word in tokens if word not in stopwords and word not in punctuations]
        tokens = " ".join([i for i in tokens])
    return tokens
# +
# Process the training set text
# progress_apply = apply with a tqdm progress bar; the NLTK branch of tokenize
# is selected here.
tqdm.pandas()
train_df['processed_text'] = train_df['full_text'].progress_apply(lambda x: tokenize(x,method='nltk'))
# Process the test set text
tqdm.pandas()
test_df['processed_text'] = test_df['full_text'].progress_apply(lambda x: tokenize(x,method='nltk'))
# -
# ## Create features using word counts
# Now that our raw text is pre-processed, we are ready to create our features. There are two approaches to creating features using word counts: **Count Vectorization** and **TFIDF Vectorization**.
#
# **Count Vectorization** (also called Bag-of-words) creates a vocabulary of all words appearing in the training corpus, and then for each document it counts up how many times each word in the vocabulary appears in the document. Each document is then represented by a vector with the same length as the vocabulary. At each index position an integer indicates how many times each word appears in the document.
#
# **Term Frequency Inverse Document Frequency (TFIDF) Vectorization** first counts the number of times each word appears in a document (similar to Count Vectorization) but then divides by the total number of words in the document to calculate the *term frequency (TF)* of each word. The *inverse document frequency (IDF)* for each word is then calculated as the log of the total number of documents divided by the number of documents containing the word. The TFIDF for each word is then computed by multiplying the term frequency by the inverse document frequency. Each document is represented by a vector containing the TFIDF for every word in the vocabulary, for that document.
#
# In the below `build_features()` function, you can specify whether to create document features using Count Vectorization or TFIDF Vectorization.
def build_features(train_data, test_data, ngram_range, method='count'):
    """Vectorize pre-processed documents into count or TFIDF feature matrices.

    BUG FIX: the original ignored its ``train_data``/``test_data`` arguments
    and read the globals ``train_df``/``test_df`` instead; the vectorizer now
    uses the series actually passed in (callers already pass processed text).

    Parameters
    ----------
    train_data, test_data : iterable of str
        Pre-processed documents. The vocabulary is fit on ``train_data`` only;
        ``test_data`` is transformed with that fitted vocabulary.
    ngram_range : tuple of (int, int)
        Inclusive (min_n, max_n) n-gram sizes, e.g. (1, 2) for uni+bi-grams.
    method : str, default 'count'
        'tfidf' selects TfidfVectorizer; anything else selects CountVectorizer.

    Returns
    -------
    (X_train, X_test)
        Sparse document-term matrices for the train and test sets.
    """
    if method == 'tfidf':
        # Create features using TFIDF
        vec = TfidfVectorizer(ngram_range=ngram_range)
    else:
        # Create features using word counts
        vec = CountVectorizer(ngram_range=ngram_range)
    X_train = vec.fit_transform(train_data)
    X_test = vec.transform(test_data)
    return X_train, X_test
# Create features
# TFIDF over unigrams and bigrams of the pre-processed text.
method = 'tfidf'
ngram_range = (1, 2)
X_train,X_test = build_features(train_df['processed_text'],test_df['processed_text'],ngram_range,method)
# ## Train model
# Now that we have created our features representing each document, we will use them in a simple softmax regression classification model to predict the document's class. We first train the classification model on the training set.
# Train a classification model using logistic regression classifier
# 'saga' handles the large sparse feature matrix and multinomial targets.
# NOTE(review): default max_iter may emit a convergence warning on this data.
y_train = train_df['Class Index']
logreg_model = LogisticRegression(solver='saga')
logreg_model.fit(X_train,y_train)
# Training-set accuracy (optimistic; see the test-set cell for the real score).
preds = logreg_model.predict(X_train)
acc = sum(preds==y_train)/len(y_train)
print('Accuracy on the training set is {:.3f}'.format(acc))
# ## Evaluate model
# We then evaluate our model on the test set. As you can see, the model performs very well on this task, using this simple approach! In general, Count Vectorization / TFIDF Vectorization performs surprising well across a broad range of tasks, even compared to more computationally intensive approaches such as document embeddings. This should perhaps not be surprising, since we would expect documents about similar topics to contain similar sets of words.
# Evaluate accuracy on the test set
# X_test was produced with the vectorizer fitted on the training set only.
y_test = test_df['Class Index']
test_preds = logreg_model.predict(X_test)
test_acc = sum(test_preds==y_test)/len(y_test)
print('Accuracy on the test set is {:.3f}'.format(test_acc))
| 3_nlp/classification/text_classification_tfidf.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# language: python
# name: python3
# ---
# + [markdown] pycharm={"name": "#%% md\n"}
# Copyright Amazon.com, Inc. or its affiliates. All Rights Reserved.
# SPDX-License-Identifier: Apache-2.0
# -
# ( Run <code>jupyter notebook</code> under the project directory )
#
# # XGBoost for Iris Dataset
#
# We use this example to demonstrate how to use ppxgboost for encrypting an xgboost model for multi-class
# prediction. We directly use the iris data from Sklearn, but one
# can go to https://archive.ics.uci.edu/ml/datasets/iris to download the original dataset.
#
# + pycharm={"name": "#%%\n"}
import sys
sys.path.append('../third-party')
import pandas as pd
import numpy as np
import xgboost as xgb
from secrets import token_bytes
from sklearn.datasets import load_iris
from sklearn.model_selection import train_test_split
from ppxgboost import BoosterParser as boostparser
from ppxgboost import PPBooster as ppbooster
from ppxgboost.PPBooster import MetaData
from ppxgboost.PPKey import PPBoostKey
from ope.pyope.ope import OPE
from ppxgboost import PaillierAPI as paillier
# + pycharm={"name": "#%%\n"}
# Load the iris dataset (150 samples, 4 features, 3 classes) from sklearn.
iris = load_iris()
X = iris.data
y = iris.target
# Pre-assign the column name first.
# the default feature name from the xgboost -- iris have 4 columns
feature_names = ['f0', 'f1', 'f2', 'f3']
X = pd.DataFrame(X, columns=feature_names)
# -
X.head()
y
# + pycharm={"name": "#%%\n"}
# splitting the data into train and test
# NOTE(review): no random_state is set, so the split differs between runs.
X_train, X_test, y_train, y_test = train_test_split(X, y, test_size=0.2)
test_input_vector = pd.DataFrame(X_test, columns=feature_names)
# + pycharm={"name": "#%%\n"}
# first dump and pickled the model in the file directory.
# total number of tree = total_estimators * number_labels
# e.g. for the imported iris dataset, the number of classes is 3.
# Just provide estimator number for testing purposes.
total_estimaters = 6
model = xgb.XGBClassifier(n_estimators=total_estimaters, objective='multi:softmax')
# NOTE(review): the model is fit on the FULL dataset (X, y), not X_train; the
# notebook only compares encrypted vs plaintext predictions on X_test, so the
# test set is not a held-out evaluation set — confirm this is intentional.
model.fit(X, y)
# + pycharm={"name": "#%%\n"}
# Get the number of classes -- i.e. 3 from iris dataset
# The classes as array can be get by calling model.classes_
num_classes = model.n_classes_
# Booster Parser will parse the tree
# (add fake metadata here as this testing only test the model correctness)
min_max = {'min': 0, 'max': 100}
meta_min_max = MetaData(min_max)
p_trees, features, min_max = boostparser.model_to_trees(model.get_booster(), min_max)
# + [markdown] pycharm={"name": "#%% md\n"}
# Encryption Preparation for XGBoost Model
# 1). Set up some metadata information for the dataset.
# 2). Set up the encryption materials
# 3). Encrypt the model
# 4). Encrypt the query
# 5). Perform the prediction
# 6). Decrypt the prediction
# + pycharm={"name": "#%%\n"}
# ##################################################################################
# # The following is to compute the scores based on the OPE processed decision tree #
# ##################################################################################
# # Set up encryption materials.
# # token bytes calls the os.urandom().
prf_key = token_bytes(16)     # 128-bit PRF key (feature-name hashing)
OPE_key = token_bytes(16)     # 128-bit key for order-preserving encryption
encrypter = OPE(OPE_key)
public_key, private_key = paillier.he_key_gen()   # Paillier keypair (leaf values)
pp_boostKey = PPBoostKey(public_key, prf_key, encrypter)
#
# 1. process the tree into ope_enc_tree
enc_trees = ppbooster.enc_xgboost_model(pp_boostKey, p_trees, meta_min_max)
# + pycharm={"name": "#%%\n"}
# 2. Encrypts the input vector for prediction (using prf_key_hash and ope-encrypter) based on the feature set.
# NOTE(review): no return value is captured, and the encrypted frame is used
# below — this call appears to encrypt test_input_vector in place; confirm.
ppbooster.enc_input_vector(prf_key, encrypter, features, test_input_vector, meta_min_max)
# + [markdown] pycharm={"name": "#%% md\n"}
# Note that the prediction on the server side is done differently from the binary:logistic case. This is because
# the server needs to perform the softmax aggregation.
# + pycharm={"name": "#%%\n"}
# # 3. OPE evaluation based on OPE encrypted values in the tree nodes.
enc_predictions = ppbooster.predict_multiclass(enc_trees, num_classes, test_input_vector)
# + pycharm={"name": "#%%\n"}
# 4. Client decryption.
result = ppbooster.client_decrypt_prediction_multiclass(private_key, enc_predictions)
# + pycharm={"name": "#%%\n"}
# Sanity check: the encrypted pipeline must reproduce the plaintext predictions.
real_y = model.predict(X_test)
assert np.array_equal(result, real_y)
print("success!")
# -
| example/Iris.ipynb |
# -*- coding: utf-8 -*-
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .jl
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Julia 1.5.1
# language: julia
# name: julia-1.5
# ---
# # Natural Language Processing
# ## Marco teórico
# El procesamiento de lenguaje natural, mencionado como NLP a partir de ahora por sus siglas en inglés (Natural Language Processing), es una rama de la inteligencia artificial que busca realizar análisis y transformaciones a cuerpos de texto para encontrar patrones, inferir significados y relaciones entre elementos (palabras, párrafos, oraciones, etc.) o, en general, servir en un modelo con algún propósito de toma de decisiones, inferencia o predicción.
#
# Aquí veremos los básicos de esta área utilizando el paquete `TextAnalysis` en Julia.
#
# ### Documentos
# Los documentos se definen como cualquier cuerpo de texto que puede ser representado de alguna manera específica en el disco de la computadora. Existen los siguientes tipos:
# - Tipo archivo (FileDocument): Un documento representado como texto plano en el disco.
# - Tipo string (StringDocument): Un documento representado como un string codificado en UTF8 en memoria RAM
# - Tipo Token: (TokenDocument): Un documento representado como una sucesión de tokens UTF8, es decir, palabras o símbolos individuales (strings tras ser 'tokenizados').
# - Tipo N-grama: (NGramDocument): Un documento representado como una colección de pares donde un elemento es un token y el otro es un entero que representa el una frecuencia de ocurrencia de dicho string.
#
# Observemos los tipos continuación:
#
using TextAnalysis
# #### Documentos de tipo string/cadena
# Los documentos de tipo cadena suelen ser oraciones individuales, párrafos o textos más completos. No obstante, parte de tener un conjunto de datos limpio yace en organizar los textos en archivos lo más pequeños posible.
#
# Además, éstos se guardan en memoria ram, representados como una cadena de bits en codificación UTF-8. Por ello, tener textos muy grandes localizados en una sola variable identificadora puede dificultar su manipulación.
#
str = "Este es un texto de prueba. Este es la segunda oración"  ## sample text reused by all examples below
sd = StringDocument(str)  ## in-memory UTF-8 document
# #### Documentos de tipo archivo
#
# Los documentos de tipo archivo se utilizan cuando tenemos un archivo contenedor del texto que queremos analizar. Para generarlo en julia basta con poner la ruta hacia el archivo:
#
pathname = "./nlp/archivo.txt"  ## relative path; the file is read from disk by FileDocument
fd = FileDocument(pathname)
# #### Documentos de tipo token
# Los tokens en el contexto de procesamiento de lenguaje natural son elementos individuales y uniformes que yacen en una colección. Se habla de token usualmente para referirse a palabras individuales cuando tenemos un alfabeto latino, no obstante, para otros alfabetos puede que el concepto cambie ligeramente.
#
# Además, los tokens podrían referirse a letras individuales, oraciones o cualquier forma de agrupar el texto que posea información estructural. No obstante, esas dos agrupaciones mencionadas no suelen poseer información útil al ser respectivamente muy pequeñas y muy grandes.
#
# Para crearlas en Julia podemos crear un arreglo de ellas:
#
#
mis_tokens = String["Esta", "es", "una", "oración", "de", "prueba"]  ## element-typed Vector{String}
# Aquí aprovechamos a mostrar además que para crear arreglos de un tipo uniforme en Julia, podemos anteponer el nombre del tipo y el compilador sabrá que debe esperar y forzar dicho tipo (ejemplo, `Int32` en un arreglo forzaría a todos los `Integer` dentro a que sean representado por 32 bits)
#
typeof(mis_tokens)  ## Vector{String}
td = TokenDocument(mis_tokens)  ## document built directly from pre-tokenized words
# #### Documento de tipo N-grama
# Podemos pensar en que el documento de tipo token puede ser generado a partir de un documento de tipo string, y así también el de tipo string ser generado a partir de uno de tipo archivo. Esto es correcto y existen métodos específicos para ello.
#
# Por una parte, para pasar de un string a una lista de tokens podemos utilizar el paquete `WordTokenizer`, el cual tiene múltiples métodos de tokenización de alto rendimiento para procesar grandes cuerpos de texto y reducirlos a tokens listos para el análisis.
#
# Ahora, el documento de tipo N-grama tiene más sentido pensarlo viniendo de uno de tipo token, pues al tokenizar un texto grande, es muy probable que tengamos palabras repetidas y obtener la frecuencia en la que éstas palabras ocurren a lo largo de dicho texto juega un rol en el análisis exploratorio inicial.
#
# Los documento de tipo N-grama son precisamente pares de tokens con un número entero que cuenta las ocurrencias de dicho token en algún texto. Aquí podemos generarlo de las siguientes maneras:
#
#
dict_ocurrencias = Dict("hola" => 1, "mundo" => 1)  ## token => occurrence count
ngd = NGramDocument(dict_ocurrencias)
# ## Procesamiento
# ### Funciones
# La siguiente es una exploración a algunas funciones ya definidas (para todos los tipos anteriores mediante multiple dispatch) en el paquete.
#
# #### Texto
#
text(sd), text(fd), text(td)  ## reconstruct the raw text from each document type
# #### Tokens
#
tokens(sd)  ## tokenize the in-memory string document
tokens(fd)  ## tokenize the file-backed document
# #### N-gramas
#
ngrams(sd)        ## unigram counts
ngrams(fd)
ngrams(sd, 2)     ## bigrams
ngrams(sd, 2, 3)  ## bigrams and trigrams together
# Podemos también extraer la estructura de un documento de N-gramas para entender si tiene bigramas o trigramas, etc.
#
ngram_complexity(NGramDocument(ngrams(sd), 2))  ## query the declared n-gram order (here 2)
# #### Metadata
#
language(sd) ## the default document language is English
# Podemos cambiarlo utilizando la versión 'mutadora' de la función `language`:
#
language!(sd, TextAnalysis.Languages.Spanish())  ## mutating setter, note the `!`
# Así igual los demás elementos de la metadata...
#
title(sd), author(sd), timestamp(sd)  ## metadata getters
title!(sd, "Mi título"), author!(sd, "Yo"), timestamp!(sd, "Desconocido")  ## metadata setters
sd
# ### Procesamiento de documentos
# Antes de comenzar a hacer transformaciones y análisis a los documentos, nos puede interesar hacer una limpieza en caso de tener caracteres corruptos o pequeñas molestias de formato que nos haría más limpio nuestro trabajo si no estuvieran
#
remove_corrupt_utf8!(sd) ## drops invalid UTF-8 byte sequences in place
str_2 = StringDocument("HolA!!!,. Soy un teXto, que No está mUy Bien escrito..")
prepare!(str_2, strip_punctuation)  ## in-place punctuation removal
text(str_2)
# Removiendo las mayúsculas...
#
remove_case!(str_2)  ## lowercase everything in place
text(str_2)
# Podemos además remover ciertas palabras..
#
remove_words!(str_2, [" no"])  ## NOTE(review): the pattern includes a leading space — verify intent
text(str_2)
# Otras posibilidades son:
#
# ```
# - prepare!(sd, strip_articles)
# - prepare!(sd, strip_indefinite_articles)
# - prepare!(sd, strip_definite_articles)
# - prepare!(sd, strip_preposition)
# - prepare!(sd, strip_pronouns)
# - prepare!(sd, strip_stopwords)
# - prepare!(sd, strip_numbers)
# - prepare!(sd, strip_non_letters)
# - prepare!(sd, strip_sparse_terms)
# - prepare!(sd, strip_frequent_terms)
# - prepare!(sd, strip_html_tags)
# ```
#
# Además de poder ser utilizadas juntas:
#
# `prepare!(sd, strip_articles| strip_numbers| strip_html_tags)`
#
#
| _build/html/_sources/Natural_Language_Processing.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: tf2
# language: python
# name: tf2
# ---
# + code_folding=[]
# imports ...
import os
import tensorflow as tf
import numpy as np
import collections
from tqdm import tqdm
# -
path = '/mnt/gdn-workloads/ihubara/tfrecord_dir_packed'
# IPython shell magic: capture the directory listing as a list of tfrecord file names.
list_ = ! ls /mnt/gdn-workloads/ihubara/tfrecord_dir_packed
# + code_folding=[0, 14, 31]
def record2dict(record: bytes) -> collections.OrderedDict:
    """Parse one serialized (unpacked) BERT pre-training example into numpy arrays.

    Parameters
    ----------
    record : scalar string tensor yielded by ``tf.data.TFRecordDataset``
        (hence the ``.numpy()`` call to get the raw bytes).

    Returns
    -------
    collections.OrderedDict
        Feature name -> np.ndarray for the seven unpacked BERT features.
    """
    example = tf.train.Example()
    example.ParseFromString(record.numpy())
    result = collections.OrderedDict()
    # BUG FIX: tf.train.Example stores its feature map as `features.feature`
    # (the message field is `features`); `example.feature` raises AttributeError.
    feature = example.features.feature
    result['input_ids'] = np.array(feature['input_ids'].int64_list.value)
    result['input_mask'] = np.array(feature['input_mask'].int64_list.value)
    result['segment_ids'] = np.array(feature['segment_ids'].int64_list.value)
    result['masked_lm_positions'] = np.array(feature['masked_lm_positions'].int64_list.value)
    result['masked_lm_ids'] = np.array(feature['masked_lm_ids'].int64_list.value)
    result['masked_lm_weights'] = np.array(feature['masked_lm_weights'].float_list.value)
    result['next_sentence_labels'] = np.array(feature['next_sentence_labels'].int64_list.value)
    return result
def packed_record2dict(record: bytes) -> collections.OrderedDict:
    """Parse one serialized *packed* BERT example (multiple samples per record).

    Same contract as ``record2dict`` but with the three extra packing features
    (`positions`, `next_sentence_positions`, `next_sentence_weights`).
    """
    example = tf.train.Example()
    example.ParseFromString(record.numpy())
    result = collections.OrderedDict()
    # BUG FIX: the proto field is `features` (Example.features.feature), not `feature`.
    feature = example.features.feature
    result['input_ids'] = np.array(feature['input_ids'].int64_list.value)
    result['input_mask'] = np.array(feature['input_mask'].int64_list.value)
    result['segment_ids'] = np.array(feature['segment_ids'].int64_list.value)
    result['positions'] = np.array(feature['positions'].int64_list.value)
    result['masked_lm_positions'] = np.array(feature['masked_lm_positions'].int64_list.value)
    result['masked_lm_ids'] = np.array(feature['masked_lm_ids'].int64_list.value)
    result['masked_lm_weights'] = np.array(feature['masked_lm_weights'].float_list.value)
    result['next_sentence_positions'] = np.array(feature['next_sentence_positions'].int64_list.value)
    result['next_sentence_labels'] = np.array(feature['next_sentence_labels'].int64_list.value)
    result['next_sentence_weights'] = np.array(feature['next_sentence_weights'].float_list.value)
    return result
def get_next_sentence_weights(record: bytes) -> np.ndarray:
    """Extract only the `next_sentence_weights` feature from a packed record.

    The sum of these 0/1 weights equals the number of real samples packed
    into the record, which is what the averaging loop below needs.
    """
    example = tf.train.Example()
    example.ParseFromString(record.numpy())
    # BUG FIX: the proto field is `features` (Example.features.feature), not `feature`.
    feature = example.features.feature
    return np.array(feature['next_sentence_weights'].float_list.value)
# -
'''
Count number of records.
'''
counter = 0
for file in tqdm(list_):
    records = tf.data.TFRecordDataset(os.path.join(path, file))
    # Materialize to count; each element is one (multi-sample) packed record.
    records = [record for record in records]
    counter += len(records)
# +
'''
Compute average of number samples per record (avg_seq_per_pack).
'''
pbar = tqdm(total=4746826)  # total taken from the record count computed above
pack_numbers = []
for file in list_:
    records = tf.data.TFRecordDataset(os.path.join(path, file))
    for record in records:
        next_sentence_weights = get_next_sentence_weights(record)
        # Sum of the 0/1 weights == number of real samples packed into this record.
        pack_numbers.append(next_sentence_weights.sum())
        pbar.update(1)
pbar.close()
# + active=""
# Before packing:
# len(input_ids) - 512, list of token ids where (101 - start, 102 - end, 103 - mask), padded to 512 with zeros.
# len(input_mask) - 512, 111...111000...000, where the number of ones corresponds to the effective sample length, padded to 512 with zeros.
# len(segment_ids) - 512, 000...000111...111000...000, where first zeros correspond to the first sentence, ones to second sentence,
# and padded to 512 with zeros.
# len(masked_lm_positions) - 76, positions of masked tokens (103), padded to 76 with zeros.
# len(masked_lm_ids) - 76, token ids of masked tokens, padded to 76 with zeros.
# len(masked_lm_weights) - 76, 111...111000...000, number of ones equals to number of masked tokens.
# len(next_sentence_labels) - 1, 0 or 1, where 1 if sentence 2 is the next sentence of sentence 1.
#
#
# After packing:
# len(packed_input_ids) - 512, list of token ids where (101 - start, 102 - end, 103 - mask), padded to 512 with zeros.
# If we have 2 samples packed: 101,...,102,...,102,101,...,102,...,102,0...0
# where 101,...,102 first sentence, ,...,102 second sentence,
# 101,...,102 third sentence and ,...,102 forth sentence.
# len(packed_input_mask) - 512, 111...111222...222000...000, where the number of ones corresponds to the first sample length,
# and the number of twos corresponds to the second sample length. (If there are 3 samples 1...12...23...30...0.)
# len(packed_segment_ids) - 512, 000...000111...111000...000111...111000...000 where 000...000111...111 the first and the second samples,
# padded to 512 with zeros.
# len(packed_positions) - 512, 0,1,2,3,...,<length of first sample> - 1,0,1,2,3,...,<length of second sample> - 1,0,0,...,0
# len(packed_masked_lm_positions) - 79, positions of masked tokens (103), padded to 79 with zeros.
# len(packed_masked_lm_ids) - 79, token ids of masked tokens, padded to 79 with zeros.
# len(packed_masked_lm_weights) - 79, 111...111222...222000...000 where 111...111 corresponds to first sample
# and 222...222 to second. (If there are 3 samples 1...12...23...30...0.)
# len(packed_next_sentence_positions) - 3, <position of first sample>,<position of second sample>,0 (corresponds to 101 positions)
# len(packed_next_sentence_labels) - 3, 0 or 1, where 1 if sentence 2 is the next sentence of sentence 1.
# len(packed_next_sentence_weights) - 3, 110 (If there are 3 samples 111.)
#
#
#
#
# Data packed from 30 tfrecords has 4746826 records (multi-samples).
# Total number of samples in records is 9648228.
#
# avg_seq_per_pack = 2.0325640754474676
#
#
# Coverage during training: ('batch size'=14, 'number of accumulation steps'=4, 'number of cards'=8, 'number of training steps'=6365)
# 14*4*8*6365 / 9648228 * 100 = 29.55%
# -
# Inspect the masked-LM features of the first record in the first file.
records = tf.data.TFRecordDataset(os.path.join(path, list_[0]))
dict_ = record2dict(next(iter(records)))
dict_['masked_lm_positions']
dict_['masked_lm_ids']
dict_['masked_lm_weights']
| Intel-HabanaLabs/benchmarks/bert/implementations/TensorFlow/nlp/bert/notebooks/records investigation.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# language: python
# name: python3
# ---
# # Calculating variability amplitudes.
# +
import numpy as np
import pandas as pd
import matplotlib.pyplot as plt
# %matplotlib inline
from tqdm import trange
# Global matplotlib settings: large fonts plus LaTeX text rendering.
plotpar = {'axes.labelsize': 30,
           'font.size': 30,
           'legend.fontsize': 15,
           'xtick.labelsize': 30,
           'ytick.labelsize': 30,
           'text.usetex': True}
plt.rcParams.update(plotpar)
import kepler_datjka as kd  # local helper for loading/stitching Kepler light curves
# -
# Load the catalog of stars in the kinematics-and-rotation project.
# +
# Load the target list and compute Rvar — the 5th-to-95th percentile range of
# each star's light-curve flux, converted to parts-per-million.
df = pd.read_csv("../data/gaia_mc_cuts.csv")
Rvar = np.zeros(len(df))
for i in trange(len(df.kepid.values)):
    # Kepler IDs are zero-padded to 9 digits in the .kplr cache layout.
    lcdir = "/Users/rangus/.kplr/data/lightcurves/{}".format(str(df.kepid.values[i]).zfill(9))
    x, y, yerr = kd.load_and_join(lcdir)
    # BUG FIX: the ppm conversion must scale the whole percentile range.
    # Previously only the 5th percentile was multiplied by 1e6
    # (p95 - p5*1e6), which makes Rvar hugely negative.
    Rvar[i] = (np.percentile(y, 95) - np.percentile(y, 5)) * 1e6
# -
# Persist the new amplitude column alongside the original catalogue.
df["Rvar"] = Rvar
df.to_csv("../data/gaia_mc_rvar.csv")
# Compare the new Rvar against the published Rper amplitude (first 1357 stars).
plt.plot(df.Rper[:1357], df.Rvar[:1357], ".")
plt.xlabel("Rper")
plt.ylabel("Rvar")
# Amplitude vs rotation period, colored by effective temperature.
plt.scatter(df.Prot[:1357], df.Rvar[:1357], c=df.Teff[:1357], alpha=.5)
plt.colorbar()
plt.xlabel("Prot")
plt.ylabel("Rvar")
plt.yscale("log")
plt.xscale("log")
plt.scatter(df.Prot[:1357], df.Rper[:1357], c=df.Teff[:1357], alpha=.5)
plt.colorbar()
plt.xlabel("Prot")
plt.ylabel("Rper")
plt.yscale("log")
plt.xscale("log")
plt.scatter(df.Teff[:1357], df.Prot[:1357], c=np.log10(df.Rvar[:1357]), alpha=.5)
plt.colorbar()
plt.xlabel("Teff")
plt.ylabel("Prot")
plt.yscale("log")
# Sanity-check one light curve: flux with the 5th/95th percentile bounds marked.
plt.plot(x, y, ".")
plt.hist(y, 50);
plt.axvline(np.percentile(y, 5), color="k", ls="--")
plt.axvline(np.percentile(y, 95), color="k", ls="--")
# BUG FIX: this assignment was left incomplete ("Rvar =") in the original,
# which is a SyntaxError. Reconstructed from the percentile code above:
# the single-star amplitude in ppm, compared against the catalogue Rper.
Rvar = (np.percentile(y, 95) - np.percentile(y, 5)) * 1e6
print(Rvar, df.Rper.values[0])
| code/Calculating_variability.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# language: python
# name: python3
# ---
# # Develop an initial model
# +
# Python modules
import dateutil
from datetime import datetime
import re
# Data science packages
import pandas as pd
import numpy as np
import seaborn as sns
import matplotlib.pyplot as plt
# Scikit Learn utility classes & functions
from sklearn.preprocessing import StandardScaler
from sklearn.pipeline import make_pipeline
from sklearn.model_selection import train_test_split, cross_val_score
from sklearn.metrics import make_scorer, mean_absolute_error
# Scikit Learn models
from sklearn.linear_model import Lasso, ElasticNet, Ridge
from sklearn.ensemble import (AdaBoostRegressor, BaggingRegressor, ExtraTreesRegressor, GradientBoostingRegressor,
RandomForestRegressor)
from sklearn.svm import SVR, LinearSVR
from xgboost import XGBRegressor
# Our own code
from src.data.data_builder import DataBuilder, BettingData, MatchData
from src.data.data_transformer import DataTransformer
from src.data.feature_builder import FeatureBuilder
from src.model.metrics import measure_estimators, regression_accuracy
# Fix the RNG so train/test splits and stochastic models are reproducible.
np.random.seed(42)
# +
# Set up data and create cumulative features
csv_paths = ('data/afl_betting.csv', 'data/ft_match_list.csv')
data_classes = (BettingData, MatchData)
# Pair each CSV with its loader class, concatenate into one raw frame, then
# stack so each row is one team's view of a match; FeatureBuilder adds the
# cumulative/rolling features in place.
raw_df = DataBuilder(data_classes, csv_paths).concat()
model_df = DataTransformer(raw_df).stack_teams()
fb = FeatureBuilder(model_df)
fb.transform()
team_df = fb.df.dropna()
team_df
# ## Which models perform better on betting/match data?
# +
# Set up & split data for models
# Label = final score margin (team score minus opponent score); categorical
# columns (team names etc.) are one-hot encoded.
team_features = pd.get_dummies(team_df.drop(['score', 'oppo_score'], axis=1))
team_labels = pd.Series(team_df['score'] - team_df['oppo_score'], name='score_diff')
data = train_test_split(team_features, team_labels)
# +
# Pass data to models & measure performance
# A mix of linear, kernel and ensemble regressors, all with default hyperparameters.
estimators = (Lasso(),
              ElasticNet(),
              Ridge(),
              LinearSVR(),
              AdaBoostRegressor(),
              BaggingRegressor(),
              ExtraTreesRegressor(),
              GradientBoostingRegressor(),
              RandomForestRegressor(),
              SVR(kernel='rbf'),
              XGBRegressor())
estimator_scores = measure_estimators(estimators, data, model_type='regression')
estimator_scores
# +
# MAE scores
mae_scores = estimator_scores.sort_values(['score_type', 'error'], ascending=[True, True])
plt.figure(figsize=(15, 7))
sns.barplot(x='estimator', y='error', hue='score_type', data=mae_scores)
# NOTE(review): the `ymin` keyword was removed in matplotlib >= 3.3; newer
# versions need plt.ylim(bottom=...). Same applies to the calls below.
plt.ylim(ymin=27)
plt.title('Model mean absolute error for cross-validation & test sets\n', fontsize=18)
plt.ylabel('MAE', fontsize=14)
plt.xlabel('', fontsize=14)
plt.yticks(fontsize=12)
plt.xticks(fontsize=12, rotation=90)
plt.legend(fontsize=14)
plt.show()
# Accuracy scores
acc_scores = mae_scores.sort_values(['score_type', 'accuracy'], ascending=[True, False])
plt.figure(figsize=(15, 7))
sns.barplot(x='estimator', y='accuracy', hue='score_type', data=acc_scores)
plt.ylim(ymin=0.65)
plt.title('Model accuracy for cross-validation & test sets\n', fontsize=18)
plt.ylabel('Accuracy', fontsize=14)
plt.xlabel('', fontsize=14)
plt.yticks(fontsize=12)
plt.xticks(fontsize=12, rotation=90)
plt.legend(fontsize=14)
plt.show()
# Std scores
# Only the CV rows carry a std_error (the test set is a single split).
std_scores = mae_scores[mae_scores['score_type'] == 'cv'].sort_values('std_error', ascending=True)
plt.figure(figsize=(15, 7))
sns.barplot(x='estimator', y='std_error', data=std_scores)
# plt.ylim(ymin=0.65)
plt.title('Standard deviation for model mean absolute error\n', fontsize=18)
plt.ylabel('Std', fontsize=14)
plt.xlabel('', fontsize=14)
plt.yticks(fontsize=12)
plt.xticks(fontsize=12, rotation=90)
# plt.legend(fontsize=14)
plt.show()
# -
# ### Linear models perform better than ensembles
#
# The 'boost' ensembles have comparable performance to the better linear models, but all the linear models get around 29 MAE for both the CV and test scores, with Lasso having the best error scores and Ridge having the 3rd best error and 2nd best accuracy (SVR has terrible error, but the best CV accuracy for some reason).
#
# As expected, ensembles perform better, relatively speaking, when looking at standard deviation, but even then the differences are small and both types of models are spread from best to worst.
# ## Does reducing the number of features make a difference?
# +
# Try a stripped-down set of features to see if certain models are over-fitting and perform better with fewer features
# relative to other models
min_features = pd.get_dummies(team_df[['team', 'oppo_team', 'line_odds', 'year', 'round_number', 'at_home']])
# Reuse the exact train/test row split from the full-feature run so the
# two feature sets are scored on identical samples.
X_train, X_test = data[:2]
train_index = X_train.index
test_index = X_test.index
min_data = (min_features.loc[train_index, :],
            min_features.loc[test_index, :],
            data[2],
            data[3])
min_estimator_scores = measure_estimators(estimators, min_data, model_type='regression')
# +
# MAE scores
# Join min-feature scores onto the full-feature scores and compute the deltas
# (positive diff = the minimal feature set did worse).
min_mae_scores = (min_estimator_scores.merge(mae_scores, on=['estimator', 'score_type'], suffixes=('_min', ''))
                  .assign(error_diff=lambda x: x['error_min'] - x['error'],
                          accuracy_diff=lambda x: x['accuracy_min'] - x['accuracy'])
                  .sort_values(['score_type', 'error'], ascending=[True, True]))
plt.figure(figsize=(15, 7))
sns.barplot(x='estimator', y='error', hue='score_type', data=min_mae_scores)
plt.ylim(ymin=27)
plt.title('Mean Absolute Error for cross validation & test sets\n', fontsize=18)
plt.ylabel('MAE', fontsize=14)
plt.xlabel('', fontsize=14)
plt.yticks(fontsize=12)
plt.xticks(fontsize=12, rotation=90)
plt.legend(fontsize=14)
plt.show()
plt.figure(figsize=(15, 7))
sns.barplot(x='estimator', y='error_diff', hue='score_type', data=min_mae_scores)
plt.title('Difference in model errors\n(minimal features - maximal features)\n', fontsize=18)
plt.ylabel('MAE Diff', fontsize=14)
plt.xlabel('', fontsize=14)
plt.yticks(fontsize=12)
plt.xticks(fontsize=12, rotation=90)
plt.legend(fontsize=14)
plt.show()
# Accuracy scores
min_acc_scores = min_mae_scores.sort_values(['score_type', 'accuracy'], ascending=[True, False])
plt.figure(figsize=(15, 7))
sns.barplot(x='estimator', y='accuracy', hue='score_type', data=min_acc_scores)
plt.ylim(ymin=0.65)
plt.title('Accuracy for cross validation & test sets\n', fontsize=18)
plt.ylabel('Accuracy', fontsize=14)
plt.xlabel('', fontsize=14)
plt.yticks(fontsize=12)
plt.xticks(fontsize=12, rotation=90)
plt.legend(fontsize=14)
plt.show()
plt.figure(figsize=(15, 7))
sns.barplot(x='estimator', y='accuracy_diff', hue='score_type', data=min_acc_scores)
plt.title('Difference in accuracy for cross validation & test sets\n', fontsize=18)
plt.ylabel('Accuracy Diff', fontsize=14)
plt.xlabel('', fontsize=14)
plt.yticks(fontsize=12)
plt.xticks(fontsize=12, rotation=90)
plt.legend(fontsize=14)
plt.show()
# -
# ### Linear models are still the best
#
# Even with fewer features to prevent the ensemble models from overfitting, the linear models tend to perform better, and Lasso is still best-performing. It does improve performance on the test set for most models, so we'll want to remove or compress features as part of our model tuning.
# ## Is Lasso, ElasticNet, or Ridge better?
#
# Lasso and ElasticNet generally have the lowest MAE scores and are in the top half for accuracy. Ridge has worse CV scores but particularly good test set scores, which means it either generalises very well or is just lucky with the train/test splits.
# +
# Round 24 represents first finals round and doesn't count for tipping competitions
tipping_features = team_features[team_features['round_number'] < 25]
tipping_labels = team_labels[tipping_features.index.values]
# Linear models need scaled inputs, so wrap each in a StandardScaler pipeline.
lasso = make_pipeline(StandardScaler(), Lasso())
en = make_pipeline(StandardScaler(), ElasticNet())
ridge = make_pipeline(StandardScaler(), Ridge())
estimators = (lasso, en, ridge)
model_names = []
errors = []
accuracies = []
years = []
# Walk-forward validation: train on all seasons before `year`, test on `year`.
for year in range(2011, 2017):
    X_train = tipping_features[tipping_features['year'] < year]
    X_test = tipping_features[tipping_features['year'] == year]
    y_train = tipping_labels.loc[X_train.index]
    y_test = tipping_labels.loc[X_test.index]
    for estimator in estimators:
        estimator.fit(X_train, y_train)
        y_pred = estimator.predict(X_test)
        years.append(year)
        # Last pipeline step name ('lasso'/'elasticnet'/'ridge') labels the model.
        model_names.append(estimator.steps[-1][0])
        errors.append(mean_absolute_error(y_test, y_pred))
        accuracies.append(regression_accuracy(y_test, y_pred))
year_scores = pd.DataFrame({'model': model_names,
                            'year': years,
                            'error': errors,
                            'accuracy': accuracies})
year_scores
# +
# MAE scores
plt.figure(figsize=(15, 7))
sns.barplot(x='year', y='error', hue='model', data=year_scores)
# Not starting axis at 0 to make small relative differences clearer
plt.ylim(ymin=25)
plt.title('Model error per season\n', fontsize=18)
plt.ylabel('MAE', fontsize=14)
plt.xlabel('', fontsize=14)
plt.yticks(fontsize=12)
plt.xticks(fontsize=12)
plt.legend(fontsize=14)
plt.show()
# Accuracy scores
plt.figure(figsize=(15, 7))
sns.barplot(x='year', y='accuracy', hue='model', data=year_scores)
# Not starting axis at 0 to make small relative differences clearer
plt.ylim(ymin=0.55)
plt.title('Model accuracy per season\n', fontsize=18)
plt.ylabel('Accuracy', fontsize=14)
plt.xlabel('', fontsize=14)
plt.yticks(fontsize=12)
plt.xticks(fontsize=12, rotation=90)
plt.legend(fontsize=14)
plt.show()
# -
# ### Lasso is the best model
#
# Lasso has the lowest MAE 5 out of 6 seasons (with 2 of those basically ties, and the one where ElasticNet is best also being basically a tie). As shown in other comparisons, accuracy is a bit more random, resulting in a wider spread of winners: Lasso is most accurate for 4 seasons (1 of which it ties with Ridge), ElasticNet for 2, and Ridge tied for 1.
# +
# Accuracy scores with betting accuracy & footy tipper
# A line bet "wins" when the sign of the line odds agrees with the actual
# margin; averaging per season gives the bookmakers' implied accuracy.
betting_scores = (((team_df['line_odds'] < 0) & (team_df['score'] >= team_df['oppo_score']) |
                   (team_df['line_odds'] > 0) & (team_df['score'] <= team_df['oppo_score']))
                  .loc[tipping_features[tipping_features['year'] > 2010].index.values]
                  .rename('accuracy')
                  .groupby(level=1)
                  .mean()
                  .reset_index()
                  .assign(model='betting', error=0))
# Footy tipper accuracy scores taken from a trial that took too long to run,
# so I'm just copy pasting from a notebook
footy_tipper_scores = pd.DataFrame({'model': ['footy_tipper'] * len(range(2012, 2017)),
                                    'year': list(range(2012, 2017)),
                                    'error': [0] * len(range(2012, 2017)),
                                    'accuracy': [0.768116, 0.739130, 0.739130, 0.750000, 0.714976]})
plt.figure(figsize=(15, 10))
# NOTE(review): DataFrame.append was removed in pandas 2.0; newer versions
# need pd.concat([year_scores, betting_scores, footy_tipper_scores]).
sns.barplot(x='year',
            y='accuracy',
            hue='model',
            data=year_scores.append([betting_scores, footy_tipper_scores], sort=False))
# Not starting axis at 0 to make small relative differences clearer
plt.ylim(ymin=0.55)
plt.title('Accuracy per season', fontsize=18)
plt.ylabel('Accuracy', fontsize=14)
plt.xlabel('', fontsize=14)
plt.yticks(fontsize=12)
plt.xticks(fontsize=12, rotation=90)
plt.legend(fontsize=14)
plt.show()
# -
# Even without any tuning, a lasso model performs reasonably well, beating the betting odds in 2 seasons (vs 2 for Footy Tipper and 3 ties for ElasticNet interestingly) and beating the fully-tuned Footy Tipper ensemble 1 out of 5 times (with ElasticNet beating it twice). Pretty good start.
#
# Winners: Lasso, Betting, Betting, FootyTipper, FootyTipper, Betting
#
# 2nd: ElasticNet/Betting, ElasticNet, FootyTipper, Lasso/Ridge, ElasticNet/Betting, Lasso
#
# There is some evidence in the season breakdown to suggest that ElasticNet will perform better on a per-season basis, but the sample size is pretty small, and MAE is a more-reliable metric for model quality as (per the MoS blog post) accuracy suffers from much higher variance.
| notebooks/2019_season/2.1-initial-model.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# name: python3
# ---
# + [markdown] id="ee1qMIGmzwhO" colab_type="text"
# Github code test following the readme from:
#
# https://github.com/huggingface/pytorch-pretrained-BigGAN/
# + id="Y6Xee0FOUSrv" colab_type="code" colab={}
# !pip install pytorch-pretrained-biggan
# !mkdir generated_images
# + id="lZSVE0ZIS-R8" colab_type="code" outputId="1eb053e1-059d-4542-b24c-ec423461aec1" colab={"base_uri": "https://localhost:8080/", "height": 51}
import torch
from pytorch_pretrained_biggan import (BigGAN, one_hot_from_names, truncated_noise_sample,
convert_to_images, save_as_images, display_in_terminal)
from IPython.display import Image
import nltk
nltk.download('wordnet')
# Pre-trained BigGAN-deep checkpoints come in three resolutions.
img_size = 256 # 128, 256 or 512
# Load pre-trained model tokenizer (vocabulary)
model = BigGAN.from_pretrained('biggan-deep-{}'.format(img_size))
# Prepare an input
# Truncation trades sample diversity for fidelity (lower = cleaner images).
truncation = 0.4
class_vector = one_hot_from_names(['iPod', 'mountain bike', "jack-o'-lantern"], batch_size=3)
noise_vector = truncated_noise_sample(truncation=truncation, batch_size=3)
# All in tensors
noise_vector = torch.from_numpy(noise_vector)
class_vector = torch.from_numpy(class_vector)
# Generate an image (no gradients needed for inference)
with torch.no_grad():
    output = model(noise_vector, class_vector, truncation)
# Save results as png images
save_as_images(output, 'generated_images/output')
# + id="8_9JNPR3dC4n" colab_type="code" outputId="40f5305f-e346-4eef-b18c-1862f7aa9145" colab={"base_uri": "https://localhost:8080/", "height": 773}
# Display results
import matplotlib.pyplot as plt
import matplotlib.image as mpimg
# Read each generated PNG back from disk and show it inline.
for i in range(3):
    image = mpimg.imread("generated_images/output_%d.png" % i)
    plt.imshow(image)
    plt.show()
| HuggingFace/BigGan_handsonai_1.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# language: python
# name: python3
# ---
# + _uuid="8f2839f25d086af736a60e9eeb907d3b93b6e0e5" _cell_guid="b1076dfc-b9ad-4769-8c92-a6c4dae69d19"
import numpy as np
import csv
import pandas as pd
import matplotlib.pyplot as plt
import os
import torch
from PIL import Image
from torch.utils.data import Dataset, DataLoader
from torchvision import io, datasets, transforms
from tqdm.notebook import tqdm
import torch.nn as nn
import torch.nn.functional as F
import torch.optim as optim
# -
# constants
img_width = 384   # full expression image: three 128x128 glyphs side by side
img_height = 128  # each glyph crop is img_height x img_height
batch_size = 50
# Class index -> symbol: 0-9 are digits, 10-13 are the four operators.
op_str = '0123456789+-*/'
# +
# Generate annotations_digits.csv: exactly 50 000 labelled rows of the form
# (image name, notation, operand/operator triple, evaluated value).
# Division rows are emitted only when the quotient is a whole number.
# BUG FIX: the output file was opened but never closed, so buffered rows could
# be lost before the CSV is read back later in the notebook; a `with` block
# guarantees flush + close.
with open('annotations_digits.csv', 'w', newline='') as outfile:
    output = csv.writer(outfile)
    output.writerow(['Image', 'Label', 'Left', 'Operator', 'Right', 'Value'])
    ctr = 0
    num = 0
    while ctr < 50000:
        i = (num // 10) % 10  # left operand digit
        j = num % 10          # right operand digit
        for k in ['+', '-', '*', '/']:
            for l in ['prefix', 'postfix', 'infix']:
                if ctr >= 50000:
                    break
                # Skip divisions that are undefined (j == 0) or not exact.
                if k == '/' and (j == 0 or (i / j) % 1 != 0):
                    continue
                ctr += 1
                # Column order encodes the notation: operator first (prefix),
                # middle (infix) or last (postfix). eval() is safe here: the
                # expression is built from internal single digits only.
                if l == 'prefix':
                    output.writerow([f'{ctr}.jpg', l, k, i, j, int(eval(f'{i}{k}{j}'))])
                elif l == 'infix':
                    output.writerow([f'{ctr}.jpg', l, i, k, j, int(eval(f'{i}{k}{j}'))])
                else:
                    output.writerow([f'{ctr}.jpg', l, i, j, k, int(eval(f'{i}{k}{j}'))])
        if ctr >= 50000:
            break
        num += 1
print(ctr)
# +
#enable cuda
# Prefer GPU when available; pinned memory / worker processes only pay off with CUDA.
use_cuda = torch.cuda.is_available()
if use_cuda:
    device = torch.device('cuda')
    loader_kwargs = {'num_workers': 1, 'pin_memory': True}
else:
    device = torch.device('cpu')
    loader_kwargs = {}
print(device)
# +
class ExpressionDataset(Dataset):
    """Binomial expression dataset.

    Each item is a 384x128 image of three glyphs (two operands + one
    operator, in prefix/infix/postfix order) plus its three 128x128 crops
    and the integer class labels read from the annotations CSV.
    """

    def __init__(self, csv_file, root_dir):
        # csv_file: annotations CSV generated above; root_dir: image directory.
        self.labels = pd.read_csv(csv_file)
        self.root_dir = root_dir
        self.transform = transforms.ToTensor()

    def __len__(self):
        return len(self.labels)

    def __getitem__(self, idx):
        if torch.is_tensor(idx):
            idx = idx.tolist()
        img_name = os.path.join(self.root_dir, self.labels.iloc[idx, 0])
        img = Image.open(img_name)
        # Slice the strip into its three square glyph crops.
        leftimg = img.crop((0, 0, img_height, img_height))
        middleimg = img.crop((img_height, 0, img_height*2, img_height))
        rightimg = img.crop((img_height*2, 0, img_height*3, img_height))
        exprtype, left, op, right, ans = self.labels.iloc[idx, 1:]
        # Encode notation as int: 0 = prefix, 1 = infix, 2 = postfix.
        exprtype_int = 0
        if exprtype == "infix":
            exprtype_int = 1
        elif exprtype == "postfix":
            exprtype_int = 2
        # Glyph labels are indices into op_str (digits 0-9, operators 10-13).
        return [self.transform(img).float(), self.transform(leftimg).float(), self.transform(middleimg).float(), self.transform(rightimg).float(), exprtype_int, op_str.index(left), op_str.index(op), op_str.index(right), ans]
dataset = ExpressionDataset(csv_file='/kaggle/working/annotations_digits.csv', root_dir='../input/soml-hackathon/SoML/SoML-50-old/data')
# Visual sanity check: show the right-hand glyph crop of one sample.
plt.imshow(dataset[10][3].squeeze())
print(len(dataset))
# 80/10/10 split of the 50 000 samples.
train_set, test_set, validation_set = torch.utils.data.random_split(dataset, [40000, 5000, 5000])
train_loader = DataLoader(train_set, batch_size=50, shuffle=True)
test_loader = DataLoader(test_set, batch_size=50, shuffle=True)
validation_loader = DataLoader(validation_set, batch_size=50, shuffle=True)
# -
class DigitNetwork(torch.nn.Module):
    """Small CNN classifying a single 128x128 glyph into 14 classes
    (digits 0-9 plus the four operators, indexed as in ``op_str``)."""

    def __init__(self):
        super().__init__()
        self.conv1 = nn.Conv2d(1, 10, kernel_size=10)
        self.conv2 = nn.Conv2d(10, 20, kernel_size=5)
        self.conv2_drop = nn.Dropout2d()
        # 2420 = 20 channels * 11 * 11 spatial size after the two conv+pool
        # stages in forward() (128 -> 119 -> 39 -> 35 -> 11).
        self.fc1 = nn.Linear(2420, 100)
        self.fc2 = nn.Linear(100, 14)

    def forward(self, x):
        """Return raw class logits of shape (batch, 14) for input (batch, 1, 128, 128)."""
        x = F.relu(F.max_pool2d(self.conv1(x), 3))
        x = F.relu(F.max_pool2d(self.conv2_drop(self.conv2(x)), 3))
        x = torch.flatten(x, 1)
        x = F.relu(self.fc1(x))
        x = F.dropout(x, training=self.training)
        x = self.fc2(x)
        return x

    def digit_acc(self, loader):
        """Per-glyph classification accuracy over `loader`'s left/middle/right crops."""
        total = 0
        correct = 0
        with torch.no_grad():
            for fullimg, leftimg, midimg, rightimg, types, left, mid, right, ans in loader:
                sections = [[leftimg, left], [midimg, mid], [rightimg, right]]
                for images, labels in sections:
                    images, labels = images.to(device), labels.to(device)
                    batch_size = images.shape[0]
                    # BUG FIX: use `self` rather than the module-level global
                    # `digit_net`, so the method works on any instance (and
                    # before/without that global existing). The sibling
                    # method `acc` already did this correctly.
                    output = self(images)
                    prediction = torch.argmax(output, dim=1)
                    correct += torch.sum(prediction == labels)
                    total += batch_size
        return correct / total

    def acc(self, loader):
        """End-to-end expression accuracy: classify the three glyphs, infer
        the notation from which position holds the operator (class >= 10),
        evaluate the expression and compare with the ground-truth value."""
        total = 0
        correct = 0
        op_str = "0123456789+-*/"
        with torch.no_grad():
            for fullimg, leftimg, midimg, rightimg, types, left, mid, right, ans in loader:
                ans = ans.to(device)
                sections = [leftimg, midimg, rightimg]
                batch_size = fullimg.shape[0]
                predictions = []
                answers = []
                for images in sections:
                    images = images.to(device)
                    output = self(images)
                    predicted = torch.argmax(output, dim=1)
                    predictions.append(predicted)
                for i in range(batch_size):
                    l, m, r = predictions[0][i], predictions[1][i], predictions[2][i]
                    if l >= 10:    # operator first -> prefix: op a b
                        answers.append(eval(f'{m}{op_str[l]}{r}'))
                    elif m >= 10:  # operator middle -> infix: a op b
                        answers.append(eval(f'{l}{op_str[m]}{r}'))
                    elif r >= 10:  # operator last -> postfix: a b op
                        answers.append(eval(f'{l}{op_str[r]}{m}'))
                    else:
                        # No operator predicted in any position: count as wrong.
                        answers.append(0)
                correct += torch.sum(torch.tensor(answers).to(device) == ans)
                total += batch_size
        return correct / total
# +
# Train the glyph classifier: each expression image contributes three
# (crop, label) pairs per batch, so one pass over the loader sees every glyph.
digit_net = DigitNetwork().to(device)
lossfn = nn.CrossEntropyLoss()
optimizer = torch.optim.Adam(digit_net.parameters(), lr=0.001)
# Baseline accuracy of the untrained network (~1/14 expected).
print(f"Accuracy: {digit_net.digit_acc(validation_loader)}")
for epoch in range(8):
    avg_loss = 0
    num_iters = 0
    digit_net.train()
    for fullimgs, leftimgs, midimgs, rightimgs, types, left, mid, right, answers in tqdm(train_loader):
        sections = [[leftimgs, left], [midimgs, mid], [rightimgs, right]]
        for images, labels in sections:
            images, labels = images.to(device), labels.to(device)
            optimizer.zero_grad()
            batch_size = images.shape[0]
            output = digit_net(images)
            loss = lossfn(output, labels)
            loss.backward()
            optimizer.step()
            avg_loss += loss.item()
            num_iters += 1
    # Switch to eval mode (disables dropout) before measuring validation accuracy.
    digit_net.eval()
    print(f"Loss: {avg_loss/num_iters}")
    print(f"Accuracy: {digit_net.digit_acc(validation_loader)}")
# -
digit_net.eval()
print(f"Accuracy on expressions: {digit_net.acc(test_loader)}")
torch.save(digit_net.state_dict(), '/kaggle/working/value_net_dict')
| expression-evaluator.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# language: python
# name: python3
# ---
# # COMP 135 day09: MAP estimation for Logistic Regression
#
# ## Outline
#
# * **Part 1: Understanding sigmoids and Logistic Regression as a model**
# * **Part 2: Computing the MAP objective**
# * **Part 3: Gradient descent for the MAP: Comparing 1st and 2nd order GD**
#
# ## Takeaways
#
# * First-order methods are cheap but require many iterations
# * Second-order methods are awesome, but still require careful step-size selection
# * For all gradient descent methods, selecting step sizes is super important. Line search is needed!
import numpy as np
import pandas as pd
import scipy.stats
np.set_printoptions(precision=3, suppress=False)
pd.options.display.float_format = '{:,.3g}'.format # show 3 digits of precision
import matplotlib.pyplot as plt
import seaborn as sns
sns.set_style("whitegrid")
sns.set_context("notebook", font_scale=1.25)
# # Part 1: The Probabilistic view of logistic regression
# ### Task: Binary classification
#
# Given $N$ observations of *paired* feature-outcome observations: $\{ x_n, t_n \}$.
#
# * Each input feature $x_n$ is a scalar real: $x_n \in \mathbb{R}$
# * Each output or "label" or "outcome" $t_n$ is a scalar binary value: $t_n \in \{0, 1\}$
#
# We're also given a feature transform function $\phi$ which maps each $x_n$ to a vector in $M$-dimensional space. This function is known in advance.
#
# We want to make good predictions of new outcomes $t_*$ given new features $x_*$.
#
#
# ## Feature transformation
#
# For now, we'll assume that the "feature transform" $\phi(x_n)$ just simply passes along the features $x_n$, while adding an additional offset or "intercept" feature that is always 1. This is a *simplifying* assumption for today.
def calc_features(x_N1, M=2):
    '''Map raw inputs to the feature representation used for prediction.

    Here the transform is simply [1, x, 0, ...]: a constant intercept
    feature, the untouched raw value, and zero-padding up to M columns.

    Args
    ----
    x_N1 : 2D array, shape (N, 1)
        Raw scalar feature for each of the N examples.

    Returns
    -------
    phi_NM : 2D array, shape (N, M)
        Column 0 is all ones (intercept); column 1 holds the raw values;
        any remaining columns are zero.
    '''
    assert x_N1.ndim == 2
    assert x_N1.shape[1] == 1
    n_examples = x_N1.shape[0]
    intercept_col = np.ones((n_examples, 1))
    zero_pad = np.zeros((n_examples, M - 2))
    phi_NM = np.hstack([intercept_col, x_N1, zero_pad])
    return phi_NM
# +
x_N1 = np.linspace(-1, 1, 5)[:,np.newaxis]
# Get transformed features using our "calc_features" function
# * first column will be all 1s, an "intercept"
# * second column will be the x values
calc_features(x_N1)
# -
# ## Understanding the logistic sigmoid function
#
# As discussed in your pre-recorded lectures, the *logistic sigmoid function* is:
#
# \begin{align}
# \sigma(r) = \frac{1}{1 + e^{-r}}
# \end{align}
#
# It maps real inputs $r \in (-\infty, +\infty)$ to the probability interval $(0, 1)$.
#
# We call it a "sigmoid" function because it has an S-shaped curve, which you'll plot below.
#
# This function is also sometimes called the "expit" function.
#
# We can use an existing implementation of this function available in SciPy:
#
# * expit: https://docs.scipy.org/doc/scipy/reference/generated/scipy.special.expit.html
from scipy.special import expit as sigmoid
sigmoid(0)
sigmoid(-4)
sigmoid(4)
sigmoid(np.asarray([-6, -4, -2, 0, 2, 4, 6]))
# ## Exercise 1a: Plot the logistic sigmoid function
#
# We give you an array of G candidate $r$ values below.
G = 101
r_G = np.linspace(-8, 8, G)
sigmoid_of_r_G = sigmoid(np.asarray(r_G)) # TODO evaluate sigmoid at each r value
plt.plot(r_G, sigmoid_of_r_G, 'k.-');
plt.xlabel('r'); plt.ylabel('$\sigma(r)$');
plt.ylim([-0.001, 1.001])
#
# ## Define the Likelihood
#
# Each observation (indexed by $n$) is drawn iid from a Bernoulli as follows:
#
# $$
# t_n | w \sim \text{BernPMF}\left( t_n | \sigma(w^T \phi(x_n)) \right)
# $$
#
# where $w \in \mathbb{R}^M$ is a weight vector, the same size as our feature vector $\phi(x_n) \in \mathbb{R}^M$
#
# The key properties here are:
# * The *mean* of $t_n$ is a *non-linear activation* of a linear function of the transformed features.
#
# ## Define the Prior
#
# For now, we'll assume that weights come from a zero mean prior with some covariance determined by a scalar parameter $\alpha$:
#
# $$
# w \sim \mathcal{N}( 0, \alpha^{-1} I_M )
# $$
#
# A zero mean prior makes sense if we don't know if the slope should be negative or positive.
#
# ### Parameter we'll treat as a random variable: $w$
#
# * Weights vector: $w = [w_1, w_2, \ldots w_M]^T$, so $w \in \mathbb{R}^M$
#
# ### Parameters we'll treat as fixed: $\alpha$
#
# * Prior precision $\alpha > 0$
#
# The larger $\alpha$ is, the more confident we are in the weight values before seeing any data.
#
# ## Create a simple toy data for analysis
#
# Just execute the cells below to get the sense of how to generate toy data from this model
#
# We'll manually intervene to set the weight vector to a known value. This makes it easy to tell if our learning is working later on.
N = 10 # Number of examples we observe
M = 2 # Number of transformed features
# Create the weight vector we'll use to generate our dataset. Set an intercept of 1.2 and a slope of -0.75
true_w_M = np.asarray([0.1, -0.25])
# Create a "true" alpha value which controls the prior precision
true_alpha = 0.01
# Create observed features $x$ and observed outputs $t$ manually
x_N1 = np.asarray([-5, -0.8, -0.7, -0.6, -0.4, 0.5, 0.8, 0.9, 4.3, 4.1]).reshape((N, 1))
phi_NM = calc_features(x_N1)
prng = np.random.RandomState(101) # reproducible random seed
t_N = (prng.rand(N) < sigmoid(np.dot(phi_NM, true_w_M))).astype(np.float64)
# ## Visualize the toy dataset
# +
plt.plot(x_N1, t_N, 'k.');
ax_h = plt.gca()
ax_h.set_xlim([-8, 8]); ax_h.set_xticks([-6, -4, -2, 0, 2, 4, 6]);
ax_h.set_ylim([-.1, 1.1]);
xgrid_G1 = np.linspace(-8, 8, 100)[:,np.newaxis]
plt.plot(xgrid_G1, sigmoid(np.dot(calc_features(xgrid_G1), true_w_M)), 'c-', linewidth=3);
plt.xlabel('input: x');
plt.ylabel('output: t');
plt.title("Toy Data\n true_slope %.2f \n true intercept %.2f" % (
true_w_M[1], true_w_M[0]));
# -
# ## Discussion 1b: What is it about this observed dataset of 10 points that makes a *negative* slope more plausible than a positive one?
# +
# TODO discuss
# -
# # Part 2: MAP estimation : View as optimization problem
# There is NO closed form for the posterior over weights $p( w | t)$.
#
# However, we can evaluate (and thus optimize) the MAP objective, since this doesn't require knowing the full posterior.
#
# Let's see how. Begin with the MAP optimization problem:
#
# \begin{align}
# w^* = \arg \max_{w \in \mathbb{R}^M} ~~ p( w | t_{1:N} )
# \end{align}
#
# Rewriting using the log of the objective for tractability and simplifying via Bayes rule, we get the objective function to maximize is:
#
# \begin{align}
# \mathcal{M}(w) &= \log p( w | t_{1:N})
# \\
# &= \log p( w ) + \log p( t_{1:N} | w ) - \underbrace{\log p(t_{1:N})}_{\text{const wrt}~ w}
# \end{align}
#
# Thus, we can simply ignore the constant term, and maximize the following alternative objective:
# \begin{align}
# \mathcal{M}'(w) &= \log \text{MVNormPDF}( w | 0, \alpha^{-1} I_M ) + \sum_{n=1}^N \log \text{BernPMF}( t_n | \sigma(w^T \phi(x_n) )
# \end{align}
#
# Finally, we can *standardize* our problem by transforming so we *minimize* rather than *maximize*, just by multiplying by -1. Now the *loss* function we wish to minimize is:
#
# \begin{align}
# \mathcal{L}(w) &= - \log \text{MVNormPDF}( w | 0, \alpha^{-1} I_M ) - \sum_{n=1}^N \log \text{BernPMF}( t_n | \sigma(w^T \phi(x_n) )
# \end{align}
#
# Thus, we can find our optimal weights $w^*$ via:
#
# \begin{align}
# w^* = \arg \min_{w \in \mathbb{R}^M} ~~ \mathcal{L}(w)
# \end{align}
# How can we compute each of these terms?
#
# * Use `scipy.stats.multivariate_normal.logpdf` to evaluate the log prior PDF $\log \text{MVNormPDF}(\cdot)$
# * For the likelihood pdf, use this formula:
#
# $$
# \sum_{n=1}^N \log \text{BernPMF}(t_n | p_n ) = \sum_{n=1}^N t_n \log p_n + (1-t_n) \log (1 - p_n)
# $$
#
# This is translated into the code below.
def calc_sum_of_log_bern_pmf(t_N, p_N):
    '''Sum the Bernoulli log PMF over N observations.

    Args
    ----
    t_N : 1D array, shape (N,)
        Binary outcome (0 or 1) for each example.
    p_N : 1D array, shape (N,)
        Bernoulli probability parameter for each example.

    Returns
    -------
    summed_logpmf : scalar float
        Total log PMF across all N examples.
    '''
    # Clip probabilities away from exactly 0 and 1 so log() stays finite.
    clipped_p_N = np.clip(p_N, 1e-100, 1 - 1e-13)
    pos_term = np.sum(np.log(clipped_p_N)[t_N == 1])
    neg_term = np.sum(np.log(1 - clipped_p_N)[t_N == 0])
    return pos_term + neg_term
# ## Exercise 2a: Compute the objective of our minimization problem
#
# Translate the formula for $\mathcal{L}(w)$ above into concrete NumPy expressions
def calc_loss(wguess_M, phi_NM, t_N, alpha=0.1):
    ''' Compute the MAP loss objective function.
    The loss is equal to the negative log prior plus negative log likelihood
    Args
    ----
    wguess_M : 1D array, shape (M,)
        Weight parameter at which we want to evaluate the loss
    phi_NM : 2D array, shape (N,M)
        Observed input features
        Each row is a feature vector for one example
    t_N : 1D array, shape (N,)
        Observed outputs
        Each row is a output scalar value for one example
    alpha : positive scalar
        Prior precision
    Returns
    -------
    loss : scalar float
        The value of the loss function at provided w value
    '''
    M = len(wguess_M)
    # Prior: w ~ Normal(0, (1/alpha) * I_M).
    # FIX: mean was written as np.zeros_like(wguess_M * 1/alpha), which
    # evaluates to the same zeros but obscures the intent.
    log_prior_pdf = scipy.stats.multivariate_normal.logpdf(
        wguess_M, mean=np.zeros(M), cov=np.eye(M) / alpha)
    # Likelihood: t_n ~ Bern(sigmoid(w^T phi_n))
    log_lik_pdf = calc_sum_of_log_bern_pmf(t_N, sigmoid(np.dot(phi_NM, wguess_M)))
    return -1 * log_prior_pdf + -1 * log_lik_pdf
# ## Exercise 2b: Evaluate the MAP objective (aka MAP loss function) at possible w values
#
phi_NM
t_N
np.log(0.5)
# Try with all zero weights
w1_M = np.zeros(M)
calc_loss(w1_M, phi_NM, t_N, true_alpha)
# Try with all weights set to 10
w2_M = 10 * np.ones(M)
calc_loss(w2_M, phi_NM, t_N, true_alpha)
# +
# Try with all weights set to TRUE values
# TODO write code using calc_loss(...)
# -
# ## Discussion 2c: Which value of the weight vector out of the 3 tried had the "best" loss value? Does that agree with what you expect?
#
# Use what you know about how this toy dataset was generated (hint: we know which weights were used to make the true observations).
# +
# TODO discuss
# -
# ## Demo: Visualizing the MAP objective as a contour plot
#
# Step through the code below to see how we create a 2d contour plot visualization of our MAP optimization problem.
# +
# Create a 2-dim grid of possible w values
G = 51 # G possible values for intercept
w0_grid_G = np.linspace(-2, 2, G)
H = 51 # H possible values for slope
w1_grid_H = np.linspace(-2, 2, H)
w0_GH, w1_GH = np.meshgrid(w0_grid_G, w1_grid_H,)
# -
# Compute loss at each possible value in our grid
loss_GH = np.zeros((G, H))
for gg in range(G):
for hh in range(H):
cur_w_M = np.hstack([w0_GH[gg,hh], w1_GH[gg, hh]])
loss_GH[gg, hh] = calc_loss(cur_w_M, phi_NM, t_N, true_alpha)
# +
# Create a pretty contour plot over the grid of w[0], w[1], loss values
levels = np.linspace(0, 40, 51) # 50 evenly spaced levels
fig_handle, ax_handle = plt.subplots(nrows=1, ncols=1, figsize=(8,8));
ax_handle.contour(w0_GH, w1_GH, loss_GH, levels=levels, linewidths=0, colors='k')
cntrf_handle = ax_handle.contourf(w0_GH, w1_GH, loss_GH, levels=levels, cmap='RdBu_r', vmin=levels[0], vmax=levels[-1]);
cbar = plt.colorbar(cntrf_handle, ax=ax_handle)
cbar.set_label('MAP loss objective (lower is better)', fontsize=16);
cbar.set_ticks(levels[::10]);
plt.xlabel('intercept $w_1$');
plt.ylabel('slope $w_2$');
plt.gca().set_aspect('equal', 'box');
# -
# ## Exercise 2d: Visually interpret the plot above. By inspection, which intercept and slope values are optimal? What is the loss at this optimal point?
# +
# TODO interpret the plot and discuss with your group
# -
# ## Exercise 2e: Numerically, search the grid of computed loss values `loss_GH` and determine the MAP value of weight vector
# +
# TODO solve this cell
# Hint: you might find it easier to flatten each array of shape (G,H) into shape (L,) where L=G*H
loss_L = loss_GH.flatten()# new shape (G*H,)
w0_L = w0_GH.flatten() # new shape (G*H,)
w1_L = w1_GH.flatten() # new shape (G*H,)
# TODO find values of w0 (intercept) and w1 (slope) that minimize the loss
# -
# # Part 3: Gradients, Hessians, and Gradient Descent
#
# ### Gradient and Hessian formulas
#
# We saw in lecture that we can compute the gradient and Hessian as:
#
# \begin{align}
# \nabla_w \mathcal{L} &= \Phi^T ( \sigma(\Phi w) - t ) + \alpha w
# \\
# \nabla_w \nabla_w \mathcal{L} &= \Phi^T R(w) \Phi + \alpha I_M
# \end{align}
#
# where $R$ is a diagonal matrix given by
#
# $$
# R = \text{diag}( \sigma(\Phi w) \sigma(- \Phi w ) )
# $$
#
# The functions below compute the gradient and Hessian. You don't need to do anything, just inspect them to gain understanding.
def calc_R(w_M, phi_NM):
    '''Build the diagonal weighting matrix R used by the Hessian.

    R[n,n] = sigma(phi_n . w) * sigma(-phi_n . w), i.e. the variance of the
    Bernoulli prediction for example n.
    '''
    scores_N = phi_NM @ w_M
    variances_N = sigmoid(scores_N) * sigmoid(-scores_N)
    return np.diag(variances_N)
def calc_gradient_of_map_loss(w_M, phi_NM, t_N, alpha):
    ''' Calculate the gradient.
    Returns
    -------
    g_M : 1D array, shape (M,)
        Gradient vector evaluated at current weights w:
        Phi^T (sigma(Phi w) - t) + alpha * w
    '''
    proba_N = sigmoid(phi_NM @ w_M)   # predicted P(t_n = 1)
    residual_N = proba_N - t_N
    return phi_NM.T @ residual_N + alpha * w_M
def calc_hessian_of_map_loss(w_M, phi_NM, t_N, alpha):
    ''' Calculate the Hessian.
    Returns
    -------
    H_MM : 2D array, shape (M,M)
        Hessian matrix evaluated at current weights w:
        Phi^T R(w) Phi + alpha * I_M
    '''
    R_NN = calc_R(w_M, phi_NM)
    # BUGFIX: was np.eye(M), which silently depended on the notebook-global M.
    # Derive the dimensionality from the weight vector itself instead.
    M_dim = len(w_M)
    return np.dot(phi_NM.T, np.dot(R_NN, phi_NM)) + alpha * np.eye(M_dim)
# ## First-order gradient descent
#
# The code below performs 1st-order GD.
#
# While not converged, we perform the updates:
#
# $$
# w_{t+1} \gets w_t - \epsilon g( w_t )
# $$
# +
# First-order gradient descent on the MAP loss: w <- w - eps * grad(w).
max_n_steps = 100
w_M = 1.5 * np.ones(M)
step_size = 0.2 # Selected by starting at 1.0, and trying smaller values until first 5 steps made loss better
GD1_history_of_w = [w_M]
GD1_history_of_loss = [calc_loss(w_M, phi_NM, t_N, true_alpha)]
for step in range(max_n_steps):
    # Compute gradient
    g_M = calc_gradient_of_map_loss(w_M, phi_NM, t_N, true_alpha)
    # Update the weights by taking a step downhill
    w_M = w_M - step_size * g_M
    # Print out progress
    cur_loss = calc_loss(w_M, phi_NM, t_N, true_alpha)
    print("step %3d/%d loss %11.4f | gradient_norm %9.4f | intercept %9.3f | slope %9.3f" % (
        step, max_n_steps, cur_loss, np.sum(np.abs(g_M)), w_M[0], w_M[1]))
    GD1_history_of_loss.append(cur_loss)
    GD1_history_of_w.append(w_M)
    # NOTE(review): `step % 10` is truthy on 9 of every 10 steps, so the step
    # size decays almost every iteration. If "decay once every 10 steps" was
    # intended, the condition should be `step % 10 == 0` — confirm.
    if step % 10:
        step_size = 0.95 * step_size # slowly decay the step size
bestw_fromGD_M = w_M
# -
# ## Discussion 3a: Compare the GD estimate of the best weights $w$ to those found via grid search
#
# +
print("Optimal weights via grid search")
bestid = np.argmin(loss_GH.flatten())
bestw_fromgridsearch_M = np.asarray([w0_GH.flatten()[bestid], w1_GH.flatten()[bestid]])
print(bestw_fromgridsearch_M)
print("Optimal weights via 1st order gradient descent")
print(w_M)
# -
calc_loss(bestw_fromgridsearch_M, phi_NM, t_N, true_alpha)
calc_loss(bestw_fromGD_M, phi_NM, t_N, true_alpha)
# +
# TODO discuss: which is better? are they similar?
# -
# # Second order gradient descent
# +
# Second-order (Newton-style) gradient descent: w <- w - eps * H^{-1} g.
max_n_steps = 100
w_M = 1.5 * np.ones(M)
step_size = 0.15 # Selected by starting at 1.0, and trying smaller values until first 5 steps made loss better
GD2_history_of_w = [w_M]
GD2_history_of_loss = [calc_loss(w_M, phi_NM, t_N, true_alpha)]
for step in range(max_n_steps):
    g_M = calc_gradient_of_map_loss(w_M, phi_NM, t_N, true_alpha)
    H_MM = calc_hessian_of_map_loss(w_M, phi_NM, t_N, true_alpha)
    w_M = w_M - step_size * np.linalg.solve(H_MM, g_M) # solve H d = g, i.e. apply H^{-1} g without forming the inverse
    cur_loss = calc_loss(w_M, phi_NM, t_N, true_alpha)
    print("step %3d/%d loss %11.4f | gradient_norm %9.4f | intercept %9.3f | slope %9.3f" % (
        step, max_n_steps, cur_loss, np.sum(np.abs(g_M)), w_M[0], w_M[1]))
    GD2_history_of_loss.append(cur_loss)
    GD2_history_of_w.append(w_M)
    # NOTE(review): as in the 1st-order loop, `step % 10` decays the step size
    # on 9 of every 10 steps; confirm whether `== 0` was intended.
    if step % 10:
        step_size = 0.95 * step_size # slowly decay step size
# -
whistory_GD1_T2 = np.vstack(GD1_history_of_w)
whistory_GD2_T2 = np.vstack(GD2_history_of_w)
# +
# Create a pretty contour plot over the grid of w[0], w[1], loss values
levels = np.linspace(0, 40, 51) # 50 evenly spaced levels
fig_handle, ax_handle = plt.subplots(nrows=1, ncols=1, figsize=(8,8));
ax_handle.contour(w0_GH, w1_GH, loss_GH, levels=levels, linewidths=0, colors='k')
cntrf_handle = ax_handle.contourf(w0_GH, w1_GH, loss_GH, levels=levels, cmap='RdBu_r', vmin=levels[0], vmax=levels[-1]);
cbar = plt.colorbar(cntrf_handle, ax=ax_handle)
cbar.set_label('MAP loss objective (lower is better)', fontsize=16);
cbar.set_ticks(levels[::10]);
# Show the first 10 iterates of GD
ax_handle.plot(whistory_GD1_T2[:1,0], whistory_GD1_T2[:1,1], 'kx', markersize=15, label='Initial w value')
ax_handle.plot(whistory_GD1_T2[:10,0], whistory_GD1_T2[:10,1], 'ks-', label='First 10 steps of 1st-order GD')
ax_handle.plot(whistory_GD2_T2[:10,0], whistory_GD2_T2[:10,1], 'ms-', label='First 10 steps of 2st-order GD')
plt.xlabel('intercept $w_1$');
plt.ylabel('slope $w_2$');
plt.gca().set_aspect('equal', 'box');
plt.ylim([-3, 2]);
plt.legend(loc='upper left');
# -
# ## Discussion: Compare the *first step* that 1st-order GD took to the *first step* that 2nd-order GD took.
#
# * Which is a better *direction*?
# * Which ended up closer to the optimal value because of well-chosen step-length?
# ## Discussion: Compare the overall behavior of 1st and 2nd order GD.... do you see big qualitative differences?
#
# * Which one makes faster progress toward the minimum?
# * How are both sensitive to the step-size choice?
| notebooks/day09-ProbabilisticLogisticRegressionAndMAPEstimation.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# name: python3
# ---
# + [markdown] id="view-in-github" colab_type="text"
# <a href="https://colab.research.google.com/github/Some7hing0riginal/lighthouselabs-midterm1/blob/main/Mid_Term_Light_Version_v2.ipynb" target="_parent"><img src="https://colab.research.google.com/assets/colab-badge.svg" alt="Open In Colab"/></a>
# + colab={"base_uri": "https://localhost:8080/"} id="B7NtcKNLVEqM" outputId="02af4e86-9ca8-425b-b5fc-81bdad2232f3"
import numpy as np
import pandas as pd
from datetime import datetime, timezone
import time
import pytz
from sqlalchemy import create_engine
from oauth2client.client import GoogleCredentials
from google.colab import drive
from google.colab import auth
auth.authenticate_user()
import gspread
zone_mtl = pytz.timezone('America/Montreal')
date_mtl = datetime.now(zone_mtl)
date_mtl
gs_row=1
gs_col=1
gc = gspread.authorize(GoogleCredentials.get_application_default())
wb = gc.open_by_url('https://docs.google.com/spreadsheets/d/1C5CKkVvInTiP9XRtuSjjgvSDu-a7RNgwhNQtjAYrWXI/edit#gid=0')
worksheet = wb.add_worksheet(title='LOG-'+str(date_mtl), rows="100", cols="20")
drive.mount('/gdrive') # you may have to authentificate , follow the process of the browser window
#change this
# file_path = '/gdrive/MyDrive/lhl/Mid_Term/flights_2019.csv'
training_file_path ='/gdrive/MyDrive/lhl/Mid_Term/flights_18-19_OCT_NOV_DEC_RIGHT_BEFORE_TRAINING.csv'
test_file_path = '/gdrive/MyDrive/lhl/Mid_Term/flights_test_final_df.csv'
# df = pd.read_csv(file_path,header=0,skiprows=range(1,6000000),nrows=3000000)
df = pd.read_csv(training_file_path,index_col=0)#
df_test = pd.read_csv(test_file_path,index_col=0)#
filter_forDF_withDelay=['destPrecipMM','origPrecipMM','dayofweek_num','origVisibility','destVisibility','origAvgTempC','destAvgTempC','dest_airport_id','origin_airport_id','tail_num','distance',"crs_dep_time",'arr_delay']
df_test=df_test[['destPrecipMM','origPrecipMM','dayofweek_num','origVisibility','destVisibility','origAvgTempC','destAvgTempC','dest_airport_id','origin_airport_id','tail_num','distance',"crs_dep_time"]]
# + id="9s8FkmBZRSdo"
df=df[filter_forDF_withDelay]
# + colab={"base_uri": "https://localhost:8080/"} id="a3eNHTcXRgu0" outputId="513356d1-478e-4777-b2fb-a818dd7b80d2"
df.info()
# + colab={"base_uri": "https://localhost:8080/"} id="oITPQI38QbHv" outputId="9b1fd739-d439-42b5-a46f-bee44a93e5fb"
df_test.info()
# + [markdown] id="lgEHKXI_K4IU"
# # correlation
# + id="JubcIX09K0fp" colab={"base_uri": "https://localhost:8080/", "height": 729} outputId="ce55f2c0-fdf3-42d5-f061-b9dd175d9f1e"
code_block_name = "Correlation HeatMap "
dateTimeObj = datetime.now()
worksheet.update_cell(gs_row,gs_col,code_block_name)
worksheet.update_cell(gs_row,gs_col+1,'Started')
worksheet.update_cell(gs_row,gs_col+2,str(dateTimeObj))
import pandas as pd
import numpy as np
import seaborn as sns
import matplotlib.pyplot as plt
#dfFinal = df[['origwindspeedKmph','dayofweek_num','destwindspeedKmph','destPrecipMM','origPrecipMM','origVisibility','destVisibility','destWeatherCode','origWeatherCode','origAvgTempC','destAvgTempC','dest_airport_id','origin_airport_id','tail_num','distance','crs_elapsed_time',"crs_dep_time", "crs_arr_time", "fl_date", "origin", "dest", "arr_delay"]]
corrMatrix = df.corr()
# ndf = df.loc[df.max(axis=1) > 0.30, df.max(axis=0) > 0.30]
# sns.heatmap(ndf)
# plt.show()
# sn.set(rc = {'figure.figsize':(10,6)})
# sn.heatmap(corrMatrix, annot=False)
plt.figure(figsize=(30, 20))
mask = np.triu(np.ones_like(corrMatrix, dtype=np.bool))
cut_off = 0.05 # only show cells with abs(correlation) at least this value
extreme_1 = 0.65 # show with a star
extreme_2 = 0.85 # show with a second star
extreme_3 = 0.95 # show with a third star
mask |= np.abs(corrMatrix) < cut_off
corrMatrix = corrMatrix[~mask] # fill in NaN in the non-desired cells
remove_empty_rows_and_cols = True
if remove_empty_rows_and_cols:
wanted_cols = np.flatnonzero(np.count_nonzero(~mask, axis=1))
wanted_rows = np.flatnonzero(np.count_nonzero(~mask, axis=0))
corrMatrix = corrMatrix.iloc[wanted_cols, wanted_rows]
annot = [[f"{val:.4f}"
+ ('' if abs(val) < extreme_1 else '\n★') # add one star if abs(val) >= extreme_1
+ ('' if abs(val) < extreme_2 else '★') # add an extra star if abs(val) >= extreme_2
+ ('' if abs(val) < extreme_3 else '★') # add yet an extra star if abs(val) >= extreme_3
for val in row] for row in corrMatrix.to_numpy()]
heatmap = sns.heatmap(corrMatrix, vmin=-1, vmax=1, annot=annot, fmt='', cmap='BrBG')
heatmap.set_title('Correlation Heatmap', fontdict={'fontsize': 18}, pad=16)
plt.show()
dateTimeObj = datetime.now()
worksheet.update_cell(gs_row,gs_col+3,'Finished')
worksheet.update_cell(gs_row,gs_col+4,str(dateTimeObj))
gs_row += 1
# + [markdown] id="Jvsn9wT2K6Ob"
# # Distribution
#
# + colab={"base_uri": "https://localhost:8080/", "height": 609} id="7fwh87lcTVn7" outputId="f336a9ce-ffd6-4819-e8dd-1f025f65c9aa"
dfFinal
# + id="pkJlVQfCKxSV" colab={"base_uri": "https://localhost:8080/", "height": 549} outputId="f4ef1dc8-16a9-492c-9bff-926b0759ce4d"
code_block_name = "Distribution Graph "
dateTimeObj = datetime.now()
worksheet.update_cell(gs_row,gs_col,code_block_name)
worksheet.update_cell(gs_row,gs_col+1,'Started')
worksheet.update_cell(gs_row,gs_col+2,str(dateTimeObj))
#histogram to visualize different distributions.
#regular, Log, Zoom close to 0, Big zoom close to 0.
fig, (ax1, ax2, ax3,ax4,ax5) = plt.subplots(1, 5,figsize=(20, 10))
ax1.set_xlabel('time in minutes')
ax1.set_ylabel('number of')
ax1.set_title('regular')
ax1.hist(df['arr_delay'], bins=100)
ax2.set_xlabel('time in minutes')
ax2.set_ylabel('number of')
ax2.set_title('Log')
ax2.hist(np.log(df['arr_delay'].clip(1,1000)), bins=100)
# to make the model work better, need normal distribution, so think about logging what will be predicted.
#Then inverse of the log. take the exponent. np.exp. ( order of the delay, minutes, hours, days )
#np.exp(log) = arrival delay
ax3.set_xlabel('Delay in minutes')
ax3.set_ylabel('Number of Flights')
ax3.set_title('Zoom around Zero Delay')
ax3.hist(df['arr_delay'], bins=100)
ax3.set_xlim(-100, 300)
fig.suptitle('Flight Delays in minutes', fontsize=30)
ax4.set_xlabel('time in minutes')
ax4.set_ylabel('number of')
ax4.set_title('Bigger Zoom around Zero Delay')
ax4.hist(df['arr_delay'], bins=1000)
ax4.set_xlim(-50, 50)
fig.suptitle('Flight Delays in minutes', fontsize=30)
fig.show
ax5.set_xlabel('time in minutes')
ax5.set_ylabel('number of')
ax5.set_title('Bigger Zoom around Zero Delay')
ax5.hist(df['arr_delay'], bins=1000)
ax5.set_xlim(-50, 50)
fig.suptitle('Flight Delays in minutes', fontsize=30)
fig.show
dateTimeObj = datetime.now()
worksheet.update_cell(gs_row,gs_col+3,'Finished')
worksheet.update_cell(gs_row,gs_col+4,str(dateTimeObj))
gs_row += 1
# + id="LOq-yV3nVMKV"
def train_test_split(df,features_list,target,gs_row,worksheet):
    '''Split df into 70/30 train/test sets, logging progress to a spreadsheet.

    NOTE: this function deliberately shares its name with sklearn's
    train_test_split; the local import below rebinds the name inside the
    function body, so the inner call hits sklearn, not recursion.

    Args
    ----
    df : pandas DataFrame holding both features and target columns
    features_list : list of feature column names
    target : target column name(s)
    gs_row : spreadsheet row to write status into
    worksheet : gspread worksheet used for start/finish logging

    Returns
    -------
    X_train, X_test, y_train, y_test : the sklearn split (X includes an
        added 'const' intercept column from statsmodels).
    '''
    from sklearn.model_selection import train_test_split# Import train_test_split function
    gs_col =1
    code_block_name = "train_test_split"
    dateTimeObj = datetime.now()
    worksheet.update_cell(gs_row,gs_col,code_block_name)
    worksheet.update_cell(gs_row,gs_col+1,'Started')
    worksheet.update_cell(gs_row,gs_col+2,str(dateTimeObj))
    X=df[features_list] # Features
    y=df[target] # Target
    import statsmodels.api as sm
    # Add an explicit intercept column named 'const' for the OLS fit later.
    X = sm.add_constant(X)
    print(X)
    X_train, X_test, y_train, y_test = train_test_split(X, y, test_size=0.3) # 70% training and 30% test
    dateTimeObj = datetime.now()
    worksheet.update_cell(gs_row,gs_col+3,'Finished')
    worksheet.update_cell(gs_row,gs_col+4,str(dateTimeObj))
    return X_train, X_test, y_train, y_test
def train_model(X_train,y_train,gs_row,worksheet):
    '''Fit three regressors on the training data and return the last one.

    Fits, in order: statsmodels OLS (summary printed only), sklearn
    LinearRegression (coefficients/score printed only), and finally a
    RandomForestRegressor, which is the model actually returned. Each stage
    logs a status row to the spreadsheet.

    Args
    ----
    X_train, y_train : training features and target (array-like)
    gs_row : spreadsheet row to write status into
    worksheet : gspread worksheet used for start/finish logging

    Returns
    -------
    model : the fitted RandomForestRegressor (the OLS and LinearRegression
        fits are for inspection only and are discarded).
    '''
    gs_col =1
    code_block_name = "train_model sm.OLS(y_train,X_train)"
    dateTimeObj = datetime.now()
    worksheet.update_cell(gs_row,gs_col,code_block_name)
    worksheet.update_cell(gs_row,gs_col+1,'Started')
    worksheet.update_cell(gs_row,gs_col+2,str(dateTimeObj))
    import statsmodels.api as sm
    print("lin_reg = sm.OLS(X_train,y_train)")
    # Cast to float: OLS requires numeric dtype throughout.
    lin_reg = sm.OLS(y_train.astype(float),X_train.astype(float)) #OLS is the most popular.
    print("model = lin_reg.fit()")
    model = lin_reg.fit()
    print("print_model = model.summary()")
    print_model = model.summary()
    print(print_model)
    from sklearn.linear_model import LinearRegression
    gs_col =1
    code_block_name = "train_model regressor = LinearRegression()"
    dateTimeObj = datetime.now()
    worksheet.update_cell(gs_row,gs_col,code_block_name)
    worksheet.update_cell(gs_row,gs_col+1,'Started')
    worksheet.update_cell(gs_row,gs_col+2,str(dateTimeObj))
    regressor = LinearRegression()
    regressor.fit(X_train, y_train)
    print("regressor.coef_")
    print(regressor.coef_)
    print(regressor.score(X_train,y_train))
    gs_col =1
    code_block_name = "train_model = RandomForestRegressor()"
    dateTimeObj = datetime.now()
    worksheet.update_cell(gs_row,gs_col,code_block_name)
    worksheet.update_cell(gs_row,gs_col+1,'Started')
    worksheet.update_cell(gs_row,gs_col+2,str(dateTimeObj))
    from sklearn.ensemble import RandomForestRegressor
    # This rebinds `model` — the OLS results object above is discarded here.
    model = RandomForestRegressor()
    model.fit(X_train, y_train)
    dateTimeObj = datetime.now()
    worksheet.update_cell(gs_row,gs_col+3,'Finished')
    worksheet.update_cell(gs_row,gs_col+4,str(dateTimeObj))
    return model
def predict_results(model,X_train,gs_row,worksheet):
    '''Run model.predict on the given features, logging progress to the sheet.

    Writes "predict_results" / 'Started' / timestamp to the given row before
    predicting, and 'Finished' / timestamp after, then returns the predictions.
    '''
    col = 1
    worksheet.update_cell(gs_row, col, "predict_results")
    worksheet.update_cell(gs_row, col + 1, 'Started')
    worksheet.update_cell(gs_row, col + 2, str(datetime.now()))
    predictions = model.predict(X_train)
    worksheet.update_cell(gs_row, col + 3, 'Finished')
    worksheet.update_cell(gs_row, col + 4, str(datetime.now()))
    return predictions
def score_this(model,y_train,y_pred,features_list,gs_row,worksheet):
    '''Score predictions, log metrics to the sheet, and plot feature importances.

    Args
    ----
    model : fitted random-forest regressor (reads .feature_importances_,
        .estimators_, .get_params())
    y_train : true target values
    y_pred : predictions aligned with y_train
    features_list : feature column names used in the fit; a 'const' entry is
        temporarily prepended (to match the statsmodels intercept column)
        and popped again before returning
    gs_row : spreadsheet row to write metrics into
    worksheet : gspread worksheet used for logging

    Returns
    -------
    (ModelAccuracy, Mean_Absolute_Error, Mean_Squared_Error,
     Root_Mean_Squared_Error)
    '''
    gs_col =1
    #Import scikit-learn metrics module for accuracy calculation
    from sklearn.metrics import r2_score
    # Model Accuracy, how often is the classifier correct?
    ModelAccuracy =("Accuracy:",r2_score(y_train, y_pred))
    #Adj_r2 = 1 - (1-r2_score(y_test, y_pred)) * (len(y)-1)/(len(y)-X.shape[1]-1)
    import time
    import numpy as np
    from sklearn import metrics
    Mean_Absolute_Error = metrics.mean_absolute_error(y_train, y_pred)
    Mean_Squared_Error = metrics.mean_squared_error(y_train, y_pred)
    Root_Mean_Squared_Error = np.sqrt(metrics.mean_squared_error(y_train, y_pred))
    print("Mean_Absolute_Error")
    print(Mean_Absolute_Error)
    print("Mean_Squared_Error")
    # BUGFIX: previously printed Mean_Absolute_Error under this header.
    print(Mean_Squared_Error)
    print("Root_Mean_Squared_Error")
    print(Root_Mean_Squared_Error)
    worksheet.update_cell(gs_row,gs_col,str(ModelAccuracy))
    worksheet.update_cell(gs_row+1,gs_col,str(Mean_Absolute_Error))
    worksheet.update_cell(gs_row+2,gs_col,str(Mean_Squared_Error))
    worksheet.update_cell(gs_row+3,gs_col,str(Root_Mean_Squared_Error))
    # NOTE(review): `df` is a notebook-global here, not a parameter —
    # presumably the full training frame; confirm this is the intended count.
    worksheet.update_cell(gs_row+4,gs_col,str(len(df)))
    worksheet.update_cell(gs_row+5,gs_col,str(list(features_list)))
    worksheet.update_cell(gs_row+6,gs_col,str(model.get_params()))
    # Evaluating the Algorithm
    start_time = time.time()
    importances = model.feature_importances_
    # Per-feature spread of importance across the forest's trees.
    std = np.std([
        tree.feature_importances_ for tree in model.estimators_], axis=0)
    elapsed_time = time.time() - start_time
    print(f"Elapsed time to compute the importances: "
          f"{elapsed_time:.3f} seconds")
    import pandas as pd
    print("features_list.insert(0, 'const')")
    # Align the name list with the model's features (which include the
    # statsmodels 'const' intercept column); restored by pop() below.
    features_list.insert(0, 'const')
    print("NEXT LINE IS forest_importances = pd.Series(importances, index=features_list)")
    print(" this number of features " + str(len(features_list)))
    forest_importances = pd.Series(importances, index=features_list)
    print("features_list.pop(0)")
    features_list.pop(0)
    fig, ax = plt.subplots()
    forest_importances.plot.bar(yerr=std, ax=ax)
    ax.set_title("Feature importances using MDI")
    ax.set_ylabel("Mean decrease in impurity")
    fig.tight_layout()
    plt.show()
    from sklearn import metrics
    #Simple calculation of Adj. R2
    from datetime import datetime
    return ModelAccuracy,Mean_Absolute_Error,Mean_Squared_Error,Root_Mean_Squared_Error
# def filter_this(df,arr_delay_cutoff,gs_row,worksheet):
# cancelled_filter = (
# (df['cancelled'] !=0) |
# (df['diverted'] !=0 ) |
# (df['arr_delay'] >arr_delay_cutoff )
# )
# df.drop(df[cancelled_filter].index,inplace=True)
# return df
# + id="MRogMINhVSqX" colab={"base_uri": "https://localhost:8080/"} outputId="168c730a-35f9-4e31-8805-0fc3ab08ff92"
dateTimeObj = datetime.now()
import matplotlib.pyplot as plt
import copy
features_list_o=filter_forDF_withDelay
# features_list_o=['dest_airport_id','origin_airport_id','c_crs_arr_hour','c_crs_dep_hour','dayofweek_num','haul','origwindspeedKmph','destwindspeedKmph','destPrecipMM','origPrecipMM','origVisibility','destVisibility','destWeatherCode','origWeatherCode','origAvgTempC','tail_num']
#features_list_o=(['c_crs_arr_hour','c_crs_dep_hour','origwindspeedKmph'])#,'destwindspeedKmph','destPrecipMM','origPrecipMM','origVisibility','destVisibility','dayofweek_num','destWeatherCode','origWeatherCode','tail_num','month'])
target=['arr_delay']
arr_delay_cutoff_list=150 # The loop will loop thru the different delay types.
months_list=[12]
wb = gc.open_by_url('https://docs.google.com/spreadsheets/d/1D6jakWCOJjWmntTMr8erIS8zCYGP_WYNPOjRWRgHLCQ/edit#gid=0')
#check ça df[~df.country.isin(countries_to_keep)]
# df_filter = (
# df['month']!=12 #enter the month you want to keep ( applies to both years )
# )
# df.drop(df[df_filter].index,inplace=True)
zone_mtl = pytz.timezone('America/Montreal')
date_mtl = datetime.now(zone_mtl)
gc = gspread.authorize(GoogleCredentials.get_application_default())
wb = gc.open_by_url('https://docs.google.com/spreadsheets/d/1D6jakWCOJjWmntTMr8erIS8zCYGP_WYNPOjRWRgHLCQ/edit#gid=0')
tabName = 'Jo-Training-'# Change the name so we can figure out what is what.
worksheet = wb.add_worksheet(title=tabName+str(arr_delay_cutoff_list)+" TS "+str(date_mtl), rows="1000", cols="20")
gs_row = 1
gs_col = 1
print(arr_delay_cutoff_list)
features_list = copy.deepcopy(features_list_o)
# cancelled_filter = (
# # (df['cancelled'] !=0) |
# # (df['diverted'] !=0) |
# # (df['arr_delay'] >arr_delay_cutoff_list)
# )
# df.drop(df[cancelled_filter].index,inplace=True)
# df = filter_this(df,arr_delay_cutoff_list[i],gs_row,worksheet)
dflen=len(df)
worksheet.update_cell(gs_row,gs_col,
'Delay >' + str(arr_delay_cutoff_list))
gs_row += 1
worksheet.update_cell(gs_row,gs_col,
'Month = ' +str(months_list))
gs_row += 1
worksheet.update_cell(gs_row,gs_col,
'DF number of rows ' + str(dflen))
gs_row += 1
wb = gc.open_by_url('https://docs.google.com/spreadsheets/d/1D6jakWCOJjWmntTMr8erIS8zCYGP_WYNPOjRWRgHLCQ/edit#gid=0')
gs_row += 1
worksheet.update_cell(gs_row,gs_col,
'Number of features ' + str(len(features_list)))
gs_row += 1
gc = gspread.authorize(GoogleCredentials.get_application_default())
X_train, X_test, y_train, y_test = train_test_split(df,features_list,target,gs_row,worksheet)
gs_row += 1
model = train_model(X_train,y_train,gs_row,worksheet)
gs_row += 1
y_pred = predict_results(model,df_test,gs_row,worksheet)
save_to = '/gdrive/MyDrive/lhl/Mid_Term/y_pred.csv'
dfFinal.to_csv(save_to)
import pickle
model_save_name='Testing Model.sav'
with open(f"/gdrive/MyDrive/lhl/Mid_Term/{model_save_name}", 'wb') as f:
pickle.dump(model, f)
gs_row += 1
ModelAccuracy = score_this(model,y_train,y_pred,features_list,gs_row,worksheet)
gs_row += 8
import pickle
model_save_name='Testing Model.sav'
with open(f"/gdrive/MyDrive/lhl/Mid_Term/{model_save_name}", 'wb') as f:
pickle.dump(model, f)
| notebooks/Mid_Term_Light_Version_v2.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python [conda root]
# language: python
# name: conda-root-py
# ---
# # Plotly
#
# ---
# **Overview.** We introduce and apply a new and exciting graphics package [plotly](https://plot.ly/python/). We show how we can leverage our knowledge of Matplotlib to jumpstart our usage of plotly. We then show how to access some of [plotly's unique features](https://plot.ly/python/) to do things that are difficult or impossible with our knowledge of matplotlib.
#
# **Outline**
#
# - [Loading data](#data): `pd.read_html` example to get latitude and longitude coordinates for country capitals
# - [Matplotlylib](#matplotlylib): converting a matplotlib figure to a plotly figure
# - [Plotly api](#api): covers the basics of plotly's own api
# - [Maps](#maps): building maps with plotly
#
#
# **Note: requires internet access to run.**
#
# This Jupyter notebook was created by <NAME>, <NAME>, and <NAME> for the NYU Stern course [Data Bootcamp](http://databootcamp.nyuecon.com/).
#
# ---
# ## Reminders
#
# * Packages. Collections of tools that extend Python's capabilities. We add them with `import` statements.
# * `conda` and `pip`: package managers for python. Install new packages using `conda install package_name` or `pip3 install package name`.
#
# We will need to have the plotly python package installed. To do this enter the
# following from the command line (command prompt on windows, terminal on mac):
#
# ```
# pip install plotly --upgrade
# pip install cufflinks
# conda install -c anaconda pandas-datareader=0.2.1
# conda install -c anaconda html5lib=0.999
# conda install -c anaconda lxml=3.7.3
# ```
#
# Once you've done that, come back to this notebook and run the following cell to
# make sure plotly is installed properly.
# +
import numpy as np # foundation for Pandas
import pandas as pd # data package
from pandas_datareader import wb, data as web # worldbank data
import html5lib
import matplotlib.pyplot as plt # graphics module
import datetime as dt # date and time module
import seaborn.apionly as sns # fancy matplotlib graphics (no styling)
# plotly imports
import plotly # just to print version and init notebook
from plotly.offline import iplot, iplot_mpl # plotting functions
import plotly.graph_objs as go # ditto
# these lines make our graphics show up in the notebook
# %matplotlib inline
plotly.offline.init_notebook_mode(connected=True)
import cufflinks as cf # gives us df.iplot that feels like df.plot
cf.set_config_file(offline=True, offline_show_link=False)
# check versions (overkill, but why not?)
print('Pandas version: ', pd.__version__)
print('Plotly version: ', plotly.__version__)
print('Today: ', dt.date.today())
# -
# ## Data Sources <a id=data></a>
#
# Before we get too far, we'll need some data. Let's get some now.
#
# First we will download national data from the World Bank for all countries in Europe. In order to put this data on a map we will need to have either the 3 letter ISO code for the country or latitude and longitude coordinates. We'll grab both here. This will be a little sophisticated, so bear with us.
#
# ### Latitude and Longitude data
#
# We show these steps here so that you can re-use this code if you want to do something similar in the future.
#
# * [Latitude and longitude coordinates](http://www.csgnetwork.com/llinfotable.html): `http://www.csgnetwork.com/llinfotable.html`
#
# * use `pd.read_html`: Read HTML tables into a ``list`` of ``DataFrame`` objects.
# - first arg: A URL, a file-like object, or **raw string containing HTML**
# - attrs : dict or None, optional
# * This is a dictionary of attributes that you can pass to use to identify the table in the HTML. These attributes must be [valid](https://www.w3.org/TR/REC-html40/struct/tables.html#h-11.2) HTML table attributes to work correctly. For example, `attrs = {'id': 'table'}` is a valid attribute dictionary because the 'id' HTML tag attribute is a valid HTML attribute for *any* HTML tag
# - Right click + Inspect
#
# read a list of latitude and longitude coordinates for
# country capitals
# Scrape the capitals table from csgnetwork; the attrs dict identifies the
# one HTML table we want, and [0] takes the first (only) match.
lat_lon = pd.read_html("http://www.csgnetwork.com/llinfotable.html", header=0,
                       attrs={"align": "center", "cellpadding": 5, "bgcolor": "#FFFFFF"})[0]
lat_lon.head()
lat_lon.dtypes
# Step-by-step exploration of the string-to-degrees conversion that
# clean_latlon (next cell) performs in one pass.
lat_lon['Latitude'].str.split("°")
lat_lon['Latitude'].str.split("°").str.get(1)
#lat_lon['Latitude'].str.split("°").str.get(1).str.split("'")
#lat_lon['Latitude'].str.split("°").str.get(1).str.split("'").str[0].astype(float)
#lat_lon['Latitude'].str.split("°").str.get(1).str.split("'").str[0].astype(float)/60
#(lat_lon['Latitude'].str.split("°").str.get(1).str.split("'").str[0].astype(float)/60).astype(str).str.lstrip('0')
# If the RHS is S, set the sign negative
#lat_lon['Latitude'].str.split("°").str.get(1).str.split("'").str[1] == 'S'
(lat_lon['Latitude'].str.split("°").str.get(1).str.split("'").str[1] == 'S').replace({True: '-', False: ""})
# +
# clean up so lat and long are numeric in degrees east and degrees north
def clean_latlon(series, to_negate):
    """Convert a Series of DD° MM' H coordinate strings to signed decimal degrees.

    Degrees and minutes are combined as DD + MM/60; the value is negated when
    the hemisphere letter (the text after the minutes mark) equals *to_negate*
    ("S" for latitudes, "W" for longitudes). Returns a float Series.
    """
    # Split "DD° MM' H" into the degree text and the "MM' H" remainder.
    deg_part = series.str.split("°")
    min_part = deg_part.str.get(1).str.split("'")
    # Fractional minutes rendered as a string (leading zero stripped) so it
    # can be glued onto the integer degree text, e.g. "34" + ".51666...".
    frac = (min_part.str.get(0).astype(float) / 60).astype(str).str.lstrip('0')
    magnitude = deg_part.str.get(0) + frac
    # Prefix "-" wherever the hemisphere letter matches to_negate.
    sign = (min_part.str.get(1) == to_negate).replace({True: "-", False: ""})
    return (sign.str[:] + magnitude.str[:]).astype(float)
# Apply the conversion: South latitudes and West longitudes become negative.
lat_lon["Latitude"] = clean_latlon(lat_lon["Latitude"], "S")
lat_lon["Longitude"] = clean_latlon(lat_lon["Longitude"], "W")
# Capital city names are not needed for the maps below.
lat_lon = lat_lon.drop("Capital", axis=1)
#lat_lon = lat_lon.set_index("Country")
lat_lon.head()
# -
# ### ISO codes
#
# Access the ISO codes from [this website]('https://unstats.un.org/unsd/methodology/m49/')
#
# `'https://unstats.un.org/unsd/methodology/m49/'`
# +
#import ssl
#ssl._create_default_https_context = ssl._create_unverified_context
# -
import requests
from bs4 import BeautifulSoup
# dataframe of country names and iso codes
url = 'https://unstats.un.org/unsd/methodology/m49/'
# Fetch the UN M49 page ourselves (pd.read_html on the URL directly is
# unreliable here) and parse it with BeautifulSoup.
iso_raw = requests.get(url)
iso_soup = BeautifulSoup(iso_raw.content, 'html.parser')
iso_soup.find_all('table')[0]
# Efficient way of scraping: if the html contains 'table' tags, we can pass it straight to 'pd.read_html' (sometimes it doesn't work with the url).
# +
# #pd.read_html?
# -
# read_html returns a list of DataFrames; the second call keeps only the first.
iso = pd.read_html(str(iso_soup.find_all('table')[0]), header=0)
print(iso)
iso = pd.read_html(str(iso_soup.find_all('table')[0]), header=0)[0]
iso.shape
# Normalize column names so later merges can join on "Country".
iso = iso.rename(columns={"ISO-alpha3 code": "ISO", "Country or Area": "Country"})
iso = iso.drop("M49 code", axis=1)
#iso = iso.set_index("Country")
iso.head()
iso.tail()
# ### Publications (scientific and technical journal articles)
# select 44 european countries
europe = ["Albania", "Andorra", "Armenia", "Austria", "Azerbaijan", "Belarus",
          "Belgium", "Bosnia and Herzegovina", "Bulgaria", "Croatia", "Cyprus",
          "Czech Republic", "Denmark", "Estonia", "Finland", "France", "Georgia",
          "Germany", "Greece", "Hungary", "Iceland", "Ireland", "Italy",
          "Kazakhstan", "Kosovo", "Latvia", "Liechtenstein", "Lithuania",
          "Luxembourg", "Macedonia", "Malta", "Moldova", "Monaco", "Montenegro",
          "Netherlands", "Norway", "Poland", "Portugal", "Romania", "Russia",
          "San Marino", "Serbia", "Slovakia", "Slovenia", "Spain", "Sweden",
          "Switzerland", "Turkey", "Ukraine", "United Kingdom", "Vatican City"]
iso[iso['Country'].isin(europe)]
# +
def euro_wb_data(indicators, year=2013): # get data from worldbank
    """Download World Bank `indicators` for the European countries above.

    Uses the module-level `iso` and `europe` objects; returns a DataFrame
    indexed by country with a "year" column, rows with missing data dropped.
    """
    iso_europe = iso[iso['Country'].isin(europe)]
    # IP.JRN.ARTC.SC is "scientific and technical journal articles"
    # NOTE: visit the world bank website to pick a different subject if you'd like
    # link: http://data.worldbank.org/indicator
    df = wb.download(country=iso_europe["ISO"], indicator=indicators,
                     start=year, end=year)
    df = df.reset_index(level="year")
    df.index.name = "Country"
    # some countries didn't have data. Drop them now
    df = df.dropna()
    return df
papers = euro_wb_data(["IP.JRN.ARTC.SC"])
papers.rename(columns={"IP.JRN.ARTC.SC": "publications"}, inplace=True)
papers = papers.reset_index()
papers.head()
# -
# ### Merge
# Left-join the ISO codes and capital coordinates onto the publications data;
# rows for countries missing from either table get NaNs.
papers = pd.merge(papers, iso, on = 'Country', how='left')
papers.head()
papers.shape
papers = pd.merge(papers, lat_lon, on = 'Country', how='left')
papers.head()
papers.shape
papers = papers.set_index('Country')
papers.head()
# ## Other data sources
# +
# Earnings by school and gender. Source plotly docs. Real source, unknown
url = "https://raw.githubusercontent.com/plotly/datasets/master/school_earnings.csv"
earnings = pd.read_csv(url)
print(earnings.head())
earnings = earnings.set_index("School")
earnings = earnings.sort_values("Women")
print("\n\nAfter set_index and sort_values:\n")
print(earnings.head())
# -
# tips at restaurants in NYC. Source unknown, but classic dataset
tips = sns.load_dataset("tips")
tips.head()
# info on titanic passengers. Source unknown.
titanic = sns.load_dataset("titanic")
titanic.head()
# ## Background
#
# Plotly is a **javascript based plotting library**. Plotly leverages industry grade
# javascript technologies to provide great flexibility and good performance.
#
#
# Being a javascript library, plotly graphics are **inherently interactive** meant to be
# viewed in a webbrowser. The good news is that **we can embed our interactive
# plots in any website:** Jupyter notebooks, blog posts, etc. The *great* news is
# that we don't have to write any javascript ourselves!
#
# The plotly project was started about five years ago. Over that time, plotly has
# transitioned between three phases:
#
# 1. Online only mode: plotly started as a web service where you uploaded your
# data and constructed plots on their website.
# 2. Online plotting mode: the next phase was to allow you to build plots from
# your favorite programming language (Python!), but the plots were actually
# created on their servers and you were given a link to view the plot on their
# website.
# 3. Offline mode: You can now construct plotly graphics 100% offline. While all
# three modes still exist, we will use the purely offline mode in our notes.
# ## Matplotlylib <a id=matplotlylib></a>
#
# As a warmup, let's utilize our expertise of Matplotlib to quickly generate some basic plotly graphics.
#
# The main steps in this process are:
#
# 1. Construct a matplotlib figure as we normally do.
# 2. Pass the `Figure` object (usually named `fig` in our examples) to the function `iplot_mpl`.
#
# That's it!
#
# **Disclaimer**: the functions that convert matplotlib figures to plotly figures are not perfect. We'll see some issues below, but will show how to build the plots using plotly's API so they look as we expect.
#
# We'll start by looking at some examples from the seaborn documentation. The actual figures are not important here. We are mostly concerned with how well matplotlylib can take a matplotlib figure and construct a plotly figure.
# Build an ordinary seaborn/matplotlib figure, then hand it to plotly's
# converter to get an interactive version.
ax = sns.swarmplot(x="day", y="total_bill", data=tips)
fig_mpl = ax.get_figure()
# Now let's convert our Matplotlib figure `fig_mpl` into a plotly figure named `fig_py`. To do this we will use the function `iplot_mpl` as follows:
iplot_mpl(fig_mpl)
# For this example we see that the converter did a decent job, though it didn't quite get the xlabels correct
# Let's try another example
out = sns.pointplot(x="class", y="survived", hue="sex", data=titanic,
                    palette={"male": "g", "female": "m"},
                    markers=["^", "o"], linestyles=["-", "--"]);
iplot_mpl(out.get_figure())
# What worked well in this conversion? What didn't work?
# Let's do one more example using the college graduate data
fig_mpl, ax = plt.subplots(figsize=(6, 10))
# Two overlaid horizontal bar series on the same axes.
earnings.plot.barh(ax=ax, y="Men", color="Blue")
earnings.plot.barh(ax=ax, y="Women", color="Pink")
iplot_mpl(fig_mpl)
# What did and didn't work here?
#
# Below we'll recreate this same figure using plotly's api and overcome these issues
# ## Plotly API <a id=api></a>
#
# Let's now consider how to use plotly's own API to construct plots instead of
# building the graphics through matplotlib.
#
# Plotly has over 20 core chart types and many more can be created by combining one or more chart types in the same figure. We don't have time to cover all of them here, but please check out the [documentation](https://plot.ly/python/).
# Plotly has a purely **declarative** API. This means that we describe all the
# features we want in our figure at once, without worrying about which functions
# to call in what order.
#
# The plotly can achieve this is by fully describing the plot in a data format called JSON.
# For our purposes we can think of JSON as dictionaries, where values can be of any type,
# including other dictionaries.
# Plotly figures are composed of two things:
#
# 1. A list of **trace**s that describe the data that will be plotted and how it should appear
# 2. A **layout** that describes features of the overall plot. Things like ticks, labels, titles, fonts, margins, etc.
#
# The `trace`s describe the data that should be plotted as well as how it should be displayed. Here's an example of a trace defining a scatter plot:
#
# ```python
# trace = dict(type="scatter", # trace type
# x=[1, 2, 3], # x data
# y=[1, 4, 9], # y data
# name="Squares" # legend label
# )
# ```
#
# In this example, `x`, `y`, `name` and `marker` are called the **attributes** of the trace. All traces have a `type` attribute that describes the type of chart to generate for a particular piece of data. Here we chose `scatter`, which is what plotly calls scatter plots or line plots.
#
# An example of a `layout` is
#
# ```python
# l = dict(title="Penguins food", # plot title
# yaxis=dict(title="Quantity (%)", # yaxis label
# range=(0, 1) # set limits for y axis
# )
# ```
#
# Notice that the value associated with `yaxis` had type `dict`. This allowed us to control features of the yaxis.
#
# For an overwhelmingly comprehensive overview of all trace types and their associated attributes (everything plotly can do) see the chart attribute [reference](https://plot.ly/python/reference/) in the plotly python documentation
# After we have defined one or more traces and a layout, we build the plotly figure using the function `plotly.graph_objs.Figure`. The imports up top allow us to refer to this function as `go.Figure`. This is how we call the function:
#
# ```python
# fig = go.Figure(data=D, layout=L)
# ```
#
# where `D` is a list of **traces** and `L` describes the **layout**.
# Finally, the last thing we need to know how to do is display the figure. In the notebook we will use the function `plotly.offline.iplot`, which we imported directly as `iplot`. To display the figure above we would do
#
# ```python
# iplot(fig)
# ```
# To get a feel for what this looks like, let's revisit the horizontal bar chart using the college grad earnings data.
#
# Here's how we might build that figure using plotly's API
# the long way. Construct all the dicts by hand
# One bar trace per gender; both share the school index on the y axis.
men = dict(type="bar", # trace type
           orientation="h", # make bars horizontal
           name="Men", # legend entry
           x=earnings["Men"], # x data
           y=earnings.index, # y data
           marker={"color": "Blue"} # blue bars
           )
women = dict(type="bar", # trace type
             orientation="h", # horizontal bars
             name="Women", # legend entry
             x=earnings["Women"], # x data
             y=earnings.index, # y data
             marker={"color": "Pink"} # pink bars
             )
# Shared layout -- reused by the dumbbell plot in the next cell as well.
layout = dict(width=650, height=750, # plot width/height
              yaxis={"title": "School"}, # yaxis label
              title="Gender earnings disparity", # title
              xaxis={"title": "Annual Salary (thousands)"} # xaxis label
              )
iplot(go.Figure(data=[men, women], layout=layout))
# **Exercise**: now generate a similar plot using the `df.iplot` method. (*Hint* you can reuse the `layout` object from above)
earnings[['Men', 'Women']].iplot(kind='barh', layout=layout)
# **Example: Dumbell plot**. The striking fact in the data is that there is a gap between earnings of men and women. To highlight that, here's another way we might visualize the same data:
# +
# Dot traces for the dumbbell plot: one marker per school per gender.
men2 = dict(type="scatter",
            name="Men",
            mode="markers", # draw dots
            x=earnings["Men"], # x data
            y=earnings.index, # y data
            marker={"color": "Blue", "size": 12} # dot color/size
            )
women2 = dict(type="scatter", name="Women", mode="markers",
              x=earnings["Women"], y=earnings.index,
              marker={"color": "Pink", "size": 12})
def draw_line(row):
    """Build a gray connector trace spanning Women->Men earnings for one school.

    `row.name` (the school label) supplies both the flat y coordinate and the
    trace name; the trace is hidden from the legend to avoid clutter.
    """
    school = row.name
    return {
        "type": "scatter",                # trace type
        "x": [row["Women"], row["Men"]],  # endpoints on the salary axis
        "y": [school, school],            # flat line at this school's row
        "mode": "lines",                  # draw a line, not markers
        "name": school,                   # trace name (shows on hover)
        "showlegend": False,              # no legend entry
        "line": {"color": "gray"},        # subdued connector color
    }
# One connector line per school (apply over rows returns a Series of dicts).
lines = list(earnings.apply(draw_line, axis=1))
# use + for two lists
data = [men2, women2] + lines
# build and display the figure
fig = go.Figure(data=data, layout=layout)
iplot(fig)
# -
# **Exercise**: Look at the [figure attribute reference](https://plot.ly/python/reference/) and figure out how to remove the grid lines from the figure above. First remove the vertical ones, then horizontal, then both. (*Hint:* Look for an attribute on the `xaxis` and `yaxis` of the layout) (*Hint 2* you can get the layout by doing `fig.layout`)
fig.layout
# +
# Turn off both grids and redraw with the modified layout.
layout = fig.layout
layout['xaxis']['showgrid'] = False
layout['yaxis']['showgrid'] = False
fig = go.Figure(data=data, layout=layout)
iplot(fig)
# -
# ## Maps <a id=maps></a>
#
# We will finish this notebook by looking at some new things we can now do because we are using plotly.
# There are two map-based traces in plotly:
#
# - `scattergeo`: this allows you to draw lines or dots on a map
# - `choropleth`: this allows you to fill regions with different colors
#
# There is also the `geo` layout attribute. We'll look
# +
# first create layout/marker objects we can re-use in both plots
# "scope" restricts the map to Europe; resolution 50 gives finer coastlines.
layout = dict(geo={"scope": "europe", "resolution": 50},
              width=750, height=550)
# Marker size/color both encode publication counts (size scaled down so
# dots stay readable); the colorbar labels the encoding.
marker = {"color": papers["publications"],
          "size": papers["publications"]/5000,
          "colorscale": "Reds",
          "colorbar": {"title": "# of papers"}}
# -
# #### `scattergeo` dots
#
# We'll look at the `scattergeo` trace type first. Suppose we want to draw dots on the map. There are two possible sets of trace attributes we can work with:
#
# 1. Set `lat` and `lon` each to a list that specify the latitude and longitude for each point respectively
# 2. Set the `locations` to be one of `"ISO-3"`, `"USA-states"`, or `"country names"` and then set `location` to be a valid member of that mode.
#
# We can then set any other attributes
#
# Let's see an example of each version:
# +
# using location mode
# Plotly resolves each 3-letter ISO code to a position on the map itself.
trace = dict(type="scattergeo", # trace type
             mode="markers", # draw points
             locations=papers["ISO"], # use ISO code
             marker=marker # marker settings (size, color, ...)
             )
iplot(go.Figure(data=[trace], layout=layout), link_text="")
# +
# using lat/lon mode
# Same picture, but positioned by the capital-city coordinates we scraped.
trace = dict(type="scattergeo", # trace type
             mode="markers", # draw dots
             lat=papers["Latitude"], # latitude coordinate
             lon=papers["Longitude"], # longitude coordinate
             marker=marker # marker settings (color, size...)
             )
iplot(go.Figure(data=[trace], layout=layout), link_text="")
# -
# #### `scattergeo` lines
#
# Now suppose we want to draw lines from Italy to France, Germany, and Spain.
#
# We can do this using `scattergeo` traces, where the lat and lon attributes on each trace are of length 2. The point will be drawn from `(lat[0], lon[0])` to `(lat[1], lon[1])`:
# Boolean-mask lookup of Italy's latitude; .iloc[0] extracts the scalar.
papers["Latitude"][papers.ISO == "ITA"]
papers["Latitude"][papers.ISO == "ITA"].iloc[0]
# +
def get_lat_lon_for(df, iso):
    """Return (latitude, longitude) floats for the first row matching `iso`.

    Raises IndexError if no row of df["ISO"] equals `iso`.
    """
    match = df["ISO"] == iso
    first_lat = df["Latitude"][match].iloc[0]
    first_lon = df["Longitude"][match].iloc[0]
    return float(first_lat), float(first_lon)
# Draw one great-circle segment from Italy to each of three destinations:
# each trace carries exactly two (lat, lon) points.
italy_lat, italy_lon = get_lat_lon_for(papers, "ITA")
traces = []
for country in ["FRA", "ESP", "DEU"]:
    lat, lon = get_lat_lon_for(papers, country)
    trace = dict(type="scattergeo", # trace type
                 mode="lines", # draw lines
                 lat=[italy_lat, lat], # latitude coordinates
                 lon=[italy_lon, lon], # longitude coordinates
                 line={"width": 4.0}, # thick lines
                 name="ITA to {}".format(country) # legend entry
                 )
    traces.append(trace)
iplot(go.Figure(data=traces, layout=layout))
# -
# ### Choropleth
#
# The other type of map plotly can create is called a choropleth map. Here we have each region filled in with a solid color.
#
# Let's use our country data to see what this looks like
# +
# Filled-country map: z drives the fill color, text customizes hover labels.
trace = dict(type="choropleth",
             locations=papers["ISO"], # use ISO names
             z=papers["publications"], # defines the color
             colorscale="Viridis", # change palette
             text=papers.index, # change text on hover
             )
# reuse the same layout
iplot(go.Figure(data=[trace], layout=layout), link_text="")
# -
# ### USA States
#
# This example was taken directly from the plotly python documentation. See [here](https://plot.ly/python/choropleth-maps/#united-states-choropleth-map)
# +
# US agriculture-exports choropleth, adapted from the plotly docs.
# NOTE(review): this rebinds `df`, clobbering any earlier `df` in the session.
df = pd.read_csv('https://raw.githubusercontent.com/plotly/datasets/master/2011_us_ag_exports.csv')
# Cast every column to str so the hover-text concatenation below works.
for col in df.columns:
    df[col] = df[col].astype(str)
# Custom purple color scale as (fraction, color) stops.
scl = [[0.0, 'rgb(242,240,247)'],[0.2, 'rgb(218,218,235)'],[0.4, 'rgb(188,189,220)'],\
       [0.6, 'rgb(158,154,200)'],[0.8, 'rgb(117,107,177)'],[1.0, 'rgb(84,39,143)']]
# Multi-line hover text per state (plotly uses <br> for line breaks).
df['text'] = df['state'] + '<br>' +\
    'Beef '+df['beef']+' Dairy '+df['dairy']+'<br>'+\
    'Fruits '+df['total fruits']+' Veggies ' + df['total veggies']+'<br>'+\
    'Wheat '+df['wheat']+' Corn '+df['corn']
data = [ dict(
        type='choropleth',
        colorscale = scl,
        autocolorscale = False,
        locations = df['code'],
        z = df['total exports'].astype(float),
        locationmode = 'USA-states',
        text = df['text'],
        marker = dict(
            line = dict (
                color = 'rgb(255,255,255)',
                width = 2
            ) ),
        colorbar = dict(
            title = "Millions USD")
        ) ]
layout = dict(
        title = '2011 US Agriculture Exports by State<br>(Hover for breakdown)',
        geo = dict(
            scope='usa',
            projection=dict( type='albers usa' ),
            showlakes = True,
            lakecolor = 'rgb(255, 255, 255)'),
             )
iplot(go.Figure(data=data, layout=layout), link_text="")
# -
# ## Animations
#
# You can also embed animations into plotly charts. Here's another example from their documentation
# +
# Animated parametric curve example (from the plotly docs): the static curve
# (x, y) is drawn twice, and `frames` moves a red dot along a coarser
# sampling (xx, yy) of the same curve when "Play" is pressed.
t=np.linspace(-1,1,100)
x=t+t**2
y=t-t**2
# Axis limits padded 1.5 units beyond the data range.
xm=np.min(x)-1.5
xM=np.max(x)+1.5
ym=np.min(y)-1.5
yM=np.max(y)+1.5
N=50
s=np.linspace(-1,1,N)
xx=s+s**2
yy=s-s**2
data=[dict(x=x, y=y,
           mode='lines',
           line=dict(width=2, color='blue')
           ),
      dict(x=x, y=y,
           mode='lines',
           line=dict(width=2, color='blue')
           )
      ]
# Fixed (non-auto) ranges so the view doesn't jump between frames; the
# updatemenus entry adds the Play button that starts the animation.
layout=dict(xaxis=dict(range=[xm, xM], autorange=False, zeroline=False),
            yaxis=dict(range=[ym, yM], autorange=False, zeroline=False),
            title='Kinematic Generation of a Planar Curve', hovermode='closest',
            updatemenus= [{'type': 'buttons',
                           'buttons': [{'label': 'Play',
                                        'method': 'animate',
                                        'args': [[]]}]}])
# One frame per sample point: a single red marker at (xx[k], yy[k]).
frames=[dict(data=[dict(x=[xx[k]],
                        y=[yy[k]],
                        mode='markers',
                        marker=dict(color='red', size=10)
                        )
                   ]) for k in range(N)]
figure1=dict(data=data, layout=layout, frames=frames)
iplot(figure1)
# -
# ### Financial charts
#
# Another one from the docs https://plot.ly/python/candlestick-charts/#custom-candlestick-colors
# +
# NOTE(review): FigureFactory.create_candlestick and the 'yahoo' data reader
# are deprecated in current plotly / pandas-datareader releases -- this cell
# only runs against the old library versions this notebook targets.
from plotly.tools import FigureFactory as FF
from plotly.graph_objs import Line, Marker
from datetime import datetime
# Daily AAPL OHLC prices for 2008-01-01 .. 2009-04-01.
df = web.DataReader("aapl", 'yahoo', datetime(2008, 1, 1), datetime(2009, 4, 1))
fig = FF.create_candlestick(df.Open, df.High, df.Low, df.Close, dates=df.index)
# Make increasing ohlc sticks and customize their color and name
fig_increasing = FF.create_candlestick(df.Open, df.High, df.Low, df.Close, dates=df.index,
                                       direction='increasing', name='AAPL',
                                       marker=Marker(color='rgb(150, 200, 250)'),
                                       line=Line(color='rgb(150, 200, 250)'))
# Make decreasing ohlc sticks and customize their color and name
fig_decreasing = FF.create_candlestick(df.Open, df.High, df.Low, df.Close, dates=df.index,
                                       direction='decreasing',
                                       marker=Marker(color='rgb(128, 128, 128)'),
                                       line=Line(color='rgb(128, 128, 128)'))
# Initialize the figure
fig = fig_increasing
# Add decreasing data with .extend()
fig['data'].extend(fig_decreasing['data'])
iplot(fig)
# -
| Code/notebooks/bootcamp_plotly_update.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# language: python
# name: python3
# ---
# + [markdown] id="view-in-github" colab_type="text"
# <a href="https://colab.research.google.com/github/akash-agni/Machine_Learning/blob/master/Credit_Card_Customer_Churn_Analysis.ipynb" target="_parent"><img src="https://colab.research.google.com/assets/colab-badge.svg" alt="Open In Colab"/></a>
# + [markdown] id="_uH8BmqDcjIR"
# # Credit Card Customer Churn Analysis
# + [markdown] id="fZ-Y8xNHcjIk"
# Hello There!! Been a long time since I uploaded anything, might have some rough edges, apologies for that, do point out any issues in comments.
# + [markdown] id="FM2hr-LbcjIm"
# So this Dataset really drew my attention in the last few days, as someone working in Finance, it seemed really interesting to look into, and man did I enjoy working on it...
#
# So the basic idea is as below.
#
# <I><b>Business manager of a consumer credit card portfolio of a bank is facing severe customer attrition problem in the recent months. This is impacting the business. The business manager wants to leverage the power of data analytics to understand the primary reasons of attrition. She also wants to have an ability to understand the customers who are likely to close their accounts with the bank in near future, so that she can focus her efforts, well in advance to retain those customers.</b></I>
# + [markdown] id="INBJ2X9RcjIm"
# So lets put on our analytics hats and help out our friend here, below are the steps and we will try to find a viable solution to this guys problem in the smallest amount of time possible, dont wanna spend entire day here do we.
# + [markdown] id="nRWdx01OcjIn"
# <h4>
# <ol>
# <li>Data Loading</li>
# <li>Data Cleaning</li>
# <li>Univariate Analysis</li>
# <li>Multivariate Analysis</li>
# <li>Feature Engineering</li>
# <li>Model Development</li>
# <li>Results Analysis</li>
# <li>Conclusion</li>
# </ol>
# </h4>
# + id="oVpXhjQocjIo"
import pandas as pd
import numpy as np
import matplotlib.pyplot as plt
import seaborn as sns
from scipy import stats as ss
from sklearn.preprocessing import StandardScaler
from sklearn.linear_model import LogisticRegression
from sklearn.model_selection import train_test_split
from sklearn import metrics as mtr
# + id="VSFjnIvrcjIp"
import warnings
warnings.filterwarnings('ignore')
# + [markdown] id="liD970Y3cjIp"
# ## Data Loading
# + colab={"resources": {"http://localhost:8080/nbextensions/google.colab/files.js": {"data": "<KEY>", "ok": true, "headers": [["content-type", "application/javascript"]], "status": 200, "status_text": ""}}, "base_uri": "https://localhost:8080/", "height": 38} id="gXR_dwcXdSxQ" outputId="60bf7be1-2727-430c-bce3-43f720c59e68"
# Colab-only: prompt the user to upload the CSV into the runtime's filesystem.
from google.colab import files
uploaded = files.upload()
# + id="WkPe-E8TdmiR"
for fn in uploaded.keys():
  print('User uploaded file "{name}" with length {length} bytes'.format(name=fn, length=len(uploaded[fn])))
# + colab={"base_uri": "https://localhost:8080/", "height": 326} id="z0AQB2rYcjIq" outputId="cb4347af-c3b7-4b49-b406-42eb064f583f"
# Load the uploaded churn dataset (assumes the uploaded file was named
# BankChurners.csv -- the upload above does not rename it).
df = pd.read_csv('BankChurners.csv')
print(f"Data has {df.shape[0]} rows and {df.shape[1]} columns")
df.head(5)
# + [markdown] id="4mSqxNBlcjIx"
# ## Data Cleaning
# + id="U7kVEp1CcjIy"
# Drop the two pre-computed Naive-Bayes classifier columns shipped with the
# dataset -- keeping them would leak the target into our own model.
df = df.drop(['Naive_Bayes_Classifier_Attrition_Flag_Card_Category_Contacts_Count_12_mon_Dependent_count_Education_Level_Months_Inactive_12_mon_1',
              'Naive_Bayes_Classifier_Attrition_Flag_Card_Category_Contacts_Count_12_mon_Dependent_count_Education_Level_Months_Inactive_12_mon_2'],axis=1)
# + colab={"base_uri": "https://localhost:8080/"} id="8RGnvp-qcjI0" outputId="ac8eb449-f723-469b-dde2-a2fcbee192e0"
# Column dtypes and non-null counts -- quick nullability check.
df.info()
# + colab={"base_uri": "https://localhost:8080/"} id="ZLLKEzJtcjI1" outputId="67bd61be-074d-4a10-8116-f3bedae56689"
# BUG FIX: corrected "Nubmber" -> "Number" in the printed message.
print("Number of Duplicate Rows: ",df.duplicated().sum())
# + colab={"base_uri": "https://localhost:8080/"} id="rKns_s7dcjI2" outputId="76282d9d-9ad5-41b1-9fb5-c91faaf533f5"
# 100% means CLIENTNUM is a clean primary key (every value unique).
print(f"How pure is primary key 'CLIENTNUM': {len(df['CLIENTNUM'].unique())/len(df) * 100}%")
# + [markdown] id="USttBqKHcjI3"
# #### No Null, No Duplicates, No Overlapping keys, this is what analytics heaven must feel like
# + [markdown] id="rxKY_ZprcjI4"
# ## Univariate Analysis
# + id="zD_NXma2cjI4"
# Function to get Descriptive Analysis of Numeric columns
def Numeric_Analysis(x):
    """Print descriptive statistics for a numeric Series and show its KDE plot.

    Parameters
    ----------
    x : pandas Series (numeric); ``x.name`` is used in the printed headings
        and as the x-axis label.
    """
    print('='*40)
    print(f'Descriptive Statistics of {x.name}')
    print('='*40)
    print(x.describe())
    print('='*40)
    print(f"Probability Density Plot for {x.name}")
    print('='*40)
    # BUG FIX: plt.subplots() returns (figure, axes) -- the original unpacked
    # them in the wrong order and then kept rebinding `fig` to unrelated
    # objects. Draw explicitly on the created axes instead.
    fig, ax = plt.subplots(figsize=(8,4))
    sns.kdeplot(x.values, shade=True, ax=ax)
    ax.set_xlabel(x.name)
    plt.show()
# Function to get Descriptive Analysis of Categorical columns
def Categorical_Analysis(x):
    """Print a one-way frequency table for a categorical Series and bar-plot it.

    Note: percentages are computed against the module-level DataFrame ``df``,
    so this helper only works inside this notebook.
    """
    banner = '=' * 40
    print(banner)
    print(f'One-Way Frequency Table of {x.name}')
    print(banner)
    # Absolute counts plus percentage of all rows, rounded to 3 decimals.
    counts = x.value_counts()
    freq_table = pd.DataFrame(counts)
    freq_table.columns = ['Frequency']
    freq_table['Percentage'] = np.round((counts / len(df) * 100).values, 3)
    print(freq_table)
    print(banner)
    figure, axes = plt.subplots(figsize=(8, 6))
    print(f'One-Way Frequency Plot of {x.name}')
    print(banner)
    # Bar chart of the percentage per category.
    axes = sns.barplot(x=freq_table.index, y=freq_table['Percentage'].values)
    axes.plot()
    plt.ylabel('Percentage')
    plt.xlabel(x.name)
    plt.show()
# + colab={"base_uri": "https://localhost:8080/", "height": 1000} id="4q1GYRlAcjI5" outputId="42604f03-cfaa-44bc-860a-cb4a0e878489"
# Run the matching analysis on every feature (CLIENTNUM is just an ID).
# '|O' is the numpy array-protocol typestr for object dtype, i.e. the
# string/categorical columns.
for col, dtyp in dict(df.drop(['CLIENTNUM'], axis=1).dtypes).items():
    if dtyp.str == '|O':
        Categorical_Analysis(df[col])
    else:
        Numeric_Analysis(df[col])
    print("X--------------X"*6,)
# + [markdown] id="aujoiFBXcjI6"
# ## Understanding of Data From Descriptive Analysis
# + [markdown] id="Q94WFUrpcjI8"
# <ul>
# <li><b>CLIENTNUM: </b>Primary Key, No duplicates Not of much use as of now</li>
# <li><b>Attrition_Flag: </b>Target Feature, Categorical, Very unbalanced distribution of data, will see if we can use some kind of sampling technique to improve</li>
# <li><b>Customer_Age: </b>Discrete numerical feature. I was expecting a skewed distribution, as the older population is more prone to avoid using credit cards than the younger one.</li>
# <li><b>Gender: </b>Not being sexist, but its a little weird that there are more 'Female' Card holders than 'Men'.</li>
# <li><b>Dependent_count: </b>Discret Numerical Feature, an average family of 4 or less seems to be the case.</li>
# <li><b>Education: </b>Categorical feature. Graduates seem to be the most represented category; we might be able to combine a few categories into one based on their bad rate.</li>
# <li><b>Marital_Status: </b>Married people seems to be most represented here, it is possible that one customer might have more than one card in family, and drop a few to reduce liability</li>
# <li><b>Income_Category :</b>It is already binned, which causes some loss of info, and also I expected normal distribution, but it seems to be linear descending distribution as income increases.</li>
# <li><b>Card_Category :</b>Blue is overally dominant here, and others are way to small to add any useful info, we might drop this</li>
# <li><b>Months_on_book :</b>Normally distributed as expected, except for that weird peak, which might suggest tampering with original data, probably someone replace missing values with mean of data, causing such peak at mean, this is not good.</li>
# <li><b>Total_Relationship_Count :</b>Number of products owned by customer, we will see how it relates with attrition.</li>
# <li><b>Months_Inactive_12_mon :</b>It seems inactive users mostly come back within 3 months at most, or probably drop out — a good hypothesis to check.</li>
# <li><b>Contacts_Count_12_mon :</b>Very similar to last column, as most people comeback by 3 months, most contact is done during that period to bring user back to spending, its possible that users who do not respond even after 3 months are more probable to drop off permanently</li>
# <li><b>Credit_Limit :</b>Has a weird bump at the end of the tail, worth checking further.</li>
# <li><b>Total_Revolving_Bal :</b> Total Revolving balance means, how much balance is left over after each last payment made by customer, or pending debt, seems most people payoff there debt or have none, but a large portion seems to carry a huge amount.</li>
# <li><b>Avg_Open_To_Buy :</b>Average open credit line over the last 12 months; the distribution is very similar to Credit_Limit, so the two might be correlated and thus redundant.</li>
# <li><b>Total_Amt_Chng_Q4_Q1 :</b>Not quite sure what it means, perhaps the percentage change in last 1 year in overall balance.</li>
# <li><b>Total_Trans_Amt :</b>Very uneven distribution, perhaps will work better if we just bin it.</li>
# <li><b>Total_Trans_Ct :</b>A double hump camel, this tells us there are high frequency users and low frequency users in our data, usually its the low frequency users who sticks with a bank longer, as they have less issues with there cards.</li>
# <li><b>Total_Ct_Chng_Q4_Q1 :</b>Not quite sure what this is, lets assume its point change in total transaction count</li>
# <li><b>Avg_Utilization_Ratio :</b>Card Utilization Ratio is debt/credit_limit at any given time,I am asummin the average is over 12 months, which would simply be<b> (Credit_Limit - Avg_Open_To_Buy)/Credit_Limit)</b> seems we already have some engineered columns here</li>
# </ul>
# + [markdown] id="bXCr3HL0cjJJ"
# #### This data seems heavily modified and engineered already; we must carefully avoid the trap of multicollinearity
# + [markdown] id="wwUSQjg3cjJT"
# ## Multivariate Analysis
# + id="rgsIiQGjcjJU"
# Split modelling features by dtype ('|O' = object dtype = categorical),
# excluding the ID and target columns.
num_cols = [col for col in df.drop(['CLIENTNUM','Attrition_Flag'], axis=1).columns if df[col].dtype.str != '|O']
cat_cols = [col for col in df.drop(['CLIENTNUM','Attrition_Flag'], axis=1).columns if df[col].dtype.str == '|O']
# + colab={"base_uri": "https://localhost:8080/", "height": 816} id="PcOKpMs0cjJV" outputId="a61318f6-9e27-4675-9436-0e56e67946f1"
# Pearson correlation heatmap of the numeric features
# (annotations formatted to 1 significant digit via fmt='.1').
fig, ax = plt.subplots(figsize=(12,12))
fig = sns.heatmap(df[num_cols].corr(), fmt='.1', cmap='Reds', annot=True)
fig = plt.xticks(rotation=70)
# + [markdown] id="KqeTS8VKcjJW"
# <h3>Insights:</h3>
#
# <ul>
# <li><b>Months_on_books-vs-Customer_Age:</b> The older the customer, the longer they have been with company, very strongly correlated, either have to drop one or find a way to combine that</li>
# <li><b>Credit_Limit -vs- Average_Open_To_Buy: </b> As expected, this is way similar as credit_limit, we can just drop one of the columns.</li>
# <li><b>Total_Trans_Amt -vs- Total_Trans_Ct: </b>The more transactions you do, the more amount you generate in debt, no surprise there.</li>
# </ul>
# + id="fu1oxHYYcjJX"
# Getting Cramer's V for Categorical Correlation
def cramers_v(x, y):
    """Bias-corrected Cramer's V association between two categorical series.

    Applies the Bergsma-Wicher small-sample correction to the chi-squared
    statistic, so the result is 0 for independent variables and at most 1
    for a perfect association.
    """
    table = pd.crosstab(x, y)
    chi2_stat = ss.chi2_contingency(table)[0]
    total = table.sum().sum()
    phi2 = chi2_stat / total
    n_rows, n_cols = table.shape
    # Correct both phi^2 and the effective table dimensions for bias.
    phi2_corr = max(0, phi2 - ((n_cols - 1) * (n_rows - 1)) / (total - 1))
    rows_corr = n_rows - ((n_rows - 1) ** 2) / (total - 1)
    cols_corr = n_cols - ((n_cols - 1) ** 2) / (total - 1)
    return np.sqrt(phi2_corr / min(cols_corr - 1, rows_corr - 1))
# + id="yCxBB8OdcjJX"
# Symmetric matrix of pairwise Cramer's V values for the categorical
# columns, labelled for plotting.
n = len(cat_cols)
pairwise = [[cramers_v(df[row_col], df[col_col]) for col_col in cat_cols]
            for row_col in cat_cols]
cat_corr = pd.DataFrame(pairwise, index=cat_cols, columns=cat_cols)
# + colab={"base_uri": "https://localhost:8080/", "height": 564} id="y2ebAZrCcjJZ" outputId="2247ff9a-a155-419a-dce0-fba039755ee7"
# Heatmap of the Cramer's V matrix (annotations to 2 significant digits).
fig, ax = plt.subplots(figsize=(8,8))
fig = sns.heatmap(cat_corr, fmt='.2', cmap='Reds', annot=True)
fig = plt.yticks(rotation=0)
# + colab={"base_uri": "https://localhost:8080/", "height": 173} id="tjHW7HcKcjJa" outputId="790fa916-05e1-4cf1-ca99-21fbf301dce8"
# Two-way frequency table of gender vs. income bracket with totals.
# `margins` is a boolean flag; the original passed the string 'row', which
# only worked because any non-empty string is truthy -- use True explicitly.
pd.crosstab(df['Gender'],df['Income_Category'], margins=True)
# + [markdown] id="I1Y_Zj6_cjJb"
# <h3>Insights</h3>
# + [markdown] id="4b0vqa9dcjJb"
# No major correlation, other than <b>"Income_Category -vs- Gender"</b>: although the majority of card holders are female, their income category is mostly on the lower end, or unknown. We will have to find a way to combine the two columns, to avoid unstable models.
# + [markdown] id="mj2V9lwacjJd"
# ## Feature Engineering
# + colab={"base_uri": "https://localhost:8080/", "height": 111} id="fggHTaUkcjJe" outputId="74f3fa90-f007-44e6-a9b9-dc7ef6a2327d"
# Combine the Customer_Age and Months_on_book to get a new column so we can drop one of them and avoid correlation
Per_of_life_as_cust = df['Months_on_book']/(df['Customer_Age']*12) * 100
df['Per_of_life_as_cust'] = Per_of_life_as_cust
# Check that the new ratio is no longer strongly correlated with age.
df[['Customer_Age','Per_of_life_as_cust']].corr()
# + colab={"base_uri": "https://localhost:8080/", "height": 111} id="ntEKVmqbcjJf" outputId="6358421a-7dee-4bb4-c84d-04cd0dc1cf79"
# Average Transaction amount gives us a better feature and also avoids correlation
Avg_Trans_Amt = df['Total_Trans_Amt']/df['Total_Trans_Ct']
df['Avg_Trans_Amt'] = Avg_Trans_Amt
df[['Avg_Trans_Amt','Total_Trans_Ct']].corr()
# + id="eu3fU1gScjJg"
# Drop the raw columns superseded above plus Card_Category (dominated by one level).
df = df.drop(['Total_Trans_Amt','Months_on_book','Avg_Open_To_Buy','Card_Category'], axis=1)
# + id="FU59ou_CcjJh"
# Binary target: 1 = attrited customer, 0 = existing customer; 70/30 split.
X = df.drop(['CLIENTNUM','Attrition_Flag'], axis=1).copy()
y = (df['Attrition_Flag'].copy() == 'Attrited Customer')*1
X_train, X_test, y_train, y_test = train_test_split(X, y, random_state=55, test_size=0.3)
# + id="w2wj29DRcjJi"
# Continuous features get z-scaling; everything else gets WOE encoding below.
to_standardize = ['Customer_Age','Credit_Limit','Total_Revolving_Bal','Total_Amt_Chng_Q4_Q1','Total_Trans_Ct',
                  'Total_Ct_Chng_Q4_Q1','Avg_Utilization_Ratio','Per_of_life_as_cust','Avg_Trans_Amt']
to_woe = [col for col in X_train.columns if col not in to_standardize]
# + id="kkt8TI9kcjJi"
# Fit the scaler on the training split only, then apply it to both splits.
# NOTE(review): this rebinds `ss`, the name cramers_v() above uses for
# scipy.stats -- re-running those earlier cells after this one will fail;
# consider renaming this variable to `scaler`.
ss = StandardScaler()
ss.fit(X_train[to_standardize])
X_train[to_standardize] = ss.transform(X_train[to_standardize])
X_test[to_standardize] = ss.transform(X_test[to_standardize])
# + [markdown] id="xnoF4wNfcjJj"
# ### What is Weight of Evidence?
# + [markdown] id="K1E9u1XzcjJk"
# <b>The weight of evidence tells the predictive power of an independent variable in relation to the dependent variable. Since it evolved from credit scoring world, it is generally described as a measure of the separation of good and bad customers. "Bad Customers" refers to the customers who left company, and "Good Customers" refers to the customers who continued to use credit card.</b>
# + [markdown] id="0Y5kIDrmcjJk"
# <b>Therefore a positive WOE means more "Good Customers" than "Bad Customers", and vice versa for negative WOE. The absolute value of WOE tells us the separation of the two.</b>
# + [markdown] id="r-H7ff9IcjJl"
# 
# + [markdown] id="JPRv5mBLcjJm"
# <b>We can replace the categorical features with there respective WOE, rather than using dummy variables. This helps us avoid increasing dimensionality of our data</b>
# + [markdown] id="mZ1aFyIvcjJm"
# ### What is IV?
# + [markdown] id="B4fOy-l6cjJn"
# <b>IV stands for Information Value, it is useful in determining the predictive power of a feature based on there class seperation, using WOE</b>
# + [markdown] id="4ExKDifYcjJo"
# 
# + [markdown] id="FBxb255acjJp"
# 
# + id="S8UzDjV_cjJp"
# Mask of "bad" (attrited) training rows and the training-set size; both
# are read as globals by WOE_Calculator below.
target_flag = y_train == 1
N = len(y_train)
def WOE_Calculator(x):
    """Per-attribute Weight-of-Evidence / Information-Value table for one
    categorical training-set feature (pandas Series).

    Relies on x sharing X_train's index so the boolean mask aligns.
    NOTE(review): textbook WOE divides the good/bad counts by the TOTAL
    goods/bads respectively; here both are divided by N (all rows), which
    offsets every WOE by the constant log(#good/#bad) and puts IV on a
    non-standard scale -- confirm before comparing against the usual
    0.02/0.1/0.3 IV thresholds.
    """
    rows = list()
    #print(x.name)
    for attr in list(x.unique()):
        #print(attr)
        x_at = x[x == attr]
        n = len(x_at)
        # +0.5 smoothing keeps the log finite when a class count is zero.
        good_per = (len(x_at[~target_flag])+0.5)/N * 100
        bad_per = (len(x_at[target_flag])+0.5)/N * 100
        woe = np.log(good_per/bad_per)
        iv = ((good_per - bad_per)/100)*woe
        rows.append([x.name, attr, n, good_per, bad_per, woe, iv])
    return pd.DataFrame(rows, columns=['Feature', 'Attribute', 'Count', 'Good%', 'Bad%', 'WOE', 'IV'])
# + id="DHsDyp4gcjJq"
# Build one WOE/IV table per WOE-encoded feature and stack them.
# The original concatenated inside the loop (quadratic) and included the
# initial all-empty frame, which modern pandas deprecates; collect the
# per-feature tables first and concatenate once. reversed() preserves the
# original row order, where each new table was prepended.
woe_tables = [WOE_Calculator(X_train[col]) for col in X_train[to_woe].columns]
if woe_tables:
    df_iv = pd.concat(list(reversed(woe_tables)))
else:
    df_iv = pd.DataFrame(columns=['Feature', 'Attribute', 'Count', 'Good%', 'Bad%', 'WOE', 'IV'])
# + colab={"base_uri": "https://localhost:8080/", "height": 0} id="atssVnePcjJr" outputId="413d8516-63a1-41c7-99aa-6c3f48db6be6"
# Ten most negative-WOE attributes (strongest attrition signals under this
# sign convention).
df_iv.sort_values(by='WOE').head(10)
# + colab={"base_uri": "https://localhost:8080/"} id="tjhXad6CcjJs" outputId="8f4109ba-0984-4175-dd63-b2f8e708227a"
# Information Value per feature: sum of its per-attribute IV contributions.
df_iv.groupby('Feature')['IV'].sum()
# + id="XeIg_3YxcjJu"
# Replace each categorical value with its training-set WOE in both splits.
for col in X_train[to_woe].columns:
    woe_dict = dict(df_iv[df_iv['Feature'] == col][['Attribute','WOE']].set_index('Attribute')['WOE'])
    X_train[col] = X_train[col].apply(lambda x : woe_dict[x])
    # NOTE(review): a category present only in X_test raises KeyError here;
    # Series.map with a default would degrade more gracefully -- confirm
    # the hard failure is intended.
    X_test[col] = X_test[col].apply(lambda x : woe_dict[x])
# + [markdown] id="9IpMx6C8cjJv"
# ## Model Development & Validation
# + colab={"base_uri": "https://localhost:8080/"} id="mCvCse-EcjJv" outputId="1a02fa0d-a6d8-4001-8517-4e5badef8485"
# Baseline logistic regression on the WOE-encoded / standardised features.
clf = LogisticRegression(solver='lbfgs')
clf.fit(X_train, y_train)
# + colab={"base_uri": "https://localhost:8080/", "height": 435} id="orqfq6WlcjJw" outputId="4757590f-0b81-4075-af85-ad27b476b986"
# Evaluate at the default 0.5 decision threshold.
y_pred = clf.predict(X_test)
print(mtr.classification_report(y_test, y_pred))
# NOTE(review): roc_auc_score on hard 0/1 predictions degenerates to
# balanced accuracy; pass predict_proba scores for a true ROC AUC.
print('AUC of ROC is : ',mtr.roc_auc_score(y_test, y_pred))
fig = sns.heatmap(mtr.confusion_matrix(y_test, y_pred), fmt='', cmap='Blues', annot=True)
# + id="IzxLOCeccjJx"
def gains_table(y_true, y_pred):
    """Decile gains table for a binary classifier.

    y_true: array of 0/1 labels (0 = good, 1 = bad/attrited).
    y_pred: predict_proba output; column 1 is taken as the bad-class score.
    Returns a 10-row DataFrame (one per score decile, highest scores first)
    with per-decile and cumulative class counts and the KS statistic.

    NOTE(review): the decile slices come from the re-sorted `y_pred`
    Series while `good`/`bad` are counted via boolean-mask alignment with
    `y_true` on the shared (shuffled) index -- verify the per-decile
    good/bad counts against a hand computation before trusting them.
    """
    # Pair labels with scores and sort ascending by score; boolean masks
    # below align with these Series through the shared index.
    y = pd.DataFrame(np.c_[y_true,y_pred[:,1]],columns=['y_true','y_pred']).sort_values(by='y_pred')
    y_true = y['y_true']
    y_pred = y['y_pred']
    n = len(y_true)
    # Express scores as percentages and re-sort descending (best decile first).
    y_pred = pd.Series(y_pred*100).sort_values(ascending=False)
    # 11 positional edges -> 10 roughly equal slices; the last absorbs the remainder.
    bins = [i for i in range(0,n-int(n/10),int(n/10))] + [n]
    rows = []
    cum_good = 0
    cum_bad = 0
    good_n = (y_true == 0).sum()
    bad_n = (y_true == 1).sum()
    for i in range(0,10):
        # Positional slice of the descending-score Series for this decile.
        x = y_pred[bins[i]:bins[i+1]]
        max_prob = np.max(x)
        min_prob = np.min(x)
        mean_prob = np.mean(x)
        count = len(x)
        # Full-length boolean Series masks; pandas aligns them on x's index.
        good = len(x[y_true == 0])
        bad = len(x[y_true == 1])
        good_per = np.round(good/count * 100,2)
        bad_per = np.round(bad/count * 100,2)
        cum_good = cum_good + good
        cum_bad = cum_bad + bad
        # NOTE(review): good/bad is an odds-style ratio, not the usual
        # FP/(FP+TN) false-positive rate, despite the column label below.
        if bad == 0:
            fpr = np.inf
        else:
            fpr = good/bad
        cum_good_per = np.round(cum_good / good_n * 100,2)
        cum_bad_per = np.round(cum_bad / bad_n * 100,2)
        # KS: gap between the cumulative bad% and good% capture curves.
        ks = cum_bad_per - cum_good_per
        rows.append([max_prob, mean_prob, min_prob, count, good, bad, good_per, bad_per,cum_good, cum_bad, fpr,
                     cum_good_per, cum_bad_per, ks])
    # NOTE(review): "Cumalative" is a misspelling of "Cumulative" kept here
    # because downstream code may select columns by these exact names.
    return pd.DataFrame(rows, columns=['Max Proba', 'Mean Proba', 'Min Proba', 'Count', 'Good', 'Bad', 'Good%', 'Bad%',
                                       'Cumalative Good', 'Cumalative Bad', 'False Positive Rate', 'Cumalative Good%',
                                       'Cumalative Bad%', 'KS'])
# + colab={"base_uri": "https://localhost:8080/", "height": 376} id="NTgTudYocjJy" outputId="fd8fefcf-4fd4-41cd-90f1-d2be7319d529"
# Score the test split and inspect the decile gains table.
y_pred_prob = clf.predict_proba(X_test)
gains_table(y_test.values, y_pred_prob)
# + id="CZUjSF8AcjJz"
# Re-threshold at 0.11 (chosen from the gains table) to trade precision for recall.
y_pred2 = (y_pred_prob[:,1] >= 0.11)*1
# + colab={"base_uri": "https://localhost:8080/", "height": 435} id="ia-0GgPPcjJ0" outputId="ff35cb57-b244-410b-be4d-36ae24d4ad69"
print(mtr.classification_report(y_test, y_pred2))
print('AUC of ROC is : ',mtr.roc_auc_score(y_test, y_pred2))
fig = sns.heatmap(mtr.confusion_matrix(y_test, y_pred2), fmt='', cmap='Blues', annot=True)
# + colab={"base_uri": "https://localhost:8080/", "height": 374} id="8w_50ALicjJ0" outputId="8faef73d-257e-4fae-f3b6-f1192b207eea"
# Sorted logistic-regression coefficients, one bar per feature.
model_coef = pd.Series(dict(zip(list(X_train.columns),list(clf.coef_[0])))).sort_values()
fig,ax = plt.subplots(figsize=(6,6))
fig = sns.barplot(x=model_coef.values, y=model_coef.index)
# + id="9RufiWAbcjJ1"
| Credit_Card_Customer_Churn_Analysis.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# language: python
# name: python3
# ---
# # Datos para Streamlit
# En este notebook sólo enseño de dónde obtuve los datos para la visualización de Streamlit
# +
import mysql.connector as mariadb
import sys
# Connect to the local MariaDB server; exit the process on failure since
# nothing below can run without a connection.
try:
    conn = mariadb.connect(
        user="root",
        # Placeholder credential -- replace before running.
        password="<PASSWORD>",
        host="127.0.0.1",
        port=3306
    )
except mariadb.Error as e:
    print(f"Error connecting to MariaDB Platform: {e}")
    sys.exit(1)
cursor=conn.cursor()
# Select the "noticias" database (identifiers cannot be bound as
# parameters, hence the string interpolation here).
cursor.execute("USE %s"%"noticias")
# -
def get_corpus(seleccion= ["noticia"], fecha="fecha"):
    """Fetch the selected columns from the `notas` table via the global cursor.

    seleccion: list of column names to SELECT.
    fecha: text spliced into the WHERE clause.
    NOTE(review): %-interpolation splices the arguments into the SQL as raw
    text (injection-prone); with the default call below this executes
    `SELECT noticia FROM notas WHERE fecha = fecha`, a tautology that
    returns every row. Values should go through the driver's parameter
    binding -- confirm the always-true filter is intended.
    """
    seleccion=seleccion+[fecha]
    cursor.execute("SELECT %s FROM notas WHERE fecha = %s"%tuple(seleccion))
    resultados=cursor.fetchall()
    return resultados
# Fetch every news article (see the tautological WHERE note above).
corpus=get_corpus(["noticia"],"fecha")
len(corpus)
# ## Procesamiento de lenguaje
# +
from nltk import word_tokenize
from nltk.stem import SnowballStemmer
import spacy
from nltk.corpus import stopwords
from matplotlib import pyplot as plt
from wordcloud import WordCloud, STOPWORDS
import matplotlib.colors as mcolors
from collections import Counter
# -
# Spanish stemmer and spaCy pipeline used by normalizar().
stemmer = SnowballStemmer('spanish')
nlp = spacy.load('es_core_news_sm')
def normalizar(texto):
    """Normalize a Spanish text: drop punctuation/stopwords/pronouns, keep
    alphabetic tokens longer than 3 characters, lowercase and stem them.

    Returns the list of stems.
    """
    doc = nlp(texto)
    palabras = [t.orth_ for t in doc if not (t.is_punct | t.is_stop) and t.pos_ != 'PRON']
    tokens = [t.lower() for t in palabras if len(t) > 3 and t.isalpha()]
    raices = [stemmer.stem(token) for token in tokens]
    return raices
word_list = normalizar("Esto es lo que uno debería esperar después de que se pre-procesa un texto")
word_list
def modificar_corpus(corpus):
    """Normalize every document of the corpus.

    corpus: iterable of DB rows whose first element is the raw article text.
    Returns a list with one stem list per document.
    """
    return [normalizar(fila[0]) for fila in corpus]
CM=modificar_corpus(corpus)
# ## Aprendizaje No Supervisado
# +
import gensim
from gensim import models
import gensim.corpora as corpora
import seaborn as sns
import matplotlib.pyplot as plt
import numpy as np
from gensim.models import LdaModel, CoherenceModel
# -
# Vocabulary from the normalized corpus; drop tokens in fewer than 15 or
# more than 50% of documents, keeping at most 100k terms.
dictionary=corpora.Dictionary(CM)
dictionary.filter_extremes(no_below=15, no_above=0.5, keep_n=100000)
bow_corpus = [dictionary.doc2bow(noticia) for noticia in CM]
num_of_topics=8
# Train an 8-topic LDA (2 workers, fixed seed for reproducible topics).
lda_model = gensim.models.LdaMulticore(bow_corpus, num_topics=num_of_topics, id2word=dictionary,
                       passes=13, workers=2,
                      random_state=1,per_word_topics=True)
# ## Resultados
import pandas as pd
# Per-document (topic_id, probability) pairs from the trained LDA model.
obj=lda_model[bow_corpus]
results=[row[0] for row in obj]
len(results)
# Build a documents x topics probability matrix. DataFrame.append was
# removed in pandas 2.0 (and was quadratic); collect plain rows and build
# the frame once instead.
rows = []
for result in results:
    topic_probs = dict(result)  # topic id -> probability
    # Topics absent from a document get probability 0, as before.
    rows.append([topic_probs.get(topic, 0) for topic in range(num_of_topics)])
df = pd.DataFrame(rows, columns=[0, 1, 2, 3, 4, 5, 6, 7])
# +
def get_topic_DF(ldamodel, corpus, bow_texts,original,num_of_topics):
    """Summarise the dominant LDA topic of every document.

    ldamodel: trained gensim LDA model (indexable by a BOW corpus).
    corpus: BOW corpus aligned with the documents.
    bow_texts: normalized token lists, one per document.
    original: raw documents, aligned with `corpus`.
    num_of_topics: number of topics in the model.
    Returns a DataFrame with each document's dominant topic id, its
    probability, the topic's keywords, and both text versions.
    """
    # Keywords per topic: "w1, w2, ..." from show_topic's (word, prob) pairs.
    topic_keywords = {}
    for topic_num in range(num_of_topics):
        wp = ldamodel.show_topic(topic_num)
        topic_keywords[topic_num] = ", ".join([word for word, prob in wp])
    # Dominant topic per document. DataFrame.append was removed in
    # pandas 2.0, so accumulate plain rows and build the frame once.
    records = []
    for i, row_list in enumerate(ldamodel[corpus]):
        row = sorted(row_list[0], key=lambda x: (x[1]), reverse=True)
        topic_num, prob_topic = row[0]
        records.append([int(topic_num), round(prob_topic, 4), topic_keywords[topic_num]])
    topic_df = pd.DataFrame(records)
    # Attach the normalized and original texts column-wise.
    contents = pd.Series(bow_texts)
    original = pd.Series(original)
    topic_df = pd.concat([topic_df, contents, original], axis=1)
    topic_df.columns = ['Tema Principal', 'Contribucion', 'Palabras Clave','Texto Normalizado', 'Texto Original']
    return(topic_df)
# Dominant-topic summary for the full corpus.
topic_df = get_topic_DF(lda_model, bow_corpus,CM,corpus,num_of_topics)
topic_df.head(10)
# -
df
# Merge the per-topic probability matrix with the summary and export the
# combined table for the Streamlit visualisation.
df.columns=(["tema0","tema1","tema2","tema3","tema4","tema5","tema6","tema7"])
DF=pd.concat([df,topic_df],axis=1)
DF
DF.to_csv('data.csv', index = False)
| Streamlit vis.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# language: python
# name: python3
# ---
# +
import pandas as pd
# City payroll data; the first 4 lines are a preamble, and the file is
# Latin-1 encoded.
dat = pd.read_csv("2015_City.csv", skiprows = 4, encoding = 'iso-8859-1')
# -
dat.head()
from matplotlib import pyplot as plt
plt.style.use('ggplot')
# Distribution of total wages across all rows.
plt.hist(dat["Total Wages"], bins = 50)
plt.xlabel("Wages")
plt.ylabel("LOVE")
plt.show()
# Top earners.
dat.sort_values(by="Total Wages", ascending=False)["Total Wages"].head()
# +
# Keep only rows with total wages >= 18000, presumably to approximate
# full-time workers (the plot below is titled "Full Time Workers").
new_dat = dat.loc[dat["Total Wages"] >= 18000]
# -
new_dat.sort_values(by="Total Wages", ascending=True)["Total Wages"].head(15)
len(new_dat), len(dat)
float(len(new_dat))/float(len(dat)) # removed 30% of our data! :O
plt.hist(new_dat["Total Wages"], bins = 20)
plt.xlabel("Wages")
plt.ylabel("LOVE")
plt.title("Full Time Workers")
plt.show()
# +
dat = pd.read_csv("2015_City.csv", skiprows = 4, encoding = 'iso-8859-1')
# Stack the 2009-2015 files (Year and Total Wages only) into one frame.
fnames = ["2009_City.csv","2010_City.csv","2011_City.csv", "2012_City.csv", "2013_City.csv", "2014_City.csv", "2015_City.csv"]
bigass_df = pd.DataFrame()
li = []
for f in fnames:
    df = pd.read_csv(f, skiprows = 4, usecols = ["Year", "Total Wages"])
    li.append(df)
bigass_df = pd.concat(li)
bigass_df.head()
# +
# NOTE(review): the `ggplot` package is long unmaintained and incompatible
# with modern pandas -- confirm the pinned environment still provides it.
from ggplot import *
# Per-year wage density curves on one plot.
myplot = (ggplot(aes(x = "Total Wages", color = "Year"), data = bigass_df) \
    + geom_density(alpha = 0.2))
myplot
# -
| battle.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# language: python
# name: python3
# ---
# +
import pandas as pd
import numpy as np
from sklearn import tree
import graphviz
from sklearn.metrics import confusion_matrix
from sklearn.metrics import classification_report
from sklearn.model_selection import cross_val_score
from IPython.display import IFrame
# Phishing dataset, semicolon-separated.
df = pd.read_csv('./phishing.csv', sep=';')
#df.dropna(axis=0, how='any', inplace=True)
#df.describe()
#df.head(10)
#df.dtypes
# +
# Index.get_values() was removed in pandas 1.0; .values returns the same
# ndarray of column names.
colnames = df.columns.values
# explanatory variables (dataframe)
X = df.loc[:, 'having_IP_Address':'Statistical_report']
# response variable (series)
Y = df.loc[:, 'Result']
# +
# Experiment with min_samples_leaf to find the best model.
# number of folds for cross-validation
k = 10
# Sweep min_samples_leaf over {2001, 1501, 1001, 501, 1}; the step of -500
# from 2001 ends at 1, the smallest legal value.
for leaf in range(2001,-1,-500):
    print("min_samples_leaf: %d" % leaf)
    classifier = tree.DecisionTreeClassifier(max_depth=3, min_samples_leaf=leaf)
    classifier.fit(X,Y)
    Y_pred = classifier.predict(X)
    cm = confusion_matrix(Y, Y_pred)
    # Training accuracy from the 2x2 confusion matrix (optimistic estimate).
    accuracy = (cm[0][0]+cm[1][1])/(cm[0][0]+cm[1][1]+cm[0][1]+cm[1][0])
    print(" Accuracy calculated from the training set = %.3f" % (accuracy))
    # k-fold CV accuracy on an unfitted clone of the same classifier.
    scores = cross_val_score(estimator=classifier,
                    X=X,
                    y=Y,
                    scoring="accuracy",
                    cv=k)
    print(" Accuracy calculated using %d-fold cross validation = %.3f" % (k, scores.mean()))
# +
# Best value for min_samples_leaf was the smallest (1) when max depth of tree was 2 (to ensure a simple/compact tree)
# Validation accuracy was 90.4 %
# NOTE(review): the comment above says max depth 2, but the sweep fits
# max_depth=3; also `classifier` here is simply the LAST model from the
# loop (min_samples_leaf=1), not an explicitly refit best model.
# Visualize
dot_data = tree.export_graphviz(classifier, out_file=None, feature_names=colnames[:30], class_names=['legitimate', 'phishing'])
graph = graphviz.Source(dot_data)
graph.render("phishing_tree")
display(IFrame("./phishing_tree.pdf", width=900, height=700))
# +
# output confusion matrix
cm = confusion_matrix(Y, Y_pred)
print("Confusion matrix:\n",cm)
print(classification_report(Y, Y_pred, target_names=['legitimate', 'phishing']))
# -
# # INSTRUCTIONS
#
# IF website is using https - website a little more likely legitimate
# AND IF <33 % of the < a > tags have different domain as website or are empty - website very likely legitimate
# AND IF >33 % have different domain or empty
# AND IF <17 % links are in < Meta > < Script > and < Link > tags - website very likely legitimate
#
# IF website is not using https - website a little more likely phishing
# AND IF <33 % of the < a > tags have different domain as website or are empty - website more likely legitimate
# AND IF Domain Name Part Includes (-) Symbol - website likely legitimate
# AND IF >33 % have different domain or empty - website very likely phishing
# AND IF website has no traffic or is not listed in Alexa database - very likely phishing
# AND IF website has low/high traffic - still likely phishing
#
| 3_decisiontree.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: 'Python 3.6.13 64-bit (''data'': conda)'
# name: python3
# ---
from EduNLP.Tokenizer import PureTextTokenizer, TextTokenizer, get_tokenizer
# # TextTokenizer and PureTextTokenizer
#
# - ‘text’ Tokenizer ignores and skips the FormulaFigures and tokenize latex Formulas as Text
# - ‘pure_text’ Tokenizer symbolizes the FormulaFigures as [FUMULA] and tokenize latex Formulas as Text
# ## TextTokenizer
# Sample item: a LaTeX-bearing question stem plus answer options.
items = [{
    "stem": "已知集合$A=\\left\\{x \\mid x^{2}-3 x-4<0\\right\\}, \\quad B=\\{-4,1,3,5\\}, \\quad$ 则 $A \\cap B=$",
    "options": ["1", "2"]
}]
# Tokenize only the "stem" field of each item.
tokenizer = get_tokenizer("text") # tokenizer = TextTokenizer()
tokens = tokenizer(items, key=lambda x: x["stem"])
print(next(tokens))
# An item mixing formula figures, an image reference and LaTeX markers.
items = ["有公式$\\FormFigureID{wrong1?}$,如图$\\FigureID{088f15ea-xxx}$,若$x,y$满足约束条件公式$\\FormFigureBase64{wrong2?}$,$\\SIFSep$,则$z=x+7 y$的最大值为$\\SIFBlank$"]
# +
# 'text' mode skips formula figures and tokenizes LaTeX as plain text.
tokenizer = get_tokenizer("text") # tokenizer = TextTokenizer()
tokens = [t for t in tokenizer(items)]
tokens
# -
# ## PureTextTokenizer
# 'pure_text' mode symbolizes formula figures as [FORMULA] instead.
tokenizer = get_tokenizer("pure_text") # tokenizer = PureTextTokenizer()
tokens = [t for t in tokenizer(items)]
tokens
| examples/tokenizer/tokenizier.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# name: python3
# ---
# + id="m3OqarNqsXPB"
import numpy as np
# + colab={"base_uri": "https://localhost:8080/"} id="YKMh9yRnseeL" outputId="649e1208-01a1-4da4-f921-cde7969c528b"
# Build a 2x4 integer array from two tuples and inspect its basic
# attributes: dtype, element count and shape.
a = np.array([(2, 3, 4, 5), (4, 5, 6, 7)])
print(a)
# + colab={"base_uri": "https://localhost:8080/"} id="9I4v1mUitJg2" outputId="13f843b9-e4b0-473a-baf8-6d38724603c2"
print(a.dtype)
# + colab={"base_uri": "https://localhost:8080/"} id="anZFX5PtxZ_v" outputId="f00e28e2-16b8-4f3d-a272-024b2fa83be4"
print(a.size)
print(a.shape)
| Untitled2.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3 (ipykernel)
# language: python
# name: python3
# ---
# # Example usage
#
# Here we will demonstrate how to use `pycounts_syw` to count the words in a text file and plot the top 5 results.
# +
# Show the installed package version.
import pycounts_syw
print(pycounts_syw.__version__)
| docs/example.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# language: python
# name: python3
# ---
# Consider the data set below, for spam detection.
#
# We will use the Naive Bayes Classifier to learn from this data and predict new sentences.
#
# > Sentence | Spam
# > --- | ---
# > congrats you are selected | N
# > congrats you won lottery | Y
# > travel for free | Y
# > selected for credit cards | Y
# > very good | N
# > good night | N
# > lottery | Y
#
# In this context, each word can be treated as an attribute, and their values will be 0 or 1 depending on the absence or existence of the word in the sentence.
#
# For example, "congrats" will be attribute $a_0$, "you" will be attribute $a_1$, etc.
#
# > word | attribute | dictionary index (see python code below)
# > --- | --- | ---
# > congrats | $a_0$ | 0
# > you | $a_1$ | 1
# > are | $a_2$ | 2
# > selected | $a_3$ | 3
# > won | $a_4$ | 4
# > lottery | $a_5$ | 5
# > travel | $a_6$ | 6
# > for | $a_7$ | 7
# > free | $a_8$ | 8
# > credit | $a_9$ | 9
# > cards | $a_{10}$ | 10
# > very | $a_{11}$ | 11
# > good | $a_{12}$ | 12
# > night | $a_{13}$ | 13
#
#
#
#
# So, the sentence "you won free travel" would be
# > $\small (a_0=0, a_1=1, a_2=0, a_3=0, a_4=1, a_5=0, a_6=1, a_7=0, a_8=1, a_9=0, a_{10}=0, a_{11}=0, a_{12}=0, a_{13}=0)$
#
# or simply
# > $\small (0,1,0,0,1,0,1,0,1,0,0,0,0,0)$
#
# So, the probability that "you won free travel" is a spam can be written as:
# > $\small P(Spam=Yes| a_0=0, a_1=1, a_2=0, a_3=0, a_4=1, a_5=0, a_6=1, a_7=0, a_8=1, a_9=0, a_{10}=0, a_{11}=0, a_{12}=0, a_{13}=0)$
#
# or as
#
# > $\small P(Yes|0,1,0,0,1,0,1,0,1,0,0,0,0,0)$.
#
# Note that word ordering does not matter, and multiple occurances of the same word is simply represented as 1.
#
# +
import numpy as np
from sklearn.naive_bayes import MultinomialNB
#---------------------------------------------------------------------
# dictionary, to look up words from the data vector -- case sensitive!
#---------------------------------------------------------------------
dictionary = np.array(["congrats","you","are","selected","won","lottery","travel","for","free","credit","cards","very","good","night"])
#---------------------------------------
# vec2word: convert data vector to words
#---------------------------------------
def vec2word(vec):
    """
    arguments: vec = np.array([[0,1,...]]) -- one slot per dictionary word
               (1-D also accepted); nonzero entries mark present words
    returns: list of the present words, in dictionary order
    """
    dictionary = np.array(["congrats","you","are","selected","won","lottery","travel","for","free","credit","cards","very","good","night"])
    # Indices of the nonzero slots; ravel() flattens (1, n) input.
    present = np.flatnonzero(np.asarray(vec).ravel())
    # Fixes: the original printed the word list and returned None
    # (contradicting its docstring), shadowed the builtin `dict`, and kept
    # a redundant 1-based index table alongside the word array.
    return [str(dictionary[i]) for i in present]
#--------------------------------
# spam data : enter your data here (SOL)
#--------------------------------
# One row per training sentence (binary bag-of-words over the 14-word
# dictionary); y: 1 = spam, 0 = not spam.
X = np.array([
    [1,1,1,1,0,0,0,0,0,0,0,0,0,0], [1,1,0,0,1,1,0,0,0,0,0,0,0,0],[0,0,0,0,0,0,1,1,1,0,0,0,0,0],[0,0,0,1,0,0,0,1,0,1,1,0,0,0],[0,0,0,0,0,0,0,0,0,0,0,1,1,0],[0,0,0,0,0,0,0,0,0,0,0,0,1,1],[0,0,0,0,0,1,0,0,0,0,0,0,0,0]
])
y = np.array([0,1,1,1,0,0,1])
# Multinomial Naive Bayes fitted on the full (tiny) training set; the
# score below is therefore training accuracy.
clf = MultinomialNB()
clf.fit(X, y)
print("Score (accuracy: 1.0 = 100%)= ",end="")
print(clf.score(X,y))
# -
# Providing 3 test sentences that are classified as "Spam" (output = 1).
#
# - These cannot be from the provided data set above.
# - Each sentence must be at least 4 words long.
# +
# Three held-out sentences expected to classify as spam (prediction 1);
# each vector is the sentence's binary encoding over the dictionary.
print("\nThis is test sentence 1: You won free travel cards")
test1 = np.array([[0,1,0,0,1,0,1,0,1,0,1,0,1,0]])
print(vec2word(test1)) #verifying that sentence is correct
print("The prediction is")
print(clf.predict(test1))
print("\nThis is test sentence 2: You are selected for lotttery")
test2 = np.array([[0,1,1,1,0,1,0,1,0,0,0,0,0,0]])
print(vec2word(test2)) #verifying that sentence is correct
print("The prediction is")
print(clf.predict(test2))
print("\nThis is test sentence 3: You won good credit cards")
test3 = np.array([[0,1,0,0,1,0,0,0,0,1,1,0,1,0]])
print(vec2word(test3)) #verifying that sentence is correct
print("The prediction is")
print(clf.predict(test3))
# -
# Providing 3 test sentences that are classified as "Not spam" (output = 0).
#
# - These cannot be from the provided data set above.
# - Each sentence must be at least 4 words long.
# +
# Three held-out sentences expected to classify as not spam (prediction 0).
print("\nThis is test sentence 1: You are selected for night")
test1 = np.array([[0,1,1,1,0,0,0,1,0,0,0,0,0,1]])
print(vec2word(test1)) #verifying that sentence is correct
print("The prediction is")
print(clf.predict(test1))
print("\nThis is test sentence 2: You are very good")
test2 = np.array([[0,1,1,0,0,0,0,0,0,0,0,1,1,0]])
print(vec2word(test2)) #verifying that sentence is correct
print("The prediction is")
print(clf.predict(test2))
print("\nThis is test sentence 3: Good for travel night")
test3 = np.array([[0,0,0,0,0,0,1,1,0,0,0,0,1,1]])
print(vec2word(test3)) #verifying that sentence is correct
print("The prediction is")
print(clf.predict(test3))
| Bayesian Learning .ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3 (ipykernel)
# language: python
# name: python3
# ---
# # CheckAtlas examples : Evaluate and compare different atlases
# In this example, we show how to run checkatlas in a folder containing 3 different data type: Seurat, Scanpy, Cellranger. The three atlas files come from the PBMC 3K sample.
# ## Download datasets
# The Cellranger file is directly downloaded from 10xGenomics database.
# + language="bash"
# mkdir -p data3
# mkdir -p data3/pbmc_3k_cellranger
# mkdir -p data3/pbmc_3k_cellranger/outs
# cd data3/pbmc_3k_cellranger/outs
# curl -o filtered_feature_bc_matrix.h5 "https://cf.10xgenomics.com/samples/cell-exp/3.0.2/5k_pbmc_v3/5k_pbmc_v3_filtered_feature_bc_matrix.h5"
# -
# Scanpy version is downloaded from cellxgene github.
# + language="bash"
# cd data3/
# curl --location -o pbmc_3k_scanpy.h5ad "https://github.com/chanzuckerberg/cellxgene/raw/main/example-dataset/pbmc3k.h5ad"
# -
# Seurat version is downloaded from Satija's lab dropbox.
# + language="bash"
# cd data3/
# curl --location -o pbmc_3k_seurat.rds "https://www.dropbox.com/s/63gnlw45jf7cje8/pbmc3k_final.rds?dl=1"
# -
#
# ## Run checkatlas
# If checkatlas is installed in your environment, you just need to run this cell. This will produce all metric tables and figures needed.
# + language="bash"
# python -m checkatlas data3/
# -
# ## Run MultiQC
# Once checkatlas has been run, all tables and fig cazn be found in the checkatlas_files folder. MultiQC will retrieve these files and create the html summary files.
# WARNING: Install and run only MultiQC from https://github.com/becavin-lab/MultiQC/tree/checkatlas. Otherwise checkatlas files will not be taken into account.
# + language="bash"
# multiqc -f --cl-config "ignore_images: false" -c multiqc_config.yaml -n "CheckAtlas_example_3" -o "CheckAtlas_example_3" data3/
# -
# If multiqc ran without error an html report has been created in CheckAtlas_example1/CheckAtlas_example1.html<br>
# <big>Open it and check your atlases ! </big>
# +
# Embed the MultiQC HTML report generated above directly in the notebook.
from IPython.display import IFrame
IFrame(
    src="CheckAtlas_example_3/CheckAtlas_example_3.html",
    width="100%",
    height="500px",
)
| examples/AtlasType_comparison.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# language: python
# name: python3
# ---
# + [markdown] id="WaL0SHY4Za3P"
# ## Mount google drive
# + colab={"base_uri": "https://localhost:8080/"} id="yaEnTkj4pTJB" outputId="84f74f92-0f15-4676-b157-859a8e77fa87"
from google.colab import drive
# NOTE(review): _DEBUG is a private attribute of colab's drive.mount;
# presumably suppresses debug output -- confirm against the colab version used.
drive.mount._DEBUG = False
drive.mount('/content/gdrive/')
# %ls /content/gdrive/'My Drive'/Deeplearning/Lab_ML_Projects/Ions_in_confinement/collectDatawhole.dat
# + [markdown] id="FjRE19Rsa9ZK"
# ## Load modules
# + colab={"base_uri": "https://localhost:8080/", "height": 35} id="afOr5jTha7qe" outputId="99097518-e145-491c-bede-36113242f71a"
#Lib imports
# Core numerics/plotting, sklearn utilities for scaling and shuffling, and
# joblib/pickle for persisting the scaler and reading the dataset dump.
import tensorflow as tf
import numpy as np
import pandas as pd
import matplotlib.pyplot as plt
# %matplotlib inline
plt.style.use('default')
from sklearn.metrics import confusion_matrix
from sklearn.utils import shuffle
from sklearn.model_selection import train_test_split as spliter
from sklearn import preprocessing
import joblib
import pickle
# NOTE(review): duplicate import -- shuffle is already imported above.
from sklearn.utils import shuffle
from collections import defaultdict
# Last expression of the cell: Jupyter echoes the TF version.
tf.__version__
# + [markdown] id="Lcv2Jw-CaCDp"
# ## Read the dataset
# + id="rlw9kjQUAkAH"
# Base Drive folder holding the pickled, preprocessed density-profile data.
file_path= "/content/gdrive/My Drive/Deeplearning/Lab_ML_Projects/confinement2/data/"
with open(file_path+ 'data_dump_density_preprocessed_V2.pk', 'rb') as handle:
    processed_all_data_preprocessed = pickle.load(handle)
# + [markdown] id="h3UjpYUVae8v"
# ## Preprocess the dataset and create numpy arrays from dictionary objects
#
# + colab={"base_uri": "https://localhost:8080/"} id="MKb9-ztUEu2F" outputId="e297bc83-d5fe-44ae-90ba-30c9ea630868"
# Flatten the pickled dict into parallel arrays:
#   input_data : simulation parameters parsed from each dict key
#   output     : pos+neg ion density profiles (2*NUM_OF_BINS values per row)
#   errors     : per-bin error bars; z_data : per-bin z coordinates
input_data = []
output = []
errors = []
z_data = []
charge_data = []
NUM_OF_BINS = 502
#exlclude_paras = {"c": ["0.1", "0.25"]}
exlclude_paras = {"c": []}
for key, data in processed_all_data_preprocessed.items():
    #print(key, data)
    density_profiles = []
    density_errors = []
    z_data_values = []
    # Keys look like "name1_value1_name2_value2_..."; even slots are names,
    # odd slots are the corresponding values.
    input_names = key.split("_")[0::2]
    input_paras = key.split("_")[1::2]
    ignore_this = False
    # Skip samples whose parameter value is black-listed in exlclude_paras.
    for key_p, params in exlclude_paras.items():
        if input_paras[input_names.index(key_p)] in params:
            ignore_this= True
            break
    if ignore_this:
        continue
    input_data.append(input_paras)
    # Per-species arrays: column 0 = bin center, 1 = density, 2 = error bar.
    density_profiles.append(data['pos'][:,1])
    density_profiles.append(data['neg'][:,1])
    output.append(density_profiles)
    density_errors.append(data['pos'][:,2])
    density_errors.append(data['neg'][:,2])
    errors.append(density_errors)
    z_data_values.append(data['pos'][:,0])
    z_data_values.append(data['neg'][:,0])
    z_data.append(z_data_values)
    #break
input_data = np.array(input_data)
output = np.array(output).reshape(-1,NUM_OF_BINS*2)
errors = np.array(errors).reshape(-1,NUM_OF_BINS*2)
z_data = np.array(z_data).reshape(-1,NUM_OF_BINS*2)
print("Input data shape: {}".format(input_data.shape))
print("Output data shape: {}".format(output.shape))
print("error bar data shape: {}".format(errors.shape))
print("Bin center data shape: {}".format(z_data.shape))
# + [markdown] id="63MgaHCTcYiL"
# ## Supporting functions
# + colab={"base_uri": "https://localhost:8080/", "height": 394} id="pcM9HAP0bSAm" outputId="4be9bb3f-992b-45e3-eb25-06fedd27cb9b"
def plot_density_profile(z_values, density, error_bars=None, label=None):
    """Plot positive- and negative-ion density profiles on a fresh figure.

    z_values and density are flat arrays of length 2*NUM_OF_BINS holding the
    positive-ion half followed by the negative-ion half; error_bars, when
    given, has the same layout and is drawn as symmetric y error bars.
    """
    z_pair = z_values.reshape(2, NUM_OF_BINS)
    rho_pair = density.reshape(2, NUM_OF_BINS)
    fig, ax = plt.subplots(nrows=1, ncols=1, figsize=(6, 4))
    species_labels = (str(label) + "-Pos", str(label) + "-Neg")
    if error_bars is None:
        for row, text in zip(range(2), species_labels):
            ax.plot(z_pair[row, :], rho_pair[row, :], label=text)
    else:
        err_pair = error_bars.reshape(2, NUM_OF_BINS)
        for row, text in zip(range(2), species_labels):
            ax.errorbar(z_pair[row, :], rho_pair[row, :], xerr=0.0,
                        yerr=err_pair[row, :], capsize=2, label=text)
    ax.axis('tight')
    ax.xaxis.set_tick_params(labelsize='medium')
    ax.yaxis.set_tick_params(labelsize='medium')
    ax.set_xlabel('Z (nm)', fontsize=16)
    ax.set_ylabel('Density (M)', fontsize=16)
    ax.legend()
# Sanity-check the plotting helper on one randomly chosen sample.
selected_index = np.random.randint(z_data.shape[0], size=1)[0]
plot_density_profile(z_data[selected_index], output[selected_index], errors[selected_index], label=input_data[selected_index])
# + [markdown] id="bPjHMC1qiMR2"
# ## Select data for model training
# + colab={"base_uri": "https://localhost:8080/"} id="4IxzxvE7iULO" outputId="22c4de52-7c30-486c-d0c1-f6db8f091c88"
# Shuffle once, then take the first 85% of rows for training and the rest for
# testing; inputs, outputs, error bars and bin centers are shuffled together
# so their rows stay aligned.
train_test_split = 0.85
print("Input data shape: {}".format(input_data.shape))
print("Output data shape: {}".format(output.shape))
input_data_suff, output_suff, errors_suff, z_data_shuff = shuffle(input_data, output, errors, z_data)
train_test_split_ = int(input_data_suff.shape[0]*train_test_split)
x_train = input_data_suff[0:train_test_split_]#.astype("float64")
x_test = input_data_suff[train_test_split_:]#.astype("float64")
y_train = output_suff[0:train_test_split_]#.astype("float64")
y_test = output_suff[train_test_split_:]#.astype("float64")
error_train = errors_suff[0:train_test_split_]#.astype("float64")
error_test = errors_suff[train_test_split_:]#.astype("float64")
z_data_train = z_data_shuff[0:train_test_split_]#.astype("float64")
z_data_test = z_data_shuff[train_test_split_:]#.astype("float64")
#x_train, x_test, y_train, y_test = spliter.train_test_split(input_data, output, test_size=(1-train_test_split), random_state=100)
print("Train input: ", x_train.shape)
print("Train Output", y_train.shape)
print("Test input: ", x_test.shape)
print("Test Output", y_test.shape)
# + [markdown] id="xbnaoBPsly0f"
# ## Input feature scaling
# + colab={"base_uri": "https://localhost:8080/"} id="zCAgajdllyUq" outputId="a675e1b4-f99b-44e5-c158-63b4238c5ff2"
#scaler = preprocessing.MinMaxScaler() # s the probably the most famous scaling algorithm, and follows the following formula for each feature:
#scaler = preprocessing.StandardScaler() # assumes your data is normally distributed within each feature
#scaler = preprocessing.RobustScaler() # interquartile range, so if there are outliers in the data, you might want to consider the Robust Scaler
#scaler = preprocessing.Normalizer() # The normalizer scales each value by dividing each value by its magnitude in n-dimensional space for n number of features.
# Fit on the training inputs only, then apply the same transform to the test
# inputs (avoids leaking test statistics into the fit).
scaler = preprocessing.MinMaxScaler(feature_range=(0, 1))
scaled_x_train = scaler.fit_transform(x_train)
#arr_transformed = sc.fit_transform(arr_selected)
scaled_x_test = scaler.transform(x_test)
# Save the scaler for future use
joblib.dump(scaler, file_path+'scaler_new_custom_errorV2.pkl')
# + [markdown] id="YnYjQyuTkVj4"
# ## Model parameters
# + id="lFz0nMfgpqIN"
# hyper parameters
learningRate = 0.0001
beta_1 = 0.9
beta_2 = 0.999
decay = 0.000000
batchSize = 32
dropout_rate=0.10
epochs= 20000
# Network Parameters
inputFeatures = 7
hiddenUnits1 = 256 # 1st layer number of neurons
hiddenUnits2 = 512 # 2nd layer number of neurons
#hiddenUnits3 = 512 # 3rd layer number of neurons
outputClasses = NUM_OF_BINS*2
# + [markdown] id="83Oe03pdlTMQ"
# ## ANN Model
# + [markdown] id="1e8OA3gZCJ1s"
# ### Custom loss
# + id="aIptCPHvKbvo"
from tensorflow.python.framework.ops import disable_eager_execution
# NOTE(review): presumably required so the custom loss below can close over
# the symbolic errorbar_input tensor at compile time -- confirm for the TF
# version in use.
disable_eager_execution()
# + id="QlHI7c6VwGKo"
#tf.reduce_mean(tf.square(y_actual - y_pred))
#<EMAIL>
def CustomLossWithErrorBars(errorBars):
    """Build a Keras loss penalizing deviations from both edges of the error band.

    For each output bin the loss averages the squared distance of the
    prediction from target + err/2 and from target - err/2, then reduces
    over the last (bin) axis.
    """
    half_band = errorBars / 2.0
    def loss(y_actual, y_pred):
        upper_term = 0.5 * tf.square(y_actual + half_band - y_pred)
        lower_term = 0.5 * tf.square(y_actual - half_band - y_pred)
        # Mean over the last axis, matching Keras' per-sample loss convention.
        return tf.reduce_mean(upper_term + lower_term, axis=-1)
    return loss
# + id="gqpmqzHeRkbB"
#This is He initializer
initializer = tf.keras.initializers.VarianceScaling(scale=2.0, mode='fan_in', distribution='truncated_normal', seed=None)
# Two-input functional model: the physical parameters feed the dense stack;
# the error-bar input exists only so the custom loss can read it.
physical_p_input = tf.keras.Input(shape=(inputFeatures,))
errorbar_input = tf.keras.Input(shape=(outputClasses,))
#, kernel_initializer=initializer
dense1 = tf.keras.layers.Dense(hiddenUnits1, activation="relu", kernel_initializer=initializer)(physical_p_input)
drop1 = tf.keras.layers.Dropout(rate=dropout_rate)(dense1)
dense2 = tf.keras.layers.Dense(hiddenUnits2, activation="sigmoid", kernel_initializer=initializer)(drop1)
drop2 = tf.keras.layers.Dropout(rate=dropout_rate)(dense2)
#dense3 = tf.keras.layers.Dense(outputClasses, activation="sigmoid", kernel_initializer=initializer)(drop2)
#drop3 = tf.keras.layers.Dropout(rate=dropout_rate)(dense3)
# ReLU on the output keeps predicted densities non-negative.
density_output = tf.keras.layers.Dense(outputClasses, activation="relu", kernel_initializer=initializer)(drop2)
model = tf.keras.Model(inputs=[physical_p_input, errorbar_input], outputs=[density_output], name="ions_surrogate_model2")
#loss=CustomLossWithErrorBars(errorBars=errorbar_input)
#loss=tf.keras.losses.mean_squared_error
# NOTE(review): `lr` and `decay` are deprecated Adam arguments in newer TF
# releases; switch to `learning_rate` if TF is upgraded.
model.compile(loss=CustomLossWithErrorBars(errorBars=errorbar_input), optimizer=tf.keras.optimizers.Adam(lr=learningRate, beta_1=beta_1,beta_2=beta_2, decay=decay), experimental_run_tf_function=False)
# + [markdown] id="Vhti_CpUlbka"
# ## Training
# + colab={"base_uri": "https://localhost:8080/"} id="nnYNZyyFp_5Y" outputId="1f69afa1-6539-4ed3-9cb0-5cab56efdee0"
# Train; the error bars are fed as a second model input solely for the loss.
history = model.fit(x=[scaled_x_train, error_train], y=y_train, epochs=epochs, batch_size = batchSize, verbose = 1, shuffle=True, validation_data = ([scaled_x_test, error_test], y_test))
# + [markdown] id="za_D80bNa8np"
# ## Final loss values with traditional loss function
# * learningRate = 0.0005, batchSize = 32, hiddenUnits1 = 512, hiddenUnits2 = 512, dropout_rate=0.15: : 95.1% accuracy
# * learningRate = 0.0005, batchSize = 32, hiddenUnits1 = 256, hiddenUnits2 = 512, dropout_rate=0.15: loss: 0.0016 - val_loss: 0.0012: 95.1% accuracy
# * learningRate = 0.0005, batchSize = 32, hiddenUnits1 = 128, hiddenUnits2 = 512, dropout_rate=0.15: loss: 0.0021 - val_loss: 0.0019 : 85.1% accuracy
#
#
#
#
# + [markdown] id="V8LhOb8FleAc"
# ## Training and testing error
# + id="XaKw2czWn3yl" colab={"base_uri": "https://localhost:8080/", "height": 490} outputId="b14785e1-ea63-464f-a28c-c52bf9bfc697"
# Training vs validation loss per epoch, on a log scale.
print(history.history.keys())
# summarize history for loss
plt.plot(history.history['loss'])
plt.plot(history.history['val_loss'])
plt.yscale('log')
plt.title('model loss')
plt.ylabel('loss')
plt.xlabel('epoch')
plt.legend(['train', 'test'], loc='upper left')
plt.show()
# + id="T87MapH6nQ3w" colab={"base_uri": "https://localhost:8080/", "height": 490} outputId="e079ff67-eaaa-4c15-ff82-5c730cdab25a"
# NOTE(review): this cell duplicates the loss plot in the previous cell.
print(history.history.keys())
# summarize history for loss
plt.plot(history.history['loss'])
plt.plot(history.history['val_loss'])
plt.yscale('log')
plt.title('model loss')
plt.ylabel('loss')
plt.xlabel('epoch')
plt.legend(['train', 'test'], loc='upper left')
plt.show()
# + [markdown] id="y2LAmJCKljWa"
# ## Model details and save the model
# + id="J3DJmrY649zV" colab={"base_uri": "https://localhost:8080/"} outputId="5ebfa1f5-01dd-421b-fc67-7f03b658d4dc"
# Print the architecture and persist the trained model to Drive.
model.summary()
model.save(file_path+ 'my_model_custom_error_v2_6.h5')
# + [markdown] id="k1bgoBt1n9gI"
# ## Load the models and predict
# + colab={"base_uri": "https://localhost:8080/"} id="GPcqYmPg5Yp8" outputId="2db81d2b-9adb-474a-ac5c-2a189919ef18"
scaler = joblib.load(file_path+'scaler_new_custom_errorV2.pkl')
#scaler = joblib.load(file_path+'scaler_new.pkl')
# compile=False: the custom loss is not needed for inference.
# NOTE(review): this loads v2_5 while the cell above saved v2_6 -- confirm
# which checkpoint is intended.
new_model = tf.keras.models.load_model(file_path+'my_model_custom_error_v2_5.h5', compile=False)
#new_model = tf.keras.models.load_model(file_path+'my_model_custom_error6.h5', compile=False) # this is 96 % accuracy model
#new_model = tf.keras.models.load_model(file_path+'my_model_custom_error4.h5', compile=False) # this is 94 % accuracy model
#new_model = tf.keras.models.load_model(file_path+ 'my_model_new.h5')
new_model.summary()
# + [markdown] id="DZftwz0PRPE_"
# # Evaluating testing accuracy within error bars
# + colab={"base_uri": "https://localhost:8080/", "height": 546} id="f1Laaq_4qLOp" outputId="f074e024-0166-403e-bb19-f946c8ecf160"
def evaluate_one_simulation(prediction, truth , errorbar):
    """Fraction of bins whose prediction lies within the measurement error bar.

    Parameters
    ----------
    prediction, truth, errorbar : 1-D numpy arrays of equal length.

    Returns
    -------
    float in [0, 1]: share of bins with |prediction - truth| <= errorbar.
    """
    # Vectorized replacement of the original per-bin Python loop.
    within_band = np.abs(prediction - truth) <= errorbar
    return np.count_nonzero(within_band) / prediction.shape[0]
def evaluate(prediction, truth , errorbar):
    """Per-bin accuracy over a batch of simulations.

    Parameters
    ----------
    prediction, truth, errorbar : (n_samples, n_bins) numpy arrays.

    Returns
    -------
    numpy array of length n_bins: for each bin (column), the fraction of
    samples whose prediction lies within the error bar.
    """
    # One vectorized pass instead of the original O(n_samples * n_bins)
    # nested Python loops.
    within_band = np.abs(prediction - truth) <= errorbar
    return within_band.mean(axis=0)
# Test-set accuracy: mean fraction of predictions inside the error band.
predictions = new_model.predict([scaler.transform(x_test), error_test])
acc = evaluate(predictions, y_test, error_test)
print(np.mean(acc))
plt.plot(acc*100.0)
plt.title('Accuracy Plot')
plt.ylabel('Accuracy %')
plt.xlabel('Prediction INX')
plt.show()
# + [markdown] id="RoxMu5vBRn_J"
# # Examples of prediction graphs for test data
# + colab={"base_uri": "https://localhost:8080/", "height": 448} id="ZIxcEpFSb23c" outputId="b1a07cd6-515f-402b-edca-165f61752828"
def plot_density_profile(ax, z_values, density, error_bars=None, label=None, title=None, color='b'):
    """Plot pos/neg density profiles on an existing axes.

    Redefines the earlier helper with explicit `ax`, `title` and `color`
    parameters so ground truth and prediction can share one figure.
    """
    z_values = z_values.reshape(2,NUM_OF_BINS)
    density = density.reshape(2,NUM_OF_BINS)
    if error_bars is None:
        ax.plot(z_values[0,:], density[0,:], label=str(label) + "-Pos", color=color)
        ax.plot(z_values[1,:], density[1,:], label=str(label) + "-Neg", color=color)
    else:
        error_bars = error_bars.reshape(2,NUM_OF_BINS)
        ax.errorbar(z_values[0,:], density[0,:], xerr=0.0, yerr=error_bars[0,:], capsize=2, label=str(label) + "-Pos", color=color)
        ax.errorbar(z_values[1,:], density[1,:], xerr=0.0, yerr=error_bars[1,:], capsize=2, label=str(label) + "-Neg", color=color)
    print(title)
    ax.set_title(title)
    ax.axis('tight')
    ax.xaxis.set_tick_params(labelsize='medium')
    ax.yaxis.set_tick_params(labelsize='medium')
    ax.set_xlabel('Z (nm)', fontsize=16)
    ax.set_ylabel('Density (M)', fontsize=16)
    #ax.legend()
# Overlay ground truth (blue, with error bars) and prediction (red) for one
# random test sample, then report its within-error-bar accuracy.
selected_index = np.random.randint(x_test.shape[0], size=1)[0]
fig, ax = plt.subplots(nrows=1, ncols=1, figsize=(6, 4))
plot_density_profile(ax, z_data_test[selected_index], y_test[selected_index], error_test[selected_index], label="ground truth", title=str(x_test[selected_index]), color='b')
plot_density_profile(ax, z_data_test[selected_index], predictions[selected_index], error_bars=None, label="prediction", title="", color='r')
ax.legend()
print(evaluate_one_simulation(predictions[selected_index], y_test[selected_index] , error_test[selected_index]))
#x_inverse = scaler.inverse_transform(X_test)
# + colab={"base_uri": "https://localhost:8080/"} id="g-_43ASG9JW6" outputId="87c372d1-91c7-41cf-d71c-b27a7e87d445"
# Spot-check the first 200 predicted bin values of the selected sample.
print(predictions[selected_index][0:200])
# + [markdown] id="_rj3-ERN8h8d"
# # Evaluating Train accuracy within error bars
# + colab={"base_uri": "https://localhost:8080/", "height": 490} id="oY9NDYXJ8hOU" outputId="b695767a-17c5-4055-9876-113d9fb2e957"
# Same accuracy evaluation as above, but on the training set.
predictions_train = new_model.predict([scaler.transform(x_train), error_train])
acc = evaluate(predictions_train, y_train, error_train)
print(np.mean(acc))
plt.plot(acc*100.0)
plt.title('Accuracy Plot')
plt.ylabel('Accuracy %')
plt.xlabel('Prediction INX')
plt.show()
# + [markdown] id="6WjS3oqOxTzX"
# # Examples of prediction graphs for train data
# + colab={"base_uri": "https://localhost:8080/", "height": 452} id="pvdpC9oTxTR7" outputId="d827b4d5-8b10-4eb8-a645-5a6f57a38201"
def plot_density_profile(ax, z_values, density, error_bars=None, label=None, title=None, color='b'):
    """Plot pos/neg density profiles on an existing axes.

    NOTE(review): identical to the definition in the test-data cell above;
    kept so this cell runs standalone.
    """
    z_values = z_values.reshape(2,NUM_OF_BINS)
    density = density.reshape(2,NUM_OF_BINS)
    if error_bars is None:
        ax.plot(z_values[0,:], density[0,:], label=str(label) + "-Pos", color=color)
        ax.plot(z_values[1,:], density[1,:], label=str(label) + "-Neg", color=color)
    else:
        error_bars = error_bars.reshape(2,NUM_OF_BINS)
        ax.errorbar(z_values[0,:], density[0,:], xerr=0.0, yerr=error_bars[0,:], capsize=2, label=str(label) + "-Pos", color=color)
        ax.errorbar(z_values[1,:], density[1,:], xerr=0.0, yerr=error_bars[1,:], capsize=2, label=str(label) + "-Neg", color=color)
    print(title)
    ax.set_title(title)
    ax.axis('tight')
    ax.xaxis.set_tick_params(labelsize='medium')
    ax.yaxis.set_tick_params(labelsize='medium')
    ax.set_xlabel('Z (nm)', fontsize=16)
    ax.set_ylabel('Density (M)', fontsize=16)
    #ax.legend()
#ax.legend()
selected_index = np.random.randint(x_train.shape[0], size=1)[0]
fig, ax = plt.subplots(nrows=1, ncols=1, figsize=(6, 4))
plot_density_profile(ax, z_data_train[selected_index], y_train[selected_index], error_train[selected_index], label="ground truth", title=str(x_train[selected_index]), color='b')
plot_density_profile(ax, z_data_train[selected_index], predictions_train[selected_index], error_bars=None, label="prediction", title="", color='r')
ax.legend()
print(evaluate_one_simulation(predictions_train[selected_index], y_train[selected_index] , error_train[selected_index]))
#x_inverse = scaler.inverse_transform(X_test)
# + [markdown] id="Y2uZGOTBn00s"
# ## Integrated Charge Testing samples
# + colab={"base_uri": "https://localhost:8080/", "height": 508} id="YJaMumz6n0TE" outputId="1436e7c1-c343-4477-a974-5cae531be9c0"
def get_integrated_charge(density_pos, density_neg, clen_value, charge_on_wall_value):
    """Integrate the net ionic charge from the left wall inward.

    Parameters
    ----------
    density_pos, density_neg : (n_bins, 2) or (n_bins, 3) numpy arrays
        Columns are (z, density[, error]); z in nm, density in M.
    clen_value : float
        Confinement length (nm); shifts z so the left wall sits at 0.
    charge_on_wall_value : float
        Wall surface charge used as the integration constant.

    Returns
    -------
    (n_bins//2, 2) array of (z + clen/2, cumulative charge), with a third
    error column when the inputs carry error columns. Only the left half of
    the profile is returned.
    """
    bin_width_nm = abs(density_pos[0,0] - density_pos[1,0])
    xx = density_pos[:,0] + clen_value/2
    # Net charge per bin.
    # NOTE(review): the 0.096 unit-conversion factor is taken over from the
    # original implementation -- confirm its derivation.
    per_bin_charge = (density_pos[:,1] - density_neg[:,1])*bin_width_nm*0.096
    # Running integral starting from the wall charge (replaces the original
    # element-by-element Python accumulation loop).
    yy = charge_on_wall_value + np.cumsum(per_bin_charge)
    # Keep only the half of the profile nearest the left wall.
    half = xx.shape[0]//2
    xx_half = xx[:half]
    yy_half = yy[:half]
    if density_pos.shape[1] == 3:
        # Propagate the per-bin density errors in quadrature.
        yyrr = np.sqrt(density_pos[:,2]**2 + density_neg[:,2]**2)*bin_width_nm*0.096
        return np.column_stack((xx_half, yy_half, yyrr[:half]))
    return np.column_stack((xx_half, yy_half))
# Integrated-charge comparison (ground truth vs prediction) for one random
# TEST sample.
selected_index = np.random.randint(x_test.shape[0], size=1)[0]
label_ = x_test[selected_index]
#label_ = 'Z_4.2_p_1_n_-1_d_0.63_a_0.3075_c_1.0_i_-0.02'
#label_ = "Z_5_p_1_n_-1_d_0.2_a_0.63_c_2.0_i_-0.01"
print(label_)
z_values = z_data_test[selected_index].reshape(2,NUM_OF_BINS)
density = y_test[selected_index].reshape(2,NUM_OF_BINS)
errors_ = error_test[selected_index].reshape(2,NUM_OF_BINS)
density_pos = np.column_stack((z_values[0,:],density[0,:],errors_[0,:]))
density_neg = np.column_stack((z_values[1,:],density[1,:],errors_[1,:]))
# First parameter is the confinement length (key name "Z"), last is the wall
# charge ("i") -- see the commented example keys above.
clen_value = float(label_[0])
charge_on_wall_value = float(label_[-1])
#charge_on_wall_value = processed_all_data_preprocessed[label_]['surf_charge']
integrated_charge = get_integrated_charge(density_pos, density_neg, clen_value, charge_on_wall_value)
plt.figure('Distribution of integrated charge')
plt.title(str(label_))
plt.xlabel('Distance from left surface z + h/2 (nm)', fontsize=10)
plt.ylabel('Integrated charge S (C/m$^2$)', fontsize=10)
plt.errorbar(integrated_charge[:,0],integrated_charge[:,1],xerr=0.0, yerr=integrated_charge[:,2], c='b', ecolor='r', capsize=2, label="ground truth")
#------------------------------------------------------------
# Same integration applied to the model prediction.
z_values = z_data_test[selected_index].reshape(2,NUM_OF_BINS)
density = predictions[selected_index].reshape(2,NUM_OF_BINS)
errors_ = error_test[selected_index].reshape(2,NUM_OF_BINS)
density_pos = np.column_stack((z_values[0,:],density[0,:],errors_[0,:]))
density_neg = np.column_stack((z_values[1,:],density[1,:],errors_[1,:]))
integrated_charge = get_integrated_charge(density_pos, density_neg, clen_value, charge_on_wall_value)
plt.plot(integrated_charge[:,0],integrated_charge[:,1], c='green', label="Prediction")
#print(np.column_stack((xx_half,yy_half)))
plt.legend()
# + [markdown] id="NNxFflZOkq3U"
# ## Integrated Charge Training samples
# + colab={"base_uri": "https://localhost:8080/", "height": 508} id="jveykH5D8xUK" outputId="38adb0ba-9626-4dba-9c3c-bcf0798d88d5"
# Integrated-charge comparison for one random TRAINING sample (mirrors the
# test-sample cell above).
selected_index = np.random.randint(x_train.shape[0], size=1)[0]
label_ = x_train[selected_index]
#label_ = 'Z_4.2_p_1_n_-1_d_0.63_a_0.3075_c_1.0_i_-0.02'
#label_ = "Z_5_p_1_n_-1_d_0.2_a_0.63_c_2.0_i_-0.01"
print(label_)
z_values = z_data_train[selected_index].reshape(2,NUM_OF_BINS)
density = y_train[selected_index].reshape(2,NUM_OF_BINS)
errors_ = error_train[selected_index].reshape(2,NUM_OF_BINS)
density_pos = np.column_stack((z_values[0,:],density[0,:],errors_[0,:]))
density_neg = np.column_stack((z_values[1,:],density[1,:],errors_[1,:]))
# First parameter = confinement length ("Z"), last = wall charge ("i").
clen_value = float(label_[0])
charge_on_wall_value = float(label_[-1])
#charge_on_wall_value = processed_all_data_preprocessed[label_]['surf_charge']
integrated_charge = get_integrated_charge(density_pos, density_neg, clen_value, charge_on_wall_value)
plt.figure('Distribution of integrated charge')
plt.title(str(label_))
plt.xlabel('Distance from left surface z + h/2 (nm)', fontsize=10)
plt.ylabel('Integrated charge S (C/m$^2$)', fontsize=10)
plt.errorbar(integrated_charge[:,0],integrated_charge[:,1],xerr=0.0, yerr=integrated_charge[:,2], c='b', ecolor='r', capsize=2, label="ground truth")
#------------------------------------------------------------
# Same integration applied to the model prediction.
z_values = z_data_train[selected_index].reshape(2,NUM_OF_BINS)
density = predictions_train[selected_index].reshape(2,NUM_OF_BINS)
errors_ = error_train[selected_index].reshape(2,NUM_OF_BINS)
density_pos = np.column_stack((z_values[0,:],density[0,:],errors_[0,:]))
density_neg = np.column_stack((z_values[1,:],density[1,:],errors_[1,:]))
integrated_charge = get_integrated_charge(density_pos, density_neg, clen_value, charge_on_wall_value)
plt.plot(integrated_charge[:,0],integrated_charge[:,1], c='green', label="Prediction")
#print(np.column_stack((xx_half,yy_half)))
plt.legend()
# + [markdown] id="rFlllSC8MD1u"
# ## Predict everything from the input parameters
# + colab={"base_uri": "https://localhost:8080/"} id="CtCGOVLjMMkn" outputId="7aa45112-ea35-40c0-ac4d-af2bf3eda205"
# Dead code kept for reference (string literal, never executed): an earlier
# check that the bin-center grid rebuilt from the input parameters matches
# the stored z_data_train values.
'''
def get_bin_values_for_input(salt_value, pion_value, nion_value, clen_value, ion_diam_value_pos, ion_diam_value_neg, charge_on_wall_value):
    bin_width = 0.05
    unit_len = min(ion_diam_value_pos, ion_diam_value_neg)
    lz = clen_value/unit_len
    number_of_bins = int(lz // bin_width)# floor
    #bin_width = lz / number_of_bins
    most_right_bin = bin_width/2.0
    while (most_right_bin + bin_width/2.0) <= (lz/2+0.00001):
        most_right_bin = np.round(most_right_bin + bin_width, 6)
    most_left_bin = -most_right_bin
    start =most_left_bin#-2.505
    stop = most_right_bin#2.505
    z_values = np.linspace(start, stop, num=502, endpoint=True)
    z_values = np.append(z_values,z_values)*unit_len
    return z_values
selected_index=np.random.randint(x_train.shape[0], size=1)[0]#1478##2#10#52#2914
print(selected_index, x_train[selected_index])
salt_value = 0.1
pion_value = 1
nion_value = -1
clen_value = 4.4
ion_diam_value_pos = 0.63
ion_diam_value_neg = 0.5225
charge_on_wall_value = -0.015
salt_value = float(x_train[selected_index][5])
pion_value = float(x_train[selected_index][1])
nion_value = float(x_train[selected_index][2])
clen_value = float(x_train[selected_index][0])
ion_diam_value_pos = float(x_train[selected_index][3])
ion_diam_value_neg = float(x_train[selected_index][4])
charge_on_wall_value = float(x_train[selected_index][6])
z_values = get_bin_values_for_input(salt_value, pion_value, nion_value, clen_value, ion_diam_value_pos, ion_diam_value_neg, charge_on_wall_value)
print(z_values)
np.sum(abs(z_data_train[selected_index] - z_values))
'''
# + colab={"base_uri": "https://localhost:8080/", "height": 941} id="4l8DUvXy_zs8" outputId="f2b3e05f-da7e-4c14-d23f-3f10334f4b41"
def get_bin_values_for_input(salt_value, pion_value, nion_value, clen_value, ion_diam_value_pos, ion_diam_value_neg, charge_on_wall_value):
    """Reconstruct the bin-center z coordinates (in nm) for a parameter set.

    The grid is laid out in reduced units (lengths divided by the smaller
    ion diameter) with a fixed bin width of 0.05; the symmetric 502-point
    grid is duplicated (positive + negative species) and scaled back to nm.
    Only the diameters and clen_value affect the result; the other
    parameters are accepted for a uniform call signature.
    """
    bin_width = 0.05
    unit_len = min(ion_diam_value_pos, ion_diam_value_neg)
    lz = clen_value / unit_len
    number_of_bins = int(lz // bin_width)  # floor; kept from original, unused
    # March the outermost bin center outward until its right edge would
    # cross the half-box boundary (small epsilon absorbs float noise).
    edge_limit = lz / 2 + 0.00001
    most_right_bin = bin_width / 2.0
    while most_right_bin + bin_width / 2.0 <= edge_limit:
        most_right_bin = np.round(most_right_bin + bin_width, 6)
    grid = np.linspace(-most_right_bin, most_right_bin, num=502, endpoint=True)
    # Two identical copies: first half for positive ions, second for negative.
    return np.append(grid, grid) * unit_len
def plot_density_profile(ax, z_values, density, label=None, title=None):
    """Plot pos (blue) / neg (red) density profiles on an existing axes.

    NOTE(review): another redefinition of the plotting helper; this variant
    has no error-bar support since model predictions carry none.
    """
    z_values = z_values.reshape(2,NUM_OF_BINS)
    density = density.reshape(2,NUM_OF_BINS)
    ax.plot(z_values[0,:], density[0,:], label=str(label) + "-Pos", color='b')
    ax.plot(z_values[1,:], density[1,:], label=str(label) + "-Neg", color='r')
    print(title)
    ax.set_title(title)
    ax.axis('tight')
    ax.xaxis.set_tick_params(labelsize='medium')
    ax.yaxis.set_tick_params(labelsize='medium')
    ax.set_xlabel('Z (nm)', fontsize=16)
    ax.set_ylabel('Density (M)', fontsize=16)
    #ax.legend()
def get_integrated_charge(density_pos, density_neg, clen_value, charge_on_wall_value):
    """Integrate the net ionic charge from the left wall inward.

    NOTE(review): redefinition of the helper defined earlier in the
    notebook, kept so this cell runs standalone.

    Parameters
    ----------
    density_pos, density_neg : (n_bins, 2) or (n_bins, 3) numpy arrays
        Columns are (z, density[, error]); z in nm, density in M.
    clen_value : float
        Confinement length (nm); shifts z so the left wall sits at 0.
    charge_on_wall_value : float
        Wall surface charge used as the integration constant.

    Returns
    -------
    (n_bins//2, 2) array of (z + clen/2, cumulative charge), with a third
    error column when the inputs carry error columns.
    """
    bin_width_nm = abs(density_pos[0,0] - density_pos[1,0])
    xx = density_pos[:,0] + clen_value/2
    # Net charge per bin; the 0.096 unit factor is taken over from the
    # original implementation -- confirm its derivation.
    per_bin_charge = (density_pos[:,1] - density_neg[:,1])*bin_width_nm*0.096
    # Running integral (replaces the original Python accumulation loop).
    yy = charge_on_wall_value + np.cumsum(per_bin_charge)
    # Keep only the half of the profile nearest the left wall.
    half = xx.shape[0]//2
    xx_half = xx[:half]
    yy_half = yy[:half]
    if density_pos.shape[1] == 3:
        # Propagate the per-bin density errors in quadrature.
        yyrr = np.sqrt(density_pos[:,2]**2 + density_neg[:,2]**2)*bin_width_nm*0.096
        return np.column_stack((xx_half, yy_half, yyrr[:half]))
    return np.column_stack((xx_half, yy_half))
###############--------------------------------------------------
# Predict a density profile for a hand-picked parameter set (no simulation
# data needed) and integrate its charge.
salt_value = 2.0
pion_value = 1
nion_value = -1
clen_value = 4.4
ion_diam_value_pos = 0.63
ion_diam_value_neg = 0.5225
charge_on_wall_value = -0.02
z_values = get_bin_values_for_input(salt_value, pion_value, nion_value, clen_value, ion_diam_value_pos, ion_diam_value_neg, charge_on_wall_value)
# NOTE(review): the training features parsed from the keys appear ordered
# [clen, pion, nion, d_pos, d_neg, salt, charge] (see the dead-code cell
# above), while this vector puts salt first -- confirm the feature order.
input_parms = np.array([salt_value, pion_value, nion_value, clen_value, ion_diam_value_pos, ion_diam_value_neg, charge_on_wall_value]).reshape(1, -1)
# NOTE(review): rebinds (clobbers) the training error array name with a
# dummy all-zero error input used only for prediction.
error_train = np.zeros(shape = z_values.shape).reshape(1, -1)
predictions_new = new_model.predict([scaler.transform(input_parms), error_train])
fig, ax = plt.subplots(nrows=1, ncols=1, figsize=(6, 4))
plot_density_profile(ax, z_values, predictions_new, label="prediction", title=input_parms)
ax.legend()
############-----------------------------------------
#charge_on_wall_value = processed_all_data_preprocessed[label_]['surf_charge']
z_values_ = z_values.reshape(2,NUM_OF_BINS)
density = predictions_new.reshape(2,NUM_OF_BINS)
density_pos = np.column_stack((z_values_[0,:],density[0,:]))
density_neg = np.column_stack((z_values_[1,:],density[1,:]))
integrated_charge = get_integrated_charge(density_pos, density_neg, clen_value, charge_on_wall_value)
plt.figure('Distribution of integrated charge')
plt.xlabel('Distance from left surface z + h/2 (nm)', fontsize=10)
plt.ylabel('Integrated charge S (C/m$^2$)', fontsize=10)
plt.plot(integrated_charge[:,0],integrated_charge[:,1], c='b', label="ground truth")
| python/surrogate2/Ions_surrogate_training_modified_loss.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3.8.10 64-bit
# language: python
# name: python3
# ---
# ## **Curso Intro Python**
#
# ##### Editor: <NAME>
# ##### Fecha: 07/02/2022
# ### Módulo 3: Usar lógica booleana
#
# #### **Problema 1:**
# Se solicita hacer un programa que determine si es necesario enviar una advertencia por el peligro de que un asteroide se acerca a una velocidad muy rápida. Sería peligroso si la velocidad es mayor a 25 km/s.
# +
# First read the asteroid speed to decide whether or not a warning must be
# sent to the people on Earth.
# Step 1: declare the asteroid speed variable (km/s).
velAsteroide = 49
#velAsteroide = float(input("Oficial ingrese la velocidad del asteroide según sus cálculos en km/s."))
# Step 2: send the warning when the asteroid speed exceeds 25 km/s.
if (velAsteroide > 25):
    print("¡Houston tenemos problemas!, un asteroide con una velocidad de", velAsteroide, "km/s se dirige muy rápido a Tierra.")
else:
    print("La velocidad del asteroide de", velAsteroide, "km/s no afectará la Tierra.")
# -
# #### **Problema 2:**
# Se solicita determinar si enviar un mensaje a la gente para que vea el rayo de luz cuando un asteroide va a una velocidad mayor o igual a 20 km/s.
#
# +
# Step 1: declare the asteroid speed variable (km/s).
velAsteroide = 19
#velAsteroide = float(input("Oficial, ingrese la velocidad del asteroide en km/s"))
# Step 2: tell people to look up at the light trail produced by an asteroid
# travelling at 20 km/s or faster.
if (velAsteroide >= 20):
    print("Gente en la Tierra miren hacia el cielo, un asteroide esta viajando a una velocidad de ", velAsteroide, "km/s.\n ¡¡¡Es fántastico!!!!")
else:
    print("Por el momento no hay asteroides que observar en el cielo.")
# -
# #### **Problema 3:**
# Ahora se solicita crear un programa que indique si el asteroide es problema para la Tierra de acuerdo a su tamaño y velocidad.
# * Se considera que si el tamaño del asteroide es mayor que 25 pero menor que 1000 metros es peligroso.
# * Si la velocidad del asteroide es mayor que 25 km/s es peligroso y se debe enviar una advertencia.
# * Si el asteroide entra a la atmósfera y su velocidad es mayor o igual a 20 km/s, se debe indicar a la gente que pueden mirar hacia el cielo para ver el rayo de luz que se emite por su paso.
#
# +
# Step 1: ask for the asteroid size and speed.
tamanoAsteroide = float(input("Oficial, ingrese el tamaño del asteroide en metros [m]."))
print("Tamaño del asteroide:", tamanoAsteroide, "[m]")
velAsteroide = float(input("Oficial, ingrese la velocidad asteroide en kilómetros por segundo [km/s]."))
print("Velocidad del asteroide:", velAsteroide, "[km/s]")
# Step 2: decide whether size and speed make the asteroid a danger to Earth.
# NOTE(review): an asteroid larger than 1000 m moving at <= 20 km/s falls
# through to the generic "not a danger" branch -- confirm that is intended.
if (25 < tamanoAsteroide < 1000) and (velAsteroide > 25):
    print("¡¡¡Peligro!!!!\n Un asteroide de", tamanoAsteroide, "m se acerca muy rápido a la Tierra con una velocidad de", velAsteroide, "km/s.")
elif(tamanoAsteroide < 25):
    print("El asteroide de tamaño", tamanoAsteroide, "m no es posible verlo, no voltees al cielo.")
elif(velAsteroide >= 20):
    print("El asteroide de tamaño", tamanoAsteroide, "m es posible verlo, ya que va a una velocidad de", velAsteroide, "km/s", "\n ¡¡Mira hacia el cielo es fantástico!!")
else:
    print("El asteroide por el momento no es un peligro.")
| Module_3/Module_3_kata.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# language: python
# name: python3
# ---
# %load_ext autoreload
# %autoreload 2
# # Load Packages
# +
import sys
# Make the package in the parent directory importable from this notebook.
sys.path.append('..')
import matplotlib.pyplot as plt
# %matplotlib inline
from numpy_fracdiff import fracdiff
import numpy as np
from statsmodels.tsa.stattools import adfuller
import scipy.optimize
# -
# # Load Data
# Load the demo price series; drop the first row (01-Jan).
with np.load('data/demo1.npz') as data:
    X = data['px']
    #t = data['t']
X = X[1:] # chop 01-Jan
len(X)
# # Example
# Transform all $X$ time series with `fracdiff` by the fractal order $d=0.3$.
# Truncate at 100 (i.e. chop the first 100 NANs too).
# Fractionally difference all series with order d=0.3, then drop the first
# 100 rows (the truncated warm-up region filled with NaN).
Z = fracdiff(X, order=0.3, truncation=100)
#np.isnan(Z[100:]).sum(axis=0)
Z = Z[100:]
# Run the ADF test on all 0.3 fractal differentiated times series
for j in range(4):
adf, pval, _, _, _, _ = adfuller(Z[:, j], regression='c', autolag='BIC')
print("p-values: {:5.4f} | ADF: {:>6.3f}".format(pval, adf))
# # Backtracking
# For $d=1$ we usually get a stationary time series transform.
# Thus, let start at $d=1$ and reduce towards $d=0$,
# and stop when the p-value exceeds the threshold $\alpha=0.01$.
# +
# %%time
x = X[:, 0] # pick the 1st time series
n_steps = 30
order = 1
n_trunc = 100
alpha = 0.001
bestorder = order
# Sweep the differencing order d from (n_steps-1)/n_steps down to 0.0
# in steps of 1/n_steps.
for order in np.flip(np.arange(n_steps) / n_steps):
    z = fracdiff(x, order=order, truncation=n_trunc)
    stat, pval, _, _, crit, _ = adfuller(z[n_trunc:], regression='c', autolag='BIC')
    print("d: {:5.4f} | DF:{: 7.4f} | crit:{: 7.4f} | p-val: {:1.2E}".format(
        order, stat, crit['1%'], pval))
    if (stat < crit['1%']) and (pval < alpha):
        # This d still rejects the unit root at the 1% level; remember it.
        # Because the `break` below is disabled, the full range is scanned and
        # `bestorder` ends up as the SMALLEST d that passed the test.
        bestorder = order
    else:
        #break
        pass
print(f"best d={bestorder}")
# -
# # Bisection
# We will use difference between the ADF test p-value and required threshold $\alpha$.
# The bisections requires the sign of this differences.
def loss_fn(d: float, alpha: float, x: np.array, n_trunc: int) -> float:
    """Signed gap between the ADF statistic and (a hair past) its 1% critical value.

    Negative values mean the fractionally differenced series rejects the unit
    root at the 1% level; the sign change is what the bracketing root finders
    (bisect/ridder/brenth/brentq) locate.  `alpha` is accepted for signature
    compatibility but does not enter the returned value.
    """
    differenced = fracdiff(x, order=d, truncation=n_trunc)
    adf_stat, _pval, _, _, critical, _ = adfuller(
        differenced[n_trunc:], regression='c', autolag='BIC')
    # The 1.0001 factor nudges the threshold so the root sits strictly past it.
    return adf_stat - critical['1%'] * 1.0001
loss_fn(0, alpha, x, n_trunc), loss_fn(1, alpha, x, n_trunc)
# Also note, that the `xtol` parameter doesn't need to be super precise.
# We will abort if the p-value is 1% away from $\alpha$, i.e. `xtol=alpha*.01`
# +
x = X[:, 0] # pick the 1st time series
n_trunc = 100
alpha = 0.01
# %time d = scipy.optimize.bisect(loss_fn, 0.01, 1.5, args=(alpha, x, n_trunc), xtol=1e-04)
d
# -
# The Ridder method is faster than the bisection method.
# +
x = X[:, 0] # pick the 1st time series
n_trunc = 100
alpha = 0.01
# %time d = scipy.optimize.ridder(loss_fn, 0.01, 1.5, args=(alpha, x, n_trunc), xtol=1e-04)
d
# +
x = X[:, 0] # pick the 1st time series
n_trunc = 100
alpha = 0.01
# %time d = scipy.optimize.brenth(loss_fn, 0.01, 1.5, args=(alpha, x, n_trunc), xtol=1e-04)
d
# +
x = X[:, 0] # pick the 1st time series
n_trunc = 100
alpha = 0.01
# %time d = scipy.optimize.brentq(loss_fn, 0.01, 1.5, args=(alpha, x, n_trunc), xtol=1e-04)
d
# -
z = fracdiff(x, order=d, truncation=n_trunc)
stat, pval, _, _, crit, _ = adfuller(z[n_trunc:], regression='c', autolag='BIC')
print("d: {:5.4f} | DF:{: 7.4f} | crit:{: 7.4f} | p-val: {:1.2E}".format(
d, stat, crit['1%'], pval))
# # Squared Errors
# We will use the squared difference betweent the ADF test p-value and required threshold $\alpha$ as target function for a minimization problem.
#
# $$
# \min_d \; ({\rm DF(d) - crit})^2
# $$
def loss_fn(d: float, x: np.array, n_trunc: int) -> float:
    """Squared distance between the ADF statistic and its 1% critical value.

    Minimization target: the minimum (ideally zero) lies at the order d where
    the statistic just reaches the critical value.
    """
    differenced = fracdiff(x, order=d, truncation=n_trunc)
    adf_stat, _pval, _, _, critical, _ = adfuller(
        differenced[n_trunc:], regression='c', autolag='BIC')
    gap = adf_stat - critical['1%']
    return gap * gap
# The newton method is kind of unstable depending on the start value `x0` (e.g. 0.0 and 1.0 will fail)
# +
x = X[:, 0] # pick the 1st time series
n_trunc = 100
alpha = 0.01
# %time d = scipy.optimize.newton(loss_fn, 0.5, args=(x, n_trunc), tol=(alpha*.01)**2, maxiter=500)
d
# -
def loss_fn2(d: float, x: np.array, n_trunc: int) -> float:
    """Like loss_fn, but additionally penalizes the ADF p-value itself."""
    differenced = fracdiff(x, order=d, truncation=n_trunc)
    adf_stat, pvalue, _, _, critical, _ = adfuller(
        differenced[n_trunc:], regression='c', autolag='BIC')
    return (adf_stat - critical['1%']) ** 2 + pvalue ** 2
# +
x = X[:, 0] # pick the 1st time series
n_trunc = 100
alpha = 0.01
# %time d = scipy.optimize.fminbound(loss_fn, 0.01, 1.5, args=(x, n_trunc), xtol=1e-04, maxfun=200)
print(d)
# %time d = scipy.optimize.fminbound(loss_fn2, 0.01, 1.5, args=(x, n_trunc), xtol=1e-04, maxfun=200)
print(d)
# -
z = fracdiff(x, order=d, truncation=n_trunc)
stat, pval, _, _, crit, _ = adfuller(z[n_trunc:], regression='c', autolag='BIC')
print("d: {:5.4f} | DF:{: 7.4f} | crit:{: 7.4f} | p-val: {:1.2E}".format(
d, stat, crit['1%'], pval))
| examples/Select an Algorithm.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# ---
# + application/vnd.databricks.v1+cell={"inputWidgets": {}, "nuid": "435c2fca-5095-49c4-8c99-c462a84daba0", "showTitle": false, "title": ""}
#visualizando datasets
display(dbutils.fs.ls("/databricks-datasets"))
# + application/vnd.databricks.v1+cell={"inputWidgets": {}, "nuid": "2327886e-1daf-4842-95ba-c280fc0e29a7", "showTitle": false, "title": ""}
#lendo o arquivo
arquivo="dbfs:/databricks-datasets/flights/"
df=spark\
.read.format("csv")\
.option("inferSchema","True")\
.option("header","True")\
.csv(arquivo)
df.printSchema()
# + application/vnd.databricks.v1+cell={"inputWidgets": {}, "nuid": "3c344846-ba77-4397-ba76-39f6bbbeb92c", "showTitle": false, "title": ""}
#tipo do arquivo
type(df)
# + application/vnd.databricks.v1+cell={"inputWidgets": {}, "nuid": "f7979814-1395-4b4c-9650-9451e845f682", "showTitle": false, "title": ""}
# Return the first 5 rows as a list of Row objects
# (fix: the original comment said "first 3 rows", but the code takes 5).
df.take(5)
# + application/vnd.databricks.v1+cell={"inputWidgets": {}, "nuid": "34ce8234-eebe-4ee4-80b5-2fd3ee1ad9d7", "showTitle": false, "title": ""}
#usando o comando display
display(df.show(3))
# + application/vnd.databricks.v1+cell={"inputWidgets": {}, "nuid": "1ecf2bdb-5ca0-424d-99ec-b5ec89eb3abd", "showTitle": false, "title": ""}
#quantidade de linhas
df.count()
# + application/vnd.databricks.v1+cell={"inputWidgets": {}, "nuid": "0af51d2d-5759-429e-ae2d-c8e7f4d8c1e4", "showTitle": true, "title": "Consultando dados do dataframe"}
from pyspark.sql.functions import max  # NOTE: shadows Python's builtin max in this notebook
# Largest delay in the dataset
df.select(max("delay")).take(1)
# + application/vnd.databricks.v1+cell={"inputWidgets": {}, "nuid": "d9590ea5-704e-4cc7-9dbe-65b2ddcd410c", "showTitle": false, "title": ""}
#delay menor que dois
df.filter("delay < 2").show(2)
# + application/vnd.databricks.v1+cell={"inputWidgets": {}, "nuid": "8fdf2b06-21be-4cd1-b900-4e366461fd9e", "showTitle": false, "title": ""}
#filtrando delay menor que dois
df.where("delay < 2").show(2)
# + application/vnd.databricks.v1+cell={"inputWidgets": {}, "nuid": "4c94af67-c6b4-4d4c-8faf-687521b72fd5", "showTitle": false, "title": ""}
#ordenando pela coluna
df.sort("delay").show(5)
# + application/vnd.databricks.v1+cell={"inputWidgets": {}, "nuid": "7330ebbf-02c3-4306-91d2-f3a10643c5a1", "showTitle": false, "title": ""}
from pyspark.sql.functions import desc,asc,expr
# Sort by delay in DESCENDING order (the original comment said "ascending").
# NOTE(review): expr("delay desc") is likely not the intended way to request a
# descending sort — desc("delay") or df.delay.desc() is the usual form; confirm.
df.orderBy(expr("delay desc")).show(10)
# + application/vnd.databricks.v1+cell={"inputWidgets": {}, "nuid": "fecf44a6-aa0c-4b71-b26c-466517d39f1d", "showTitle": false, "title": ""}
#estatistica descritiva
df.describe().show()
# + application/vnd.databricks.v1+cell={"inputWidgets": {}, "nuid": "01476749-7406-44eb-9417-36615fab4cd9", "showTitle": false, "title": ""}
#interando sobre todas as linhas do dataframe
'''for i in df.collect():
print(i)
print(i[0],i[1]*2)'''
# + application/vnd.databricks.v1+cell={"inputWidgets": {}, "nuid": "63ec9d09-7ff4-4dcd-bf13-0b24a3b212df", "showTitle": false, "title": ""}
#adicionando nova coluna no dataframe
df=df.withColumn("Nova Coluna",df['delay']+2)
df.show(10)
# + application/vnd.databricks.v1+cell={"inputWidgets": {}, "nuid": "988b2c5b-a328-46b6-b5a0-0590ae7e6630", "showTitle": false, "title": ""}
#removendo coluna
df=df.drop("Nova coluna")
df.show(10)
# + application/vnd.databricks.v1+cell={"inputWidgets": {}, "nuid": "4ece1f51-adc9-49e0-b821-778a369f9a61", "showTitle": false, "title": ""}
#adicionando coluna
df=df.withColumn("Nova coluna",df['delay']+2)
#renomeando colunas
df.withColumnRenamed("Nova coluna","New column").show()
# + application/vnd.databricks.v1+cell={"inputWidgets": {}, "nuid": "ee8e3c95-9218-4e22-92d0-581ed56ce946", "showTitle": true, "title": "Trabalhando com Missing values"}
df.filter("delay is NULL").show()
# + application/vnd.databricks.v1+cell={"inputWidgets": {}, "nuid": "f51235c8-fc46-40bb-a54b-fe6f275ef7c6", "showTitle": false, "title": ""}
#filtrando valores missing
df.filter(df.delay.isNull()).show(10)
# + application/vnd.databricks.v1+cell={"inputWidgets": {}, "nuid": "ab02c958-8b78-415c-85e6-1e1b5f788455", "showTitle": true, "title": "Preenchendo dados Missing"}
#preenchendo com valores 0
df.na.fill(value=0).show()
# + application/vnd.databricks.v1+cell={"inputWidgets": {}, "nuid": "12c53e09-fc3f-4b06-8d77-c04b750019b8", "showTitle": false, "title": ""}
#prenchendo valores missing com valor 0 apenas da coluna delay
df.na.fill(value=0,subset=['delay']).show()
# + application/vnd.databricks.v1+cell={"inputWidgets": {}, "nuid": "d6786098-ae2b-44cf-b95c-a4459b1748fd", "showTitle": false, "title": ""}
#preenchendo os dados com valores de string vazia
df.na.fill("").show()
# + application/vnd.databricks.v1+cell={"inputWidgets": {}, "nuid": "b06af2c2-3d5c-4a11-81bb-6ee753caedad", "showTitle": false, "title": ""}
#removendo qualquer linha nula de qualquer coluna
df.na.drop().show()
# + application/vnd.databricks.v1+cell={"inputWidgets": {}, "nuid": "5d8d9faa-b959-44ac-8aa1-ff11aa17eb9b", "showTitle": false, "title": ""}
| Spark basic/notebooks/Spark basic.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# language: python
# name: python3
# ---
# + tags=[]
import os
import re
import numpy as np
import scipy.io as sio
import matplotlib as mpl
import numpy as np
import matplotlib.pyplot as plt
import matplotlib.ticker
import sys
# %matplotlib inline
import pandas as pd
import seaborn as sns
import xgboost as xgb
import time
from sklearn.model_selection import StratifiedKFold
from sklearn.model_selection import train_test_split
from sklearn.preprocessing import LabelEncoder
from aesthetics import params
mpl.rcParams.update(params)
plt.rcParams.update(params)
# + tags=[]
# Load the pre-pickled TalkingData feature matrix and labels.
X = pd.read_pickle("../data/application3/user_age_X.pkl")
Y = pd.read_pickle("../data/application3/user_age_Y.pkl")
# Keep users with at least 5 positive (==1) feature entries
# (fix: the original comment said "at least 3 labels", but the code tests >= 5).
index = np.sum(X == 1, axis=1) >=5
X = X[index]
Y = Y[index]
# + tags=[]
def age_to_labels(age, step=2):
    """Map an age to its age-bucket label string.

    Ages below 20 map to "20-", ages of 50 and above map to "50+"
    (for the default step), and everything in between maps to a bucket
    label such as "24-26" whose left edge is inclusive.
    """
    upper = 50 + step
    edges = np.arange(20, upper, step)
    if age < 20:
        return "20-"
    if age >= upper - step:
        return "{}+".format(str(upper - step))
    # Index of the first edge strictly greater than `age`; its left
    # neighbour is the lower bound of the bucket the age falls into.
    hi = np.searchsorted(edges, age, side='right')
    return "{}-{}".format(edges[hi - 1], edges[hi])
# initialize the groups
y = Y.gender + Y.age.apply(age_to_labels)
groups, counts = np.unique(y, return_counts=True)
print("There are {} age groups in total".format(groups.shape[0]))
label_encoder = LabelEncoder()
label_encoder.fit(y)
y_ = label_encoder.transform(y)
# +
def evaluate_model_cv(X, y, measures, mapping,
                      params, niter=10, nfold=5, random_state=0):
    """Cross-validate an XGBoost classifier on the merged-class problem.

    Trains on labels transformed through `mapping` (a class-merging map with
    a `.map()` method and an `.inverse` dict of merged classes) and evaluates
    every callable in `measures` on each of `nfold` stratified folds.

    Returns a dict {measure name: list of per-fold scores}.

    Side effect: mutates `params` in place by setting params['num_class'].
    NOTE(review): `random_state` is accepted but unused, and `compute_y_dist`
    is not defined in this notebook — presumably imported elsewhere; confirm.
    """
    y_dist = compute_y_dist(y)
    skf = StratifiedKFold(n_splits=nfold)
    skf.get_n_splits(X, y)
    # Number of classes AFTER merging = number of groups in the mapping.
    K_ = len(mapping.inverse)
    print("K'={}".format(K_))
    params['num_class'] = K_
    res = {measure:[] for measure in measures}
    for train_index, test_index in skf.split(X, y):
        X_train, X_test = X[train_index], X[test_index]
        y_train, y_test = y[train_index], y[test_index]
        # Train/evaluate on the MERGED labels.
        data_train = xgb.DMatrix(X_train, mapping.map(y_train))
        data_test = xgb.DMatrix(X_test, mapping.map(y_test))
        watchlist = [(data_train, 'train'), (data_test, 'eval')]
        gbm = xgb.train(params, data_train, niter, evals=watchlist,
                        verbose_eval=niter)
        y_pred = np.argmax(gbm.predict(data_test), axis=1)
        for measure in measures:
            if measure == "ITCA":
                # ITCA needs the ORIGINAL (un-merged) labels plus the mapping.
                res[measure].append(measures[measure](y_test, y_pred, mapping, y_dist=y_dist))
            else:
                res[measure].append(measures[measure](mapping.map(y_test), y_pred))
    return res
def greedy_search_cv(X, y, measures, params,
                     label_encoder, nfold=5, output="cv_check_point.pkl", niter=10):
    """Greedily merge classes, keeping the merge that maximizes cross-validated ITCA.

    Starting from the identity mapping over all observed classes, each round
    evaluates every candidate merge produced by `next_mapping` and keeps the
    one with the highest mean ITCA, checkpointing the search path to `output`
    after every round so a long (>20h) search can be resumed/inspected.

    Parameters
    ----------
    X, y : feature matrix and raw labels.
    measures : dict mapping measure name -> callable; must contain the keys
        "ACC" and "ITCA" (both are recorded on the search path).
    params : xgboost parameter dict (passed through to evaluate_model_cv).
    label_encoder : fitted sklearn LabelEncoder used to integer-encode y.
    nfold : number of CV folds.
    output : pickle file used for per-round checkpoints.
    niter : number of boosting rounds per evaluation.

    Returns
    -------
    path : dict with the chosen "mapping" per round plus per-measure CV scores.
    """
    # Fix: these stdlib modules are used below but were never imported at the
    # top of the notebook, which made this function raise NameError.
    import datetime
    import pickle

    # NOTE(review): `bidict` and `next_mapping` are assumed to be provided
    # elsewhere (they are not defined in this notebook) — confirm.
    n_classes = np.unique(y).shape[0]
    cur_mapping = bidict({i: i for i in range(n_classes)})
    path = {key: [] for key in measures}
    path["mapping"] = []
    # Score the un-merged (identity) mapping first.
    path["mapping"].append(cur_mapping)
    y_ = label_encoder.transform(y)
    res = evaluate_model_cv(X, y_, measures,
                            cur_mapping, params, niter=niter, nfold=nfold)
    for measure in measures:
        path[measure].append(res[measure])
    while len(cur_mapping.inverse) > 2:
        # Evaluate every candidate merge of the current mapping.
        acc_l = []
        itca_l = []
        mapping_l = []
        now = datetime.datetime.now()
        print("========={}=============".format(now.strftime("%Y-%m-%d %H:%M:%S")))
        print("Start search....")
        for mapping_k in next_mapping(cur_mapping, label_encoder):
            res = evaluate_model_cv(X, y_, measures,
                                    mapping_k, params, niter=niter, nfold=nfold)
            mapping_l.append(mapping_k)
            acc_l.append(res["ACC"])
            itca_l.append(res["ITCA"])
        # Keep the candidate with the best mean ITCA across folds.
        idx_max = np.argmax([np.mean(l) for l in itca_l])
        path["mapping"].append(mapping_l[idx_max])
        path["ITCA"].append(itca_l[idx_max])
        path["ACC"].append(acc_l[idx_max])
        cur_mapping = mapping_l[idx_max]
        # Checkpoint the search path after each round (plain dicts so the
        # pickle does not depend on the bidict package).
        with open(output, 'wb') as handle:
            check_point = dict()
            check_point["mapping"] = [dict(m) for m in path["mapping"]]
            check_point["ITCA"] = path["ITCA"]
            check_point["ACC"] = path["ACC"]
            pickle.dump(check_point, handle,
                        protocol=pickle.HIGHEST_PROTOCOL)
        now = datetime.datetime.now()
        print("========={}=============".format(now.strftime("%Y-%m-%d %H:%M:%S")))
        # NOTE(review): the round counter hard-codes 34 initial classes — it
        # only reads correctly for the 34-group setup used in this notebook.
        print("Round={}||Write in {}".format(34 - len(cur_mapping.inverse), output))
    return path
# -
# It takes >20 hours on a desktop
path = greedy_search_cv(X.to_numpy(), y.to_numpy(), measures,
params, label_encoder, nfold=5, output="../data/application3/search_cv_check_point", niter=300)
# ## Figure 8: Results on TalkingData mobile user demographics dataset using XGBoost.
# + tags=[]
import pickle  # stdlib; not imported at the top of this notebook

# Fix: load the checkpoint BEFORE reading it. The original cell referenced
# `check_point` first and only ran pickle.load afterwards, which raises
# NameError on a fresh kernel.
with open('../data/application3/search_cv_check_point', 'rb') as handle:
    check_point = pickle.load(handle)

# Inspect the age groups of the mapping five merge rounds from the end.
mapping = bidict(check_point["mapping"][-5])
for key in mapping.inverse:
    age_group = label_encoder.inverse_transform(mapping.inverse[key])
    print(age_group)

# Mean/std of the cross-validated ITCA and ACC along the merge path
# (the 1/sqrt(5) factor applied later converts std to a standard error).
itca_mean = np.array([np.mean(l) for l in check_point["ITCA"]])
itca_std = np.array([np.std(l) for l in check_point["ITCA"]])
acc_mean = np.array([np.mean(l) for l in check_point["ACC"]])
acc_std = np.array([np.std(l) for l in check_point["ACC"]])
ind_max = np.argmax(itca_mean)
# + tags=[]
import matplotlib.gridspec as gridspec
gs = gridspec.GridSpec(2, 2, width_ratios=[2, 3])
figure = plt.figure()
ax = plt.subplot(gs[:, 0 ]) # row 0, col 0
curve1, = ax.plot(itca_mean, label="ITCA", color="#F20708",
marker="o", linestyle="--", alpha=0.75)
factor = 1 / np.sqrt(5)
ax.fill_between(np.arange(itca_mean.size), itca_mean - itca_std * factor, itca_mean + itca_std * factor,
alpha=0.2, color="#F20708")
ax2 = ax.twinx()
curve2, = ax2.plot(acc_mean, label="ACC", color="#3C8ABE",
marker="v", linestyle="--", alpha=0.75)
ax2.fill_between(np.arange(acc_mean.size), acc_mean - acc_std * factor, acc_mean + acc_std * factor,
alpha=0.2, color="#3C8ABE")
curves = [curve1, curve2]
ax.axvline(x=ind_max, linestyle="--", color="#F20708", alpha=0.5)
ax2.set_xticks([0, 10, 20, 30])
ax2.set_xticklabels([34, 24, 14, 4])
ax.set_xlabel("$K$")
ax2.set_ylabel("ACC")
ax.set_ylabel("ITCA")
ax.legend(curves, [curve.get_label() for curve in curves])
ax3 = plt.subplot(gs[0, 1])
ax3.hist(age_male, np.arange(16, 80, 2), edgecolor='white', color="#4C92C3", label="Male")
ax3.spines['right'].set_visible(False)
ax3.spines['top'].set_visible(False)
# ax3.set_title("Male")
ax3.set_ylim([0, 1800])
ax3.legend()
#
ax4 = plt.subplot(gs[1, 1])
ax4.hist(age_female, np.arange(16, 80, 2), edgecolor='white', color="#FF983E", label="Female")
ax4.spines['right'].set_visible(False)
ax4.spines['top'].set_visible(False)
# ax4.set_title("Female")
ax4.legend()
ax4.set_ylim([0, 1600])
figure.set_size_inches(7, 3.25)
plt.tight_layout()
| notebooks/application3_prediction_of _user_demographics.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .r
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: R
# language: R
# name: ir
# ---
# fMRI behavioral performance analysis of Learning Brain data
# --------------------------------------------------------------
#
# The code below allows one to visualize and analyze data from a 6-week working memory training study. Participants were scanned four times while performing a dual n-back task (Jaeggi et al., 2018).
#
# Three performance measures were calculated:
#
# - accuracy
# - d-prime (dprime)
# - penalized reaction time (prt)
#
# <NAME> | Centre for Modern Interdisciplinary Technologies
#
# Last edited: 29.05.2017
# +
# Loading packages
library(psych)
library(tidyverse)
library(data.table)
library(nlme)
library(broom)
library(ez)
# Setting working directory
setwd("~/Dropbox/Projects/LearningBrain/")
# Loading data
performance <- read.csv("data/behavioral/LB_fmri_behaviour_tidy.csv")
performance$Condition <- factor(performance$Condition, labels = c('1-back', '2-back'))
performance$Group <- factor(performance$Group, levels = c('Experimental', 'Control'))
# Checking data
glimpse(performance)
summary(performance)
# -
#customizing theme for plotting
theme_training <- theme_bw() + theme(axis.text.y = element_text(size=25, colour='#262626ff'),
axis.text.x = element_text(size=25, colour='#262626ff'),
axis.title.y = element_text(size=25, colour='#262626ff'),
axis.title.x = element_text(size=25, colour='#262626ff'),
plot.title = element_text(hjust=0.5, size=25),
#panel.background = element_blank(),
panel.grid.major = element_blank(),
panel.grid.minor = element_blank(),
axis.line = element_line(colour="#262626ff"),
panel.border = element_rect(colour = "#262626ff", fill=NA, size=1.8),
panel.background = element_rect(fill="transparent",colour=NA),
plot.background = element_rect(fill="transparent",colour=NA),
legend.key = element_rect(fill= "transparent", colour="transparent"),
strip.background =element_rect(fill="transparent", colour=NA),
strip.text = element_text(size=25),
axis.ticks = element_line(colour="#262626ff", size=1, 2),
axis.ticks.length = unit(.15, "cm"),
aspect.ratio = 1,
#legend.text=element_text(size=25)
)
# +
performance$Session <- as.factor(performance$Session)
performance$Session <- factor(performance$Session, labels = c('Naive', 'Early', 'Middle', 'Late'))
beh_mean <- performance %>%
group_by(Subject, Session, Condition, Group) %>%
summarize(pRT = mean(pRT), Dprime = mean(Dprime), Accuracy = mean(Accuracy)) #%>% ezANOVA(prt, within = c(session, n), between = group, wid = sub, detailed = TRUE)
write.csv(beh_mean, 'data/behavioral/LB_fmri_behaviour_mean_tidy.csv', row.names = FALSE)
# -
# Multilevel modelling (MLM) -- groups separately
# ===================================================
experimental <- beh_mean %>% filter(Group == 'Experimental')
control <- beh_mean %>% filter(Group == 'Control')
# +
# testing training progress for experimental group
baseline <- lme(pRT ~ 1, random = ~ 1 |Subject/Session/Condition, data = experimental, method = 'ML', control = list(opt = "optim"))
session <- update(baseline, .~. + Session)
condition <- update(session, .~. + Condition)
session_condition <- update(condition, .~. + Session:Condition)
#session_condition_quadr <- update(session_condition, .~. + I(Session^2))
anova(baseline, session, condition, session_condition)
# -
summary(session_condition)
# +
# Testing training progress (pRT) — NOTE: despite the original comment saying
# "experimental group", this model is fitted on the CONTROL group data.
baseline <- lme(pRT ~ 1, random = ~ 1 |Subject/Session/Condition, data = control, method = 'ML', control = list(opt = "optim"))
session <- update(baseline, .~. + Session)
condition <- update(session, .~. + Condition)
session_condition <- update(condition, .~. + Session:Condition)
#session_condition_quadr <- update(session_condition, .~. + I(Session^2))
anova(baseline, session, condition, session_condition)
# +
p <- ggplot(beh_mean, aes(x = Session, y = pRT, color = Condition)) +
stat_summary(fun.y = mean, geom = 'point', size = 3) +
stat_summary(fun.y = mean, geom = 'line', size = 1.2, aes(group = Condition)) +
stat_summary(fun.data = mean_cl_boot, geom = 'errorbar', width = 0.2, size = 1.2) +
#scale_colour_manual(values=c("#00ab72", "#f68791")) +
scale_colour_manual(values=c('#919649', '#fc766a')) +
theme_training +
ylab('pRT(s)') +
xlab('') +
facet_wrap('~Group')
p
ggsave("/home/finc/Dropbox/Projects/LearningBrain/Figures_NEW/Figure_NEW_pRT.pdf", plot = p, width = 12, height = 6, dpi = 300)
# -
# Testing training progress (D-prime) for the EXPERIMENTAL group
# (fix: the original comment said "control group"; data = experimental here).
baseline <- lme(Dprime ~ 1, random = ~ 1 |Subject/Session/Condition, data = experimental, method = 'ML', control = list(opt = "optim"))
session <- update(baseline, .~. + Session)
condition <- update(session, .~. + Condition)
session_condition <- update(condition, .~. + Session:Condition)
anova(baseline, session, condition, session_condition)
# Testing training progress (D-prime) for the control group
baseline <- lme(Dprime ~ 1, random = ~ 1 |Subject/Session/Condition, data = control, method = 'ML', control = list(opt = "optim"))
session <- update(baseline, .~. + Session)
condition <- update(session, .~. + Condition)
session_condition <- update(condition, .~. + Session:Condition)
anova(baseline, session, condition, session_condition)
# +
baseline <- lme(pRT ~ 1, random = ~ 1|Subject/Session/Condition, data = beh_mean, method = 'ML', control = list(opt = "optim"), na.action = na.exclude)
condition <- update(baseline, .~. + Condition)
session <- update(condition, .~. + Session)
group <- update(session, .~. + Group)
condition_session <- update(group, .~. + Condition:Session)
condition_group <- update(condition_session, .~. + Condition:Group)
session_group <- update(condition_group, .~. + Session:Group)
condition_session_group <- update(session_group, .~. + Condition:Session:Group)
anova(baseline, condition, session, group, condition_session, condition_group, session_group, condition_session_group)
# -
summary(condition_session_group)
# +
p <- beh_mean %>% filter(Session %in% c('Naive', 'Late')) %>%
ggplot(aes(x = Group, y = pRT, fill = Session)) +
geom_point(aes(col = Session), position=position_jitterdodge(dodge.width=0.9), alpha = 0.6, size = 4) +
#geom_jitter(aes(col = Session), alpha = 0.6, size = 4, position=position_dodge(width=0.8)) +
geom_boxplot(alpha = 0.4, outlier.shape = NA, position=position_dodge(width=0.8), size = 1) +
scale_fill_manual(values=c('#daa03d', '#755841')) +
scale_color_manual(values=c('#daa03d', '#755841')) +
facet_wrap(~Condition) +
ylim(0.4, 2.5) +
ylab('pRT(s)') +
xlab(' ') +
theme_training
p
ggsave("/home/finc/Dropbox/Projects/LearningBrain/Figures_NEW/Figure_NEW_pRT_ttests.pdf", plot = p, width = 12, height = 6, dpi = 300)
# +
beh_mean_nl <- beh_mean %>% filter(Session %in% c('Naive', 'Late'))
exp1 <- beh_mean_nl %>% filter(Group == 'Experimental') %>% filter(Condition == '1-back')
exp2 <- beh_mean_nl %>% filter(Group == 'Experimental') %>% filter(Condition == '2-back')
con1 <- beh_mean_nl %>% filter(Group == 'Control') %>% filter(Condition == '1-back')
con2 <- beh_mean_nl %>% filter(Group == 'Control') %>% filter(Condition == '2-back')
# -
t.test(exp1$pRT ~ exp1$Session, paired = TRUE)
t.test(exp2$pRT ~ exp2$Session, paired = TRUE)
t.test(con1$pRT ~ con1$Session, paired = TRUE)
t.test(con2$pRT ~ con2$Session, paired = TRUE)
# +
t.test(exp2[exp2$Session == 'Naive', ]$pRT - exp2[exp2$Session == 'Late', ]$pRT,
con2[con2$Session == 'Naive', ]$pRT - con2[con2$Session == 'Late', ]$pRT, paired = FALSE)
t.test(exp1[exp1$Session == 'Naive', ]$pRT - exp1[exp1$Session == 'Late', ]$pRT,
con1[con1$Session == 'Naive', ]$pRT - con1[con1$Session == 'Late', ]$pRT, paired = FALSE)
# -
t.test(exp1[exp1$Session == 'Late', ]$pRT, exp2[exp2$Session == 'Late', ]$pRT, paired = TRUE)
beh_mean %>%
group_by(Session, Group, Condition) %>% filter(Session %in% c('Naive', 'Late'))%>%
summarize(mpRT = mean(pRT), mDprime = mean(Dprime), mAcc = mean(Accuracy))
# D-PRIME
# ----------------------------
# +
baseline <- lme(Dprime ~ 1, random = ~ 1|Subject/Session/Condition, data = beh_mean, method = 'ML', control = list(opt = "optim"), na.action = na.exclude)
condition <- update(baseline, .~. + Condition)
session <- update(condition, .~. + Session)
group <- update(session, .~. + Group)
condition_session <- update(group, .~. + Condition:Session)
condition_group <- update(condition_session, .~. + Condition:Group)
session_group <- update(condition_group, .~. + Session:Group)
condition_session_group <- update(session_group, .~. + Condition:Session:Group)
anova(baseline, condition, session, group, condition_session, condition_group, session_group, condition_session_group)
# +
p <- ggplot(beh_mean, aes(x = Session, y = Dprime, color = Condition)) +
stat_summary(fun.y = mean, geom = 'point', size = 3) +
stat_summary(fun.y = mean, geom = 'line', size = 1.2, aes(group = Condition)) +
stat_summary(fun.data = mean_cl_boot, geom = 'errorbar', width = 0.2, size = 1.2) +
#scale_colour_manual(values=c("#00ab72", "#f68791")) +
scale_colour_manual(values=c('#919649', '#fc766a')) +
theme_training +
ylab('D-prime') +
xlab('') +
facet_wrap('~Group')
p
ggsave("/home/finc/Dropbox/Projects/LearningBrain/Figures_NEW/Figure_NEW_dprime.pdf", plot = p, width = 12, height = 6, dpi = 300)
# -
t.test(exp1$Dprime ~ exp1$Session, paired = TRUE)
t.test(exp2$Dprime ~ exp2$Session, paired = TRUE)
t.test(con1$Dprime ~ con1$Session, paired = TRUE)
t.test(con2$Dprime ~ con2$Session, paired = TRUE)
# +
t.test(exp2[exp2$Session == 'Naive', ]$Dprime - exp2[exp2$Session == 'Late', ]$Dprime,
con2[con2$Session == 'Naive', ]$Dprime - con2[con2$Session == 'Late', ]$Dprime, paired = FALSE)
t.test(exp1[exp1$Session == 'Naive', ]$Dprime - exp1[exp1$Session == 'Late', ]$Dprime,
con1[con1$Session == 'Naive', ]$Dprime - con1[con1$Session == 'Late', ]$Dprime, paired = FALSE)
# +
p <- beh_mean %>% filter(Session %in% c('Naive', 'Late')) %>%
ggplot(aes(x = Group, y = Dprime, fill = Session)) +
geom_point(aes(col = Session), position=position_jitterdodge(dodge.width=0.9), alpha = 0.6, size = 4) +
#geom_jitter(aes(col = Session), alpha = 0.6, size = 4, position=position_dodge(width=0.8)) +
geom_boxplot(alpha = 0.4, outlier.shape = NA, position=position_dodge(width=0.8), size = 1) +
scale_fill_manual(values=c('#daa03d', '#755841')) +
scale_color_manual(values=c('#daa03d', '#755841')) +
facet_wrap(~Condition) +
ylim(0, 6.2) +
ylab('D-prime') +
xlab(' ') +
theme_training
p
ggsave("/home/finc/Dropbox/Projects/LearningBrain/Figures_NEW/Figure_NEW_dprime_ttests.pdf", plot = p, width = 12, height = 5, dpi = 300)
# -
| 01-behavioral_data_analysis/.ipynb_checkpoints/02-fmri_performance_analysis-checkpoint.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# language: python
# name: python3
# ---
# +
from iwatlas import sshdriver
from iwatlas import harmonics
from iwatlas import stratification as strat
from sfoda.utils.mynumpy import grad_z
import xarray as xr
import pandas as pd
import numpy as np
from datetime import datetime
from scipy.optimize import least_squares
from tqdm import tqdm
import matplotlib.pyplot as plt
# +
basedir = '/home/suntans/cloudstor/Data/IWAtlas'
sshfile = '{}/NWS_2km_GLORYS_hex_2013_2014_SSHBC_Harmonics.nc'.format(basedir)
ampfile = '{}/NWS_2km_GLORYS_hex_2013_2014_Amplitude_Atlas.nc'.format(basedir)
climfile = '{}/NWS_2km_GLORYS_hex_2013_2014_Climatology.nc'.format(basedir)
# Output filename
N2file = '{}/NWS_2km_GLORYS_hex_2013_2014_Stratification_Atlas_v2.1.nc'.format(basedir)
na = 3 # Number of Annual Harmonics
BASETIME = np.datetime64('1990-01-01 00:00:00')
clim = sshdriver.load_ssh_clim(climfile)
amp = sshdriver.load_ssh_clim(ampfile)
clim, amp
# -
# Vertical grid is not stored in the climatology file so we will calculate it here
dz = clim._ds['dz'].values
nz = dz.shape[0]
dzw = np.zeros((nz+1,))
dzw[1::] = dz
z_w = np.cumsum(dzw)
z_r = 0.5*(z_w[0:-1] + z_w[1::])
z_r
# Load the density data
Nk = clim._ds.Nk.values
rhoall = clim._ds['rho'].values[:]
dv = clim._ds['dv'].values[:]
def double_gaussian_N2_v2(z, beta):
    """Two-Gaussian buoyancy-frequency profile.

    N2(z) = b0 + b1 * ( w * exp(-((z+b2)/b3)^2) + (1-w) * exp(-((z+b4)/b5)^2) )

    where beta = (b0, b1, b2, b3, b4, b5, w).  The `[i, ...]` indexing lets
    `beta` be either a 1-D parameter vector or a stack of parameter columns.
    """
    offset = beta[0, ...]
    scale = beta[1, ...]
    weight = beta[6]
    bump1 = np.exp(-((z + beta[2, ...]) / beta[3, ...]) ** 2)
    bump2 = np.exp(-((z + beta[4, ...]) / beta[5, ...]) ** 2)
    return offset + scale * (weight * bump1 + (1 - weight) * bump2)
# +
# Initialise the output variables
nt, nz, Nc = rhoall.shape
nparams = 7
N2_params = np.zeros((nt,nparams,Nc))
N2_err = np.zeros((nt, Nc))
RHO0 = 1024
GRAV = 9.81
cff1 = -GRAV/RHO0
for ii in tqdm(range(Nc)):
initguess = [1e-5, 1e-4, 4, 2, 6.5, 2,0.5]
# Skip shallow water
if dv[ii]<100:
continue
nk = Nk[ii]
z = z_r[0:nk]
zpr = -np.log(z)
for tt in range(nt):
bounds = [ (1e-6, 1e-5, 1, 0.1, 1.4, 0.1,0.05), (1e-1, 1e-1, 6, 6, 8, 6,0.95)]
rho = rhoall[tt,0:nk,ii]
N2 = cff1*grad_z(rho,-z)
N2fit, f0, err = strat.fit_rho_lsq(N2, zpr, double_gaussian_N2_v2, bounds, initguess)
# Use the last time-step as
#initguess = f0
N2_params[tt,:,ii] = f0
N2_err[tt,ii] = err
# +
# plt.figure()
# plt.plot(zpr, N2,'.')
# plt.plot(zpr, N2fit)
# N2_params[tt,:,ii]
# +
# Test a harmonic fit
tsec = (clim._ds.time.values - BASETIME).astype(float)*1e-9
na = 4
aa, Aa, Ba, frq_all = strat.seasonal_harmonic_fit(N2_params, tsec, na)
# -
texstr = r"N^2(z) = \beta_0 + \beta_1 \left( \beta_6* \exp \left[- \left( \frac{z+\beta_2}{\beta_3} \right)^2 \right] + (1-\beta_6)*\exp \left[- \left( \frac{z+\beta_4}{\beta_5} \right)^2 \right] \right)"
# +
# Create an output dataset like the input amplitude data set
new_ds = amp._ds.copy()
# Drop a few variables
new_ds = new_ds.drop(labels=['amp_b_re','amp_b_im','alpha_n','cn','omega','N2'])
new_ds
# +
# Update some attributes
new_ds.attrs['Created'] = str(datetime.now())
new_ds.attrs['Title'] = 'SUNTANS density stratification climatology'
new_ds.attrs['Author'] = '<NAME> (<EMAIL>)'
new_ds.attrs.update({'density_func':'double_gaussian_N2_v2'})
new_ds.attrs.update({'density_func_tex':texstr})
new_ds.attrs.update({'Number_Annual_Harmonics':na})#
new_ds.attrs.update({'ReferenceDate':BASETIME.astype(str)})
#ReferenceDate
new_ds.attrs
# +
# Convert the N2_params array to a DataArray
params = range(nparams)
omega = frq_all
dims = ('time','Nparams','Nc')
# coords = {'time':new_ds.time.values, 'Nparams':params, 'xv':range(Nc)}
ds_N2 = xr.DataArray(N2_params, dims=dims, attrs={'long_name':'N2 fit parameters'})
ds_omega = xr.DataArray(frq_all, dims=('Nannual',) )
ds_params = xr.DataArray(params, dims=('Nparams',) )
dims = ('time','Nc')
# coords = {'time':new_ds.time.values, 'Nparams':params, 'xv':range(Nc)}
ds_N2_err = xr.DataArray(N2_err, dims=dims, attrs={'long_name':'N2 fit error'})
dims = ('Nparams','Nc')
ds_N2_mu = xr.DataArray(aa, dims=dims, attrs={'long_name':'N2 fit mean parameters'})
dims = ('Nannual','Nparams','Nc')
ds_N2_re = xr.DataArray(Aa, dims=dims, attrs={'long_name':'N2 fit real harmonic parameters'})
ds_N2_im = xr.DataArray(Ba, dims=dims, attrs={'long_name':'N2 fit imaginary harmonic parameters'})
ds2 = xr.Dataset({'N2_t':ds_N2,'omegaA':ds_omega,'params':ds_params,
'N2_mu':ds_N2_mu,'N2_re':ds_N2_re,'N2_im':ds_N2_im,'N2_err':ds_N2_err}).set_coords(['omegaA','params'])
ds2
# -
compflags = {'zlib':True, 'complevel':5}
encoding = {'N2_t':compflags, 'N2_mu':compflags, 'N2_re':compflags,'N2_im':compflags,'N2_err':compflags,}
new_ds.merge(ds2).to_netcdf(N2file, encoding=encoding)
# +
# #ds2.to_netcdf?
# -
xr.open_dataset(N2file)
| sandpit/create_N2_clim.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python [default]
# language: python
# name: python3
# ---
import os
os.chdir(r'/Users/ryanmccrickerd/desktop/rypy')
import numpy as np
import pandas as pd
import rypy as rp
from matplotlib import pyplot as plt
from scipy.stats import norm
from scipy.special import gamma
import mpl
# Fix: the line magic must be commented for this jupytext .py script to be
# valid Python — a bare "% matplotlib inline" is a SyntaxError outside IPython.
# %matplotlib inline
nx = np.newaxis
mpl.config(scale=1.5,print_keys=False)
# RHS = pd.read_csv('RoughHestonSurface.csv')
# RHS = pd.read_csv('rHeston (V0=0.02, nu=0.3,H=0.1,rho=-0.7).csv')
# RHS = pd.read_csv('rHeston (V0=0.02, nu=0.3,H=0.05,rho=-0.7).csv')
# RHS = pd.read_csv('rHeston V0=0.02nu=0.3H=0.01rho=-0.7.csv')
RHS = pd.read_csv('rHeston-V0=0.02-nu=0.3-H=0.05-rho=-0.7.csv')
RHS
K = np.array(RHS.iloc[:,0])
K
# I think vol-vol $v=1$ makes sense given EAJ has set $\lambda=\nu$.
# +
# ϵ,TS = 16/256,'16d'
# σ,ρ,v,κ = np.sqrt(0.02),-0.7,1.5,1/ϵ
# # ϵ,TS = 0,'0d'
# # σ,ρ,v,κ = np.sqrt(0.02),-0.7,1,np.inf
# θ = σ,ρ,v,κ
# +
v0 = 0.02
ν = 0.3
H = -0.0#0.05
ρ = -0.7
νH = ν*(0.5 - H)**(H - 0.5)
TS = '1d'
ϵ = 3/256
α = νH*ϵ**(H + 0.5)
β = 1/ϵ
γ = v0
θ = α,β,γ,v0,ρ
# -
νH
T = np.array([1/52,2/52,1/12,3/12,6/12,1])[:,nx]
M = ['1W','2W','1M','3M','6M','1Y']
# Δ = np.linspace(1,99,21)[nx,:]/100
# k = norm.ppf(Δ)*σ*np.sqrt(T)
k = np.log(np.arange(0.30,1.24,0.01))*np.ones_like(T)
pd.DataFrame(k,index=M,columns=k[0,:])
C = rp.pricer(T,k,θ)
BSV = rp.surface(T,k,C)
pd.DataFrame(BSV,index=M,columns=k[0,:])
plt.rcParams['figure.figsize'] = [2*1.618*2,2*3]
plt.rcParams['legend.loc'] = 'upper right'
plot,axes = plt.subplots(3,2)
n = -1
for i in range(3):
for j in range(2):
n += 1
axes[i,j].plot(np.log(K),100*np.array(RHS.iloc[:,n+1]),'bo')
axes[i,j].plot(k[n,:],100*BSV[n,:])
axes[i,j].set_ylabel(r'$\bar{\sigma}(k,\tau=%.3f)$'%T[n,0])
axes[i,j].set_ylim([0,50])
axes[0,1].set_title(r'$\varepsilon=\mathrm{%s}.$'%TS)
axes[0,1].set_title(r'$H = 0.05,\ \ \varepsilon=\mathrm{%s}.$'%TS)
axes[1,1].legend([r'$\mathrm{rHeston}$',r'$\mathrm{Mechkov}$'])
axes[2,1].set_xlabel(r'$k$')
plt.tight_layout()
fname = r'rHeston-v-Mechkov-%s'%TS
plt.savefig(fname)
| notebooks/20190502-rHeston.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# language: python
# name: python3
# ---
# # Day 11
# batch size 256 lr 1e-3, normed weighted, non-rotated, cartesian, SWA test
# ### Import modules
# %matplotlib inline
from __future__ import division
import sys
import os
os.environ['MKL_THREADING_LAYER']='GNU'
sys.path.append('../')
from Modules.Basics import *
from Modules.Class_Basics import *
# ## Options
# +
# Input features: high-level (DER_*) plus primary (PRI_*) kinematics in
# Cartesian components.
classTrainFeatures = ['DER_mass_MMC', 'DER_mass_transverse_met_lep', 'DER_mass_vis', 'DER_pt_h', 'DER_deltaeta_jet_jet', 'DER_mass_jet_jet', 'DER_prodeta_jet_jet', 'DER_deltar_tau_lep', 'DER_pt_tot', 'DER_sum_pt', 'DER_pt_ratio_lep_tau', 'DER_met_phi_centrality', 'DER_lep_eta_centrality', 'PRI_met_pt', 'PRI_met_sumet', 'PRI_jet_num', 'PRI_jet_all_pt', 'PRI_tau_px', 'PRI_tau_py', 'PRI_tau_pz', 'PRI_lep_px', 'PRI_lep_py', 'PRI_lep_pz', 'PRI_jet_leading_px', 'PRI_jet_leading_py', 'PRI_jet_leading_pz', 'PRI_jet_subleading_px', 'PRI_jet_subleading_py', 'PRI_jet_subleading_pz', 'PRI_met_px', 'PRI_met_py']
classModel = 'modelSwish'
varSet = "basic_features"
# Cross-validation / ensembling configuration
nSplits = 10
ensembleSize = 10
ensembleMode = 'loss'
maxEpochs = 200
compileArgs = {'loss':'binary_crossentropy', 'optimizer':'adam'}
trainParams = {'epochs' : 1, 'batch_size' : 256, 'verbose' : 0}
modelParams = {'version':classModel, 'nIn':len(classTrainFeatures), 'compileArgs':compileArgs, 'mode':'classifier'}
print "\nTraining on", len(classTrainFeatures), "features:", [var for var in classTrainFeatures]
# -
# ## Import data
# Pre-fitted input pipeline (scaler etc.) saved by the preprocessing notebook
with open(dirLoc + 'inputPipe.pkl', 'r') as fin:
    inputPipe = pickle.load(fin)
# Training data with 16-fold rotational augmentation
trainData = RotationReflectionBatch(classTrainFeatures, h5py.File(dirLoc + 'train.hdf5', "r+"),
                                    inputPipe=inputPipe, augRotMult=16)
# ## Determine LR
lrFinder = batchLRFind(trainData, getModel, modelParams, trainParams,
                       lrBounds=[1e-5,1e-1], trainOnWeights=True, verbose=0)
compileArgs['lr'] = 1e-3
# ## Train classifier
class SWA(Callback):
'''Based on fastai version'''
def __init__(self, swa_start):
super(SWA, self).__init__()
self.swa_model = None
self.swa_start = swa_start
self.epoch = -1
self.swa_n = -1
self.active = False
def on_train_begin(self, logs={}):
if isinstance(self.swa_model, types.NoneType):
self.swa_model = self.model.get_weights()
self.epoch = 0
self.swa_n = 0
def on_epoch_end(self, metrics, logs={}):
if (self.epoch + 1) >= self.swa_start:
if self.swa_n == 0:
print "SWA beginning"
self.active = True
self.update_average_model()
self.swa_n += 1
self.epoch += 1
def update_average_model(self):
# update running average of parameters
model_params = self.model.get_weights()
swa_params = self.swa_model
for model_param, swa_param in zip(model_params, swa_params):
swa_param *= self.swa_n
swa_param += model_param
swa_param /= (self.swa_n + 1)
def batchTrainClassifier(batchYielder, nSplits, modelGen, modelGenParams, trainParams,
                         cosAnnealMult=0, reverseAnneal=False, plotLR=False, reduxDecay=False,
                         annealMomentum=False, reverseAnnealMomentum=False, plotMomentum=False,
                         oneCycle=False, ratio=0.25, reverse=False, lrScale=10, momScale=10, plotOneCycle=False, scale=30, mode='sgd',
                         swaStart=-1,
                         trainOnWeights=True,
                         saveLoc='train_weights/', patience=10, maxEpochs=10000,
                         verbose=False, logoutput=False):
    '''Cross-validated training loop over the folds of a BatchYielder.

    For each of nSplits folds, builds a fresh model with modelGen(**modelGenParams),
    trains it sub-epoch by sub-epoch on the training folds, early-stops on the
    test-fold loss, and records loss/AUC results. Optional schedules: cosine
    LR annealing (cosAnnealMult), momentum annealing, one-cycle, post-anneal
    "redux" LR decay (reduxDecay), and stochastic weight averaging from epoch
    swaStart (swaStart >= 0). When SWA is active, the averaged weights are the
    ones evaluated and checkpointed. Returns (results, histories); also writes
    per-fold models and a pickled results file under saveLoc.
    '''
    # (Re)create the save directory and clear any previous run's outputs
    os.system("mkdir " + saveLoc)
    os.system("rm " + saveLoc + "*.h5")
    os.system("rm " + saveLoc + "*.json")
    os.system("rm " + saveLoc + "*.pkl")
    os.system("rm " + saveLoc + "*.png")
    os.system("rm " + saveLoc + "*.log")
    if logoutput:
        # Redirect all prints to a log file for the duration of training
        old_stdout = sys.stdout
        log_file = open(saveLoc + 'training_log.log', 'w')
        sys.stdout = log_file
    start = timeit.default_timer()
    results = []
    histories = []
    binary = None  # classification mode; decided from the first batch's targets
    if not isinstance(batchYielder, BatchYielder):
        print "HDF5 as input is depreciated, converting to BatchYielder"
        batchYielder = BatchYielder(batchYielder)
    if cosAnnealMult: print "Using cosine annealing"
    if trainOnWeights: print "Training using weights"
    for fold in xrange(nSplits):
        foldStart = timeit.default_timer()
        print "Running fold", fold+1, "/", nSplits
        os.system("rm " + saveLoc + "best.h5")
        best = -1
        bestLR = -1
        reduxDecayActive = False
        tmpPatience = patience
        epochCounter = 0
        subEpoch = 0
        stop = False
        lossHistory = []
        trainID, testID = getFolds(fold, nSplits) #Get fold indeces for training and testing for current fold
        model = None
        # NOTE(review): missing parentheses — this attribute access is a no-op;
        # model.reset_states() was presumably intended.
        model = modelGen(**modelGenParams)
        model.reset_states #Just checking
        callbacks = []
        if cosAnnealMult:
            # Cycle length = number of batches per sub-epoch
            cosAnneal = CosAnneal(math.ceil(len(batchYielder.source['fold_0/targets'])/trainParams['batch_size']), cosAnnealMult, reverseAnneal)
            callbacks.append(cosAnneal)
        if annealMomentum:
            cosAnnealMomentum = CosAnnealMomentum(math.ceil(len(batchYielder.source['fold_0/targets'])/trainParams['batch_size']), cosAnnealMult, reverseAnnealMomentum)
            callbacks.append(cosAnnealMomentum)
        if oneCycle:
            oneCycle = OneCycle(math.ceil(len(batchYielder.source['fold_0/targets'])/trainParams['batch_size']), ratio=ratio, reverse=reverse, lrScale=lrScale, momScale=momScale, scale=scale, mode=mode)
            callbacks.append(oneCycle)
        if swaStart >= 0:
            # Second model instance holds the SWA-averaged weights for evaluation
            swa = SWA(swaStart)
            swaModel = modelGen(**modelGenParams)
            callbacks.append(swa)
        for epoch in xrange(maxEpochs):
            for n in trainID: #Loop through training folds
                trainbatch = batchYielder.getBatch(n) #Load fold data
                subEpoch += 1
                if binary == None: #First run, check classification mode
                    binary = True
                    nClasses = len(np.unique(trainbatch['targets']))
                    if nClasses > 2:
                        print nClasses, "classes found, running in multiclass mode\n"
                        trainbatch['targets'] = utils.to_categorical(trainbatch['targets'], num_classes=nClasses)
                        binary = False
                    else:
                        print nClasses, "classes found, running in binary mode\n"
                if trainOnWeights:
                    model.fit(trainbatch['inputs'], trainbatch['targets'],
                              class_weight = 'auto', sample_weight=trainbatch['weights'],
                              callbacks = callbacks, **trainParams) #Train for one epoch
                    testbatch = batchYielder.getBatch(testID) #Load testing fold
                    # Once SWA is active, evaluate the averaged weights instead
                    if swaStart >= 0 and swa.active:
                        swaModel.set_weights(swa.swa_model)
                        loss = swaModel.evaluate(testbatch['inputs'], testbatch['targets'], sample_weight=testbatch['weights'], verbose=0)
                    else:
                        loss = model.evaluate(testbatch['inputs'], testbatch['targets'], sample_weight=testbatch['weights'], verbose=0)
                else:
                    model.fit(trainbatch['inputs'], trainbatch['targets'],
                              class_weight = 'auto',
                              callbacks = callbacks, **trainParams) #Train for one epoch
                    testbatch = batchYielder.getBatch(testID) #Load testing fold
                    if swaStart >= 0 and swa.active:
                        swaModel.set_weights(swa.swa_model)
                        loss = swaModel.evaluate(testbatch['inputs'], testbatch['targets'], verbose=0)
                    else:
                        loss = model.evaluate(testbatch['inputs'], testbatch['targets'], verbose=0)
                lossHistory.append(loss)
                if loss <= best or best < 0: #Save best
                    best = loss
                    if cosAnnealMult:
                        if cosAnneal.lrs[-1] > 0:
                            bestLR = cosAnneal.lrs[-1]
                        else:
                            bestLR = cosAnneal.lrs[-2]
                    epochCounter = 0
                    if swaStart >= 0 and swa.active:
                        swaModel.save_weights(saveLoc + "best.h5")
                    else:
                        model.save_weights(saveLoc + "best.h5")
                    if reduxDecayActive:
                        cosAnneal.lrs.append(float(K.get_value(model.optimizer.lr)))
                    if verbose:
                        print '{} New best found: {}'.format(subEpoch, best)
                elif cosAnnealMult and not reduxDecayActive:
                    # Only count patience at the end of an annealing cycle
                    if cosAnneal.cycle_end:
                        epochCounter += 1
                else:
                    epochCounter += 1
                if reduxDecayActive:
                    # Geometric LR decay applied every sub-epoch after annealing stalls
                    lr = 0.8*float(K.get_value(model.optimizer.lr))
                    cosAnneal.lrs.append(lr)
                    K.set_value(model.optimizer.lr, lr)
                if epochCounter >= tmpPatience: #Early stopping
                    if cosAnnealMult and reduxDecay and not reduxDecayActive:
                        # Switch from cosine annealing to plain decay from the best LR
                        print 'CosineAnneal stalling after {} epochs, entering redux decay at LR={}'.format(subEpoch, bestLR)
                        model.load_weights(saveLoc + "best.h5")
                        cosAnneal.lrs.append(bestLR)
                        K.set_value(model.optimizer.lr, bestLR)
                        tmpPatience = 10
                        epochCounter = 0
                        callbacks = []
                        reduxDecayActive = True
                    else:
                        if verbose:
                            print 'Early stopping after {} epochs'.format(subEpoch)
                        stop = True
                        break
            if stop:
                break
        # Restore the best checkpoint and record this fold's metrics
        model.load_weights(saveLoc + "best.h5")
        histories.append({})
        histories[-1]['val_loss'] = lossHistory
        results.append({})
        results[-1]['loss'] = best
        if binary:
            testbatch = batchYielder.getBatch(testID) #Load testing fold
            if not isinstance(testbatch['weights'], types.NoneType):
                results[-1]['wAUC'] = 1-roc_auc_score(testbatch['targets'],
                                                      model.predict(testbatch['inputs'], verbose=0),
                                                      sample_weight=testbatch['weights'])
            results[-1]['AUC'] = 1-roc_auc_score(testbatch['targets'],
                                                 model.predict(testbatch['inputs'], verbose=0))
        print "Score is:", results[-1]
        if plotLR: cosAnneal.plot_lr()
        if plotMomentum: cosAnnealMomentum.plot_momentum()
        if plotOneCycle: oneCycle.plot()
        print("Fold took {:.3f}s\n".format(timeit.default_timer() - foldStart))
        model.save(saveLoc + 'train_' + str(fold) + '.h5')
        with open(saveLoc + 'resultsFile.pkl', 'wb') as fout: #Save results
            pickle.dump(results, fout)
    print("\n______________________________________")
    print("Training finished")
    print("Cross-validation took {:.3f}s ".format(timeit.default_timer() - start))
    plotTrainingHistory(histories, save=saveLoc + 'loss_history.png')
    # Report each metric's mean +/- standard error across folds
    for score in results[0]:
        mean = uncertRound(np.mean([x[score] for x in results]), np.std([x[score] for x in results])/np.sqrt(len(results)))
        print "Mean", score, "= {} +- {}".format(mean[0], mean[1])
    print("______________________________________\n")
    if logoutput:
        sys.stdout = old_stdout
        log_file.close()
    return results, histories
# Train the cross-validated ensemble with SWA starting at epoch 150
results, histories = batchTrainClassifier(trainData, nSplits, getModel, modelParams, trainParams, swaStart=150,
                                          patience=50, cosAnnealMult=0, reduxDecay=True, trainOnWeights=True,
                                          maxEpochs=maxEpochs, verbose=1)
# ## Construct ensemble
with open('train_weights/resultsFile.pkl', 'r') as fin:
    results = pickle.load(fin)
ensemble, weights = assembleEnsemble(results, ensembleSize, ensembleMode, compileArgs)
# ## Response on val data
with open(dirLoc + 'inputPipe.pkl', 'r') as fin:
    inputPipe = pickle.load(fin)
valData = RotationReflectionBatch(classTrainFeatures, h5py.File(dirLoc + 'val.hdf5', "r+"), inputPipe=inputPipe,
                                  rotate = False, reflect = True, augRotMult=16)
# Collect per-augmentation predictions for each of the 10 test batches
pred = []
for i in xrange(10):
    tmpPred = []
    for aug in range(valData.augMult):
        batch = valData.getTestBatch(i, aug)['inputs']
        tmpPred.append(ensemblePredict(batch, ensemble, weights, n=1))
    pred.append(np.array(tmpPred))
tPred = np.concatenate(pred, axis=1)
# One column per augmentation, plus the test-time-augmentation mean
df = pandas.DataFrame(getFeature('targets', valData.source), columns=['target'])
for p in xrange(tPred.shape[0]):
    df['pred_'+ str(p)] = tPred[p,:]
df.loc[:,'pred_mean'] = np.mean(df[[x for x in df.columns if 'pred' in x]], axis=1)
# Fractions of events where the single (un-augmented) prediction sits on the
# correct side of the TTA mean, for signal, background, and overall.
# NOTE(review): the headed cells below were re-run under different
# augmentation settings; as written they repeat identical expressions.
# ### Rot 8
len(df[(df.target == 1) & (df.pred_0 > df.pred_mean)])/len(df[(df.target == 1)])
len(df[(df.target == 0) & (df.pred_0 < df.pred_mean)])/len(df[(df.target == 0)])
(len(df[(df.target == 0) & (df.pred_0 < df.pred_mean)])+len(df[(df.target == 1) & (df.pred_0 > df.pred_mean)]))/len(df)
# ### Ref
len(df[(df.target == 1) & (df.pred_0 > df.pred_mean)])/len(df[(df.target == 1)])
len(df[(df.target == 0) & (df.pred_0 < df.pred_mean)])/len(df[(df.target == 0)])
(len(df[(df.target == 0) & (df.pred_0 < df.pred_mean)])+len(df[(df.target == 1) & (df.pred_0 > df.pred_mean)]))/len(df)
# ### RotRef 4
len(df[(df.target == 1) & (df.pred_0 > df.pred_mean)])/len(df[(df.target == 1)])
len(df[(df.target == 0) & (df.pred_0 < df.pred_mean)])/len(df[(df.target == 0)])
(len(df[(df.target == 0) & (df.pred_0 < df.pred_mean)])+len(df[(df.target == 1) & (df.pred_0 > df.pred_mean)]))/len(df)
# ### RotRef 8
len(df[(df.target == 1) & (df.pred_0 > df.pred_mean)])/len(df[(df.target == 1)])
len(df[(df.target == 0) & (df.pred_0 < df.pred_mean)])/len(df[(df.target == 0)])
(len(df[(df.target == 0) & (df.pred_0 < df.pred_mean)])+len(df[(df.target == 1) & (df.pred_0 > df.pred_mean)]))/len(df)
# ### RotRef 10
len(df[(df.target == 1) & (df.pred_0 > df.pred_mean)])/len(df[(df.target == 1)])
len(df[(df.target == 0) & (df.pred_0 < df.pred_mean)])/len(df[(df.target == 0)])
(len(df[(df.target == 0) & (df.pred_0 < df.pred_mean)])+len(df[(df.target == 1) & (df.pred_0 > df.pred_mean)]))/len(df)
# ### RotRef 16
len(df[(df.target == 1) & (df.pred_0 > df.pred_mean)])/len(df[(df.target == 1)])
len(df[(df.target == 0) & (df.pred_0 < df.pred_mean)])/len(df[(df.target == 0)])
(len(df[(df.target == 0) & (df.pred_0 < df.pred_mean)])+len(df[(df.target == 1) & (df.pred_0 > df.pred_mean)]))/len(df)
def amsScanQuick(inData, wFactor=250000./50000.):
    '''Scan prediction cuts for the maximum AMS (approximate median significance).

    Walks events in ascending pred_class order; at each step the remaining
    signal (s) and background (b) weight sums correspond to a cut at that
    event's prediction. Weights are rescaled by wFactor to the full-dataset
    normalisation. Returns (amsMax, threshold) and prints them.
    '''
    s = np.sum(inData.loc[inData['gen_target'] == 1, 'gen_weight'])
    b = np.sum(inData.loc[inData['gen_target'] == 0, 'gen_weight'])
    tIIs = inData['pred_class'].argsort()
    amss = np.empty([len(tIIs)])
    amsMax = 0
    threshold = 0.0
    for tI in range(len(tIIs)):
        # don't forget to renormalize the weights to the same sum
        # as in the complete training set
        amss[tI] = AMS(max(0,s * wFactor),max(0,b * wFactor))
        if amss[tI] > amsMax:
            amsMax = amss[tI]
            threshold = inData['pred_class'].values[tIIs[tI]]
            #print tI,threshold
        # Remove the current event from whichever class it belongs to
        if inData.loc[:, 'gen_target'].values[tIIs[tI]]:
            s -= inData.loc[:, 'gen_weight'].values[tIIs[tI]]
        else:
            b -= inData.loc[:, 'gen_weight'].values[tIIs[tI]]
    print amsMax, threshold
    return amsMax, threshold
# Sweep of test-time-augmentation configurations: each section below rebuilds
# the validation BatchYielder with a different rotate/reflect/augRotMult
# setting, predicts with the ensemble, and reports weighted/unweighted ROC
# AUC plus the quick AMS scan.
# ## Response on validation data base
valData = BatchYielder(h5py.File(dirLoc + 'val.hdf5', "r+"))
batchEnsemblePredict(ensemble, weights, valData, ensembleSize=5, verbose=0)
print 'Testing ROC AUC: unweighted {}, weighted {}'.format(roc_auc_score(getFeature('targets', valData.source), getFeature('pred', valData.source)),
                                                           roc_auc_score(getFeature('targets', valData.source), getFeature('pred', valData.source), sample_weight=getFeature('weights', valData.source)))
amsScanQuick(convertToDF(valData.source))
# ## Response on validation data Rot-Ref 4
with open(dirLoc + 'inputPipe.pkl', 'r') as fin:
    inputPipe = pickle.load(fin)
valData = RotationReflectionBatch(classTrainFeatures, h5py.File(dirLoc + 'val.hdf5', "r+"), inputPipe=inputPipe,
                                  rotate = True, reflect = True, augRotMult=4)
batchEnsemblePredict(ensemble, weights, valData, ensembleSize=5, verbose=0)
print 'Testing ROC AUC: unweighted {}, weighted {}'.format(roc_auc_score(getFeature('targets', valData.source), getFeature('pred', valData.source)),
                                                           roc_auc_score(getFeature('targets', valData.source), getFeature('pred', valData.source), sample_weight=getFeature('weights', valData.source)))
amsScanQuick(convertToDF(valData.source))
# ## Response on validation data Rot-Ref 8
with open(dirLoc + 'inputPipe.pkl', 'r') as fin:
    inputPipe = pickle.load(fin)
valData = RotationReflectionBatch(classTrainFeatures, h5py.File(dirLoc + 'val.hdf5', "r+"), inputPipe=inputPipe,
                                  rotate = True, reflect = True, augRotMult=8)
batchEnsemblePredict(ensemble, weights, valData, ensembleSize=5, verbose=0)
print 'Testing ROC AUC: unweighted {}, weighted {}'.format(roc_auc_score(getFeature('targets', valData.source), getFeature('pred', valData.source)),
                                                           roc_auc_score(getFeature('targets', valData.source), getFeature('pred', valData.source), sample_weight=getFeature('weights', valData.source)))
amsScanQuick(convertToDF(valData.source))
# ## Response on validation data Ref
with open(dirLoc + 'inputPipe.pkl', 'r') as fin:
    inputPipe = pickle.load(fin)
valData = RotationReflectionBatch(classTrainFeatures, h5py.File(dirLoc + 'val.hdf5', "r+"), inputPipe=inputPipe,
                                  rotate = False, reflect = True, augRotMult=8)
batchEnsemblePredict(ensemble, weights, valData, ensembleSize=5, verbose=0)
print 'Testing ROC AUC: unweighted {}, weighted {}'.format(roc_auc_score(getFeature('targets', valData.source), getFeature('pred', valData.source)),
                                                           roc_auc_score(getFeature('targets', valData.source), getFeature('pred', valData.source), sample_weight=getFeature('weights', valData.source)))
amsScanQuick(convertToDF(valData.source))
# ## Response on validation data Rot 4
with open(dirLoc + 'inputPipe.pkl', 'r') as fin:
    inputPipe = pickle.load(fin)
valData = RotationReflectionBatch(classTrainFeatures, h5py.File(dirLoc + 'val.hdf5', "r+"), inputPipe=inputPipe,
                                  rotate = True, reflect = False, augRotMult=4)
batchEnsemblePredict(ensemble, weights, valData, ensembleSize=5, verbose=0)
print 'Testing ROC AUC: unweighted {}, weighted {}'.format(roc_auc_score(getFeature('targets', valData.source), getFeature('pred', valData.source)),
                                                           roc_auc_score(getFeature('targets', valData.source), getFeature('pred', valData.source), sample_weight=getFeature('weights', valData.source)))
amsScanQuick(convertToDF(valData.source))
# ## Response on validation data Rot 5
with open(dirLoc + 'inputPipe.pkl', 'r') as fin:
    inputPipe = pickle.load(fin)
valData = RotationReflectionBatch(classTrainFeatures, h5py.File(dirLoc + 'val.hdf5', "r+"), inputPipe=inputPipe,
                                  rotate = True, reflect = False, augRotMult=5)
batchEnsemblePredict(ensemble, weights, valData, ensembleSize=5, verbose=0)
print 'Testing ROC AUC: unweighted {}, weighted {}'.format(roc_auc_score(getFeature('targets', valData.source), getFeature('pred', valData.source)),
                                                           roc_auc_score(getFeature('targets', valData.source), getFeature('pred', valData.source), sample_weight=getFeature('weights', valData.source)))
amsScanQuick(convertToDF(valData.source))
# ## Response on validation data Rot 6
with open(dirLoc + 'inputPipe.pkl', 'r') as fin:
    inputPipe = pickle.load(fin)
valData = RotationReflectionBatch(classTrainFeatures, h5py.File(dirLoc + 'val.hdf5', "r+"), inputPipe=inputPipe,
                                  rotate = True, reflect = False, augRotMult=6)
batchEnsemblePredict(ensemble, weights, valData, ensembleSize=5, verbose=0)
print 'Testing ROC AUC: unweighted {}, weighted {}'.format(roc_auc_score(getFeature('targets', valData.source), getFeature('pred', valData.source)),
                                                           roc_auc_score(getFeature('targets', valData.source), getFeature('pred', valData.source), sample_weight=getFeature('weights', valData.source)))
amsScanQuick(convertToDF(valData.source))
# ## Response on validation data Rot 8
with open(dirLoc + 'inputPipe.pkl', 'r') as fin:
    inputPipe = pickle.load(fin)
valData = RotationReflectionBatch(classTrainFeatures, h5py.File(dirLoc + 'val.hdf5', "r+"), inputPipe=inputPipe,
                                  rotate = True, reflect = False, augRotMult=8)
batchEnsemblePredict(ensemble, weights, valData, ensembleSize=5, verbose=0)
print 'Testing ROC AUC: unweighted {}, weighted {}'.format(roc_auc_score(getFeature('targets', valData.source), getFeature('pred', valData.source)),
                                                           roc_auc_score(getFeature('targets', valData.source), getFeature('pred', valData.source), sample_weight=getFeature('weights', valData.source)))
amsScanQuick(convertToDF(valData.source))
# ## Response on validation data Rot 10
with open(dirLoc + 'inputPipe.pkl', 'r') as fin:
    inputPipe = pickle.load(fin)
valData = RotationReflectionBatch(classTrainFeatures, h5py.File(dirLoc + 'val.hdf5', "r+"), inputPipe=inputPipe,
                                  rotate = True, reflect = False, augRotMult=10)
batchEnsemblePredict(ensemble, weights, valData, ensembleSize=5, verbose=0)
print 'Testing ROC AUC: unweighted {}, weighted {}'.format(roc_auc_score(getFeature('targets', valData.source), getFeature('pred', valData.source)),
                                                           roc_auc_score(getFeature('targets', valData.source), getFeature('pred', valData.source), sample_weight=getFeature('weights', valData.source)))
amsScanQuick(convertToDF(valData.source))
# ## Response on validation data Rot 16
with open(dirLoc + 'inputPipe.pkl', 'r') as fin:
    inputPipe = pickle.load(fin)
valData = RotationReflectionBatch(classTrainFeatures, h5py.File(dirLoc + 'val.hdf5', "r+"), inputPipe=inputPipe,
                                  rotate = True, reflect = False, augRotMult=16)
batchEnsemblePredict(ensemble, weights, valData, ensembleSize=5, verbose=0)
print 'Testing ROC AUC: unweighted {}, weighted {}'.format(roc_auc_score(getFeature('targets', valData.source), getFeature('pred', valData.source)),
                                                           roc_auc_score(getFeature('targets', valData.source), getFeature('pred', valData.source), sample_weight=getFeature('weights', valData.source)))
amsScanQuick(convertToDF(valData.source))
# ## Response on validation data Rot-Ref 16
with open(dirLoc + 'inputPipe.pkl', 'r') as fin:
    inputPipe = pickle.load(fin)
valData = RotationReflectionBatch(classTrainFeatures, h5py.File(dirLoc + 'val.hdf5', "r+"), inputPipe=inputPipe,
                                  rotate = True, reflect = True, augRotMult=16)
batchEnsemblePredict(ensemble, weights, valData, ensembleSize=5, verbose=0)
print 'Testing ROC AUC: unweighted {}, weighted {}'.format(roc_auc_score(getFeature('targets', valData.source), getFeature('pred', valData.source)),
                                                           roc_auc_score(getFeature('targets', valData.source), getFeature('pred', valData.source), sample_weight=getFeature('weights', valData.source)))
amsScanQuick(convertToDF(valData.source))
# ## Response on validation data Rot-Ref 3
with open(dirLoc + 'inputPipe.pkl', 'r') as fin:
    inputPipe = pickle.load(fin)
valData = RotationReflectionBatch(classTrainFeatures, h5py.File(dirLoc + 'val.hdf5', "r+"), inputPipe=inputPipe,
                                  rotate = True, reflect = True, augRotMult=3)
batchEnsemblePredict(ensemble, weights, valData, ensembleSize=5, verbose=0)
print 'Testing ROC AUC: unweighted {}, weighted {}'.format(roc_auc_score(getFeature('targets', valData.source), getFeature('pred', valData.source)),
                                                           roc_auc_score(getFeature('targets', valData.source), getFeature('pred', valData.source), sample_weight=getFeature('weights', valData.source)))
amsScanQuick(convertToDF(valData.source))
# ## Response on validation data Rot-Ref 5
with open(dirLoc + 'inputPipe.pkl', 'r') as fin:
    inputPipe = pickle.load(fin)
valData = RotationReflectionBatch(classTrainFeatures, h5py.File(dirLoc + 'val.hdf5', "r+"), inputPipe=inputPipe,
                                  rotate = True, reflect = True, augRotMult=5)
batchEnsemblePredict(ensemble, weights, valData, ensembleSize=5, verbose=0)
print 'Testing ROC AUC: unweighted {}, weighted {}'.format(roc_auc_score(getFeature('targets', valData.source), getFeature('pred', valData.source)),
                                                           roc_auc_score(getFeature('targets', valData.source), getFeature('pred', valData.source), sample_weight=getFeature('weights', valData.source)))
amsScanQuick(convertToDF(valData.source))
# # Final run
# Predict on the held-out test set with the chosen TTA config and submit
batchEnsemblePredict(ensemble, weights,
                     RotationReflectionBatch(classTrainFeatures, h5py.File(dirLoc + 'testing.hdf5', "r+"), inputPipe=inputPipe,
                                             rotate = True, reflect = True, augRotMult=8),
                     ensembleSize=5, verbose=1)
saveTest(0.9632657, 'Day_10_basic_rotref8_5')
# !kaggle competitions submit -c higgs-boson -f ../Data/Day_10_basic_rotref8_5_test.csv -m"Day_10 basic rotref8 5"
# ## Response on validation data Rot-Ref 8 10
with open(dirLoc + 'inputPipe.pkl', 'r') as fin:
    inputPipe = pickle.load(fin)
valData = RotationReflectionBatch(classTrainFeatures, h5py.File(dirLoc + 'val.hdf5', "r+"), inputPipe=inputPipe,
                                  rotate = True, reflect = True, augRotMult=8)
batchEnsemblePredict(ensemble, weights, valData, ensembleSize=10, verbose=0)
print 'Testing ROC AUC: unweighted {}, weighted {}'.format(roc_auc_score(getFeature('targets', valData.source), getFeature('pred', valData.source)),
                                                           roc_auc_score(getFeature('targets', valData.source), getFeature('pred', valData.source), sample_weight=getFeature('weights', valData.source)))
cut = amsScanQuick(convertToDF(valData.source))[1]
# Derive weight renormalisation factors from the train/val event-count split
trainFrame = pandas.DataFrame()
trainFrame['gen_target'] = getFeature('targets', trainData.source)
trainFrame['gen_weight'] = getFeature('weights', trainData.source)
valFrame = convertToDF(valData.source)
sigFactor = (len(trainFrame.loc[trainFrame.gen_target == 1, 'gen_weight'])+len(valFrame.loc[valFrame.gen_target == 1, 'gen_weight']))/len(valFrame.loc[valFrame.gen_target == 1, 'gen_weight'])
bkgFactor = (len(trainFrame.loc[trainFrame.gen_target == 0, 'gen_weight'])+len(valFrame.loc[valFrame.gen_target == 0, 'gen_weight']))/len(valFrame.loc[valFrame.gen_target == 0, 'gen_weight'])
print sigFactor, bkgFactor
def amsScanQuick(inData, sigFactor=250000./50000., bkgFactor=250000./50000.):
    '''Scan prediction cuts for the maximum AMS, with separate class rescaling.

    Redefinition of amsScanQuick above: signal and background weight sums are
    renormalised independently by sigFactor/bkgFactor before the AMS is
    evaluated at each candidate cut. Returns (amsMax, threshold).
    '''
    s = np.sum(inData.loc[inData['gen_target'] == 1, 'gen_weight'])
    b = np.sum(inData.loc[inData['gen_target'] == 0, 'gen_weight'])
    tIIs = inData['pred_class'].argsort()
    amss = np.empty([len(tIIs)])
    amsMax = 0
    threshold = 0.0
    for tI in range(len(tIIs)):
        # don't forget to renormalize the weights to the same sum
        # as in the complete training set
        amss[tI] = AMS(max(0,s * sigFactor),max(0,b * bkgFactor))
        if amss[tI] > amsMax:
            amsMax = amss[tI]
            threshold = inData['pred_class'].values[tIIs[tI]]
            #print tI,threshold
        # Remove the current event from whichever class it belongs to
        if inData.loc[:, 'gen_target'].values[tIIs[tI]]:
            s -= inData.loc[:, 'gen_weight'].values[tIIs[tI]]
        else:
            b -= inData.loc[:, 'gen_weight'].values[tIIs[tI]]
    print amsMax, threshold
    return amsMax, threshold
# AMS scan on the validation frame with the derived renormalisation factors
amsScanQuick(valFrame, sigFactor, bkgFactor)
# +
def amsScan(inData, scale=False):
    '''Brute-force AMS scan: evaluate the AMS at every event's prediction value.

    NOTE(review): `wFactor` is not defined in this function — it relies on a
    module-level global; the `scale` parameter is accepted but never used
    (foldAMSScan passes a tuple of class weight sums here). Confirm intent.
    Returns (ams_list, [best_cut, best_ams]).
    '''
    best = [0,-1]
    ams = []
    for index, row in inData.iterrows():
        # Signal/background weight above the candidate cut, renormalised
        s = wFactor*np.sum(inData.loc[(inData['pred_class'] >= row['pred_class']) & (inData['gen_target'] == 1), 'gen_weight'])
        b = wFactor*np.sum(inData.loc[(inData['pred_class'] >= row['pred_class']) & (inData['gen_target'] == 0), 'gen_weight'])
        ams.append(AMS(s, b))
        if ams[-1] > best[1]:
            best = [row['pred_class'], ams[-1]]
    print best
    return ams, best
def foldAMSScan(inData, N=10):
    '''Run amsScan on N stratified folds and summarise the per-fold best cuts.

    NOTE(review): `inData.iloc[test][inData.gen_target == 1]` indexes a
    positional subset with a full-length boolean mask — pandas aligns on the
    index here; verify this selects the intended rows.
    Returns the list of per-fold [best_cut, best_ams] pairs.
    '''
    kf = StratifiedKFold(n_splits=N, shuffle=True)
    folds = kf.split(inData, inData['gen_target'])
    bests = []
    for i, (train, test) in enumerate(folds):
        bests.append(amsScan(inData.iloc[test], (np.sum(inData[(inData['gen_target'] == 1)]['gen_weight']), np.sum(inData[(inData['gen_target'] == 0)]['gen_weight'])))[1])
        print "Fold {}, best AMS {} at cut of {}. Total weights Signal:Bkg. {}:{}".format(i, bests[-1][1], bests[-1][0],
                                                                                          np.sum(inData.iloc[test][inData.gen_target == 1]['gen_weight']),
                                                                                          np.sum(inData.iloc[test][inData.gen_target == 0]['gen_weight']))
    print "Mean cut", np.average([x[0] for x in bests], weights=[1/x[1] for x in bests]), "mean AMS", np.average([x[1] for x in bests], weights=[1/x[1] for x in bests])
    return bests
# -
# Compare cut-selection strategies: unscaled quick scan vs per-fold scans
amsScanQuick(convertToDF(valData.source), 1, 1)
bests = foldAMSScan(convertToDF(valData.source))
# Various weighted averages of the per-fold best cuts
print np.average([x[0] for x in bests])
print np.average([x[0] for x in bests], weights=[1/x[1] for x in bests])
print np.average([x[0] for x in bests], weights=[x[1] for x in bests])
print np.average([x[0] for x in bests], weights=[1.7448610687361392-x[1] for x in bests])
print np.average([x[0] for x in bests], weights=[1/np.abs(1.7448610687361392-x[1]) for x in bests])
# Final test-set prediction with Rot-Ref 8 TTA and the full 10-model ensemble
batchEnsemblePredict(ensemble, weights,
                     RotationReflectionBatch(classTrainFeatures, h5py.File(dirLoc + 'testing.hdf5', "r+"), inputPipe=inputPipe,
                                             rotate = True, reflect = True, augRotMult=8),
                     ensembleSize=10, verbose=1)
saveTest(0.9557010754710975, 'Day_10_M_rotref8_10')
# !kaggle competitions submit -c higgs-boson -f ../Data/Day_10_M_rotref8_10_test.csv -m"Day_10 minus rotref8 10"
# ## Response on validation data Rot-Ref 16 10
with open(dirLoc + 'inputPipe.pkl', 'r') as fin:
    inputPipe = pickle.load(fin)
valData = RotationReflectionBatch(classTrainFeatures, h5py.File(dirLoc + 'val.hdf5', "r+"), inputPipe=inputPipe,
                                  rotate = True, reflect = True, augRotMult=16)
batchEnsemblePredict(ensemble, weights, valData, ensembleSize=10, verbose=1)
print 'Testing ROC AUC: unweighted {}, weighted {}'.format(roc_auc_score(getFeature('targets', valData.source), getFeature('pred', valData.source)),
                                                           roc_auc_score(getFeature('targets', valData.source), getFeature('pred', valData.source), sample_weight=getFeature('weights', valData.source)))
amsScanQuick(convertToDF(valData.source), 1, 1)
bests = foldAMSScan(convertToDF(valData.source))
print 'Mean', np.average([x[0] for x in bests])
print 'Inverse-AMS-weighted mean', np.average([x[0] for x in bests], weights=[1/x[1] for x in bests])
print 'AMS-weighted mean', np.average([x[0] for x in bests], weights=[x[1] for x in bests])
print 'AMS-Difference-weighted mean', np.average([x[0] for x in bests], weights=[1.7472080023156094-x[1] for x in bests])
print 'Inverse AMS-Difference-weighted mean', np.average([x[0] for x in bests], weights=[1/np.abs(1.7472080023156094-x[1]) for x in bests])
# Rot-Ref 16 test-set prediction and submission at a fixed cut of 0.95
batchEnsemblePredict(ensemble, weights,
                     RotationReflectionBatch(classTrainFeatures, h5py.File(dirLoc + 'testing.hdf5', "r+"), inputPipe=inputPipe,
                                             rotate = True, reflect = True, augRotMult=16),
                     ensembleSize=10, verbose=1)
saveTest(0.95, 'Day_10_95_rotref16_10')
# !kaggle competitions submit -c higgs-boson -f ../Data/Day_10_95_rotref16_10_test.csv -m"Day_10 95 rotref16 10"
| Classifiers/Day11/Day_11_SWA-150.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# language: python
# name: python3
# ---
# +
# %load_ext autoreload
# %autoreload 2
import os
import sys
import skimage.draw
import matplotlib.pyplot as plt
import numpy as np
import warnings
warnings.filterwarnings("ignore")
# -
# Hard-coded run configuration: data/weights/log locations, the command to
# execute ("train" or "test"), and the image to run inference on.
dataset = "/allen/programs/celltypes/workgroups/em-connectomics/gayathrim/nuclei_segmentation/data/maskrcnn"
weights = "/allen/programs/celltypes/workgroups/em-connectomics/gayathrim/nuclei_segmentation/logs/nuclei20190811T1750/mask_rcnn_nuclei_0160.h5"
logs = "/allen/programs/celltypes/workgroups/em-connectomics/gayathrim/nuclei_segmentation/logs"
command = "test"
image = "/allen/programs/celltypes/workgroups/em-connectomics/gayathrim/nuclei_segmentation/data/maskrcnn/val/images/data_19_2.png"
# +
# Root directory of the project
ROOT_DIR = os.path.abspath("../../")
# Import Mask RCNN
sys.path.append(ROOT_DIR)
from mrcnn import model as modellib, utils, visualize
from nuclei import NucleiDataset, NucleiConfig, NucleiInferenceConfig
# Path to trained weights file
COCO_WEIGHTS_PATH = os.path.join(ROOT_DIR, "mask_rcnn_coco.h5")
# Directory to save logs and model checkpoints, if not provided
# through the command line argument --logs
DEFAULT_LOGS_DIR = os.path.join(ROOT_DIR, "logs")
def get_ax(rows=1, cols=1, size=16):
    """Create a grid of Matplotlib Axes for the notebook's visualizations.

    This is the central point for controlling figure sizes: every cell of
    the rows x cols grid is rendered at `size` inches per side, so adjust
    `size` to control how large images appear.
    """
    figure_width = size * cols
    figure_height = size * rows
    _fig, axes = plt.subplots(rows, cols, figsize=(figure_width, figure_height))
    return axes
# +
def test(model, image_path):
    """Run Mask R-CNN inference on a single image and display the detections.

    Parameters
    ----------
    model : modellib.MaskRCNN
        Model built in "inference" mode with weights already loaded.
    image_path : str
        Path of the image file to segment.
    """
    # The top of the file only imports skimage.draw; skimage does not
    # guarantee that the io/color submodules are importable as attributes
    # without an explicit import, so bring them in here.
    import skimage.io
    import skimage.color
    print("Running on {}".format(image_path))
    # Read image and promote grayscale input to 3-channel RGB
    image = skimage.io.imread(image_path)
    image = skimage.color.gray2rgb(image)
    print(image.shape)
    # Remove alpha channel, if it has one
    if image.shape[-1] == 4:
        image = image[..., :3]
    # detect() consumes a batch of images, so wrap/unwrap the single image
    r = model.detect([image], verbose=1)[0]
    # Display results
    ax = get_ax(1)
    visualize.display_instances(image, r['rois'], r['masks'], r['class_ids'],
                                ["Background", "nuclei"], r['scores'], ax=ax,
                                title="Predictions")
# +
# Build the run configuration for the selected command.
print(command)
is_training = command == "train"
if is_training:
    config = NucleiConfig()
else:
    class InferenceConfig(NucleiInferenceConfig):
        """Inference-time overrides: process one image at a time
        (batch size = GPU_COUNT * IMAGES_PER_GPU)."""
        GPU_COUNT = 1
        IMAGES_PER_GPU = 1
        IMAGE_MIN_DIM = 256
        IMAGE_RESIZE_MODE = "none"
        DETECTION_MIN_CONFIDENCE = 0.7

    config = InferenceConfig()
config.display()

# Instantiate Mask R-CNN in the mode matching the command.
model = modellib.MaskRCNN(mode="training" if is_training else "inference",
                          config=config, model_dir=logs)

# Load the trained weights by layer name.
weights_path = weights
print("Loading weights ", weights_path)
model.load_weights(weights_path, by_name=True)
# +
# Dispatch on the requested command.
if command == "train":
    train(model, dataset)
elif command == "test":
    test(model, image_path=image)
else:
    print("'{}' is not recognized. Use 'train' or 'test'".format(command))
# -
| samples/nuclei/test_nuclei.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: pydev
# language: python
# name: pydev
# ---
# ### Load Amazon Data into Spark DataFrame
# %pyspark
from pyspark import SparkFiles
# Pull the gzipped TSV from the public S3 review dataset and register it
# with every executor via SparkFiles.
# NOTE(review): `spark` (the SparkSession) is assumed to be provided by the
# notebook runtime (e.g. Zeppelin); it is not created in this notebook.
url = "https://s3.amazonaws.com/amazon-reviews-pds/tsv/amazon_reviews_us_Video_Games_v1_00.tsv.gz"
spark.sparkContext.addFile(url)
video_games_df = spark.read.csv(SparkFiles.get("amazon_reviews_us_Video_Games_v1_00.tsv.gz"), sep="\t", header=True)
video_games_df.show()
# ### Size of data
# %pyspark
# Row Count
video_games_df.count()
# ### Cleaned up DataFrames to match tables
# %pyspark
from pyspark.sql.functions import to_date
# Review DataFrame
# One row per review with its ids; review_date parsed from string to date.
review_id_df = video_games_df.select(["review_id", "customer_id", "product_id", "product_parent", to_date("review_date", 'yyyy-MM-dd').alias("review_date")])
review_id_df.show()
# %pyspark
# Product lookup table; duplicates dropped so product_id is unique.
products_df = video_games_df.select(["product_id", "product_title"]).drop_duplicates()
# %pyspark
# Review text per review_id.
# NOTE(review): reviews_df is never written to RDS in the export section
# below — confirm whether a reviews table write is missing.
reviews_df = video_games_df.select(["review_id", "review_headline", "review_body"])
reviews_df.show(10)
# %pyspark
# Number of reviews per customer.
customers_df = video_games_df.groupby("customer_id").agg({"customer_id": "count"}).withColumnRenamed("count(customer_id)", "customer_count")
customers_df.show()
# %pyspark
# Vine-program and vote metadata per review.
vine_df = video_games_df.select(["review_id", "star_rating", "helpful_votes", "total_votes", "vine"])
vine_df.show(10)
# ### Push to AWS RDS instance
# %pyspark
# "append" adds rows to existing tables; re-running the notebook will insert
# duplicate rows.
mode = "append"
# <endpoint> and <password> are placeholders — fill in before running.
jdbc_url="jdbc:postgresql://<endpoint>:5432/my_data_class_db"
config = {"user":"root", "password": "<password>", "driver":"org.postgresql.Driver"}
# %pyspark
# Write review_id_df to table in RDS
review_id_df.write.jdbc(url=jdbc_url, table='review_id_table', mode=mode, properties=config)
# %pyspark
# Write products_df to table in RDS
products_df.write.jdbc(url=jdbc_url, table='products', mode=mode, properties=config)
# %pyspark
# Write customers_df to table in RDS
customers_df.write.jdbc(url=jdbc_url, table='customers', mode=mode, properties=config)
# %pyspark
# Write vine_df to table in RDS
# NOTE(review): reviews_df (created above) has no corresponding write here.
vine_df.write.jdbc(url=jdbc_url, table='vines', mode=mode, properties=config)
| level_one_solution.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# language: python
# name: python3
# ---
# # Tokenization
#
# So, since the UDF approach wasn't fruitful, we will use Stanford's PTBTokenizer.
#
# Downloading either CoreNLP or the stanford-parser is required https://stanfordnlp.github.io/CoreNLP/
#
# PySpark creates multiple .txt files when writing the examples. The PTBTokenizer is able to tokenize multiple files by reading a master file, so let's create them.
# !ls reviews.txt/*.txt > reviews.txt/files.txt
# !cat reviews.txt/files.txt
# !ls context.txt/*.txt > context.txt/files.txt
# !cat context.txt/files.txt
# !CLASSPATH=~/dev/stanford-parser-full-2018-10-17/stanford-parser.jar java edu.stanford.nlp.process.PTBTokenizer -lowerCase -preserveLines -fileList reviews.txt/files.txt > tokenized_reviews.txt
# !CLASSPATH=~/dev/stanford-parser-full-2018-10-17/stanford-parser.jar java edu.stanford.nlp.process.PTBTokenizer -lowerCase -preserveLines -fileList context.txt/files.txt > tokenized_context.txt
# Check the line counts
# !cat tokenized_reviews.txt | wc -l
# !cat tokenized_context.txt | wc -l
# We now need to split these files into a train, validation and test set
# !./split.sh
# We now have a training data set with 6 million examples, as well as a validation data set with 15k examples. The remaining data can be used for testing.
| 02_fake_news_tokenization.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# language: python
# name: python3
# ---
# + [markdown] _cell_guid="3789d571-2f34-4e7c-b84f-2983ebece1df" _uuid="e9b749390e101cd435028b3049f126fea962d7b7"
# Data fields
# Each row of the training data contains a click record, with the following features.
#
# ip: ip address of click.
#
# app: app id for marketing.
#
# device: device type id of user mobile phone (e.g., iphone 6 plus, iphone 7, huawei mate 7, etc.)
#
# os: os version id of user mobile phone
#
# channel: channel id of mobile ad publisher
#
# click_time: timestamp of click (UTC)
#
# attributed_time: if the user downloaded the app after clicking an ad, this is the time of the app download
#
# is_attributed: the target that is to be predicted, indicating the app was downloaded
#
# Note that ip, app, device, os, and channel are encoded.
#
# The test data is similar, with the following differences:
#
# click_id: reference for making predictions
#
# is_attributed: not included
# + _cell_guid="b1076dfc-b9ad-4769-8c92-a6c4dae69d19" _uuid="8f2839f25d086af736a60e9eeb907d3b93b6e0e5"
# This Python 3 environment comes with many helpful analytics libraries installed
# It is defined by the kaggle/python docker image: https://github.com/kaggle/docker-python
# For example, here's several helpful packages to load in
import numpy as np # linear algebra
import pandas as pd # data processing, CSV file I/O (e.g. pd.read_csv)
import matplotlib.pyplot as plt
import seaborn as sns
import gc
import datetime
# Input data files are available in the "../input/" directory.
# For example, running this (by clicking run or pressing Shift+Enter) will list the files in the input directory
import os
print(os.listdir("../input"))
# Any results you write to the current directory are saved as output.
# + _cell_guid="81ffb929-3a3c-49ee-8b6e-352aa1d8ea0d" _uuid="a51624aa399b0c83140b7d7b3241817624d0a253"
# Narrow dtypes for read_csv: the encoded ids fit in unsigned ints, which
# cuts the DataFrame's memory footprint substantially.
data_types = {
        'ip'            : 'uint32',
        'app'           : 'uint16',
        'device'        : 'uint16',
        'os'            : 'uint16',
        'channel'       : 'uint16',
        'is_attributed' : 'bool'  # target column (0/1 in the CSV)
        }
# + _cell_guid="a17dcc35-988c-478c-9df3-7fc55f9cf1dd" _uuid="0ea1c008bbe65b4009ef4237ea2fc7de092df53c"
# 100k-row sample of the click log, loaded with the compact dtypes above.
sample_train = pd.read_csv('../input/train_sample.csv', dtype=data_types)
# + _cell_guid="b48b5ce5-1a1c-400e-ae31-45681fadb97c" _uuid="6c7d2dcf748b4b31997430b93c3950afdd762d00"
sample_train.info()
# + _cell_guid="4b318e08-e5d0-4a94-b9c2-3c6b28225672" _uuid="27b3d034c1c2618ebb763f2bd2d8b7b3f08fce97"
# Which IPs appear exactly once in the sample.
(sample_train.groupby(['ip']).ip.count() == 1).values
# + _cell_guid="7ddc1ad0-86fc-4f28-a081-72cbd565d57f" _uuid="c3fb5746480f8ac5fd8dedd3a340c78ce5d20fc4"
# Distribution of per-IP conversion rates (downloads / clicks).
(sample_train.groupby('ip').is_attributed.sum()/sample_train.groupby('ip').is_attributed.count()).value_counts()
# + _cell_guid="f6b9e5a1-f7c7-4edd-a954-9a65f69b70de" _uuid="6ebe135adbb1ad7fc8cbb2cbd1b5210ea3f686f6"
# Parse the timestamp columns (attributed_time is NaT when no download).
sample_train['click_time'] = pd.to_datetime(sample_train['click_time'])
sample_train['attributed_time'] = pd.to_datetime(sample_train['attributed_time'])
# + _cell_guid="91f0718d-586b-49f9-903b-97d556e4b33e" _uuid="696f2b26ed87d5555afb9ebbc882f38fecf04036"
sample_train.info()
# + _cell_guid="00f4251c-1b58-48e5-ac49-1d5b0db71641" _uuid="88483d619133325071bc9333e65f7201fa7ed7b8"
# Converting clicks only (rows with a download), ordered by ip then time.
sample_click_list = sample_train[~sample_train['attributed_time'].isnull()].sort_values(['ip', 'click_time']).reset_index().drop(['index', 'is_attributed'], axis =1)
sample_click_list
# + _cell_guid="c228778a-7888-4742-9423-737c00af6120" _uuid="117ca63cb7ec1632359b74ce4e0f2885124368f1"
sample_click_list.ip.unique().size
# + _cell_guid="a1fc4735-8373-4229-9e3b-f44e68aab72e" _uuid="190265d7d730c0945b1e64ae87932753fe0a0a75"
# Cardinality of every column in the sample.
for col in sample_train:
    print (col,'\t',sample_train[col].unique().size)
# + _cell_guid="e0068105-5e9d-474f-880e-5be423e03945" _uuid="9c86d74ee86dd7affff3994ccc07298722705d89"
sample_click_list.os.sort_values().unique()
# + [markdown] _cell_guid="b980ef5f-5a59-463b-8a05-cc3d14854a21" _uuid="6a38cd9b35b4e69d73cf7b2606f62d586da10d8a"
# **Visualization**
# + _cell_guid="20b6cb3a-b74d-45f3-ba72-fec1ebec2961" _uuid="92c1f557149cb420042f614fcd0831cbeb9bfdf3"
def _plot_top8(series, ax, color, title):
    """Bar-plot the 8 most frequent values of *series* as a fraction of all rows."""
    (series.value_counts()[:8] / series.count()).plot.bar(
        color=color, ax=ax, fontsize=16)
    ax.set_title(title)

# Compare value distributions over all clicks (violet) vs. converting
# clicks only (gold), for os/device and app/channel.
fig, axarr = plt.subplots(2, 2, figsize=(24, 8))
_plot_top8(sample_train.os, axarr[0][0], 'mediumvioletred', 'Most common OS')
_plot_top8(sample_click_list.os, axarr[0][1], 'gold', 'Most common OS of clicks')
_plot_top8(sample_train.device, axarr[1][0], 'mediumvioletred', 'Most common Devices')
_plot_top8(sample_click_list.device, axarr[1][1], 'gold', 'Most common Devices of clicks')
plt.subplots_adjust(hspace=.3)
sns.despine(bottom=True, left=True)

fig_2, axarr_2 = plt.subplots(2, 2, figsize=(24, 8))
_plot_top8(sample_train.app, axarr_2[0][0], 'mediumvioletred', 'Most common app')
_plot_top8(sample_click_list.app, axarr_2[0][1], 'gold', 'Most common app of clicks')
_plot_top8(sample_train.channel, axarr_2[1][0], 'mediumvioletred', 'Most common Channels')
_plot_top8(sample_click_list.channel, axarr_2[1][1], 'gold', 'Most common Channels of clicks')
plt.subplots_adjust(hspace=.3)
sns.despine(bottom=True, left=True)
# + _cell_guid="74146526-307c-4206-b24f-15f4668b5929" _uuid="72de60ab89ecaac5bb4e0f9145e63d45802afb3a"
# Pairwise correlations of the numeric columns of the whole sample.
corr_whole = sample_train.corr()
axis_whole = plt.axes()
sns.heatmap(corr_whole, annot=True, ax=axis_whole)
axis_whole.set_title('Entire Sample Dataset Features')
# + _uuid="03deacccc2cc5be4fee17aef8be14ee5cef876cc"
# Work on a copy so added helper columns don't pollute sample_train.
temp_df = sample_train.copy()
# + _uuid="fceb09bf6dc2ccc00a0692dbe35a07726c1c0251"
#temp_df.click_time.map(lambda x : print (x, '\t\t', x.round('0.25H')))
# Bucket each click timestamp at three granularities for time-series plots.
temp_df['click_time_round_hour'] = temp_df['click_time'].dt.round('H')
temp_df['click_time_round_half'] = temp_df['click_time'].dt.round('0.5H')
temp_df['click_time_round_qtr'] = temp_df['click_time'].dt.round('15min')
temp_df[['click_time', 'click_time_round_hour', 'click_time_round_half', 'click_time_round_qtr']]
# + _uuid="75f86464c8263a0c15d7811b360570033bff6308"
# Clicks (left, violet) and downloads (right, green) over calendar time,
# one panel row per rounding granularity. attributed_time is non-null only
# for downloads, so counting it counts downloads.
fig, axx = plt.subplots(3, 2, figsize=(30, 20))
granularities = [
    ('click_time_round_hour', 'Hour'),
    ('click_time_round_half', 'Half Hour'),
    ('click_time_round_qtr', 'Quarter Hour'),
]
series_specs = [
    ('click_time', 'Clicks', 'mediumvioletred'),
    ('attributed_time', 'Downloads', 'darkgreen'),
]
for row, (bucket_col, label) in enumerate(granularities):
    for col, (count_col, name, color) in enumerate(series_specs):
        temp_df.groupby(bucket_col)[count_col].count().plot(
            ax=axx[row][col], color=color, fontsize=12)
        axx[row][col].set_title('{} by {}'.format(name, label))
        axx[row][col].set_xlabel('Time', fontsize=16)
        axx[row][col].set_ylabel(name, fontsize=16)
plt.subplots_adjust(hspace=.5)
sns.despine(bottom=True, left=True)
# + _uuid="93e5e38feea55d2d41b16c9b390d2fe71e3a14f1"
gc.collect()
# + _uuid="20127489c9248577cec25fd5fbd1b943eee76441"
# Keep only the time-of-day part of each click; parsing the "HH:MM:SS"
# string back through to_datetime yields a datetime (anchored to today)
# that still supports .dt.round below.
temp_df['temp_time'] = pd.to_datetime(temp_df.click_time.map(lambda x : str(x).split(' ')[1]))
# + _uuid="1b5a620ce88a96a267a3fddeedec4ab48ba4f5c5"
# Same three rounding granularities, now on time-of-day only.
temp_df['temp_time_hour'] = temp_df['temp_time'].dt.round('H')
temp_df['temp_time_half'] = temp_df['temp_time'].dt.round('0.5H')
temp_df['temp_time_qtr'] = temp_df['temp_time'].dt.round('15min')
# + _uuid="80b3c226441913d28ca7d1ff8d8fd82242f022c0"
# Strip the (meaningless) date component, leaving datetime.time values.
temp_df['temp_time_hour'] = temp_df['temp_time_hour'].map(lambda x: x.time())
temp_df['temp_time_half'] = temp_df['temp_time_half'].map(lambda x: x.time())
temp_df['temp_time_qtr'] = temp_df['temp_time_qtr'].map(lambda x: x.time())
# + _uuid="c88cdfc585ee634b7dce2e94baa2b88d127eea3b"
temp_df[['temp_time','temp_time_hour','temp_time_half','temp_time_qtr']].head()
# + _uuid="cb08f597c94484fd9bc05517acdd88e79ec499fd"
# Same click/download panels as above, but aggregated by time of day
# (all days folded together) instead of calendar time.
fig, axx = plt.subplots(3, 2, figsize=(30, 20))
granularities = [
    ('temp_time_hour', 'Hour'),
    ('temp_time_half', 'Half Hour'),
    ('temp_time_qtr', 'Quarter Hour'),
]
series_specs = [
    ('click_time', 'Clicks', 'mediumvioletred'),
    ('attributed_time', 'Downloads', 'darkgreen'),
]
for row, (bucket_col, label) in enumerate(granularities):
    for col, (count_col, name, color) in enumerate(series_specs):
        temp_df.groupby(bucket_col)[count_col].count().plot(
            ax=axx[row][col], color=color, fontsize=12)
        axx[row][col].set_title('{} by {}'.format(name, label))
        axx[row][col].set_xlabel('Time', fontsize=16)
        axx[row][col].set_ylabel(name, fontsize=16)
plt.subplots_adjust(hspace=.5)
sns.despine(bottom=True, left=True)
# + [markdown] _uuid="0fa17ffdcb116e44a76298e3142a9bd5d51244f6"
# **Target Encoding** (May need to +1 smoothing?)
# + _uuid="2e63429d01816f92291765cc10a3b6fca16b6349"
temp_df.columns
# + _uuid="f27a8be87a7ed84a822ebe8d91409c8592e399f2"
temp_df[temp_df.channel == 3].is_attributed.value_counts()
# + _uuid="2cff97e2fd682f0fadd1207841f3b310595dfb6e"
# Per-value conversion rate for each feature: downloads / occurrences,
# with 0 for values that never converted (NaN after the division).
# (No smoothing yet — see the markdown note above about +1 smoothing.)
target_ip = (temp_df[temp_df.is_attributed == True].ip.value_counts()/temp_df.ip.value_counts()).fillna(0)
target_app = (temp_df[temp_df.is_attributed == True].app.value_counts()/temp_df.app.value_counts()).fillna(0)
target_device = (temp_df[temp_df.is_attributed == True].device.value_counts()/temp_df.device.value_counts()).fillna(0)
target_os = (temp_df[temp_df.is_attributed == True].os.value_counts()/temp_df.os.value_counts()).fillna(0)
target_channel = (temp_df[temp_df.is_attributed == True].channel.value_counts()/temp_df.channel.value_counts()).fillna(0)
target_temp_hour = (temp_df[temp_df.is_attributed == True].temp_time_hour.value_counts()/temp_df.temp_time_hour.value_counts()).fillna(0)
target_temp_half = (temp_df[temp_df.is_attributed == True].temp_time_half.value_counts()/temp_df.temp_time_half.value_counts()).fillna(0)
target_temp_qtr = (temp_df[temp_df.is_attributed == True].temp_time_qtr.value_counts()/temp_df.temp_time_qtr.value_counts()).fillna(0)
# + _uuid="6794c5df6cd3e7653884db8f34983300d5916d0d"
list(target_temp_hour[temp_df.temp_time_hour].values)
# + _uuid="63acbf519ab82763d9b08a55d34df8c15a8c5c7d"
# Map each row's feature value to its conversion rate. Indexing the rate
# Series by a whole column yields one rate per row; reset_index/drop (or
# list(...) for the time columns) discards the index so assignment aligns
# by position with temp_df's rows.
temp_df['target_ip'] = target_ip[temp_df.ip].reset_index().drop('index',axis=1)
temp_df['target_app'] = target_app[temp_df.app].reset_index().drop('index',axis=1)
temp_df['target_device'] = target_device[temp_df.device].reset_index().drop('index',axis=1)
temp_df['target_os'] = target_os[temp_df.os].reset_index().drop('index',axis=1)
temp_df['target_channel'] = target_channel[temp_df.channel].reset_index().drop('index',axis=1)
temp_df['target_temp_time_hour'] = list(target_temp_hour[temp_df.temp_time_hour].values)
temp_df['target_temp_time_half'] = list(target_temp_half[temp_df.temp_time_half].values)
temp_df['target_temp_time_qtr'] = list(target_temp_qtr[temp_df.temp_time_qtr].values)
#temp_df['target_temp_time_hour'] = target_temp_hour[temp_df.temp_time_hour].reset_index().drop('index',axis=1)
#temp_df['target_temp_time_half'] = target_temp_half[temp_df.temp_time_half].reset_index().drop('index',axis=1)
#temp_df['target_temp_time_qtr'] = target_temp_qtr[temp_df.temp_time_qtr].reset_index().drop('index',axis=1)
# + [markdown] _uuid="14fc07ddcef1620121efb80b5c0cab21a5e9cdb2"
# assert temp_time_half_list.size == 100000
# assert ip_list.size == 100000
# assert temp_time_hour_list.size == 100000
# assert temp_time_qtr_list.size == 100000
# assert channel_list.size == 100000
# assert os_list.size == 100000
# assert app_list.size == 100000
# assert device_list.size == 100000
# + _uuid="da1692fd2ccdf5240feebeef86b497807a53ca41"
#temp_df['dummy'] = ip_list.reset_index().drop('index', axis=1)
# + _uuid="f9c2475a55aee788e262f61f7e04f4d750fd7173"
temp_df.head()
# + _uuid="845d2811b32e042f305a6acc3520359c765e84de"
| TalkingData/EDA #1.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# name: python3
# ---
# + id="dWppQEITgjVJ" colab_type="code" outputId="a0db4933-2483-493b-9918-7df34d1852db" executionInfo={"status": "ok", "timestamp": 1583442909492, "user_tz": -60, "elapsed": 7496, "user": {"displayName": "<NAME>0119dzierski", "photoUrl": "https://lh3.googleusercontent.com/a-/AOh14Ghu1AZoNys9qmixpBnJlZ_8c8mZuipDj1nRYiMPkg=s64", "userId": "14162817076247878771"}} colab={"base_uri": "https://localhost:8080/", "height": 255}
# !pip install --upgrade tables
# !pip install eli5
# + id="KajG5wvYgusH" colab_type="code" colab={}
import pandas as pd
import numpy as np
from sklearn.dummy import DummyRegressor
from sklearn.tree import DecisionTreeRegressor
from sklearn.metrics import mean_absolute_error as mae
from sklearn.model_selection import cross_val_score
import eli5
from eli5.sklearn import PermutationImportance
# + [markdown] id="zsWTaCZ6icfb" colab_type="text"
# # Wczytywanie danych
# + id="CwAyxFI6h6QE" colab_type="code" outputId="080de6df-9860-4048-f0f2-ec7e158f53f7" executionInfo={"status": "ok", "timestamp": 1583442909497, "user_tz": -60, "elapsed": 1083, "user": {"displayName": "<NAME>", "photoUrl": "https://lh3.googleusercontent.com/a-/AOh14Ghu1AZoNys9qmixpBnJlZ_8c8mZuipDj1nRYiMPkg=s64", "userId": "14162817076247878771"}} colab={"base_uri": "https://localhost:8080/", "height": 34}
# cd "/content/drive/My Drive/Colab Notebooks/dataworkshop_matrix/Matrix_2_cars"
# + id="Ey5pymceisBr" colab_type="code" outputId="917537ff-6679-493f-e81a-e6899b30c50a" executionInfo={"status": "ok", "timestamp": 1583444553720, "user_tz": -60, "elapsed": 2519, "user": {"displayName": "<NAME>", "photoUrl": "https://lh3.googleusercontent.com/a-/AOh14Ghu1AZoNys9qmixpBnJlZ_8c8mZuipDj1nRYiMPkg=s64", "userId": "14162817076247878771"}} colab={"base_uri": "https://localhost:8080/", "height": 34}
# Car-advert dataset cached as HDF5; df is the working frame for the rest
# of the notebook.
df = pd.read_hdf('data/car.h5')
df.shape
# + id="NoggRkx-i_XI" colab_type="code" colab={"base_uri": "https://localhost:8080/", "height": 187} outputId="f08730d6-4970-4c6c-cb42-ef1330682ba6" executionInfo={"status": "ok", "timestamp": 1583442919121, "user_tz": -60, "elapsed": 922, "user": {"displayName": "<NAME>\u0119dzierski", "photoUrl": "https://lh3.googleusercontent.com/a-/AOh14Ghu1AZoNys9qmixpBnJlZ_8c8mZuipDj1nRYiMPkg=s64", "userId": "14162817076247878771"}}
df.columns
# + [markdown] id="dqqTi6MFjaD2" colab_type="text"
# # Dummy model
# + id="PHVJiljhjvul" colab_type="code" colab={"base_uri": "https://localhost:8080/", "height": 34} outputId="965687be-5023-43eb-dbf7-d813d4fb4510" executionInfo={"status": "ok", "timestamp": 1583443058224, "user_tz": -60, "elapsed": 857, "user": {"displayName": "<NAME>", "photoUrl": "https://lh3.googleusercontent.com/a-/AOh14Ghu1AZoNys9qmixpBnJlZ_8c8mZuipDj1nRYiMPkg=s64", "userId": "14162817076247878771"}}
df.select_dtypes(np.number).columns
# + id="7e2q86jzjInm" colab_type="code" colab={"base_uri": "https://localhost:8080/", "height": 34} outputId="8180667e-5e91-48e7-fc67-6c2532bb7947" executionInfo={"status": "ok", "timestamp": 1583443285271, "user_tz": -60, "elapsed": 893, "user": {"displayName": "<NAME>", "photoUrl": "https://lh3.googleusercontent.com/a-/AOh14Ghu1AZoNys9qmixpBnJlZ_8c8mZuipDj1nRYiMPkg=s64", "userId": "14162817076247878771"}}
# NOTE(review): this baseline uses car_id as both feature and target — it is
# a placeholder to exercise the DummyRegressor/MAE pipeline, not a price
# model (the real target, price_value, is used further below).
feat = [ 'car_id' ]
X = df[ feat ].values
y = df['car_id'].values
# DummyRegressor ignores X and predicts the mean of y.
model = DummyRegressor()
model.fit(X,y)
y_pred = model.predict(X)
mae(y, y_pred)
# + id="jkndTkS5j8TV" colab_type="code" colab={"base_uri": "https://localhost:8080/", "height": 34} outputId="6bbfc460-a7a8-4e0c-fe5a-0f85a8c92bd4" executionInfo={"status": "ok", "timestamp": 1583443403984, "user_tz": -60, "elapsed": 943, "user": {"displayName": "<NAME>", "photoUrl": "https://lh3.googleusercontent.com/a-/AOh14Ghu1AZoNys9qmixpBnJlZ_8c8mZuipDj1nRYiMPkg=s64", "userId": "14162817076247878771"}}
[ x for x in df.columns if 'price' in x]
# + id="dkeKjCc4lOBj" colab_type="code" colab={"base_uri": "https://localhost:8080/", "height": 68} outputId="41b97acf-6041-45c7-b748-882979737814" executionInfo={"status": "ok", "timestamp": 1583443439997, "user_tz": -60, "elapsed": 931, "user": {"displayName": "<NAME>", "photoUrl": "https://lh3.googleusercontent.com/a-/AOh14Ghu1AZoNys9qmixpBnJlZ_8c8mZuipDj1nRYiMPkg=s64", "userId": "14162817076247878771"}}
df['price_currency'].value_counts()
# + id="jwBMfcAzlWwf" colab_type="code" colab={"base_uri": "https://localhost:8080/", "height": 34} outputId="403f3026-43f3-4110-cf32-7010b337308a" executionInfo={"status": "ok", "timestamp": 1583444570800, "user_tz": -60, "elapsed": 930, "user": {"displayName": "<NAME>", "photoUrl": "https://lh3.googleusercontent.com/a-/AOh14Ghu1AZoNys9qmixpBnJlZ_8c8mZuipDj1nRYiMPkg=s64", "userId": "14162817076247878771"}}
# Keep a single currency (PLN) by dropping EUR-priced adverts. Note this
# filter preserves the original index labels (no reset_index).
df = df[ df['price_currency'] != 'EUR' ]
df.shape
# + [markdown] id="lWbJQpkrl9qI" colab_type="text"
# Features
# + id="ZhgFsXSml4UY" colab_type="code" colab={}
SUFFIX_CAT = '__cat'
# Label-encode every column: append a factorized '<col>__cat' copy, or
# re-factorize in place when the column is already a '__cat' one.
# - Iterate over a snapshot of the columns so the '__cat' columns added
#   inside the loop are not themselves visited.
# - Use .iloc[0] (positional), not [0] (label): after the EUR-row filter
#   above, index label 0 may no longer exist and df[feat][0] would raise.
for feat in list(df.columns):
    # Columns whose cells are lists (e.g. feature lists) cannot be factorized.
    if isinstance(df[feat].iloc[0], list): continue

    factorized_values = df[feat].factorize()[0]
    if SUFFIX_CAT in feat:
        df[feat] = factorized_values
    else:
        df[feat + SUFFIX_CAT] = factorized_values
# + colab_type="code" outputId="f69ae683-ee9f-4711-bdf6-3d5d204c025d" executionInfo={"status": "ok", "timestamp": 1583444762908, "user_tz": -60, "elapsed": 903, "user": {"displayName": "<NAME>", "photoUrl": "https://lh3.googleusercontent.com/a-/AOh14Ghu1AZoNys9qmixpBnJlZ_8c8mZuipDj1nRYiMPkg=s64", "userId": "14162817076247878771"}} id="k9H8pa5SoknC" colab={"base_uri": "https://localhost:8080/", "height": 34}
# Model features: every factorized ('__cat') column except anything
# price-derived, since price is the prediction target.
cat_feats = [col for col in df.columns
             if SUFFIX_CAT in col and 'price' not in col]
len(cat_feats)
# + id="YQo94u3xoqiJ" colab_type="code" colab={"base_uri": "https://localhost:8080/", "height": 34} outputId="a2be5ebe-c2e8-4f31-d140-504f5d55c8bb" executionInfo={"status": "ok", "timestamp": 1583444776608, "user_tz": -60, "elapsed": 5019, "user": {"displayName": "<NAME>", "photoUrl": "https://lh3.googleusercontent.com/a-/AOh14Ghu1AZoNys9qmixpBnJlZ_8c8mZuipDj1nRYiMPkg=s64", "userId": "14162817076247878771"}}
X = df[cat_feats].values
y = df['price_value'].values
# Shallow decision tree as a first real baseline, scored with 3-fold CV on
# negative mean absolute error (closer to 0 is better).
model = DecisionTreeRegressor(max_depth=5)
scores = cross_val_score(model, X, y, cv=3, scoring='neg_mean_absolute_error')
np.mean(scores)
# + id="4Dz-SByApOK4" colab_type="code" colab={"base_uri": "https://localhost:8080/", "height": 391} outputId="23bf381a-e023-41f9-82c0-12e1b8e595e0" executionInfo={"status": "ok", "timestamp": 1583445018869, "user_tz": -60, "elapsed": 41108, "user": {"displayName": "<NAME>0119dzierski", "photoUrl": "https://lh3.googleusercontent.com/a-/AOh14Ghu1AZoNys9qmixpBnJlZ_8c8mZuipDj1nRYiMPkg=s64", "userId": "14162817076247878771"}}
# Feature importances via permutation on the tree baseline.
# NOTE(review): importances are computed on the training data itself, which
# tends to be optimistic — consider a held-out split.
m = DecisionTreeRegressor(max_depth=5)
m.fit(X, y)
imp = PermutationImportance(m, random_state=0).fit(X,y)
eli5.show_weights(imp, feature_names=cat_feats)
# + id="L9jsIU_9rBcu" colab_type="code" colab={}
| Matrix_2_cars/M2_day3_simple_model.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: ee
# language: python
# name: ee
# ---
# # Sentinel-2 Image Downloading
#
# The notebook presents the data download script from Google Earth Engine, formatting the images for preprocessing.
# ### How to Install
# + [markdown] jupyter={"source_hidden": true}
# 1. Install conda environment.
#
# ```
# conda env create -f processing_environment.yml
# conda activate ee
# ```
#
# 2. Install kernel.
#
# ```
# python -m ipykernel install --user --name ee --display-name "ee kernel"
# ```
#
# 3. In new notebook from jupylab, select kernel 'ee kernel'
#
# Source on how to install ee: https://developers.google.com/earth-engine/python_install-conda
# -
# ### How to Add New Areas
# + [markdown] jupyter={"source_hidden": true}
# In utils/gee_settings.py
# 1. In 'areas' list, include area, removing spaces i.e. Villa del Rosario > villadelrosario
# 2. In BBOX dict, add bounding box arranged as a list of 4 numbers, upper left and lower right
# 3. In CLOUD_PARAMS dict, specify cloud filter and if will be masked or not
# 4. In admin2RefN, add name in Admin Boundary shapefile
#
# Once downloaded file shows in gs://immap-gee
# 1. check if the area is split into multiple files
# 2. If yes, add area to multi-part list in Section Input params
# -
# ## Load tools
# +
import geopandas as gpd
from fiona.crs import to_string
import pathlib
from tqdm import tqdm
import sys
sys.path.insert(0, '../utils')
from gee import sen2median, deflatecrop1
from gee_settings import BBOX, CLOUD_PARAMS, admin2RefN
data_dir = "../data/"
# +
adm_dir = data_dir + 'admin_bounds/'
img_dir = data_dir + 'images/'
tmp_dir = data_dir + 'tmp/'
dirs = [adm_dir, img_dir, tmp_dir]
# Create the working directories. mkdir(parents=True, exist_ok=True) is a
# no-op for directories that already exist, so no separate exists() check is
# needed. (Using a Path as a context manager was a deprecated no-op and has
# been removed in newer Python versions.)
for dir_ in dirs:
    pathlib.Path(dir_).mkdir(parents=True, exist_ok=True)
# get area shape file
# # !gsutil cp gs://immap-masks/admin_boundaries/admin_bounds.gpkg {adm_dir}
gdf = gpd.read_file(adm_dir + 'admin_bounds.gpkg')
# Tag the admin boundaries as plain lat/lon WGS84 so later crops line up.
fcrs = to_string({'init': 'epsg:4326', 'no_defs': True})
gdf.crs = fcrs
# -
# ## Input params
# +
PRODUCT = 'COPERNICUS/S2' # L1C
years = ['2015-2016', '2017-2018', '2019-2020']

def get_minmaxdt(year_str):
    """Expand a 'YYYY-YYYY' span into (start, end) ISO dates.

    e.g. '2015-2016' -> ('2015-01-01', '2016-12-31')
    """
    first, last = year_str.split('-')[:2]
    return f'{first}-01-01', f'{last}-12-31'
# Municipalities to export, spelled without spaces (see utils/gee_settings.py
# for the matching BBOX/CLOUD_PARAMS/admin2RefN entries). Already-downloaded
# batches are kept commented out above the active list.
areas = [
    # 'riohacha', 'maicao', 'uribia',
    # 'arauca', 'arauquita', 'cucuta', 'tibu', 'soacha', #'villadelrosario', 'saravena',
    # 'bogota', 'sabanalarga', 'soledad', 'santamarta', 'barranquilla',
    # 'inirida','puertocarreno2', 'bucaramanga', 'monteria', 'fonseca',
    # 'fortul', 'fundacion', 'malambo', 'manaure', 'ocana', 'pasto', 'puertosantander', 'saravena', 'villadelrosario', 'tame', 'yopal',
    # 'sabanalargaatlantico', 'cumbal', 'cali',
    # 'valledupar', 'cienaga', 'sanjuandelcesar', 'baranoa', 'convencion', 'albania', 'santotomas', 'polonuevo', 'elbanco', 'dibulla', 'turbaco', 'cartagena', 'planadas', 'medellin', 'puertocolombia',
    # 'facatativa','bosconia','puertogaitan','tubara','lapazcesar','cota','sanmarcos','pitalito','agustincodazzi','floridablanca','piedecuesta','itagui','sincelejo','palmira','bello',
    'pereira',
    'chia',
    'pamplona',
    'rionegroantioquia',
    'lospatios',
    'envigado',
    'magangue',
    'armenia',
    'jamundi',
    'barrancabermeja',
    'zipaquira',
    'ibague',
    'chinacota',
    'barrancas',
    'tunja',
    'dosquebradas',
    'tumaco',
    'mosquera',
    'manizales',
    'ipiales',
    'giron',
    'villavicencio',
    'madrid',
]
# Areas whose GEE export is split into multiple tiles (see notebook intro).
multipart = ['arauca', 'tibu', 'bogota', 'puertocarreno2']
# -
# ## Download from GEE
# Export a cloud-filtered Sentinel-2 median composite per area and year
# range to the GCS bucket (see utils/gee.py for sen2median).
for area in areas:
    for year in years:
        cloud_pct, mask = CLOUD_PARAMS[area][year]
        min_dt, max_dt = get_minmaxdt(year)

        sen2median(
            BBOX[area],
            FILENAME = f'gee_{area}_{year}',
            min_dt = min_dt,
            max_dt = max_dt,
            cloud_pct = cloud_pct,
            mask = mask,
            PRODUCT = PRODUCT,
            verbose = 1
        )
# ## Deflate and crop
# create shapefiles for cropping
# One shapefile per municipality, extracted from the admin boundaries layer.
for area in areas:
    area1 = gdf[gdf['admin2RefN'] == admin2RefN[area]]
    area1.to_file(adm_dir + area + '.shp')
# +
# Collect the GEE export names to post-process.
def _export_name(area, year):
    """Return the uploaded filename for one area/year export."""
    name = f'gee_{area}_{year}'
    if area in multipart:
        # just get the largest part
        name += '0000000000-0000000000'
    return name

files_ = [_export_name(area, year) for area in areas for year in years]
# -
# Deflate-compress each export, crop it to its municipality shapefile, and
# upload to the dated GCS prefix (see utils/gee.py for deflatecrop1).
for f in tqdm(files_):
    deflatecrop1(
        raw_filename = f,
        output_dir = img_dir,
        adm_dir = adm_dir,
        tmp_dir = tmp_dir,
        bucket = 'gs://immap-images/20200613/',
        clear_local = True
    )
# Leftover scratch cell from the notebook; has no effect.
1+1
| notebooks/00_Data_Download.ipynb |
# -*- coding: utf-8 -*-
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .jl
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Julia 1.4.1
# language: julia
# name: julia-1.4
# ---
# + [markdown] deletable=true editable=true
# ###### December 2019 - <NAME>
#
# Diagonalizing the Hamiltonian matrix for the transverse field Ising model to find the energy eigenvalues and eigenkets. Calculate the groundstate magnetization.
#
# We will use the same Hamiltonian convention as the QMC program:
# $$
# H = -J\sum_{\langle i j \rangle} \sigma^z_i \sigma^z_j - B \sum_i \sigma^x_i - h \sum_i \sigma^z_i
# $$
# where ${\bf \sigma}_i$ are Pauli operators. In this convention, the 1+1 CFT is at $B/J = 1$ and $h=0$.
# + deletable=true editable=true
using LinearAlgebra
# + deletable=true editable=true
# System parameters for the open transverse-field Ising chain.
N = 6        # number of spins
Dim = 2^N    # Hilbert-space dimension
J = 1. #exchange interaction
B = 1. #transverse field
h = 0. #longitudinal field
Hamiltonian = zeros(Dim,Dim) #This is your 2D Hamiltonian matrix
Spin2 = 0 #give it a scope outside of the loop
# Build H in the sigma^z product basis: basis state = bit pattern of Ket
# (bit 0 -> spin -1, bit 1 -> spin +1).
for Ket = 0:Dim-1 #Loop over Hilbert Space
    Diagonal = 0.
    for SpinIndex = 0:N-2 #Loop over spin index (base zero, stop one spin before the end of the chain)
        Spin1 = 2*((Ket>>SpinIndex)&1) - 1
        NextIndex = SpinIndex + 1
        Spin2 = 2*((Ket>>NextIndex)&1) - 1
        Diagonal = Diagonal - J*Spin1*Spin2 - h*Spin1 #spins are +1 and -1
    end
    Diagonal = Diagonal - h*Spin2 #this is the spin at the end of the chain
    Hamiltonian[Ket+1,Ket+1] = Diagonal
    # Off-diagonal part: sigma^x flips one spin, connecting Ket to Bra.
    for SpinIndex = 0:N-1
        bit = 2^SpinIndex #The "label" of the bit to be flipped
        Bra = Ket ⊻ bit #Binary XOR flips the bit
        Hamiltonian[Bra+1,Ket+1] = -B
    end
end
Hamiltonian = Hermitian(Hamiltonian);
# + [markdown] deletable=true editable=true
# In the Julia LinearAlgebra package, the eigen function finds eigenvalues and eigenvectors. They are ordered; i.e. the groundstate energy corresponds to index 1
# + deletable=true editable=true
Diag = eigen(Hamiltonian);  # eigenvalues ascending; index 1 = ground state
# + deletable=true editable=true
GroundState = Diag.vectors[:, 1]; #this gives the groundstate eigenvector
Diag.values[1] / N  # ground-state energy per site
# + deletable=true editable=true
##### Calculate the groundstate magnetization <m^2> in the Z direction
magnetization = zeros(Dim)
abs_mag = zeros(Dim)
mag_squared = zeros(Dim)
# Total S^z of every basis state: sum of the +/-1 spins read from each
# state index's bit pattern.
SumSz = dropdims(sum(@. (2 * (((0:Dim-1) >> (0:N-1)') & 1) - 1); dims=2); dims=2)
AbsSumSz = abs.(SumSz)
SumSzSq = abs2.(SumSz)
# Expectation values in each eigenstate: |c_i|^2-weighted sums over the basis.
magnetization = SumSz' * abs2.(Diag.vectors)
abs_mag = AbsSumSz' * abs2.(Diag.vectors)
mag_squared = SumSzSq' * abs2.(Diag.vectors)
(magnetization[1] / N), (abs_mag[1] / N), (mag_squared[1] / (N*N))
# + deletable=true editable=true tags=[]
# Canonical-ensemble observables at several inverse temperatures β.
beta_vals = [20,15,10,5,3,2,1,0.8,0.5,0.2]
ED =zeros(Float64,length(beta_vals))  # stores M2 per β
idx = 1
for β in beta_vals
    weights = exp.(-β * Diag.values)  # unnormalised Boltzmann weights
    Z = sum(weights)                  # partition function
    E = dot(Diag.values, weights) / (N*Z)  # thermal energy per site
    # Heat capacity of the whole chain: β²(⟨E_tot²⟩ − ⟨E_tot⟩²); N*E is ⟨E_tot⟩.
    C = (β^2 * ((dot(Diag.values .^2, weights) / Z) - (N*E)^2))
    # magnetization of thermal state
    M = dot(weights, magnetization) / (N*Z)
    M_abs = dot(weights, abs_mag) / (N*Z)
    M2 = dot(weights, mag_squared) / (N*N*Z)
    println(β," ",E," ",C," ",M," ",M_abs," ",M2)
    #ED[idx,1] = β
    ED[idx,1] = M2
    idx += 1
end
# + deletable=true editable=true
| notebooks/TFIMdiag.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# language: python
# name: python3
# ---
# ### Read data
import pandas as pd
import numpy as np
from sklearn.model_selection import train_test_split
from sklearn.utils import shuffle
snips = pd.read_csv('complete.csv')  # ground-truth transcriptions
asr = pd.read_csv('snips_ASRcomplete.csv', index_col=0)  # ASR transcripts
snips.head()
asr.head()
# ### Merge GT and ASR data
# +
# Build a 'sampleXXXX-YYYY' uniqueId on the GT side so it can be joined to
# the ASR table: speakerId from the zero-padded row index, sampleId from the
# numeric file stem of 'path'.
snips = snips.reset_index()
snips['speakerId'] = snips['index'].apply(lambda x: '{0:04}'.format(x))
snips['speakerId'] = snips['speakerId'].apply(lambda x: 'sample'+str(x))
snips['sampleId'] = snips['path'].apply(lambda x: int(x.split("/")[-1].split(".")[0]))
snips['sampleId'] = snips['sampleId'].apply(lambda x: '{0:04}'.format(x))
snips['uniqueId'] = snips[['speakerId', 'sampleId']].agg('-'.join, axis=1)
snips = snips.drop(columns='index')
snips_asr = pd.merge(snips,asr,on='uniqueId')  # inner join on uniqueId
snips_asr
# -
# -
# ### Split into train/valid/test (80/10/10)
# +
# The intent name is embedded in the 'semantics' string after a fixed 12-char
# prefix, delimited by a single quote.
intent_text_list = [semantics[12:].split("'")[0] for semantics in snips_asr['semantics']] #"SwitchLightOn"
snips_asr['intent'] = intent_text_list
train, valtest = train_test_split(snips_asr, test_size=0.2, random_state=42)
valid, test = train_test_split(valtest, test_size=0.5, random_state=42)  # 10/10 of the whole
# -
len(train), len(valid), len(test)
# ### Save the original (this dataset serves as gt_gt)
train[['path','transcription','asr_transcript','semantics', 'intent']].to_csv('train_data.csv')
valid[['path','transcription','asr_transcript','semantics', 'intent']].to_csv('valid_data.csv')
test[['path','transcription','asr_transcript','semantics', 'intent']].to_csv('test_data.csv')
# ### Merge the ground truth and asr transcripts into ONE column
# +
# Read each saved split twice: one copy will keep the ground-truth text
# ('raw'), the other the ASR text ('asr').
train_raw = pd.read_csv("train_data.csv", index_col=0)
train_asr = pd.read_csv("train_data.csv", index_col=0)
valid_raw = pd.read_csv("valid_data.csv", index_col=0)
valid_asr = pd.read_csv("valid_data.csv", index_col=0)
test_raw = pd.read_csv("test_data.csv", index_col=0)
test_asr = pd.read_csv("test_data.csv", index_col=0)
# -
# For the 'asr' copy, drop the GT column and rename asr_transcript ->
# transcription by reassigning the full column list (positional rename).
train_raw['type'] = 'raw'
train_raw = train_raw.drop(columns=['asr_transcript'])
train_asr['type'] = 'asr'
train_asr = train_asr.drop(columns=['transcription'])
train_asr.columns = ['path', 'transcription', 'semantics', 'intent', 'type']
display(train_raw.head(5))
display(train_asr.head(5))
valid_raw['type'] = 'raw'
valid_raw = valid_raw.drop(columns=['asr_transcript'])
valid_asr['type'] = 'asr'
valid_asr = valid_asr.drop(columns=['transcription'])
valid_asr.columns = ['path', 'transcription', 'semantics', 'intent', 'type']
display(valid_raw.head(5))
display(valid_asr.head(5))
test_raw['type'] = 'raw'
test_raw = test_raw.drop(columns=['asr_transcript'])
test_asr['type'] = 'asr'
test_asr = test_asr.drop(columns=['transcription'])
test_asr.columns = ['path', 'transcription', 'semantics', 'intent', 'type']
display(test_raw.head(5))
display(test_asr.head(5))
# Stack raw + asr rows (doubles each split), then shuffle reproducibly.
train_combined = pd.concat([train_raw, train_asr])
valid_combined = pd.concat([valid_raw, valid_asr])
test_combined = pd.concat([test_raw, test_asr])
len(train_combined), len(valid_combined), len(test_combined)
len(train_raw), len(valid_raw), len(test_raw)
train_combined_sh = shuffle(train_combined, random_state=42)
valid_combined_sh = shuffle(valid_combined, random_state=42)
test_combined_sh = shuffle(test_combined, random_state=42)
# ### Save gt_asr
# +
# train_raw[['path','transcription','semantics', 'intent', 'type']].to_csv('train_data.csv')
# valid_raw[['path','transcription','semantics', 'intent', 'type']].to_csv('valid_data.csv')
# test_asr[['path','transcription','semantics', 'intent', 'type']].to_csv('test_data.csv')
# -
# ### Save gtasr_gt
# +
# train_combined_sh[['path','transcription','semantics', 'intent', 'type']].to_csv('train_data.csv')
# valid_combined_sh[['path','transcription','semantics', 'intent', 'type']].to_csv('valid_data.csv')
# test_raw[['path','transcription','semantics', 'intent', 'type']].to_csv('test_data.csv')
# -
# ### Save gtasr_asr
# +
# train_combined_sh[['path','transcription','semantics', 'intent', 'type']].to_csv('train_data.csv')
# valid_combined_sh[['path','transcription','semantics', 'intent', 'type']].to_csv('valid_data.csv')
# test_asr[['path','transcription','semantics', 'intent', 'type']].to_csv('test_data.csv')
| dataprep/data_preprocessing_Snips.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: 'Python 3.6.10 64-bit (''tensorflow2_p36'': conda)'
# name: python3
# ---
# + [markdown] id="TA21Jo5d9SVq"
#
#
# 
#
# [](https://githubtocolab.com/JohnSnowLabs/spark-nlp-workshop/blob/master/tutorials/streamlit_notebooks/NER_NO.ipynb)
#
#
#
# + [markdown] id="CzIdjHkAW8TB"
# # **Detect entities in Norwegian text**
# + [markdown] id="wIeCOiJNW-88"
# ## 1. Colab Setup
# + colab={"base_uri": "https://localhost:8080/", "height": 1000} id="CGJktFHdHL1n" outputId="cf68c838-66c6-4577-812e-3e460b39fb70"
# Install PySpark and Spark NLP
# ! pip install -q pyspark==3.1.2 spark-nlp
# Install Spark NLP Display lib
# ! pip install --upgrade -q spark-nlp-display
# + [markdown] id="eCIT5VLxS3I1"
# ## 2. Start the Spark session
# + id="sw-t1zxlHTB7"
import json
import pandas as pd
import numpy as np
from pyspark.ml import Pipeline
from pyspark.sql import SparkSession
import pyspark.sql.functions as F
from sparknlp.annotator import *
from sparknlp.base import *
import sparknlp
from sparknlp.pretrained import PretrainedPipeline
# Start a local Spark session with Spark NLP's jars preloaded.
spark = sparknlp.start()
# + [markdown] id="9RgiqfX5XDqb"
# ## 3. Select the DL model
# + id="LLuDz_t40be4"
# If you change the model, re-run all the cells below.
# Applicable models: norne_840B_300, norne_6B_300, norne_6B_100
# (suffix = the GloVe embedding set the NER model was trained with)
MODEL_NAME = "norne_840B_300"
# + [markdown] id="2Y9GpdJhXIpD"
# ## 4. Some sample examples
# + id="vBOKkB2THdGI"
# Enter examples to be transformed as strings in this list
text_list = [
    """<NAME> III (født 28. oktober 1955) er en amerikansk forretningsmagnat, programvareutvikler, investor og filantrop. Han er mest kjent som medgründer av Microsoft Corporation. I løpet av sin karriere hos Microsoft hadde Gates stillingene som styreleder, administrerende direktør (CEO), president og sjef programvarearkitekt, samtidig som han var den største individuelle aksjonæren fram til mai 2014. Han er en av de mest kjente gründere og pionerene i mikrodatarevolusjon på 1970- og 1980-tallet. Han er født og oppvokst i Seattle, Washington, og grunnla Microsoft sammen med barndomsvennen <NAME> i 1975, i Albuquerque, New Mexico; det fortsatte å bli verdens største programvare for datamaskinprogramvare. Gates ledet selskapet som styreleder og administrerende direktør til han gikk av som konsernsjef i januar 2000, men han forble styreleder og ble sjef for programvarearkitekt. I løpet av slutten av 1990-tallet hadde Gates blitt kritisert for sin forretningstaktikk, som har blitt ansett som konkurransedyktig. Denne uttalelsen er opprettholdt av en rekke dommer. I juni 2006 kunngjorde Gates at han skulle gå over til en deltidsrolle hos Microsoft og på heltid ved Bill & <NAME> Foundation, den private veldedige stiftelsen som han og kona, <NAME>, opprettet i 2000. [ 9] Han overførte gradvis arbeidsoppgavene sine til <NAME> og <NAME>. Han trakk seg som styreleder for Microsoft i februar 2014 og tiltrådte et nytt verv som teknologirådgiver for å støtte den nyutnevnte administrerende direktøren Satya Nadella.""",
    """<NAME> er et oljemaleri fra 1500-tallet skapt av Leonardo. Det holdes på Louvre i Paris."""
]
# + [markdown] id="XftYgju4XOw_"
# ## 5. Define Spark NLP pipeline
# + colab={"base_uri": "https://localhost:8080/"} id="lBggF5P8J1gc" outputId="0be5a10f-010f-4e18-854e-b09b7d99268e"
# Assemble the Spark NLP pipeline:
# text -> document -> tokens -> word embeddings -> NER tags -> entity chunks.
document_assembler = DocumentAssembler() \
    .setInputCol('text') \
    .setOutputCol('document')

tokenizer = Tokenizer() \
    .setInputCols(['document']) \
    .setOutputCol('token')

# The wikiner_840B_300 is trained with glove_840B_300, so the embeddings in the
# pipeline should match. Same applies for the other available models.
if MODEL_NAME == "norne_840B_300":
    embeddings = WordEmbeddingsModel.pretrained('glove_840B_300', lang='xx') \
        .setInputCols(['document', 'token']) \
        .setOutputCol('embeddings')
elif MODEL_NAME == "norne_6B_300":
    embeddings = WordEmbeddingsModel.pretrained('glove_6B_300', lang='xx') \
        .setInputCols(['document', 'token']) \
        .setOutputCol('embeddings')
elif MODEL_NAME == "norne_6B_100":
    embeddings = WordEmbeddingsModel.pretrained('glove_100d') \
        .setInputCols(['document', 'token']) \
        .setOutputCol('embeddings')
else:
    # Fail fast: previously an unrecognised MODEL_NAME left `embeddings`
    # undefined and surfaced later as a confusing NameError.
    raise ValueError(
        'Unknown MODEL_NAME: %r; expected one of '
        'norne_840B_300, norne_6B_300, norne_6B_100' % (MODEL_NAME,)
    )

ner_model = NerDLModel.pretrained(MODEL_NAME, 'no') \
    .setInputCols(['document', 'token', 'embeddings']) \
    .setOutputCol('ner')

ner_converter = NerConverter() \
    .setInputCols(['document', 'token', 'ner']) \
    .setOutputCol('ner_chunk')

nlp_pipeline = Pipeline(stages=[
    document_assembler,
    tokenizer,
    embeddings,
    ner_model,
    ner_converter
])
# + [markdown] id="mv0abcwhXWC-"
# ## 6. Run the pipeline
# + id="EYf_9sXDXR4t"
# Fit on an empty frame (all stages are pretrained), then transform the samples.
empty_df = spark.createDataFrame([['']]).toDF('text')
pipeline_model = nlp_pipeline.fit(empty_df)
df = spark.createDataFrame(pd.DataFrame({'text': text_list}))
result = pipeline_model.transform(df)
# + [markdown] id="UQY8tAP6XZJL"
# ## 7. Visualize results
# + colab={"base_uri": "https://localhost:8080/", "height": 585} id="Ar32BZu7J79X" outputId="c2df6946-ab41-40c4-a2d3-7efa48fb290b"
from sparknlp_display import NerVisualizer
# Render the entities found in the first sample text only.
NerVisualizer().display(
    result = result.collect()[0],
    label_col = 'ner_chunk',
    document_col = 'document'
)
| tutorials/streamlit_notebooks/NER_NO.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 2
# language: python
# name: python2
# ---
# +
# %pylab inline
import time
from collections import defaultdict
import json
import pandas as pd
import os
import sys
# Python 2 hack: force UTF-8 as the default string encoding. reload(sys)
# resets sys.stdout, so the original handle is saved and restored.
stdout = sys.stdout
reload(sys)
sys.setdefaultencoding('utf-8')
sys.stdout = stdout
import numpy as np
import requests
import json
from datetime import datetime
from textblob import TextBlob
#import seaborn as sns
# +
#import matplotlib as mpl
#import matplotlib.pyplot as plt
#import seaborn.apionly as sns
#import imp
#imp.reload(mpl); imp.reload(plt); imp.reload(sns)
# -
# # Date of creation as 0 point
# ## Non-award scientists
# Load revision history of baseline (non-awarded) articles; keep one revision
# per article per day (the first encountered).
df_b=pd.read_csv("baseline_revisions.csv",sep="\t")
df_b.info()
df_b=df_b[["article","text_len","timestamp"]]
df_b=df_b[~df_b.text_len.isnull()]
df_b.timestamp=pd.to_datetime(df_b.timestamp)
print len(df_b)
df_b.timestamp=df_b.timestamp.apply(lambda x: x.date() )
df_b.drop_duplicates(["timestamp","article"],keep="first",inplace=True)
print len(df_b)
# +
# Resample each article to a weekly grid from its creation date to today,
# carrying the latest known text length forward.
df_b_w=pd.DataFrame(columns=["time_w","text_len","article","week"])
g=df_b.groupby("article")
for article_name,series in g:
    print article_name
    created=series.timestamp.min()
    print created
    index=pd.date_range(start=created,end=datetime.today(),freq="W")
    #print index
    #series.timestamp=series.timestamp.apply(lambda x: x.date() )
    #series.drop_duplicates("timestamp",keep="first",inplace=True)
    df1=pd.DataFrame(index,columns=["time_w"])
    df1.time_w=df1.time_w.apply(lambda x: x.date() )
    # NOTE(review): iloc[0] of the filtered rows presumably picks the most
    # recent revision at or before each week — assumes the CSV is ordered
    # newest-first per article; TODO confirm against the data source.
    df1["text_len"]=df1.time_w.apply(lambda x: series[series.timestamp<=x].iloc[0].text_len)
    df1["article"]=article_name
    df1["week"]=df1.index
    df_b_w=pd.concat([df_b_w,df1])
# +
#group data by week number
df_b_w.info()
g=df_b_w.groupby("week")
b_w_st_dict=defaultdict()
for week_name,series in g:
    #print week_name
    #print series.text_len.mean()
    #print series.sem().text_len
    #print series.describe().text_len
    b_w_st_dict[week_name]={"mean_w":series.text_len.mean(),"std_1":series.text_len.sem(),
                            "count":series.count().text_len}
#create df
df_b_w_st=pd.DataFrame.from_dict(b_w_st_dict,orient="index")
df_b_w_st=df_b_w_st[df_b_w_st["count"]>10]  # drop weeks with too few articles
df_b_w_st.info()
# -
plt.errorbar(df_b_w_st.index.values, df_b_w_st.mean_w, df_b_w_st.std_1,linestyle='None')
# linewidth = 1)#,linecolor="r",elinewidth=1)#linestyle='None')#, marker='^')
plt.plot(df_b_w_st.index.values, df_b_w_st.mean_w,"b",linewidth = 1)
plt.xlim(0,df_b_w_st.iloc[-1].name)
plt.ylim(0,5000)
plt.xlabel("week")
plt.ylabel("word counts")
plt.title("Length of articles of non-awarded scientists. 0 week is the week of article creation");
# ## Awarded scientists
# Same processing as the baseline cells above, for awarded ("seed") articles.
df_s=pd.read_csv("seed_revisions.csv",sep="\t")
df_s.info()
df_s=df_s[["article","text_len","timestamp"]]
df_s=df_s[~df_s.text_len.isnull()]#change text to text_len
df_s.timestamp=pd.to_datetime(df_s.timestamp)
print len(df_s)
df_s.timestamp=df_s.timestamp.apply(lambda x: x.date() )
df_s.drop_duplicates(["timestamp","article"],keep="first",inplace=True)
print len(df_s)
# +
# Weekly resample per article from creation to today (carry-forward length).
df_s_w=pd.DataFrame(columns=["time_w","text_len","article","week"])
g=df_s.groupby("article")
for article_name,series in g:
    print article_name
    created=series.timestamp.min()
    print created
    index=pd.date_range(start=created,end=datetime.today(),freq="W")
    #print index
    #series.timestamp=series.timestamp.apply(lambda x: x.date() )
    #series.drop_duplicates("timestamp",keep="first",inplace=True)
    df1=pd.DataFrame(index,columns=["time_w"])
    df1.time_w=df1.time_w.apply(lambda x: x.date() )
    df1["text_len"]=df1.time_w.apply(lambda x: series[series.timestamp<=x].iloc[0].text_len)
    df1["article"]=article_name
    df1["week"]=df1.index
    df_s_w=pd.concat([df_s_w,df1])
# +
#group data by week number
df_s_w.info()
g=df_s_w.groupby("week")
s_w_st_dict=defaultdict()
for week_name,series in g:
    #print week_name
    #print series.text_len.mean()
    #print series.sem().text_len
    #print series.describe().text_len
    s_w_st_dict[week_name]={"mean_w":series.text_len.mean(),"std_1":series.text_len.sem(),
                            "count":series.count().text_len}
#create df
df_s_w_st=pd.DataFrame.from_dict(s_w_st_dict,orient="index")
df_s_w_st=df_s_w_st[df_s_w_st["count"]>10]  # drop weeks with too few articles
df_s_w_st.info()
# -
plt.errorbar(df_s_w_st.index.values, df_s_w_st.mean_w, df_s_w_st.std_1,linestyle='None')
plt.plot(df_s_w_st.index.values, df_s_w_st.mean_w,"b",linewidth = 1)
plt.xlim(0,df_s_w_st.iloc[-1].name)
plt.ylim(0,5000)
plt.xlabel("week")
plt.ylabel("word counts")
plt.title("Length of articles of awarded scientists. 0 week is the week of article creation");
# +
# Side-by-side panels: non-awarded (left) vs awarded (right), both aligned
# on article creation week.
fig, (ax0, ax1) = plt.subplots(ncols=2, sharey=True,figsize=[20,10])
ax0.errorbar(df_b_w_st.index.values, df_b_w_st.mean_w, df_b_w_st.std_1,linestyle='None')
ax0.plot(df_b_w_st.index.values, df_b_w_st.mean_w,"b",linewidth = 1)
ax0.set_xlim(0,df_b_w_st.iloc[-1].name)
ax0.set_ylim(10,5000)
ax0.set_xlabel("week")
ax0.set_ylabel("word counts")
ax0.set_title("Length of articles of non-awarded scientists. 0 week is the week of article creation")
ax1.errorbar(df_s_w_st.index.values, df_s_w_st.mean_w, df_s_w_st.std_1,linestyle='None')
ax1.plot(df_s_w_st.index.values, df_s_w_st.mean_w,"b",linewidth = 1)
ax1.set_xlim(0,df_s_w_st.iloc[-1].name)
#plt.ylim(0,5000)
ax1.set_xlabel("week")
#plt.ylabel("word counts")
ax1.set_title('Length of articles of awarded scientists. 0 week is the week of article creation')
plt.subplots_adjust(wspace=0.01, hspace=0.01)
plt.show()
# # Date of award as 0 point
# Award dates per awarded article (JSON keyed by article URL).
with open("seed_creation_date.json","r") as f:
    sci_aw_dict=json.load(f,encoding="utf-8")
# +
# Re-index each awarded article on a weekly grid centred on its award date:
# weeks >= 0 are after the award, negative weeks before it.
df_sci_aw=pd.DataFrame.from_dict(sci_aw_dict,orient="index")
df_sci_aw.index=df_sci_aw.apply(lambda x: x.name.split("/")[-1],axis=1)  # URL -> article name
df_sci_aw.Award_date=pd.to_datetime(df_sci_aw.Award_date)
df_sci_aw.Award_date=df_sci_aw.Award_date.apply(lambda x: x.date() )
df_s_w_aw=pd.DataFrame(columns=["time_w","text_len","article","week"])
g=df_s.groupby("article")
for article_name,series in g:
    print article_name
    created=series.timestamp.min()
    #print created
    #after award
    index=pd.date_range(start=df_sci_aw.loc[article_name].Award_date,end=datetime.today(),freq="W")
    #print index
    #before award
    index2=pd.date_range(end=df_sci_aw.loc[article_name].Award_date,start=created,freq="W")
    #print index2
    #series.timestamp=series.timestamp.apply(lambda x: x.date() )
    #series.drop_duplicates("timestamp",keep="first",inplace=True)
    df1_1=pd.DataFrame(index,columns=["time_w"])
    df1_1.time_w=df1_1.time_w.apply(lambda x: x.date() )
    # Timedelta comparison: true iff the article was created AFTER its award,
    # in which case only the post-creation part of the after-award grid is kept.
    if (created - df_sci_aw.loc[article_name].Award_date)>(created -created):
        df1_1=df1_1[df1_1.time_w>=created]
        df1_1["text_len"]=df1_1.time_w.apply(lambda x: series[series.timestamp<=x].iloc[0].text_len)
        #skip df1_2
        df1=df1_1[["time_w","text_len"]]
        print "==> created after award"
    else:
        #print df1_1
        df1_2=pd.DataFrame(index2,columns=["time_w"])
        df1_2.time_w=df1_2.time_w.apply(lambda x: x.date() )
        # Shift pre-award rows to negative week indices (-len..-1).
        df1_2.index=df1_2.apply(lambda x: x.name-len(df1_2),axis=1)
        #print df1_2
        df1_1["text_len"]=df1_1.time_w.apply(lambda x: series[series.timestamp<=x].iloc[0].text_len)
        #print df1_1
        df1_2["text_len"]=df1_2.time_w.apply(lambda x: series[series.timestamp<=x].iloc[0].text_len)
        df1=pd.concat([df1_1,df1_2])
    df1["article"]=article_name
    df1["week"]=df1.index
    df_s_w_aw=pd.concat([df_s_w_aw,df1])
# +
#group data by week number
# Per-week mean / s.e.m. / count across awarded articles (award-aligned weeks).
df_s_w_aw.info()
g=df_s_w_aw.groupby("week")
s_w_st_dict=defaultdict()
for week_name,series in g:
    #print week_name
    #print series.text_len.mean()
    #print series.sem().text_len
    #print series.describe().text_len
    s_w_st_dict[week_name]={"mean_w":series.text_len.mean(),"std_1":series.text_len.sem(),
                            "count":series.count().text_len}
#create df
df_s_w_st_aw=pd.DataFrame.from_dict(s_w_st_dict,orient="index")
print len(df_s_w_st_aw)
df_s_w_st_aw=df_s_w_st_aw[df_s_w_st_aw["count"]>10]  # drop sparse weeks
print len(df_s_w_st_aw)
df_s_w_st_aw.info()
# -
plt.errorbar(df_s_w_st_aw.index.values, df_s_w_st_aw.mean_w, df_s_w_st_aw.std_1,linestyle='None')
plt.plot(df_s_w_st_aw.index.values, df_s_w_st_aw.mean_w,"b",linewidth = 1)
#plt.xlim(0,df_s_w_st.iloc[-1].name)
#plt.ylim(0,5000)
plt.xlabel("week")
plt.ylabel("word counts")
plt.title("Length of articles of awarded scientists. 0 week is the week of award");
# Spot-check the weeks around the award.
print df_s_w_st_aw.loc[1]
print df_s_w_st_aw.loc[0]
print df_s_w_st_aw.loc[-1]
print df_s_w_st_aw.loc[-2]
# # Non-awarded scientists
# +
#df_b2=df_b[["article","text_len","timestamp"]]
#df_b2.timestamp=df_b2.timestamp.apply(lambda x: x.date())
#df_b2.drop_duplicates(["article","timestamp"],keep="first",inplace=True)
#df_b2.info()
# -
def random_date(start, end, position=None):
    """Return a pandas Timestamp between *start* and *end*.

    If *position* is None the timestamp is drawn uniformly at random from
    the interval; otherwise *position* is a fraction in [0, 1] locating the
    timestamp within it (0 -> start, 1 -> end).
    """
    start, end = pd.Timestamp(start), pd.Timestamp(end)
    delta = (end - start).total_seconds()
    if position is None:
        offset = np.random.uniform(0., delta)
    else:
        offset = position * delta
    # Use a Timedelta instead of pd.offsets.Second(offset): `offset` is a
    # float and DateOffset requires an integer count in modern pandas;
    # Timedelta also preserves fractional seconds.
    return start + pd.Timedelta(seconds=offset)
# Pseudo-award dates for baseline articles are drawn from the range of real
# award dates (not from the revision-timestamp range).
#start=df_b.timestamp.min()
start=df_sci_aw.Award_date.min()
end=df_sci_aw.Award_date.max()
#end=df_b.timestamp.max()
print df_b.timestamp.min()
print df_b.timestamp.max()
print df_sci_aw.Award_date.min()
print df_sci_aw.Award_date.max()
# +
# Mirror of the award-aligned loop above, but each non-awarded article gets a
# random pseudo-award date so the two cohorts can be compared on the same axis.
df_s_w_naw=pd.DataFrame(columns=["time_w","text_len","article","week"])
g=df_b.groupby("article")
for article_name,series in g:
    print article_name
    created=series.timestamp.min()
    #print created
    #after award
    award_date=random_date(start, end)
    index=pd.date_range(start=award_date,end=datetime.today(),freq="W")
    #print index
    #before award
    index2=pd.date_range(end=award_date,start=created,freq="W")
    #print index2
    #series.timestamp=series.timestamp.apply(lambda x: x.date() )
    #series.drop_duplicates("timestamp",keep="first",inplace=True)
    df1_1=pd.DataFrame(index,columns=["time_w"])
    df1_1.time_w=df1_1.time_w.apply(lambda x: x.date() )
    # True iff the article was created after its pseudo-award date.
    if (created -award_date.date())>(created-created):
        df1_1=df1_1[df1_1.time_w>=created]
        df1_1["text_len"]=df1_1.time_w.apply(lambda x: series[series.timestamp<=x].iloc[0].text_len)
        #skip df1_2
        df1=df1_1[["time_w","text_len"]]
        print "==> created after award"
    else:
        #print df1_1
        df1_2=pd.DataFrame(index2,columns=["time_w"])
        df1_2.time_w=df1_2.time_w.apply(lambda x: x.date() )
        # Pre-award rows get negative week indices (-len..-1).
        df1_2.index=df1_2.apply(lambda x: x.name-len(df1_2),axis=1)
        #print df1_2
        df1_1["text_len"]=df1_1.time_w.apply(lambda x: series[series.timestamp<=x].iloc[0].text_len)
        #print df1_1
        df1_2["text_len"]=df1_2.time_w.apply(lambda x: series[series.timestamp<=x].iloc[0].text_len)
        df1=pd.concat([df1_1,df1_2])
    df1["article"]=article_name
    df1["week"]=df1.index
    df_s_w_naw=pd.concat([df_s_w_naw,df1])
# +
#group data by week number
df_s_w_naw.info()
g=df_s_w_naw.groupby("week")
s_w_st_dict=defaultdict()
for week_name,series in g:
#print week_name
#print series.text_len.mean()
#print series.sem().text_len
#print series.describe().text_len
s_w_st_dict[week_name]={"mean_w":series.text_len.mean(),"std_1":series.sem().text_len,
"count":series.count().text_len}
#create df
df_s_w_st_naw=pd.DataFrame.from_dict(s_w_st_dict,orient="index")
print len(df_s_w_st_naw)
df_s_w_st_naw=df_s_w_st_naw[df_s_w_st_naw["count"]>10]
print len(df_s_w_st_naw)
df_s_w_st_naw.info()
# +
#plt.errorbar(df_s_w_st_naw.index.values, df_s_w_st_naw.mean_w, df_s_w_st_naw.std_1,linestyle='None')
#plt.plot(df_s_w_st_naw.index.values, df_s_w_st_naw.mean_w,"b",linewidth = 1)
#plt.xlim(0,df_s_w_st_naw.iloc[-1].name)
#plt.ylim(0,5000)
#plt.xlabel("week")
#plt.ylabel("word counts")
#plt.title("Length of articles of non-awarded scientists. 0 week is the week of award");
# -
plt.errorbar(df_s_w_st_naw.index.values, df_s_w_st_naw.mean_w, df_s_w_st_naw.std_1,linestyle='None')
plt.plot(df_s_w_st_naw.index.values, df_s_w_st_naw.mean_w,"b",linewidth = 1)
#plt.xlim(0,df_s_w_st_naw.iloc[-1].name)
#plt.ylim(0,5000)
plt.xlabel("week")
plt.ylabel("word counts")
plt.title("Length of articles of non-awarded scientists. 0 week is the week of award");
#save
# Persist the four weekly-statistics frames used by the figures below.
df_s_w_st_aw.to_csv("data/awarded_inf_supply_words_data_forplot.csv",sep="\t")
df_s_w_st_naw.to_csv("data/non-awarded_inf_supply_words_data_forplot.csv",sep="\t")
df_s_w_st.to_csv("data/awarded_inf_supply_words_0week_data_forplot.csv",sep="\t")
df_b_w_st.to_csv("data/non-awarded_inf_supply_words_0week_data_forplot.csv",sep="\t")
# +
#load
# NOTE(review): DataFrame.from_csv is long-removed in modern pandas; fine for
# this Python 2 environment, use pd.read_csv(..., index_col=0) elsewhere.
df_s_w_st=pd.DataFrame.from_csv("data/awarded_inf_supply_words_0week_data_forplot.csv",sep="\t")
df_b_w_st=pd.DataFrame.from_csv("data/non-awarded_inf_supply_words_0week_data_forplot.csv",sep="\t")
df_s_w_st_aw=pd.DataFrame.from_csv("data/awarded_inf_supply_words_data_forplot.csv",sep="\t")
df_s_w_st_naw=pd.DataFrame.from_csv("data/non-awarded_inf_supply_words_data_forplot.csv",sep="\t")
# +
# Two-panel comparison of awarded vs non-awarded article length:
# left panel aligned on award week, right panel aligned on creation week.
fig, (ax0, ax1) = plt.subplots(ncols=2, sharey=True,figsize=[20,10])
ax0.errorbar(df_s_w_st_aw.index.values, df_s_w_st_aw.mean_w, df_s_w_st_aw.std_1,alpha=0.7,label='_nolegend_',
             linestyle='None')
ax0.plot(df_s_w_st_aw.index.values, df_s_w_st_aw.mean_w,"b",linewidth = 1,label="awarded scientist")
ax0.set_xlim(df_s_w_st_aw.iloc[0].name,df_s_w_st_aw.iloc[-1].name)
ax0.set_ylim(10,5000)
ax0.set_xlabel("week")
ax0.set_ylabel("word counts")
ax0.set_title("Length of article; 0 week is the week of award")
ax0.errorbar(df_s_w_st_naw.index.values, df_s_w_st_naw.mean_w, df_s_w_st_naw.std_1,c="lightgreen",alpha=0.2,
             label='_nolegend_',linestyle='None')
ax0.plot(df_s_w_st_naw.index.values, df_s_w_st_naw.mean_w,"g",linewidth = 1,label="non-awarded scientist")
ax0.legend(loc=2)
#df_b_w_st
#df_s_w_st
ax1.errorbar(df_s_w_st.index.values, df_s_w_st.mean_w, df_s_w_st.std_1,alpha=0.7,label='_nolegend_',
             linestyle='None')
ax1.plot(df_s_w_st.index.values, df_s_w_st.mean_w,"b",linewidth = 1,label="awarded scientist")
ax1.errorbar(df_b_w_st.index.values, df_b_w_st.mean_w, df_b_w_st.std_1,c="lightgreen",alpha=0.2,
             label='_nolegend_',linestyle='None')
ax1.plot(df_b_w_st.index.values, df_b_w_st.mean_w,"g",linewidth = 1,label="non-awarded scientist")
ax1.legend(loc=2)
ax1.set_xlim(df_s_w_st.iloc[0].name,df_s_w_st.iloc[-1].name)
#plt.ylim(0,5000)
ax1.set_xlabel("week")
#plt.ylabel("word counts")
ax1.set_title('Length of article; 0 week is the week when article was created')
plt.subplots_adjust(wspace=0.01, hspace=0.01)
plt.show()
# +
from matplotlib.container import ErrorbarContainer
from matplotlib.legend_handler import HandlerErrorbar
# Same comparison figure with error bars included in the legend entries.
fig, (ax0, ax1) = plt.subplots(ncols=2, sharey=True,figsize=[20,10])
ax0.errorbar(df_s_w_st_aw.index.values, df_s_w_st_aw.mean_w, df_s_w_st_aw.std_1,c="b",alpha=0.7,#lw=2,
             linestyle='None',label="awarded scientist, std error")#,label='_nolegend_')
ax0.plot(df_s_w_st_aw.index.values, df_s_w_st_aw.mean_w,"darkblue",linewidth = 1,label="awarded scientist, mean")
ax0.set_xlim(df_s_w_st_aw.iloc[0].name,df_s_w_st_aw.iloc[-1].name)
ax0.set_ylim(10,4500)
ax0.set_xlabel("week")
ax0.set_ylabel("word counts")
ax0.set_title("Length of article; 0 week is the week of award")
ax0.errorbar(df_s_w_st_naw.index.values, df_s_w_st_naw.mean_w, df_s_w_st_naw.std_1,c="green",alpha=0.7,#lw=2,
             linestyle='None',label="non-awarded scientist, std error")#label='_nolegend_'
ax0.plot(df_s_w_st_naw.index.values, df_s_w_st_naw.mean_w,"lime",linewidth = 1,label="non-awarded scientist, mean")
ax0.plot([0,0],[10,5000],"r--",label="week of award")  # vertical marker at award week
leg=ax0.legend(loc=2,handler_map={ErrorbarContainer: HandlerErrorbar(numpoints=15,xpad=0.1)})
for lh in leg.legendHandles:
    lh.set_alpha(1)  # legend samples at full opacity
ax1.errorbar(df_s_w_st.index.values, df_s_w_st.mean_w, df_s_w_st.std_1,c="b",alpha=0.8,lw=2,
             linestyle='None',label="awarded scientist, std error")#,label='_nolegend_')
ax1.plot(df_s_w_st.index.values, df_s_w_st.mean_w,"darkblue",linewidth = 1,label="awarded scientist, mean")
ax1.errorbar(df_b_w_st.index.values, df_b_w_st.mean_w, df_b_w_st.std_1,c="green",alpha=0.8,lw=2,
             linestyle='None',label="non-awarded scientist, std error")#label='_nolegend_'
ax1.plot(df_b_w_st.index.values, df_b_w_st.mean_w,"lime",linewidth = 1,label="non-awarded scientist, mean")
#ax1.legend(loc=2)
leg=ax1.legend(loc=2,handler_map={ErrorbarContainer: HandlerErrorbar(numpoints=15,xpad=0.1)})
for lh in leg.legendHandles:
    lh.set_alpha(1)
ax1.set_xlim(df_s_w_st.iloc[0].name,df_s_w_st.iloc[-1].name)
#plt.ylim(0,5000)
ax1.set_xlabel("week")
#plt.ylabel("word counts")
ax1.set_title('Length of article; 0 week is the week when the article about scientists was created')
plt.subplots_adjust(wspace=0.01, hspace=0.01)
plt.tight_layout()
plt.show()
# -
# +
from matplotlib.container import ErrorbarContainer
from matplotlib.legend_handler import HandlerErrorbar
# Publication version of the previous figure: smaller canvas, 500 dpi,
# capitalised labels, saved to plots/word_count.png.
fig, (ax0, ax1) = plt.subplots(ncols=2, sharey=True,figsize=[8,4],dpi=500)
ax0.errorbar(df_s_w_st_aw.index.values, df_s_w_st_aw.mean_w, df_s_w_st_aw.std_1,c="b",alpha=0.7,#lw=2,
             linestyle='None',label="Awarded scientist, std error")#,label='_nolegend_')
ax0.plot(df_s_w_st_aw.index.values, df_s_w_st_aw.mean_w,"darkblue",linewidth = 1,label="Awarded scientist, mean")
ax0.set_xlim(df_s_w_st_aw.iloc[0].name,df_s_w_st_aw.iloc[-1].name-10)
ax0.set_ylim(10,4500)
ax0.set_xlabel("Week")
ax0.set_ylabel("Word counts")
#ax0.set_title("Length of article; 0 week is the week of award")
ax0.errorbar(df_s_w_st_naw.index.values, df_s_w_st_naw.mean_w, df_s_w_st_naw.std_1,c="green",alpha=0.7,#lw=2,
             linestyle='None',label="Non-awarded scientist, std error")#label='_nolegend_'
ax0.plot(df_s_w_st_naw.index.values, df_s_w_st_naw.mean_w,"lime",linewidth = 1,label="Non-awarded scientist, mean")
ax0.plot([0,0],[10,5000],"r--",label="Week of award")
leg=ax0.legend(loc=2,handler_map={ErrorbarContainer: HandlerErrorbar(numpoints=15,xpad=0.1)})
for lh in leg.legendHandles:
    lh.set_alpha(1)
ax1.errorbar(df_s_w_st.index.values, df_s_w_st.mean_w, df_s_w_st.std_1,c="b",alpha=0.8,lw=2,
             linestyle='None',label="Awarded scientist, std error")#,label='_nolegend_')
ax1.plot(df_s_w_st.index.values, df_s_w_st.mean_w,"darkblue",linewidth = 1,label="Awarded scientist, mean")
ax1.errorbar(df_b_w_st.index.values, df_b_w_st.mean_w, df_b_w_st.std_1,c="green",alpha=0.8,lw=2,
             linestyle='None',label="Non-awarded scientist, std error")#label='_nolegend_'
ax1.plot(df_b_w_st.index.values, df_b_w_st.mean_w,"lime",linewidth = 1,label="Non-awarded scientist, mean")
#ax1.legend(loc=2)
leg=ax1.legend(loc=2,handler_map={ErrorbarContainer: HandlerErrorbar(numpoints=15,xpad=0.1)})
for lh in leg.legendHandles:
    lh.set_alpha(1)
ax1.set_xlim(df_s_w_st.iloc[0].name,df_s_w_st.iloc[-1].name)
#plt.ylim(0,5000)
ax1.set_xlabel("Week")
#plt.ylabel("word counts")
#ax1.set_title('Length of article; 0 week is the week when the article about scientists was created')
plt.tight_layout()
plt.subplots_adjust(wspace=0.03, hspace=0.03)
#plt.savefig("plots/word_count.pdf",dpi=500)
plt.savefig("plots/word_count.png",dpi=500)
plt.show()
# -
# Spot-check the weeks around the pseudo-award for the baseline cohort.
print df_s_w_st_naw.loc[2]
print df_s_w_st_naw.loc[1]
print df_s_w_st_naw.loc[0]
print df_s_w_st_naw.loc[-1]
print df_s_w_st_naw.loc[-2]
| Analyze word counts over time.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python [default]
# language: python
# name: python3
# ---
# + [markdown] colab_type="text" id="74l7lcFQk4kT"
# ## Setup
#
# + [markdown] colab_type="text" id="ixh2Tyl1FHaj"
# In this first cell we'll load the necessary libraries and set up some logging and display options.
# + colab={} colab_type="code" id="JaCENoitkiXK"
import math
import matplotlib.pyplot as plt
import numpy as np
import pandas as pd
import xarray as xr
# %matplotlib inline
# -
# Define a function that displays the contents of a NetCDF.
def display_netcdf(nc_path):
    """Open the NetCDF file at *nc_path* with xarray and print its summary."""
    dataset = xr.open_dataset(nc_path)
    print(dataset.info)
# Define a function that creates a copy of a NetCDF.
def netcdf_copy(path_orig,
                path_copy):
    """Copy the NetCDF at *path_orig* to *path_copy* via an xarray round-trip.

    ``decode_times=False`` keeps the raw time values instead of decoding
    them into datetimes, so the copy preserves the original encoding.
    """
    source = xr.open_dataset(path_orig, decode_times=False)
    source.to_netcdf(path=path_copy)
# Copy the labels NetCDF into a corresponding predicted labels NetCDF. We expect to update the label variable's values in this NetCDF with values predicted by an AI model that we'll develop and train below.
# +
# paths to the computed and predicted labels NetCDF files
path_labels = 'C:/home/cam_learn/fv091x180L26_dry_HS.cam.h1.2000-12-27-00000_lowres.nc'
path_predictions = 'C:/home/cam_learn/fv091x180L26_dry_HS.cam.h2.2000-12-27-00000_lowres.nc'
# make a copy of the computed labels NetCDF as the label predictions NetCDF
netcdf_copy(path_labels, path_predictions)
# show what the copied file contains so we can sanity-check its variables
display_netcdf(path_predictions)
# + [markdown] colab_type="text" id="y0gBz25Glf-3"
# Next we'll load our flow variables (features) and time tendency forcings (labels) datasets into Xarray Dataset objects.
# + colab={} colab_type="code" id="_cC_-nNSlWIO"
path_features = 'C:/home/cam_learn/fv091x180L26_dry_HS.cam.h0.2000-12-27-00000_lowres.nc'
# decode_times=False keeps the raw "days since ..." offsets instead of datetimes
ds_features = xr.open_dataset(path_features, decode_times=False)
ds_labels = xr.open_dataset(path_labels, decode_times=False)
# -
ds_features.info
ds_labels.info
# Look at the time variable in order to work out the initial date, number of steps, units, etc.
ds_features.variables['time']
# Make sure we have the same time values for the targets data.
if (ds_features.variables['time'].values != ds_labels.variables['time'].values).any():
    print('ERROR: Non-matching time values')
else:
    print("OK: time values match as expected")
# #### Define a function that creates a Series of timestamp values corresponding to the NetCDF's time values.
from datetime import datetime, timedelta
def extract_timestamps(ds,
                       year,
                       month,
                       day):
    """Build a pandas Series of timestamps from a dataset's time coordinate.

    Parameters
    ----------
    ds : xarray.Dataset
        Dataset whose ``time`` variable holds offsets in days since the
        initial date.
    year, month, day : int
        Components of the initial date the time offsets are relative to.

    Returns
    -------
    pd.Series
        One timestamp (minute resolution) per time value.
    """
    # BUG FIX: the initial date was hard-coded to 2000-12-27 and the
    # year/month/day arguments were silently ignored; honor them instead.
    initial = datetime(year, month, day)

    # Convert each "days since initial" offset into an absolute datetime.
    times = ds.variables['time'].values
    datetimes = np.empty(shape=times.shape, dtype='datetime64[m]')
    for i in range(datetimes.size):
        datetimes[i] = initial + timedelta(days=times[i])

    # Put the array into a Series and return it.
    return pd.Series(datetimes)
# #### Create a Series of datetime values from the DataSet's (NetCDF's) time coordinate variable's values.
# These will be used as time indices corresponding to feature and label values.
timestamps = extract_timestamps(ds_features, 2000, 12, 27)
timestamps.head()
# ## Feature and target selection
# As features we'll use the following flow variables:
#
# * U (west-east (zonal) wind, m/s)
# * V (south-north (meridional) wind, m/s)
# * T (temperature, K)
# * PS (surface pressure, Pa)
#
# Time tendency forcings are the targets (labels) that our model should learn to predict.
#
# * PTTEND (time tendency forcing value corresponding to the temperature variable)
# * PUTEND (time tendency forcing value corresponding to zonal wind)
# * PVTEND (time tendency forcing value corresponding to meridional wind)
#
# Eventually we'll train/fit our model for an entire global 3-D grid, but for this example we'll select all lat/lon/time combinations for a single level (elevation).
# PS is a surface field (time, lat, lon); T/U/V and the forcings are 4-D,
# so they are sliced at level index 0 before flattening.
ps = pd.Series(ds_features.variables['PS'].values[:, :, :].flatten())
t = pd.Series(ds_features.variables['T'].values[:, 0, :, :].flatten())
u = pd.Series(ds_features.variables['U'].values[:, 0, :, :].flatten())
v = pd.Series(ds_features.variables['V'].values[:, 0, :, :].flatten())
pttend = pd.Series(ds_labels.variables['PTTEND'].values[:, 0, :, :].flatten())
putend = pd.Series(ds_labels.variables['PUTEND'].values[:, 0, :, :].flatten())
pvtend = pd.Series(ds_labels.variables['PVTEND'].values[:, 0, :, :].flatten())
# Convert to Pandas DataFrames containing inputs (features) and outputs (label/target) for use when predicting time tendency forcings.
df_features = pd.DataFrame({'timestamp': timestamps,
                            'PS': ps,
                            'T': t,
                            'U': u,
                            'V': v})
df_features.set_index('timestamp', inplace=True)
df_features.head()
df_targets = pd.DataFrame({'timestamp': timestamps,
                           'PTTEND': pttend,
                           'PUTEND': putend,
                           'PVTEND': pvtend})
df_targets.set_index('timestamp', inplace=True)
df_targets.head()
# ## Split the data into training and testing datasets
# We'll use an initial split of 75% for training and 25% for testing.
from sklearn.model_selection import train_test_split
# random_state is fixed so the split is reproducible across runs
x_train, x_test, y_train, y_test = train_test_split(df_features, df_targets, test_size=0.25, random_state=4)
# ## Create the linear regression model
from sklearn import linear_model
model = linear_model.LinearRegression()
# ## Train and evaluate the model
# Train the model by fitting to the training dataset. Then predict the labels using the test features, and get the RMSE compared against the test labels.
# + colab={} colab_type="code" id="Q6waMx-cMg71"
# Fit the linear regression model on the training split.
history = model.fit(x_train, y_train)

# Root-mean-square error of predictions against the held-out test labels.
rmse = np.sqrt(np.mean((model.predict(x_test) - y_test)**2))
print("RMSE:\n{}".format(rmse))
# BUG FIX: removed a stray incomplete statement ("model.") that made this
# cell raise a SyntaxError.
# -
# ## Build a corresponding predictions dataset
# + [markdown] colab={} colab_type="code" id="wVzN6_fWZDJn"
# Next we'll loop over each level of the features/labels datasets and use each of these in turn to train the model, which we'll then use to compute/predict corresponding labels. These predictions will be written into the predictions NetCDF (TODO along with the RMSE vs. original labels as separate/corresponding variables).
# -
# #### Define a function to extract a feature dataset for a specific level incorporating the feature variables (PS, T, U, and V).
#
# (Incorporating timestamps in case they're useful later when utilizing models that are relevant to timeseries, but in this first use case, i.e. with a simple linear regression model, this appears to be inconsequential/superfluous.)
def extract_features(ds_features,
                     level_index,
                     timestamps):
    """Assemble a feature DataFrame (PS, T, U, V) for one vertical level.

    PS is a surface field (no level dimension) and is used whole; T, U and
    V are sliced at ``level_index``.  Rows are indexed by ``timestamps``.
    """
    frame = pd.DataFrame({
        'timestamp': timestamps,
        'PS': pd.Series(ds_features.variables['PS'].values[:, :, :].flatten()),
        'T': pd.Series(ds_features.variables['T'].values[:, level_index, :, :].flatten()),
        'U': pd.Series(ds_features.variables['U'].values[:, level_index, :, :].flatten()),
        'V': pd.Series(ds_features.variables['V'].values[:, level_index, :, :].flatten()),
    })
    frame.set_index('timestamp', inplace=True)
    return frame
# #### Define a function to extract training and test datasets for a specific level and single target variable, using all feature variables (PS, T, U, and V).
#
# (Incorporating timestamps in case they're useful later when utilizing models that are relevant to timeseries, but in this first use case, i.e. with a simple linear regression model, this appears to be inconsequential/superfluous.)
def extract_train_test(ds_features,
                       ds_labels,
                       timestamps,
                       level_index,
                       label,
                       test_percentage):
    """Build train/test splits for one level and one target variable.

    Features are PS, T, U and V (via ``extract_features``); the target is
    the single ``label`` variable sliced at ``level_index``.  Returns the
    usual ``x_train, x_test, y_train, y_test`` tuple.
    """
    feature_frame = extract_features(ds_features, level_index, timestamps)

    # Single-column target DataFrame, indexed by the same timestamps.
    target_values = pd.Series(
        ds_labels.variables[label].values[:, level_index, :, :].flatten())
    target_frame = pd.DataFrame({'timestamp': timestamps, label: target_values})
    target_frame.set_index('timestamp', inplace=True)

    # random_state is fixed so the split is reproducible across calls.
    return train_test_split(feature_frame,
                            target_frame,
                            test_size=test_percentage,
                            random_state=4)
# #### Determine original shape of a single level
# Get the original shape of the variables (assumed to be the same for features and labels),
# and use this to establish the shape we'll use for reshaping our predictions, which will
# initially come out of the model as a flattened array.
shape = ds_labels.variables['PVTEND'].values.shape
# Drop the level axis (axis 1): predictions are produced one level at a time.
shape_single_level = (shape[0], shape[2], shape[3])
# ### Train the model per level/label and predict values, writing the predictions to NetCDF.
# +
# Open the predictions NetCDF as an xarray DataSet.
ds_predictions = xr.open_dataset(path_predictions, lock=False)
# Loop over levels, using a level index
for lev_ix in range(ds_features.dims['lev']):
    # Loop over each label, in order to predict each in isolation
    # TODO is this necessary/optimal, or can we just as well do all three simultaneously? TEST THIS
    # i.e. is univariate any more accurate than multivariate when using sklearn's LRM?
    for forcing_label in ['PTTEND', 'PUTEND', 'PVTEND']:
        # Extract training and testing datasets.
        x_train, x_test, y_train, y_test = extract_train_test(ds_features,
                                                              ds_labels,
                                                              timestamps,
                                                              lev_ix,
                                                              forcing_label,
                                                              0.25)
        # Train the model, then predict the relevant forcing tendencies using the trained model.
        # NOTE(review): the model is fit on the 75% training split but then
        # predicts over all timestamps for this level — confirm intended.
        model.fit(x_train, y_train)
        input_features = extract_features(ds_features, lev_ix, timestamps)
        predictions = model.predict(input_features)
        # Write the prediction values into the NetCDF at the relevant level.
        values = np.reshape(predictions, shape_single_level)
        ds_predictions.variables[forcing_label].values[:, lev_ix, :, :] = values
# Write the predictions dataset back as a NetCDF, overwriting the previous copy.
ds_predictions.to_netcdf('C:\\home\\cam_learn\\fv091x180L26_dry_HS.cam.PREDICTED.2000-12-27-00000_lowres.nc', mode='w')
# -
# ### Comparison with computed results
# At this point we can compare the predictions against the computed labels to determine how accurately the linear regression model matched the results of the climate model. Once we're satisfied that the errors are marginal then we can use the LRM to predict labels using new inputs for the same features, and then test to see how closely the predictions match the known results for those inputs. The goals are to develop an appropriate AI model (not necessarily a linear regression model as is used above) and to determine the optimal parameters of the AI model where it can replace (and hopefully improve upon the results of) the existing climate model.
| notebooks/model_learn_linear_sklearn.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# language: python
# name: python3
# ---
# # Importing VTK data to be displayed as Mesh
#
# Additional requirements for this example: `vtk`
#
# We will also use K3D's bundled color maps.
# +
import k3d
import os
import vtk
from k3d.helpers import download
filename = download("https://raw.githubusercontent.com/naucoin/VTKData/master/Data/Quadratic/CylinderQuadratic.vtk")
# 4x4 transform matrix (row-major) scaling the model by 10 along x, y and z.
model_matrix = (
    10.0, 0.0, 0.0, 0.0,
    0.0, 10.0, 0.0, 0.0,
    0.0, 0.0, 10.0, 0.0,
    0.0, 0.0, 0.0, 1.0
)
# Read the legacy-format unstructured grid from the downloaded file.
reader = vtk.vtkUnstructuredGridReader()
reader.SetFileName(filename)
reader.Update()
# Convert the unstructured grid into poly data that k3d can display.
geometryFilter = vtk.vtkGeometryFilter()
geometryFilter.SetInputData(reader.GetOutput())
geometryFilter.Update()
plot = k3d.plot()
# Color the surface by the 'pressure' attribute, mapped over [-100, 7200].
cylinder = k3d.vtk_poly_data(geometryFilter.GetOutput(), color_attribute=('pressure', -100, 7200),
                             color_map=k3d.basic_color_maps.Jet, model_matrix=model_matrix)
plot += cylinder
plot.display()
# -
# Swap the color map in place; the displayed plot updates live.
cylinder.color_map = k3d.matplotlib_color_maps.Blues
cylinder.color_map = k3d.paraview_color_maps.Rainbow_Desaturated
| examples/vtk2.ipynb |
;; ---
;; jupyter:
;; jupytext:
;; text_representation:
;; extension: .clj
;; format_name: light
;; format_version: '1.5'
;; jupytext_version: 1.14.4
;; kernelspec:
;; display_name: Lein-Clojure
;; language: clojure
;; name: lein-clojure
;; ---
;; Demo notebook namespace: notebook helpers for inline display plus the
;; visualization-server API.
(ns demo
  (:require [metaprob-viz.notebook :as notebook]
            [metaprob-viz.viz :as viz]))
;; ### start visualization server.
;; The argument is the port on which the server should listen.
(viz/start-server! 8081)
;; ### declare a visualization
;;
;; There must be a "renderer" available at the indicated path (relative to the root of the running server process).
;; The second argument supplies the initial x/y data series for the renderer.
(def vid (viz/add-viz! "public/vue/dist/" [[-2.0 -1.0 0 1.0 2.0]
                                           [-2.0 -1.0 0 1.0 2.0]]))
(viz/viz-url vid)
;; ### open the visualization in this notebook
;; This will update in real time as it's updated. Initially there's nothing to visualize, so it'll be empty
(notebook/open-in-notebook vid)
;; ### Open visualization in new tab
;; In certain instances, you may want to open this visualization in a new tab. We can't do that programmatically, but we can provide a link to the user so they can do it themselves (this can be extended to include javascript to open the link in a popup rather than tab).
(notebook/open-in-tab vid)
;; ### Add traces to visualization
;; First we'll define a couple of example traces, then we'll use `put-trace!` to add them to the visualization.
;; +
;; define a couple of example traces
(def trace-0 {:slope 1.4
              :intercept 0
              :inlier_std 0.2
              :outlier_std 1.2
              :outliers [false false true false true]})
(def trace-1 {:slope 0.5
              :intercept -1
              :inlier_std 0.1
              :outlier_std 2.1
              :outliers [false false true false true]})
;; add those traces to the visualization; put-trace! returns the new trace's ID
(def t0-id (viz/put-trace! vid trace-0))
(def t1-id (viz/put-trace! vid trace-1))
;; -
;; ### Update existing trace
;; `put-trace!` takes an optional third argument- a trace ID. With the `dist` renderer, reusing an existing trace ID will lead the renderer to update the existing trace in place, enabling animation. This example will replace trace-1's
;; plot with trace-0's data:
(viz/put-trace! vid trace-0 t1-id)
;; ...and this will return the plot to trace-1's data:
(viz/put-trace! vid trace-1 t1-id)
;; ### Add static trace with display-in-notebook
;; `display-in-notebook` adds a static version of a visualization to the notebook.
;;
;; This function requires that either `open-in-tab` or `open-in-notebook` has been previously called with this visualization ID.
;; +
(def vid2 (viz/add-viz! "public/vue/dist/" [[-2.0 -1.0 0 1.0 2.0]
                                            [-2.0 -1.0 0 1.0 2.0]]))
(notebook/open-in-tab vid2)
;; +
(viz/put-trace! vid2 trace-0)
(viz/put-trace! vid2 trace-1)
(notebook/display-in-notebook vid2)
;; -
;; ### Save visualization outside the notebook
;; Note that the path to the file to save is from the root of the server clojure process, not the root of the notebook.
(viz/save-to-file vid "tutorial/example-viz.html")
;; ### Remove traces from visualization
;; Note that the static visualization (from `display-in-notebook`) is unaffected, but the dynamic visualization (from `open-in-notebook`) is
(viz/delete-trace! vid t0-id)
(viz/delete-trace! vid t1-id)
;; ### Stop the visualization server
(viz/stop-server!)
| tutorial/.ipynb_checkpoints/Visualization-checkpoint.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# language: python
# name: python3
# ---
# + [markdown] toc=true
# <h1>Table of Contents<span class="tocSkip"></span></h1>
# <div class="toc"><ul class="toc-item"><li><span><a href="#Introduction" data-toc-modified-id="Introduction-1">Introduction</a></span></li><li><span><a href="#Part-1:-Importing-Libraries" data-toc-modified-id="Part-1:-Importing-Libraries-2">Part 1: Importing Libraries</a></span></li><li><span><a href="#Part-2:-Importing-Dataset" data-toc-modified-id="Part-2:-Importing-Dataset-3">Part 2: Importing Dataset</a></span><ul class="toc-item"><li><span><a href="#2.1.-Transformations-on-the-Dataset-(Scaling,-Cropping,-Flipping)" data-toc-modified-id="2.1.-Transformations-on-the-Dataset-(Scaling,-Cropping,-Flipping)-3.1">2.1. Transformations on the Dataset (Scaling, Cropping, Flipping)</a></span></li><li><span><a href="#2.2.-Label-Mapping" data-toc-modified-id="2.2.-Label-Mapping-3.2">2.2. Label Mapping</a></span></li></ul></li><li><span><a href="#Part-3:-Building-and-Training-the-Neural-Net" data-toc-modified-id="Part-3:-Building-and-Training-the-Neural-Net-4">Part 3: Building and Training the Neural Net</a></span><ul class="toc-item"><li><span><a href="#3.1.-Choosing-the-Model" data-toc-modified-id="3.1.-Choosing-the-Model-4.1">3.1. Choosing the Model</a></span></li><li><span><a href="#3.2.-Setting-up-the-Parameters" data-toc-modified-id="3.2.-Setting-up-the-Parameters-4.2">3.2. Setting up the Parameters</a></span></li><li><span><a href="#3.3.-Setting-Criterion-and-Optimizer" data-toc-modified-id="3.3.-Setting-Criterion-and-Optimizer-4.3">3.3. Setting Criterion and Optimizer</a></span></li><li><span><a href="#3.4.-Training-the-Model" data-toc-modified-id="3.4.-Training-the-Model-4.4">3.4. 
Training the Model</a></span></li></ul></li><li><span><a href="#Part-4:-Testing-our-Network" data-toc-modified-id="Part-4:-Testing-our-Network-5">Part 4: Testing our Network</a></span></li><li><span><a href="#Part-5:-Saving-the-Trained-Model" data-toc-modified-id="Part-5:-Saving-the-Trained-Model-6">Part 5: Saving the Trained Model</a></span></li><li><span><a href="#Part-6:-Loading-the-Trained-Model" data-toc-modified-id="Part-6:-Loading-the-Trained-Model-7">Part 6: Loading the Trained Model</a></span></li><li><span><a href="#Part-7:-Inference-for-Classification" data-toc-modified-id="Part-7:-Inference-for-Classification-8">Part 7: Inference for Classification</a></span><ul class="toc-item"><li><span><a href="#7.1.-Image-Preprocessing" data-toc-modified-id="7.1.-Image-Preprocessing-8.1">7.1. Image Preprocessing</a></span></li></ul></li><li><span><a href="#Part-8:-Class-Prediction" data-toc-modified-id="Part-8:-Class-Prediction-9">Part 8: Class Prediction</a></span></li><li><span><a href="#Part-9:-Sanity-Checking" data-toc-modified-id="Part-9:-Sanity-Checking-10">Part 9: Sanity Checking</a></span></li><li><span><a href="#End" data-toc-modified-id="End-11">End</a></span></li></ul></div>
# -
# # Introduction
#
# The last 50 years indicate that artificial intelligence (AI) algorithms will be incorporated into more and more everyday applications. For example, we might want to include an image classifier in a smart phone app. To do this, we'd use a deep learning model trained on hundreds of thousands of images as part of the overall application architecture. A large part of software development in the future will be using these types of models as common parts of applications.
#
# In this project, I am going to train an image classifier to recognize _different species of flowers_. We can imagine using something like this in a mobile app that tells us the name of the flower our camera is pointing at. In practice, we would train this classifier, then export it for use in our application. I will be using the [102 Category Flower Dataset](http://www.robots.ox.ac.uk/~vgg/data/flowers/102/index.html) from the [Visual Geometry Group](http://www.robots.ox.ac.uk/~vgg/) of the [University of Oxford](http://www.ox.ac.uk/). You can see a few examples below.
#
# <img src="images/flowers.png" width=500px>
#
# The project is broken down into multiple steps:
#
# * Load and preprocess the image dataset.
# * Train the image classifier on our dataset.
# * Use the trained classifier to predict image content.
#
# When I complete this project, I will have an application that can be trained on _any_ set of labeled images. I will also turn it into a __command line application__.
# # Part 1: Importing Libraries
# The libraries that we will need
import json
import torch
import argparse
import numpy as np
from torch import nn, optim
from torchvision import datasets, transforms, models
import torch.nn.functional as F
from PIL import Image
# # Part 2: Importing Dataset
#
# Here I will use `torchvision` to load the data. You might want to refer to the [documentation](http://pytorch.org/docs/0.3.0/torchvision/index.html). Note that the data __must__ be included alongside this notebook, otherwise you might run into problems. You can download the dataset (329 MB) by simply clicking [here](https://s3.amazonaws.com/content.udacity-data.com/nd089/flower_data.tar.gz). The dataset is split into three parts: training, validation, and testing. For the training, I am going to apply transformations such as random scaling, cropping, and flipping. This will help the network generalize leading to better performance. You'll also need to make sure the input data is resized to 224x224 pixels as required by the pre-trained networks.
#
# The validation and testing sets are used to measure the model's performance on data it hasn't seen yet. For this we don't want any scaling or rotation transformations, but you'll need to resize then crop the images to the appropriate size.
#
# The pre-trained networks we will be using were trained on the `ImageNet` dataset where each color channel was normalized separately. For all three sets we'll need to normalize the means and standard deviations of the images to what the network expects. For the means, it's `[0.485, 0.456, 0.406]` and for the standard deviations `[0.229, 0.224, 0.225]`, calculated from the `ImageNet` images. These values will shift each color channel to be centered at 0 and range from -1 to 1.
data_dir = "flowers"
train_dir = data_dir + "/train"
valid_dir = data_dir + "/valid"
test_dir = data_dir + "/test"
# ## 2.1. Transformations on the Dataset (Scaling, Cropping, Flipping)
# +
# Define our transforms for the training, validation, and testing sets
# Training uses random rotation/crop/flip for augmentation; all three sets
# are normalized with the ImageNet channel means and standard deviations.
training_transforms = transforms.Compose([transforms.RandomRotation(30),
                                          transforms.RandomResizedCrop(224),
                                          transforms.RandomHorizontalFlip(),
                                          transforms.ToTensor(),
                                          transforms.Normalize([0.485, 0.456, 0.406],
                                                               [0.229, 0.224, 0.225])])
validation_transforms = transforms.Compose([transforms.Resize(256),
                                            transforms.CenterCrop(224),
                                            transforms.ToTensor(),
                                            transforms.Normalize([0.485, 0.456, 0.406],
                                                                 [0.229, 0.224, 0.225])])
test_transforms = transforms.Compose([transforms.Resize(256),
                                      transforms.CenterCrop(224),
                                      transforms.ToTensor(),
                                      transforms.Normalize([0.485, 0.456, 0.406],
                                                           [0.229, 0.224, 0.225])])
# Load the datasets with ImageFolder
training_data = datasets.ImageFolder(train_dir, transform=training_transforms)
validation_data = datasets.ImageFolder(valid_dir, transform=validation_transforms)
test_data = datasets.ImageFolder(test_dir, transform=test_transforms)
# Using the image datasets and the transforms, define the dataloaders
train_loader = torch.utils.data.DataLoader(training_data, batch_size=32, shuffle=True)
validation_loader = torch.utils.data.DataLoader(validation_data, batch_size=32, shuffle=True)
test_loader = torch.utils.data.DataLoader(test_data, batch_size=32, shuffle=True)
# -
# ## 2.2. Label Mapping
#
# We will also need to load in a mapping from category label to category name. You can find this in the file `cat_to_name.json`. It's a `JSON` object which we can read in with the [`json` module](https://docs.python.org/2/library/json.html). This will give us a dictionary that is mapping the integer encoded categories to the actual names of the flowers.
# Map integer-encoded class labels to human-readable flower names.
with open('cat_to_name.json', 'r') as f:
    cat_to_name = json.load(f)
# # Part 3: Building and Training the Neural Net
#
# Now that the data is ready, it's time to build and train the classifier. As usual, we should use one of the __pretrained models__ from `torchvision.models` to get the image features. I am going to build and train a new feed-forward classifier using those features.
#
# In this part, I am going to:
# * Load a [pre-trained network](http://pytorch.org/docs/master/torchvision/models.html).
# * Define a new, untrained feed-forward network as a classifier, using `ReLU` activations and dropout.
# * Train the classifier layers using backpropagation using the pre-trained network to get the features.
# * Track the loss and accuracy on the validation set to determine the best hyperparameters.
# ## 3.1. Choosing the Model
# +
# We have a couple of options: AlexNet, VGG-19, ResNet-18, Densenet-121.
# I chose the ones that have relatively fewer layers for the sake of computing time.
model = models.vgg19(pretrained=True)
model
# -
# ## 3.2. Setting up the Parameters
# +
# We have a couple of options: AlexNet, VGG-19, ResNet-152, Densenet-161,
from collections import OrderedDict
# Freeze the pretrained feature extractor; only the new classifier will train.
for param in model.parameters():
    param.requires_grad = False
# Replacement classifier head: VGG-19 features produce 25088 values and the
# dataset has 102 flower classes. LogSoftmax pairs with NLLLoss below.
classifier = nn.Sequential(OrderedDict([
    ("fc1", nn.Linear(25088, 512)),
    ("relu", nn.ReLU()),
    ("dropout", nn.Dropout(0.5)),
    ("fc2", nn.Linear(512, 102)),
    ("output", nn.LogSoftmax(dim=1))
]))
model.classifier = classifier
# -
# ## 3.3. Setting Criterion and Optimizer
# +
# Do validation on the test set
# NLLLoss on LogSoftmax output is equivalent to cross-entropy loss.
criterion = nn.NLLLoss()
# Optimize only the classifier parameters (the feature extractor is frozen).
optimizer = optim.Adam(model.classifier.parameters(), lr=0.001)
def validation(model, validation_loader, criterion):
    """Evaluate *model* on the validation set (on the GPU).

    Returns a ``(loss_sum, accuracy_sum)`` tuple: the sum of per-batch
    losses and the sum of per-batch mean accuracies.  Callers divide both
    by ``len(validation_loader)`` to obtain averages.
    """
    validation_loss = 0
    accuracy = 0

    # Move the model to the GPU before evaluating.
    model.to("cuda")

    for images, labels in validation_loader:
        images, labels = images.to("cuda"), labels.to("cuda")

        output = model.forward(images)
        validation_loss += criterion(output, labels).item()

        # The model emits log-probabilities; exponentiate, then compare the
        # most likely class against the targets to accumulate accuracy.
        probabilities = torch.exp(output)
        correct = (labels.data == probabilities.max(dim=1)[1])
        accuracy += correct.type(torch.FloatTensor).mean()

    return validation_loss, accuracy
# -
# ## 3.4. Training the Model
# +
epochs = 5
steps = 3
print_every = 10

# using gpu
model.to("cuda")

print("Initializing the training process...\n")
for e in range(epochs):
    running_loss = 0
    for ii, (inputs, labels) in enumerate(train_loader):
        steps += 1
        inputs, labels = inputs.to("cuda"), labels.to("cuda")
        optimizer.zero_grad()

        outputs = model.forward(inputs)
        loss = criterion(outputs, labels)
        loss.backward()
        optimizer.step()

        running_loss += loss.item()

        if steps % print_every == 0:
            # Evaluate without dropout or gradient tracking.
            model.eval()
            with torch.no_grad():
                validation_loss, accuracy = validation(model, validation_loader, criterion)

            # BUG FIX: the validation loss was divided by
            # len(validation_loader) twice (here and again inside the
            # print), under-reporting it; divide exactly once.
            validation_loss = validation_loss / len(validation_loader)

            print("Epoch: {}/{} | ".format(e+1, epochs),
                  "Training Loss: {:.3f} | ".format(running_loss/print_every),
                  "Validation Loss: {:.3f} | ".format(validation_loss),
                  "Accuracy: {:.3f}%".format(100*accuracy/len(validation_loader)))

            running_loss = 0
            # BUG FIX: restore training mode (re-enable dropout) after
            # evaluation; previously the model stayed in eval mode.
            model.train()

print("Training process is complete!")
# -
# # Part 4: Testing our Network
#
# It is a good practice to test our trained network on test data, i.e., images the network has never seen either in training or validation. This will give us a good estimate for our model's performance on completely new images. We will run the test images through the network and measure the accuracy, the same way we measured validation.
# +
def test_accuracy(test_loader):
    """Print classification accuracy of the global ``model`` on *test_loader*."""
    correct = 0
    total = 0

    model.to("cuda:0")
    # No gradients are needed for evaluation.
    with torch.no_grad():
        for images, labels in test_loader:
            images, labels = images.to("cuda"), labels.to("cuda")
            outputs = model(images)
            # Index of the highest score is the predicted class.
            _, predictions = torch.max(outputs.data, 1)
            correct += (predictions == labels).sum().item()
            total += labels.size(0)

    print("Accuracy of the network on the test dataset is: {:.3f}%".format(100*correct/total))

test_accuracy(test_loader)
# -
# 
# # Part 5: Saving the Trained Model
#
# Now that our network is trained, we will save the model so that we can load it later for making predictions. We will probably want to save other things such as the mapping of classes to indices which we get from one of the image datasets: `image_datasets['train'].class_to_idx`. We can attach this to the model as an attribute which will make inference easier later on.
#
# ```model.class_to_idx = image_datasets['train'].class_to_idx```
#
# Remember that we will want to completely rebuild the model later so that we can use it for inference. We need to make sure we include any information we need in the checkpoint. If we want to load the model and keep training, we will need to save the number of epochs as well as the optimizer state, `optimizer.state_dict`. It is likely that we will want to use this trained model in the future, so it is best to save it now.
# +
# Save the checkpoint
# BUG FIX: ``optimizer.state_dict`` was stored without calling it, which
# saved the bound method object instead of the optimizer's state dict.
checkpoint = {'class_to_idx': training_data.class_to_idx,
              'state_dict': model.state_dict(),
              'classifier': model.classifier,
              'opt_state': optimizer.state_dict(),
              'num_epochs': epochs}
torch.save(checkpoint, 'checkpoint.pth')
# -
# # Part 6: Loading the Trained Model
#
# At this point it's good to write a function that can load a checkpoint and rebuild our model. That way we can come back to this project and keep working on it without having to retrain the network.
# +
# load the checkpoint and rebuild the model
def load_checkpoint(file_dir):
    """Restore weights, classifier and class mapping onto the global ``model``.

    NOTE(review): assumes the module-level ``model`` already holds the same
    architecture the checkpoint was produced from — confirm before reuse.
    """
    saved = torch.load(file_dir)
    model.load_state_dict(saved['state_dict'])
    model.classifier = saved['classifier']
    model.class_to_idx = saved['class_to_idx']
    return model
# +
# Rebuild the model from the saved checkpoint and show its architecture.
model = load_checkpoint('checkpoint.pth')
print(model)
# -
# # Part 7: Inference for Classification
#
# In this part, I am going to write a function to use a trained network for inference. That is, I will pass an image into the network and predict the class of the flower in the given image. I will write a function called `predict` that takes an image and a model, then returns the top K most likely classes along with their probabilities. It should look like
#
# ```python
# probs, classes = predict(image_path, model)
# print(probs)
# print(classes)
# > [ 0.01558163 0.01541934 0.01452626 0.01443549 0.01407339]
# > ['70', '3', '45', '62', '55']
# ```
#
# First I need to handle processing the input image such that it can be used in our network.
# ## 7.1. Image Preprocessing
#
# I am going to use `PIL` to load the image ([documentation](https://pillow.readthedocs.io/en/latest/reference/Image.html)). It's best to write a function that preprocesses the image so it can be used as input for the model. This function should process the images in the same manner used for training.
#
# First thing first, I will resize the images where the shortest side is 256 pixels, keeping the aspect ratio. This can be done with the [`thumbnail`](http://pillow.readthedocs.io/en/3.1.x/reference/Image.html#PIL.Image.Image.thumbnail) or [`resize`](http://pillow.readthedocs.io/en/3.1.x/reference/Image.html#PIL.Image.Image.thumbnail) methods. Then I will crop out the center `224x224` portion of the image.
#
# Color channels of images are typically encoded as integers 0-255, but the model expects floats 0-1. So, I will need to convert the values. The easiest way is to use a NumPy array, which we can get from a PIL image using `np_image = np.array(pil_image)`.
#
# As before, the network expects the images to be normalized in a specific way. For the means, it's `[0.485, 0.456, 0.406]` and for the standard deviations `[0.229, 0.224, 0.225]`. I will subtract the means from each color channel, then divide by the standard deviation.
#
# And finally, `PyTorch` expects the color channel to be the first dimension but it's the third dimension in the PIL image and also in the Numpy array. I will reorder dimensions using [`ndarray.transpose`](https://docs.scipy.org/doc/numpy-1.13.0/reference/generated/numpy.ndarray.transpose.html). The color channel needs to be first and retain the order of the other two dimensions.
def process_image(image):
    """Scale, crop and normalize an image file for a PyTorch model.

    Opens the file at path *image* with PIL, applies the same resize /
    center-crop / normalize pipeline used for validation above, and returns
    the result as a NumPy array in CHW channel order.
    """
    pil_image = Image.open(image)

    # Same preprocessing as the validation/test transforms defined earlier.
    pipeline = transforms.Compose([
        transforms.Resize(256),
        transforms.CenterCrop(224),
        transforms.ToTensor(),
        transforms.Normalize(mean=[0.485, 0.456, 0.406],
                             std=[0.229, 0.224, 0.225]),
    ])

    # ToTensor yields CHW floats in [0, 1]; Normalize shifts/scales them.
    return np.array(pipeline(pil_image))
# To check our work, the function below converts a `PyTorch` tensor and displays it in the notebook. If our `process_image` function works, running the output through this function should return the original image (except for the cropped out portions).
# +
import matplotlib.pyplot as plt
def imshow(image, ax=None, title=None):
    """Display a preprocessed CHW image tensor on a matplotlib axis.

    Undoes the ImageNet normalization applied during preprocessing so the
    image renders naturally.  `title` is accepted but unused (kept for
    interface compatibility).

    :param image: CHW image tensor/array produced by process_image
    :param ax: matplotlib axis to draw on; a new one is created when None
    :param title: unused
    :return: the axis the image was drawn on
    """
    if ax is None:
        fig, ax = plt.subplots()
    # PyTorch stores channels first (CHW); matplotlib expects them last (HWC).
    hwc = np.array(image).transpose((1, 2, 0))
    # Reverse the normalization (x * std + mean), then clip to [0, 1] so the
    # result looks like an image rather than noise.
    mean = np.array([0.485, 0.456, 0.406])
    std = np.array([0.229, 0.224, 0.225])
    hwc = np.clip(std * hwc + mean, 0, 1)
    ax.imshow(hwc)
    return ax
# -
# Sanity check: process a known test image and render it back; if the
# functions above are correct, the displayed image matches the original
# (minus the cropped-out border).  `data_dir` is defined earlier in the notebook.
image_dir = data_dir + "/test" + "/1" + "/image_06743.jpg"
image_test = process_image(image_dir)
imshow(image_test, ax=None, title=None)
# # Part 8: Class Prediction
#
# Once we can get the images in the correct format, it's time to write a function for making predictions with our model. A common practice is to predict the top 5 or so (usually called top-K) most probable classes. I will calculate the class probabilities then find the K largest values.
#
# To get the top K largest values in a tensor, I will use [`x.topk(k)`](http://pytorch.org/docs/master/torch.html#torch.topk). This method returns both the highest `k` probabilities and the indices of those probabilities corresponding to the classes. I will later convert these indices to the actual class labels using `class_to_idx` which we added to the model earlier.
#
# This method should take a path to an image and a model checkpoint, then return the probabilities and classes.
#
# ```python
# probs, classes = predict(image_path, model)
# print(probs)
# print(classes)
# > [ 0.01558163 0.01541934 0.01452626 0.01443549 0.01407339]
# > ['70', '3', '45', '62', '55']
# ```
# +
def predict(image_path, model, topk=5):
    """
    Predict the class (or classes) of an image using a trained deep learning model.

    :param image_path: path to the image file to classify
    :param model: the trained network (already loaded in memory)
    :param topk: number of most-probable classes to return
    :return: the result of torch.Tensor.topk on the softmaxed outputs,
             i.e. a (probabilities, class-index) tensor pair
    """
    img_torch = process_image(image_path)
    # process_image returns a NumPy array; convert back to a float tensor
    # (ndarray has no unsqueeze method) and add the batch dimension the
    # network expects.
    img_torch = torch.from_numpy(img_torch).type(torch.FloatTensor)
    img_torch = img_torch.unsqueeze_(0)
    img_torch = img_torch.float()
    with torch.no_grad():
        # NOTE(review): .cuda() hard-codes GPU inference and fails on
        # CPU-only machines; consider moving the tensor to the model's
        # device instead.
        output = model.forward(img_torch.cuda())
    probability = F.softmax(output.data, dim=1)
    # FIX: the original cell contained a second, unreachable code path after
    # this return (it referenced undefined names such as img_add_dim and
    # loaded_model); that dead code has been removed.
    return probability.topk(topk)
# Smoke-test the prediction function on one test image.
# NOTE(review): this predict() returns torch.topk output, so `classes`
# here holds class *indices*, not the string labels shown in the
# markdown example above — verify which behavior is intended.
test_image_dir = data_dir + "/test" + "/38" + "/image_05833.jpg"
probabilities, classes = predict(test_image_dir, model)
print(probabilities)
print("-"*72)
print(classes)
# -
def predict(image_path, model, topk=5):
    """
    Predict the top-k classes of an image with a trained deep learning model.

    :param image_path: path to the image file to classify
    :param model: path to a saved checkpoint (handed to load_checkpoint)
    :param topk: number of most-probable classes to return
    :return: (probabilities, class labels) for the top-k predictions
    """
    # GPU trouble at the time of writing, so inference is forced onto the CPU.
    net = load_checkpoint(model).cpu()
    image = process_image(image_path)
    # process_image yields a NumPy array; build a float tensor and add the
    # leading batch dimension the network expects.
    batch = torch.from_numpy(image).type(torch.FloatTensor)
    batch = batch.unsqueeze_(0)
    net.eval()
    with torch.no_grad():
        logits = net.forward(batch)
    probs = torch.exp(logits)
    top_probs = probs.topk(topk)[0]
    top_indexes = probs.topk(topk)[1]
    probs_list = np.array(top_probs[0])
    index_list = np.array(top_indexes[0])
    # Invert the class_to_idx mapping so model output indexes resolve back
    # to the original class labels.
    idx_to_class = {idx: label for label, idx in net.class_to_idx.items()}
    classes_list = [idx_to_class[index] for index in index_list]
    return probs_list, classes_list
# # Part 9: Sanity Checking
#
# Now that we can use a trained model for predictions, let's see whether our model makes sense. Even if the testing accuracy is high, it's always good to check that there aren't obvious bugs. I will use `matplotlib` to plot the probabilities for the top 5 classes as a bar graph, along with the input image. It should look like this:
#
# <img src='images/inference_example.png' width=300px>
#
# We can convert from the class integer encoding to actual flower names with the `cat_to_name.json` file (should have been loaded earlier in the notebook). To show a PyTorch tensor as an image, use the `imshow` function defined above.
# +
# Display an image along with the top 5 classes
model_dir = 'checkpoint.pth'
test_image1_dir = data_dir + "/test" + "/38" + "/image_05833.jpg"
test_image2_dir = data_dir + "/test" + "/1" + "/image_06754.jpg"
# Getting the probabilities and classes once more.
# (At this point `predict` is the later definition, which takes a
# checkpoint *path* and loads the model itself.)
probabilities1, classes1 = predict(test_image1_dir, model_dir, topk=5)
probabilities2, classes2 = predict(test_image2_dir, model_dir, topk=5)
# Map predicted class labels to human-readable flower names.
names1 = []
for i in classes1:
    names1 += [cat_to_name[i]]
names2 = []
for i in classes2:
    names2 += [cat_to_name[i]]
image1 = Image.open(test_image1_dir)
image2 = Image.open(test_image2_dir)
# 2x2 grid: original images on top, top-5 probability bar charts below.
fig, ax = plt.subplots(2,2, figsize=(14,14))
ax[0,0].imshow(image1)
ax[0,0].set_title(cat_to_name[str(38)]);
ax[0,1].imshow(image2)
ax[0,1].set_title(cat_to_name[str(1)]);
y_names1 = np.arange(len(names1))
ax[1,0].barh(y_names1, probabilities1, color='darkblue')
ax[1,0].set_yticks(y_names1)
ax[1,0].set_yticklabels(names1)
# Most probable class at the top of the chart.
ax[1,0].invert_yaxis()
y_names2 = np.arange(len(names2))
ax[1,1].barh(y_names2, probabilities2, color='darkblue')
ax[1,1].set_yticks(y_names2)
ax[1,1].set_yticklabels(names2)
ax[1,1].invert_yaxis()
# -
# # End
#
# Thank you for your interest in this notebook. I completed this project on July 10, 2019 as a part of [Udacity](https://www.udacity.com/)'s [Intro to Machine Learning Nanodegree](https://www.udacity.com/course/machine-learning-engineer-nanodegree--nd009t).
| flower-image-classification/.ipynb_checkpoints/image_classification_project-checkpoint.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# language: python
# name: python3
# ---
# # Self Test for Lesson 1.6.
# # Introduction to the Common Information Model
#
# Lesson 1.6. introduced the CIM and how its classes and attributes can be used to describe a power system model.
#
# __Learning Objectives:__
#
# After completing Lesson 1.6, the user should be able to
#
# * Describe what are GridAPPS-D Topics
# * Explain the difference between `/queue/` and `/topic/` channels
# * Import and use the GridAPPSD-Python library of topics
# * Implement shortcut functions for creating GridAPPS-D Topics
# Complete the quiz below either in your browser by clicking the link below or answering it in the notebook embed below!
#
# __[Open Quiz in a new browser window](https://docs.google.com/forms/d/e/1FAIpQLSeygGlwvVClL_F-pVcVvZcFYQdC0P3b8HBbJZTN4zJB173Lvw/viewform)__
#
#
# .
# + language="html"
# <iframe src="https://docs.google.com/forms/d/e/1FAIpQLSeygGlwvVClL_F-pVcVvZcFYQdC0P3b8HBbJZTN4zJB173Lvw/viewform?embedded=true" width="640" height="1517" frameborder="0" marginheight="0" marginwidth="0">Loading…</iframe>
# -
| Lesson 1.6T. Self Test.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# language: python
# name: python3
# ---
# + [markdown] Collapsed="false"
# Copyright © 2020, SAS Institute Inc., Cary, NC, USA. All Rights Reserved.
# SPDX-License-Identifier: Apache-2.0
# + [markdown] Collapsed="false"
# # Fleet Maintenance: Build and Import Trained Models into SAS Model Manager
#
# This notebook provides an example of how to build and train a Python model and then import the model into SAS Model Manager using the fleet maintenance data set. Lines of code that must be modified by the user, such as directory paths are noted with the comment "_Changes required by user._".
#
# _**Note:** If you download only this notebook and not the rest of the repository, you must also download the fleet maintenance CSV file from the data folder in the examples directory. These files are used when executing this notebook example._
#
# Here are the steps shown in this notebook:
#
# 1. Import and review data and preprocess for model training.
# 2. Build, train, and access a decision tree, random forest, and gradient boosting model.
# 3. Serialize the models into separate pickle files.
# 4. Write the metadata JSON files needed for importing into SAS Model Manager as well as optional files for fit statistics and ROC/Lift charts.
# 4. Write a score code Python file for model scoring.
# 5. Zip the pickle, JSON, and score code files into an archive file.
# 6. Import the ZIP archive file to SAS Model Manager via the Session object and relevant function call.
# + [markdown] Collapsed="false"
# ### Python Package Imports
# + Collapsed="false"
# Dataframes for data manipulations
import pandas as pd
pd.options.mode.chained_assignment = None # default='warn'
# Mathematical calculations and array handling
import numpy as np
# Data partitioning for TRAIN and TEST data sets
from sklearn.model_selection import train_test_split
# Decision tree, random forest, and gradient boosting models
from sklearn.tree import DecisionTreeClassifier
from sklearn.ensemble import RandomForestClassifier, GradientBoostingClassifier
# Model assessments
from sklearn.metrics import classification_report, confusion_matrix
# Embedded plotting
import matplotlib.pyplot as plt
plt.rc("font", size=14)
# Pathing support
from pathlib import Path
# sasctl interface for importing models
import sasctl.pzmm as pzmm
from sasctl import Session
from sasctl.services import model_repository as modelRepo
# + [markdown] Collapsed="false"
# ### Import and Review Data Set
# + Collapsed="false"
# Load the fleet maintenance sample data and report its dimensions.
fleetData = pd.read_csv('data/fleet_maintenance.csv',sep= ',')
fleetData.shape
# + Collapsed="false"
# Peek at the first rows to confirm the columns parsed correctly.
fleetData.head()
# + Collapsed="false"
# Histogram every column to eyeball the distributions.
fleetData.hist(figsize=(15,75), layout=(28, 5));
# + Collapsed="false"
fleetData.columns
# + [markdown] Collapsed="false"
# ### Preprocess Data
# + Collapsed="false"
# Predictor and target columns for the maintenance-flag classifiers.
predictorColumns = ['Speed_sensor', 'Vibration', 'Engine_Load', 'Coolant_Temp', 'Intake_Pressure', 'Engine_RPM', 'Speed_OBD',
                    'Intake_Air', 'Flow_Rate', 'Throttle_Pos', 'Voltage', 'Ambient', 'Accel', 'Engine_Oil_Temp', 'Speed_GPS',
                    'GPS_Longitude', 'GPS_Latitude', 'GPS_Bearing', 'GPS_Altitude', 'Turbo_Boost', 'Trip_Distance', 'Litres_Per_km',
                    'Accel_Ssor_Total', 'CO2', 'Trip_Time', 'CO_emission', 'HC_emission', 'PM_emission', 'NOx_emission', 'CO2_emission',
                    'Fuel_level', 'Oil_life', 'Vibration_alert', 'VibrationAlert_Total', 'Vibration_Recent', 'Turbo_alert',
                    'Emission_alert', 'Fog_control', 'Engine_control']
targetColumn = 'Maintenance_flag'
x = fleetData[predictorColumns]
y = fleetData[targetColumn]
# 70/30 train/test split with a fixed seed for reproducibility.
xTrain, xTest, yTrain, yTest = train_test_split(x, y, test_size=0.3, random_state=42)
# For missing values, impute the TRAINING set's column means into BOTH splits.
# FIX: the original filled xTest with xTest's own means, which leaks test-set
# information into the evaluation; imputation statistics must be learned from
# the training data only.
trainMeans = xTrain.mean()
xTrain.fillna(trainMeans, inplace=True)
xTest.fillna(trainMeans, inplace=True)
# + [markdown] Collapsed="false"
# ### Create, Train, and Assess Model
# + Collapsed="false"
# Fit three classifiers on the same training split so their accuracy and
# feature importances can be compared side by side.
# min_samples_leaf=25 constrains the tree to reduce overfitting.
treeModel = DecisionTreeClassifier(random_state=42, min_samples_leaf=25)
treeModel = treeModel.fit(xTrain, yTrain)
forestModel = RandomForestClassifier(random_state=42)
forestModel = forestModel.fit(xTrain, yTrain)
gradientModel = GradientBoostingClassifier(random_state=42)
gradientModel = gradientModel.fit(xTrain, yTrain)
# + Collapsed="false"
def sortFeatureImportance(model, xData):
    """Map each predictor to its importance as a percentage string.

    Entries are inserted most-important first (ties broken by column name,
    descending).

    :param model: fitted estimator exposing feature_importances_
    :param xData: DataFrame whose columns align with the importances
    :return: dict of {column name: 'NN.NN%'}
    """
    ranked = sorted(zip(model.feature_importances_, xData.columns), reverse=True)
    return {column: str(np.round(score*100, 2)) + '%' for score, column in ranked}
# Build a side-by-side table of feature importances (percentage strings)
# for the three models, indexed by predictor name.
importances = pd.DataFrame.from_dict(sortFeatureImportance(treeModel, xTrain), orient='index').rename(columns={0: 'DecisionTree'})
importances['RandomForest'] = pd.DataFrame.from_dict(sortFeatureImportance(forestModel, xTrain), orient='index')
importances['GradientBoosting'] = pd.DataFrame.from_dict(sortFeatureImportance(gradientModel, xTrain), orient='index')
importances
# + Collapsed="false"
# Evaluate the decision tree on the held-out test split.
yTreePredict = treeModel.predict(xTest)
yTreeProba = treeModel.predict_proba(xTest)
print(confusion_matrix(yTest, yTreePredict))
print(classification_report(yTest, yTreePredict))
print('Decision Tree Model Accuracy = ' + str(np.round(treeModel.score(xTest, yTest)*100,2)) + '%')
# + Collapsed="false"
# Evaluate the random forest on the held-out test split.
yForestPredict = forestModel.predict(xTest)
yForestProba = forestModel.predict_proba(xTest)
print(confusion_matrix(yTest, yForestPredict))
print(classification_report(yTest, yForestPredict))
print('Random Forest Model Accuracy = ' + str(np.round(forestModel.score(xTest, yTest)*100,2)) + '%')
# -
# Evaluate the gradient boosting model on the held-out test split.
yGradientPredict = gradientModel.predict(xTest)
yGradientProba = gradientModel.predict_proba(xTest)
print(confusion_matrix(yTest, yGradientPredict))
print(classification_report(yTest, yGradientPredict))
print('Gradient Boosting Model Accuracy = ' + str(np.round(gradientModel.score(xTest, yTest)*100,2)) + '%')
# + [markdown] Collapsed="false"
# ### Register Model in SAS Model Manager with pzmm
# + Collapsed="false"
# Model names, output folders, and fitted model objects, aligned by position
# so they can be zipped together below.
modelPrefix = ['DecisionTreeClassifier', 'RandomForest', 'GradientBoosting']
zipFolder = [Path.cwd() / 'data/FleetMaintenanceModels/DecisionTreeClassifier/',
             Path.cwd() / 'data/FleetMaintenanceModels/RandomForest/',
             Path.cwd() / 'data/FleetMaintenanceModels/GradientBoosting']
model = [treeModel, forestModel, gradientModel]
# Serialize each trained model into a pickle file in its target folder.
# NOTE(review): `_` is passed where pickleTrainedModel expects its first
# (self-like) argument — confirm this matches the installed sasctl.pzmm
# API version.
for (m, prefix, path) in zip(model, modelPrefix, zipFolder):
    pzmm.PickleModel.pickleTrainedModel(_, m, prefix, path)
# + Collapsed="false"
def writeJSONFiles(data, predict, target, zipFolder, yTrain, modelPrefix):
    """Write the metadata JSON files SAS Model Manager needs for one model.

    :param data: full modeling DataFrame (supplies input variable metadata)
    :param predict: list of predictor column names
    :param target: name of the target column
    :param zipFolder: folder the JSON files are written into
    :param yTrain: training target series (supplies the output categories)
    :param modelPrefix: model name used in the properties/metadata files
    """
    J = pzmm.JSONFiles()
    # Write input variable mapping to a json file
    J.writeVarJSON(data[predict], isInput=True, jPath=zipFolder)
    # Set output variables and assign an event threshold, then write output variable mapping
    outputVar = pd.DataFrame(columns=['EM_EVENTPROBABILITY', 'EM_CLASSIFICATION'])
    outputVar['EM_CLASSIFICATION'] = yTrain.astype('category').cat.categories.astype('str')
    outputVar['EM_EVENTPROBABILITY'] = 0.5 # Event threshold
    J.writeVarJSON(outputVar, isInput=False, jPath=zipFolder)
    # Write model properties to a json file
    J.writeModelPropertiesJSON(modelName=modelPrefix,
                               modelDesc='',
                               targetVariable=target,
                               modelType='',
                               modelPredictors=predict,
                               targetEvent=1,
                               numTargetCategories=1,
                               eventProbVar='EM_EVENTPROBABILITY',
                               jPath=zipFolder,
                               modeler='sasdemo')
    # Write model metadata to a json file
    J.writeFileMetadataJSON(modelPrefix, jPath=zipFolder)

# Generate the JSON files for each of the three models.
for (prefix, path) in zip(modelPrefix, zipFolder):
    writeJSONFiles(fleetData, predictorColumns, targetColumn, path, yTrain, prefix)
# + Collapsed="false"
import getpass

def writeModelStats(xTrain, yTrain, testProba, yTest, model, target, zipFolder, conn):
    """Write fit-statistic and ROC/Lift JSON files for one model.

    :param xTrain: training predictors
    :param yTrain: training target
    :param testProba: predicted class probabilities for the test split
    :param yTest: test target
    :param model: fitted classifier (used to score the training split)
    :param target: target variable name
    :param zipFolder: folder the JSON files are written into
    :param conn: SWAT/CAS connection used by generateROCLiftStat
    """
    J = pzmm.JSONFiles()
    # Calculate train predictions
    trainProba = model.predict_proba(xTrain)
    # Pair actual targets with the predicted event probabilities (column 1).
    trainData = pd.concat([yTrain.reset_index(drop=True), pd.Series(data=trainProba[:,1])], axis=1)
    testData = pd.concat([yTest.reset_index(drop=True), pd.Series(data=testProba[:,1])], axis=1)
    # Calculate the model statistics and write to json files
    J.calculateFitStat(trainData=trainData, testData=testData, jPath=zipFolder)
    J.generateROCLiftStat(target, 1, conn, trainData=trainData, testData=testData, jPath=zipFolder)

# FIX: these two lines were corrupted in the original ("get<PASSWORD>..."),
# apparently by automated credential scrubbing; restore the interactive
# getpass prompts for the SAS Viya credentials.
username = getpass.getpass()
password = getpass.getpass()
host = 'myserver.com'
sess = Session(host, username, password, protocol='http')
conn = sess.as_swat()
testProba = [yTreeProba, yForestProba, yGradientProba]
for (m, proba, path) in zip(model, testProba, zipFolder):
    writeModelStats(xTrain, yTrain, proba, yTest, m, targetColumn, path, conn)
# + Collapsed="false"
# Import each model's ZIP archive into SAS Model Manager over the session
# opened above.
I = pzmm.ImportModel()
for (prefix, path) in zip(modelPrefix, zipFolder):
    with sess:
        I.pzmmImportModel(path, prefix, 'Fleet Management Report', x, y, '{}.predict({})')
# -
| examples/FleetManagement.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3.8.10 64-bit ('3.8.10')
# language: python
# name: python3
# ---
# # Training agents
# %load_ext autoreload
# %autoreload 2
# +
from game.episode import run_episode
from connect_four_env.connect_four_env import ConnectFourGymEnv
from agents.deep_v_agent import DeepVAgent
from connect_four_env.rendering import render_history
import constants
import pandas as pd
# Single environment instance reused by the training and evaluation cells below.
env = ConnectFourGymEnv()
# -
# ## Train against self
#
# In this section we train an agent as player 1 and player 2 alternatively.
#
# In the `model/test_agent` folder that will be created when running the next cell, you will find a .png image showing the progress of the agent.
# +
from game.training import train_against_self
agent = DeepVAgent(10, constants.PLAYER1, .1, env.board.shape)
win_rate, losses = train_against_self(
path_to_save="models/test_agent",
agent=agent,
n_episodes=1000,
discount=1.0,
num_opponents=3,
n_test_runs=30,
interval_test=200,
num_workers=1
)
# -
# Check a match of an agent against itself, and show the values it computes for each state it can reach.
from copy import deepcopy
# Play one full game of the agent against a copy of itself, keeping the
# board history and the values the agent computes for reachable states,
# then render the game move by move.
values, winner = run_episode(agent, deepcopy(agent), env, keep_history=True, for_evaluation=False, get_values=True)
render_history(env.history, agent_values=values)
| Train an agent.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# language: python
# name: python3
# ---
from scipy.spatial.transform import Rotation as R # Magnitude (and '*_matrix', use '*_dcm' instead) require a newer version of SciPy than SPIN's
import numpy as np
import torch
# +
# Let's test this by loading up a real example from 3dpw
import pickle as pkl
# Paths to a cropped example image and the matching 3DPW sequence file,
# plus the frame index the crop was taken from.  The commented pairs are
# an alternative example.
img_path = 'examples/image_00502_crop.jpg'
#img_path = 'examples/image_00980.jpg'
pickle_path = 'data/3dpw/sequenceFiles/validation/courtyard_basketball_01.pkl'
#pickle_path = 'data/3dpw/sequenceFiles/validation/outdoors_parcours_01.pkl'
frame = 502
# -
# using IPython's Magic %run let's us cleanly run the Demo script and get the variables defined
# therein into this notebook. use $ to insert variables from the notebook into any %magic command
# %run demo.py --checkpoint=data/model_checkpoint.pt --img=$img_path
# +
# open the sequence file, fetch the body_pose of the corresponding frame from it, remove global orientation
# and reshape it from (69,1) to (23,3) to input into a rotation object
seq = pkl.load(open(pickle_path,'rb'),encoding='latin-1')
# [3:] drops the 3 global-orientation values, leaving the 69 body-pose
# axis-angle components (23 joints x 3).
gt_pose_axis_angle = seq['poses'][0][frame][3:]
gt_pose = R.from_rotvec(np.reshape(gt_pose_axis_angle, (23,-1)))
# +
# create a rotation object from the predicted pose output of demo.py
# NOTE: from_dcm is deprecated in newer SciPy (renamed from_matrix); it is
# kept here for the older SciPy version SPIN pins (see import comment above).
pred_pose = R.from_dcm(pred_output.body_pose.squeeze().cpu())
# +
# show the difference between the predicted and the ground truth pose
#e = R.as_euler(gt_pose, 'xyz', degrees=True)
#e
# +
# Now, let's check through examples that this behavior makes sense
#Left hip [flexion(front kick), external rotation, abduction]
#Right hip [extension(back kick), internal rotation, adduction]
#Spine [flexion (crunch), rotate to the left, touch right toe]
#Left knee [flexion, external rotation, abduction]
#Right knee [extension, internal rotation, adduction]
#Torso [flexion (crunch), rotate to the left, touch right shin]
#Left ankle [flexion, external rotation, abduction]
#Right ankle [flexion, internal rotation, adduction]
#Chest [flexion (crunch), rotate to the left, touch right knee]
#Left toes [flexion, fibular deviation, pronation]
#Right toes [flexion, tibial deviation, supination]
#Neck [flexion, rotate to the left, touch right shoulder]
#Left scapula [internal rotation, rotate backwards, lift arm]
#Right scapula [internal rotation, rotate forward, lower arm]
#Skull [flexion, look to the left, touch right shoulder]
#Left shoulder [internal rotation, rotate backwards, lift arm]
#Right shoulder [internal rotation, rotate forward, lower arm]
#Left elbow [internal rotation, hyperextension, abduction (unnatural)]
#Right elbow [internal rotation, flexion, adduction (unnatural)]
#Left wrist [interal rotation, ulnar deviation, extension]
#Right wrist [internal rotation, radial deviation, flexion]
#Left knuckles [internal rotation, ulnar deviation (unnatural), hyperextension]
#Right knuckles [internal rotation, radial deviation, flexion]
# -
# Human-readable interpretation of each SMPL body joint's three local
# rotation axes (x/y/z), in the same order as the 23-joint pose vectors
# used above (root/global orientation excluded).
d = {
    'Left hip':{'Name': 'Left hip', 'x': 'Flexion', 'y': 'External rotation', 'z': 'Abduction'},
    'Right hip':{'Name': 'Right hip', 'x': 'Extension', 'y': 'Internal rotation', 'z': 'Adduction'},
    'Spine':{'Name': 'Spine', 'x': 'Flexion', 'y': 'Rotate to the left', 'z': 'Touch right ankle'},
    'Left knee':{'Name': 'Left knee', 'x': 'Flexion', 'y': 'External rotation', 'z': 'Abduction'},
    'Right knee':{'Name': 'Right knee', 'x': 'Extension', 'y': 'External rotation', 'z': 'Adduction'},
    'Torso':{'Name': 'Torso', 'x': 'Flexion', 'y': 'Rotate to the left', 'z': 'Touch right shin'},
    'Left ankle':{'Name': 'Left ankle', 'x': 'Flexion', 'y': 'External rotation', 'z': 'Abduction'},
    'Right ankle':{'Name': 'Right ankle', 'x': 'Flexion', 'y': 'Internal rotation', 'z': 'Adduction'},
    'Chest':{'Name': 'Chest', 'x': 'Flexion', 'y': 'Rotate to the left', 'z': 'Touch right knee'},
    'Left toes':{'Name': 'Left toes', 'x': 'Flexion', 'y': 'Fibular deviation', 'z': 'Pronation'},
    'Right toes':{'Name': 'Right toes', 'x': 'Flexion', 'y': 'Tibial deviation', 'z': 'Supination'},
    'Neck':{'Name': 'Neck', 'x': 'Flexion', 'y': 'Rotate to the left', 'z': 'Touch right shoulder'},
    'Left scapula':{'Name': 'Left scapula', 'x': 'Internal rotation', 'y': 'Rotate backwards', 'z': 'Clockwise rotation'},
    'Right scapula':{'Name': 'Right scapula', 'x': 'Internal rotation', 'y': 'Rotate forward', 'z': 'Clockwise rotation'},
    'Skull':{'Name': 'Skull', 'x': 'Flexion', 'y': 'Look to the left', 'z': 'Touch right shoulder'},
    'Left shoulder':{'Name': 'Left shoulder', 'x': 'Internal rotation', 'y': 'Rotate backwards', 'z': 'Raise arm'},
    'Right shoulder':{'Name': 'Right shoulder', 'x': 'Internal rotation', 'y': 'Rotate forward', 'z': 'Lower arm'},
    'Left elbow':{'Name': 'Left elbow', 'x': 'Internal rotation', 'y': 'Extension', 'z': 'Abduction'},
    'Right elbow':{'Name': 'Right elbow', 'x': 'Internal rotation', 'y': 'Flexion', 'z': 'Adduction'},
    'Left wrist':{'Name': 'Left wrist', 'x': 'Internal rotation', 'y': 'Ulnar deviation', 'z': 'Extension'},
    'Right wrist':{'Name': 'Right wrist', 'x': 'Internal rotation', 'y': 'Radial deviation', 'z': 'Flexion'},
    'Left knuckles':{'Name': 'Left knuckles', 'x': 'Internal rotation', 'y': 'Ulnar deviation', 'z': 'Extension'},
    'Right knuckles':{'Name': 'Right knuckles', 'x': 'Internal rotation', 'y': 'Radial deviation', 'z': 'Flexion'},
}
# Per-part mean joint angle errors saved from a previous evaluation run.
mpjae = torch.load('mpjae_per_part.pt')
mpjae_mean = mpjae.mean(dim=0)
# Drop the first entry (the root) so rows align with the 23 joints in d.
mpjae_mean_no_root = mpjae_mean[1:]
mpjae_mean_no_root.shape
# +
# Print each joint's name, its three axis labels from the table d, and the
# corresponding per-axis mean angular errors, in aligned columns.
e = mpjae_mean_no_root
# enumerate() replaces the original manually-incremented counter (i = 0 / i += 1).
for i, key in enumerate(d):
    print('{:16}'.format(d[key]['Name']),
          '{:>20}'.format(d[key]['x']),'{:> 7.2f}'.format(e[i][0]),
          '{:>20}'.format(d[key]['y']),'{:> 7.2f}'.format(e[i][1]),
          '{:>20}'.format(d[key]['z']),'{:> 7.2f}'.format(e[i][2]))
# +
# Same tabular dump for the predicted pose, converted to degrees of axis-angle.
e = np.degrees(R.as_rotvec(pred_pose))
i = 0
for key in d:
    print('{:16}'.format(d[key]['Name']),
          '{:>20}'.format(d[key]['x']),'{:> 7.2f}'.format(e[i][0]),
          '{:>20}'.format(d[key]['y']),'{:> 7.2f}'.format(e[i][1]),
          '{:>20}'.format(d[key]['z']),'{:> 7.2f}'.format(e[i][2]))
    i+=1
# +
# Component-wise difference between ground-truth and predicted axis-angle
# vectors, in degrees.
# NOTE(review): subtracting axis-angle components is only a rough error
# indicator, not a proper rotation distance — confirm this is intended.
r = R.as_rotvec(gt_pose)
q = R.as_rotvec(pred_pose)
e = np.degrees(r-q)
i = 0
for key in d:
    print('{:16}'.format(d[key]['Name']),
          '{:>20}'.format(d[key]['x']),'{:> 7.2f}'.format(e[i][0]),
          '{:>20}'.format(d[key]['y']),'{:> 7.2f}'.format(e[i][1]),
          '{:>20}'.format(d[key]['z']),'{:> 7.2f}'.format(e[i][2]))
    i+=1
# -
# Mean absolute component difference across all joints and axes.
abs(e).mean()
# +
# Let's say we now want to make our own SMPL pose with an elbow flexed at 90 degrees:
# Start from a zero pose (23 joints x 3 axis-angle components, flattened to 69).
gt_pose_axis_angle = np.zeros(69)
# Index 55 -> joint row 18 (0-based), y component: per the table d above
# this is the right elbow's flexion axis.  Writing it as 58-3 keeps the
# full-SMPL index (58), minus the 3 global-orientation values this
# vector omits.
gt_pose_axis_angle[58-3] = np.pi/2
gt_pose = R.from_rotvec(np.reshape(gt_pose_axis_angle, (23,-1)))
# +
# Dump the constructed pose through the same table to confirm only the
# right-elbow flexion component is non-zero (90 degrees).
e = np.degrees(gt_pose.as_rotvec())
i = 0
for key in d:
    print('{:16}'.format(d[key]['Name']),
          '{:>20}'.format(d[key]['x']),'{:> 7.2f}'.format(e[i][0]),
          '{:>20}'.format(d[key]['y']),'{:> 7.2f}'.format(e[i][1]),
          '{:>20}'.format(d[key]['z']),'{:> 7.2f}'.format(e[i][2]))
    i+=1
# -
| notebooks/Rotation vectors proof of concept.ipynb |
/ ---
/ jupyter:
/ jupytext:
/ text_representation:
/ extension: .q
/ format_name: light
/ format_version: '1.5'
/ jupytext_version: 1.14.4
/ kernelspec:
/ display_name: SQL
/ language: sql
/ name: SQL
/ ---
/ + [markdown] azdata_cell_guid="7870ea3f-b017-46d9-bec6-15369f4ade69"
/ # Memory Grant Feedback
/ Memory grant feedback (MGF) adjusts memory grant sizes to learn and improve memory usage. This feature can adjust memory grant sizes for both batch and row mode operators, and eliminate the effects of memory grant misestimations (spills to TempDB) and overestimations (affects concurrency).
/
/ Memory grant feedback is a feature under the [**Intelligent Query Processing**](https://aka.ms/iqp) suite of features.
/
/ This example will show you how upgrading to **Database Compatibility Level 150** could improve performance of queries executing in row mode, that are affected by memory grant misestimations. Upgrading to **Database Compatibility Level 140** allows MGF for queries executing in batch mode, and **Database Compatibility Level 150** allows MGF on queries executing in row mode.
/
/ More information about this feature is available [here](https://docs.microsoft.com/sql/relational-databases/performance/intelligent-query-processing?view=sql-server-ver15#batch-mode-memory-grant-feedback).
/ + [markdown] azdata_cell_guid="975f5a9f-300f-450b-8981-cebb9ed6c719"
/ ## Step 1: Setup WideWorldImportersDW database
/
/ You could choose to use a container to evaluate this feature. Create an instance of SQL Server 2019 using a Docker image and restore the WideWorldImportersDW database backup
/
/ You will need the **WideWorldImportersDW** database for this exercise. If you don't have this sample database, then you download the sample database [here](https://github.com/Microsoft/sql-server-samples/releases/download/wide-world-importers-v1.0/WideWorldImportersDW-Full.bak "WideWorldImportersDW-Full download").
/
/ Restore the copied WideWorldImportersDW database backup into the container and restore the backup.
/
/ ##### Docker Commands
/ ```
/ docker pull mcr.microsoft.com/mssql/server:2019-latest
/
/ docker run -e "ACCEPT_EULA=Y" -e "SA_PASSWORD=`<<PASSWORD>`>" -p 1445:1433 --name sql2019demo -d mcr.microsoft.com/mssql/server:2019-latest
/
/ docker cp ".\Downloads\WideWorldImportersDW-Full.bak" sql2019demo:/var/opt/mssql/data
/ ```
/
/ **Note**: *For Linux installations the default path to use is /var/opt/mssql*
/
/ + azdata_cell_guid="b48b5a47-55d7-4c3c-ba51-2efa8d801029"
USE [master]
GO
-- If the database already exists, force-disconnect all sessions so it can be replaced.
IF EXISTS (SELECT [database_id] FROM sys.databases WHERE [name] = 'WideWorldImportersDW')
	ALTER DATABASE [WideWorldImportersDW] SET SINGLE_USER WITH ROLLBACK IMMEDIATE
GO
-- Build target file paths from the instance's default data/log directories.
DECLARE @datafilepath VARCHAR(8000) = CAST(SERVERPROPERTY('InstanceDefaultDataPath') AS VARCHAR(4000)) + 'WideWorldImportersDW.mdf'
DECLARE @logfilepath VARCHAR(8000) = CAST(SERVERPROPERTY('InstanceDefaultLogPath') AS VARCHAR(4000)) + 'WideWorldImportersDW.ldf'
DECLARE @inmemfilepath VARCHAR(8000) = CAST(SERVERPROPERTY('InstanceDefaultDataPath') AS VARCHAR(4000)) + 'WideWorldImportersDW_InMemory_Data_1'
DECLARE @secondaryfilepath VARCHAR(8000) = CAST(SERVERPROPERTY('InstanceDefaultDataPath') AS VARCHAR(4000))+ 'WideWorldImportersDW_2.ndf'
-- Change @backupfile file path as needed
DECLARE @backupfile VARCHAR(8000) = 'E:\SampleDBs\WideWorldImportersDW-Full.bak'
-- Restore the sample backup, relocating every logical file to this instance's paths.
RESTORE DATABASE WideWorldImportersDW
FROM DISK = @backupfile
WITH MOVE 'WWI_Primary' TO @datafilepath,
MOVE 'WWI_UserData' TO @secondaryfilepath,
MOVE 'WWIDW_InMemory_Data_1' TO @inmemfilepath,
MOVE 'WWI_Log' TO @logfilepath, NOUNLOAD, REPLACE, STATS = 10
GO
USE [master]
GO
-- Pre-grow the log so the data-enlargement step below does not stall on autogrow.
ALTER DATABASE [WideWorldImportersDW] MODIFY FILE ( NAME = N'WWI_Log', SIZE = 4GB )
GO
/ + [markdown] azdata_cell_guid="4e07d47f-3833-40dd-8203-1d43af8bacd8"
/ ## Step 2: Enlarge the WideWorldImportersDW database
/ + azdata_cell_guid="a7421321-3652-49c5-b83a-e25e69c48aed"
USE WideWorldImportersDW;
GO
/*
	Assumes a fresh restore of WideWorldImportersDW
*/
-- Create Fact.OrderHistory as a copy of Fact.[Order] (first run only).
IF OBJECT_ID('Fact.OrderHistory') IS NULL
BEGIN
	SELECT [Order Key], [City Key], [Customer Key], [Stock Item Key], [Order Date Key], [Picked Date Key], [Salesperson Key], [Picker Key], [WWI Order ID], [WWI Backorder ID], Description, Package, Quantity, [Unit Price], [Tax Rate], [Total Excluding Tax], [Tax Amount], [Total Including Tax], [Lineage Key]
	INTO Fact.OrderHistory
	FROM Fact.[Order];
END;
-- Primary key plus supporting indexes for the demo queries; page compression
-- keeps the enlarged table smaller on disk.
ALTER TABLE Fact.OrderHistory
ADD CONSTRAINT PK_Fact_OrderHistory PRIMARY KEY NONCLUSTERED ([Order Key] ASC, [Order Date Key] ASC) WITH (DATA_COMPRESSION = PAGE);
GO
CREATE INDEX IX_Stock_Item_Key
ON Fact.OrderHistory ([Stock Item Key])
INCLUDE(Quantity)
WITH (DATA_COMPRESSION = PAGE);
GO
CREATE INDEX IX_OrderHistory_Quantity
ON Fact.OrderHistory ([Quantity])
INCLUDE([Order Key])
WITH (DATA_COMPRESSION = PAGE);
GO
CREATE INDEX IX_OrderHistory_CustomerKey
ON Fact.OrderHistory([Customer Key])
INCLUDE ([Total Including Tax])
WITH (DATA_COMPRESSION = PAGE);
GO
-- Double the table four times (x16) until it reaches ~3.7M rows; the row-count
-- guard keeps the script re-runnable without over-growing the table.
IF (SELECT COUNT(*) FROM [Fact].[OrderHistory]) < 3702592
BEGIN
	DECLARE @i smallint
	SET @i = 0
	WHILE @i < 4
	BEGIN
		INSERT INTO [Fact].[OrderHistory] ([City Key], [Customer Key], [Stock Item Key], [Order Date Key], [Picked Date Key], [Salesperson Key], [Picker Key], [WWI Order ID], [WWI Backorder ID], Description, Package, Quantity, [Unit Price], [Tax Rate], [Total Excluding Tax], [Tax Amount], [Total Including Tax], [Lineage Key])
		SELECT [City Key], [Customer Key], [Stock Item Key], [Order Date Key], [Picked Date Key], [Salesperson Key], [Picker Key], [WWI Order ID], [WWI Backorder ID], Description, Package, Quantity, [Unit Price], [Tax Rate], [Total Excluding Tax], [Tax Amount], [Total Including Tax], [Lineage Key]
		FROM [Fact].[OrderHistory];
		SET @i = @i +1
	END;
END
GO
-- Create an even larger copy, Fact.OrderHistoryExtended (first run only).
IF OBJECT_ID('Fact.OrderHistoryExtended') IS NULL
BEGIN
	SELECT [Order Key], [City Key], [Customer Key], [Stock Item Key], [Order Date Key], [Picked Date Key], [Salesperson Key], [Picker Key], [WWI Order ID], [WWI Backorder ID], Description, Package, Quantity, [Unit Price], [Tax Rate], [Total Excluding Tax], [Tax Amount], [Total Including Tax], [Lineage Key]
	INTO Fact.OrderHistoryExtended
	FROM Fact.[OrderHistory];
END;
ALTER TABLE Fact.OrderHistoryExtended
ADD CONSTRAINT PK_Fact_OrderHistoryExtended PRIMARY KEY NONCLUSTERED ([Order Key] ASC, [Order Date Key] ASC)
WITH (DATA_COMPRESSION = PAGE);
GO
CREATE INDEX IX_Stock_Item_Key
ON Fact.OrderHistoryExtended ([Stock Item Key])
INCLUDE (Quantity);
GO
-- Double Fact.OrderHistoryExtended three times (x8) to reach ~29.6M rows.
-- FIX: the guard originally counted Fact.OrderHistory, which this loop does
-- not grow (and which never reaches 29,620,736 rows), so re-running the
-- script kept doubling the table.  It must count Fact.OrderHistoryExtended,
-- the table being grown (29,620,736 = 3,702,592 x 2^3).
IF (SELECT COUNT(*) FROM [Fact].[OrderHistoryExtended]) < 29620736
BEGIN
	DECLARE @i smallint
	SET @i = 0
	WHILE @i < 3
	BEGIN
		INSERT Fact.OrderHistoryExtended([City Key], [Customer Key], [Stock Item Key], [Order Date Key], [Picked Date Key], [Salesperson Key], [Picker Key], [WWI Order ID], [WWI Backorder ID], Description, Package, Quantity, [Unit Price], [Tax Rate], [Total Excluding Tax], [Tax Amount], [Total Including Tax], [Lineage Key])
		SELECT [City Key], [Customer Key], [Stock Item Key], [Order Date Key], [Picked Date Key], [Salesperson Key], [Picker Key], [WWI Order ID], [WWI Backorder ID], Description, Package, Quantity, [Unit Price], [Tax Rate], [Total Excluding Tax], [Tax Amount], [Total Including Tax], [Lineage Key]
		FROM Fact.OrderHistoryExtended;
		SET @i = @i +1
	END;
END
GO
UPDATE Fact.OrderHistoryExtended
SET [WWI Order ID] = [Order Key];
GO
-- Repeat the following until log shrinks. These demos don't require much log space.
-- CHECKPOINT flushes dirty pages so log records can be truncated.
CHECKPOINT
GO
-- TRUNCATEONLY releases unused space at the end of the log file without moving data.
DBCC SHRINKFILE (N'WWI_Log' , 0, TRUNCATEONLY)
GO
-- Check how much log space is currently in use.
SELECT * FROM sys.dm_db_log_space_usage
GO
/ + [markdown] azdata_cell_guid="e97a9842-cd31-4d1c-8a1c-f1ef5f0e19da"
/ ## Step 3: Skew cardinality estimations
/
/ This will provide skewed statistical information to the Cardinality Estimator (CE).
/
/ Statistics are fundamental building blocks for the CE to output good estimates of the expected number of rows that will be read from a table for a given query.
/ Therefore, it's fundamental for the CE to have statistics that accurately portray the underlying data distribution of a column or set of columns. If those statistics are skewed, then cardinality estimations will likely be wrong and the Query Optimizer will likely make wrong decisions. The result is an inefficient plan for a given query.
/ + azdata_cell_guid="511b31b6-d3ff-417e-91dc-d0f4fd9f06ec"
USE [WideWorldImportersDW];
GO
-- Fake the table's statistics row count down to 1 so the Cardinality
-- Estimator grossly underestimates. ROWCOUNT here is the testing-only
-- UPDATE STATISTICS option that overrides stored row-count metadata
-- (not SET ROWCOUNT).
UPDATE STATISTICS Fact.OrderHistory
WITH ROWCOUNT = 1;
GO
/ + [markdown] azdata_cell_guid="cc613f99-f3c2-462f-93df-bd197de31bbc"
/ ## Step 4: Execute the query and observe the query execution plan
/
/ + azdata_cell_guid="71483984-6bdd-4866-8ddc-e40fe692c0db"
USE [WideWorldImportersDW];
GO
-- Compatibility level 150 (SQL Server 2019) enables row-mode Memory Grant Feedback.
ALTER DATABASE [WideWorldImportersDW] SET COMPATIBILITY_LEVEL = 150;
GO
-- Clear cached plans so this execution starts with no memory-grant feedback history.
ALTER DATABASE SCOPED CONFIGURATION CLEAR PROCEDURE_CACHE;
GO
-- The HASH join hint forces a memory-consuming Hash Match; with the skewed
-- (ROWCOUNT = 1) statistics the initial memory grant is far too small and
-- the join spills to tempdb.
SELECT
fo.[Order Key], fo.Description,
si.[Lead Time Days]
FROM Fact.OrderHistory AS fo
INNER HASH JOIN Dimension.[Stock Item] AS si
ON fo.[Stock Item Key] = si.[Stock Item Key]
WHERE fo.[Lineage Key] = 9
AND si.[Lead Time Days] > 19;
GO
/ + [markdown] azdata_cell_guid="7898e074-2d93-472b-84a5-2e52e3e43144"
/ Observe the query execution plan (or actual plan).
/
/ 
/
/ Notice the yellow triangle on the *Hash Match* operator, signaling a warning.
/
/ Hovering over the operator brings up additional information. We see the warning detail:
/
/ *"Operator used tempdb to spill data during execution with spill level 1 and 1 spilled thread(s), Hash wrote 52000 pages to and read 52000 pages from tempdb with granted memory 1024KB and used memory 968KB"*
/
/ 
/
/ This is a sizable spill that translates into I/O, which in most systems means slow performance.
/
/ Now click on the *SELECT* node, and look at properties of the entire plan. Specifically in the *MemoryGrantInfo* properties, note the *GrantedMemory* is 1056 KB (about 1 MB) and *IsMemoryGrantFeedbackAdjusted* has the "NoFirstExecution" state.
/
/ 
/
/ Because this was the first execution, the MGF feature didn't have a chance to learn before it executed. However, note what happens when the query is executed a few more times.
/ + [markdown] azdata_cell_guid="6140b74a-e79d-4108-9cba-415f66ed85a3"
/ ## Step 5: Execute the query again
/
/ Run the same query from Step 4 and then observe the query plan's *MemoryGrantInfo* properties.
/ + azdata_cell_guid="d37ad2f6-5a09-4418-8ba2-47f954af18e8"
USE [WideWorldImportersDW]
GO
-- Identical query to the previous step: re-executing lets Memory Grant
-- Feedback adjust the grant based on the prior run's spill.
SELECT
fo.[Order Key], fo.Description,
si.[Lead Time Days]
FROM Fact.OrderHistory AS fo
INNER HASH JOIN Dimension.[Stock Item] AS si
ON fo.[Stock Item Key] = si.[Stock Item Key]
WHERE fo.[Lineage Key] = 9
AND si.[Lead Time Days] > 19;
GO
/ + [markdown] azdata_cell_guid="0992ad26-d475-47ef-9b3e-5b9ccd429013"
/ Observe the query execution plan (or actual plan) of the 2nd execution.
/
/ 
/
/ Notice the yellow triangle on the *Hash Match* operator is gone, which means the query didn't spill and didn't incur expensive I/O. That difference is also noticeable in the execution time. From the 1st to the 2nd execution, elapsed time dropped from **~43s** to **~5s**.
/
/ Now click on the *SELECT* node, and look at the *MemoryGrantInfo* properties. Note the *GrantedMemory* is now 625 MB (from 1056 KB) and *IsMemoryGrantFeedbackAdjusted* shows **YesAdjusting**. The MGF feature is already learning and adjusting.
/
/ On the 3rd execution, the *GrantedMemory* is still 625 MB, and *IsMemoryGrantFeedbackAdjusted* shows **YesStable**. This means the MGF feature found the optimal memory grant that's required to execute the query entirely in memory.
/
/ ### 2nd Execution
/ 
/
/ ### 3rd Execution
/ 
/
/ + [markdown] azdata_cell_guid="7fc984f7-4902-495d-b4c7-81e3c51a1ee0"
/ ## Step 6: Reset the skewed statistics
/ + azdata_cell_guid="8f71aacc-84f5-418a-9f9c-83a39a84dc6f"
USE [WideWorldImportersDW]
GO
-- Restore the real row count so cardinality estimates are accurate again.
UPDATE STATISTICS Fact.OrderHistory
WITH ROWCOUNT = 3702672;
GO
| samples/features/intelligent-query-processing/notebooks/Memory_Grant_Feedback.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# language: python
# name: python3
# ---
# model.inspect causes problems when working in model namespace because import inspect breaks things.
if False:
    # Temporarily disable this test until I can recreate the model
    from pathlib import Path
    import os
    import tempfile

    import tensorflow as tf  # FIX: tf was referenced below but never imported
    from tensorflow.keras.models import load_model

    # Run from the repository root so the relative data path resolves.
    if Path.cwd().stem == 'indl':
        os.chdir(Path.cwd().parent)

    layer_idx = 20       # which layer's filters to visualize; [2, 6, 10, 14] also of interest
    n_steps = 100        # optimization steps per filter
    max_n_filters = 25   # visualize at most this many filters

    model_file = Path.cwd() / 'data' / 'kjm_ecog' / 'converted' / 'faces_basic' / 'mv_model_full.h5'
    model = load_model(str(model_file))

    # When processing the softmax classification layer, the second-last dense
    # layer should be converted from relu to linear.
    if (layer_idx == len(model.layers) - 1) and (model.layers[-2].activation != tf.keras.activations.linear):
        model.layers[-2].activation = tf.keras.activations.linear
        # Save and reload the model to actually apply the activation change.
        # FIX: use the public tempfile.mkstemp instead of the private
        # tempfile._get_candidate_names().
        fd, tmp_name = tempfile.mkstemp(suffix='.h5')
        os.close(fd)  # keras saves/loads by path; the descriptor is not needed
        tmp_path = Path(tmp_name)
        try:
            model.save(str(tmp_path))
            model = load_model(str(tmp_path))
        finally:
            # missing_ok so cleanup never masks an exception from save()
            tmp_path.unlink(missing_ok=True)

    model.summary()

    # visualize_layer / _stitch_filters are project-local helpers defined elsewhere.
    maximizing_activations = visualize_layer(model, layer_idx, epochs=n_steps, loss_as_exclusive=True,
                                             upsampling_steps=1, upsampling_factor=1,
                                             filter_range=(0, max_n_filters),
                                             output_dim=(701, model.get_input_shape_at(0)[-1]))

    # Stitch timeseries together into one mega timeseries with NaN gaps.
    stitched_data = _stitch_filters(maximizing_activations, n=2, sort_by_activation=False)

    import matplotlib.pyplot as plt
    from itertools import cycle

    # Create a colour code cycler e.g. 'C0', 'C1', etc.
    colour_codes = map('C{}'.format, cycle(range(10)))
    plt.figure()
    for chan_ix in [15, 9, 8]:
        plt.plot(stitched_data[:, :, chan_ix], color=next(colour_codes))
    plt.show()
| docs/Miscellaneous/junk_model_inspect.ipynb |