code stringlengths 38 801k | repo_path stringlengths 6 263 |
|---|---|
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3.6.6 64-bit
# name: python36664bit20e9155409db4a5daee3dd105c2673f5
# ---
from data import *
# # Data Augmentasi
#
# Pembuatan data augmentasi menggunakan library **keras.preprocessing.image.ImageDataGenerator** yang bisa menggabungkan Deep Neural Network image dan label secara bersamaan.
# ## Tentukan parameter data generator
# Gunakan parameter dengan metode random dictionary seperti dibawah ini.
# Augmentation parameters fed to keras ImageDataGenerator: small random
# rotations/shifts/shears/zooms plus horizontal flips; gaps are filled
# with the nearest pixel value.
data_gen_args = {
    'rotation_range': 0.05,
    'width_shift_range': 0.01,
    'height_shift_range': 0.01,
    'shear_range': 0.05,
    'zoom_range': 0.05,
    'horizontal_flip': True,
    'fill_mode': 'nearest',
}
# Generator of (image, label) batches of 600 read from data/train/{image,label};
# the augmented samples are also written out to data/train/aug.
myGenerator = trainGenerator(600, 'data/train', 'image', 'label',
                             data_gen_args, save_to_dir="data/train/aug")
# ## Membuat data augmentasi ke folder **'data/train/aug/'**
# Pull num_batch batches from the generator so the augmented files are
# actually written to disk, then stop.
num_batch = 2
for batch_idx, batch in enumerate(myGenerator):
    print(batch_idx)
    if batch_idx + 1 >= num_batch:
        break
# ## Build .npy arrays from the augmented data (compressed training set)
# NOTE: extra memory/disk is needed to store these arrays.
# geneTrainNpy presumably pairs image/mask files from the aug folder -- confirm in data.py.
image_arr,mask_arr = geneTrainNpy("data/train/aug/","data/train/aug/")
# Persist the arrays so training can reload them without re-augmenting.
np.save("data/image_arr.npy",image_arr)
np.save("data/mask_arr.npy",mask_arr)
# ## Masking Test Data
def masking(img, mask, height=512, width=470, color=yellow):  # GE data: 432x532
    """Build a colored overlay image from a predicted mask.

    Every pixel whose mask value is >= 30 is painted `color` on a black
    (height, width, 3) canvas; all other pixels stay black.

    Parameters
    ----------
    img : array-like
        Unused; kept for backward compatibility (the original overwrote it
        with zeros before reading it).
    mask : 2-D array
        Predicted mask; values >= 30 are treated as foreground.
    height, width : int
        Output canvas size. Defaults fit the phantom data; pass 432/532 for GE.
    color : length-3 sequence
        Channel triple painted at foreground pixels (`yellow` comes from data.py).

    Returns
    -------
    segmented : (height, width, 3) uint8 ndarray
    """
    segmented = np.zeros((height, width, 3), dtype='uint8')
    # Vectorized replacement of the original per-pixel double loop. This also
    # fixes two defects: the loop stopped at shape-1 (silently skipping the
    # last row and column) and wrote mask_out = mask[i, j] only to overwrite
    # it with `color` on the next line (dead store). The canvas now honours
    # the height/width parameters instead of a hard-coded (512, 470).
    foreground = mask[:height, :width] >= 30
    segmented[foreground] = color
    return segmented
# +
# Post-process every test frame: build a white overlay from each "_predict"
# mask and save the result under results/.
import os
from os import listdir
from skimage import io
from IPython.display import clear_output

results_path = 'data/test-phantom/'
results_results = 'results/'
list_file = os.listdir(results_path)
# Half the files are inputs ("<i>.png"), half predictions ("<i>_predict.png").
masked = np.zeros((len(list_file)//2, 512, 470, 3), dtype='uint8')  # GE data: 432x532
for frame in range(len(list_file)//2):
    original = io.imread(results_path+str(frame)+'.png')
    prediction = io.imread(results_path+str(frame)+'_predict.png')
    masked[frame] = masking(original, prediction, color = white)
    # Periodically clear the notebook output so the log does not grow unbounded.
    if (frame % 10 == 0):
        clear_output(wait=True)
    else:
        print('Creating... ' + results_path+str(frame)+'_predict.png')
    io.imsave(os.path.join(results_results,"%d.png"%frame), masked[frame])
# -
# # CARA CONVERT PNG TO MP4
# Shell command -- run in a terminal (or prefix with ! in Jupyter); as a bare
# line it is a Python syntax error in this script:
# ffmpeg -r 30 -f image2 -i %d.png -vcodec libx264 -crf 15 -pix_fmt yuv420p test.mp4
# # CARA MENGGABUNGKAN 2 VIDEO YANG SAMA
# Shell command -- run in a terminal (or prefix with ! in Jupyter); as a bare
# line it is a Python syntax error in this script:
# ffmpeg -i Yoga60.avi -i aqua.mp4 -filter_complex "[0:v]pad=iw*2:ih[int]; [int][1:v]overlay=W/2:0[vid]" -map "[vid]" -c:v libx264 -crf 15 output2.mp4
# ## DICE SIMILARITY COEFFICIENT
def dice(im1, im2, empty_score=1.0):
    """
    Computes the Dice coefficient, a measure of set similarity.
    Parameters
    ----------
    im1 : array-like, bool
        Any array of arbitrary size. If not boolean, will be converted.
    im2 : array-like, bool
        Any other array of identical size. If not boolean, will be converted.
    empty_score : float
        Value returned when both inputs are empty (the coefficient is
        undefined in that case).
    Returns
    -------
    dice : float
        Dice coefficient as a float on range [0,1].
        Maximum similarity = 1
        No similarity = 0
        Both are empty (sum eq to zero) = empty_score
    Notes
    -----
    The order of inputs for `dice` is irrelevant. The result will be
    identical if `im1` and `im2` are switched.
    """
    # `np.bool` was deprecated in NumPy 1.20 and removed in 1.24 -- use the
    # builtin `bool` dtype, which is what it aliased.
    im1 = np.asarray(im1).astype(bool)
    im2 = np.asarray(im2).astype(bool)
    if im1.shape != im2.shape:
        raise ValueError("Shape mismatch: im1 and im2 must have the same shape.")
    im_sum = im1.sum() + im2.sum()
    if im_sum == 0:
        # Both masks empty: similarity is undefined; return the caller's choice.
        return empty_score
    # Compute Dice coefficient: 2|A ∩ B| / (|A| + |B|)
    intersection = np.logical_and(im1, im2)
    return 2. * intersection.sum() / im_sum
# +
# Score every thresholded prediction against its ground-truth mask with the
# Dice coefficient; print per-frame values and the mean.
import os
from os import listdir
from skimage import io
from IPython.display import clear_output

path_predict = './results/result threshold 220/'
path_acuan = './data/Ground Truth/'
list_predict = os.listdir(path_predict)
list_acuan = os.listdir(path_acuan)
dsc = np.zeros(len(list_predict), dtype=float)
total = 0
for idx in range(len(list_predict)):
    # Images are stored as RGB; collapse to grayscale before comparing.
    pred_gray = cv2.cvtColor(cv2.imread(path_predict+str(idx)+'.png'), cv2.COLOR_RGB2GRAY)
    truth_gray = cv2.cvtColor(cv2.imread(path_acuan+str(idx)+'.png'), cv2.COLOR_RGB2GRAY)
    dsc[idx] = dice(pred_gray, truth_gray)
    total = total + dsc[idx]
    print(idx, dsc[idx])
hasil = total/len(list_predict)
print()
print(hasil)
# -
# ## JACCARD SIMILARITY COEFFICIENT
# +
import numpy as np
def jaccard(im1, im2):
    """
    Computes the Jaccard metric, a measure of set similarity.
    Parameters
    ----------
    im1 : array-like, bool
        Any array of arbitrary size. If not boolean, will be converted.
    im2 : array-like, bool
        Any other array of identical size. If not boolean, will be converted.
    Returns
    -------
    jaccard : float
        Jaccard metric returned is a float on range [0,1].
        Maximum similarity = 1
        No similarity = 0
    Notes
    -----
    The order of inputs for `jaccard` is irrelevant. The result will be
    identical if `im1` and `im2` are switched.
    Unlike `dice`, two empty inputs raise ZeroDivisionError (union is 0);
    callers feeding possibly-empty masks should guard for that.
    """
    # `np.bool` was deprecated in NumPy 1.20 and removed in 1.24 -- use the
    # builtin `bool` dtype, which is what it aliased.
    im1 = np.asarray(im1).astype(bool)
    im2 = np.asarray(im2).astype(bool)
    if im1.shape != im2.shape:
        raise ValueError("Shape mismatch: im1 and im2 must have the same shape.")
    # |A ∩ B| / |A ∪ B|
    intersection = np.logical_and(im1, im2)
    union = np.logical_or(im1, im2)
    return intersection.sum() / float(union.sum())
# +
import os
from os import listdir
from skimage import io
from IPython.display import clear_output
# Jaccard (IoU) score of the phantom predictions against ground truth.
path_predict = './results/result phantom/'
path_acuan = './data/Ground Truth Phantom/'
list_predict = os.listdir(path_predict)
list_acuan = os.listdir(path_acuan)
dsc = np.zeros(len(list_predict), dtype = float)
temp = 0
# NOTE(review): the loop stops at len-1 (last frame is skipped, its dsc entry
# stays 0) while the mean below divides by the full len(list_predict) -- this
# looks like an off-by-one; confirm whether the last frame is deliberately excluded.
for i in range(len(list_predict)-1):
    # Collapse RGB to grayscale before comparing.
    img1 = cv2.cvtColor(cv2.imread(path_predict+str(i)+'.png'), cv2.COLOR_RGB2GRAY)
    img2 = cv2.cvtColor(cv2.imread(path_acuan+str(i)+'.png'), cv2.COLOR_RGB2GRAY)
    dsc[i] = jaccard(img1, img2)
    temp = temp + dsc[i]
    #print(dsc[i])
hasil = temp/len(list_predict)
print(hasil)
# -
# ## PIXEL DIFFERENCE
# +
# Relative pixel-count difference between predictions and ground truth:
# for each frame, count pure-white (255) pixels on both sides and compare.
import os
from os import listdir
from skimage import io
import cv2
import numpy as np
import matplotlib.pyplot as plt

path_predict = './results/result phantom/'
path_acuan = './data/Ground Truth Phantom/'
list_predict = os.listdir(path_predict)
list_acuan = os.listdir(path_acuan)
temp = 0
aa = 0
bb = 0
for idx in range(len(list_acuan)):
    pred_white = np.count_nonzero(cv2.imread(path_predict+str(idx)+'.png')==255)
    truth_white = np.count_nonzero(cv2.imread(path_acuan+str(idx)+'.png')==255)
    # Relative error of the predicted foreground area w.r.t. the ground truth.
    rel_diff = abs(pred_white-truth_white)/truth_white
    print(idx, truth_white, pred_white, rel_diff)
    temp = temp + rel_diff
    aa = aa + pred_white
    bb = bb + truth_white
# Averages over all frames: mean predicted count, mean truth count, mean error.
temp = temp/len(list_acuan)
aa = aa/len(list_acuan)
bb = bb/len(list_acuan)
print()
print(aa, bb, temp)
# -
| dataPrepare.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# language: python
# name: python3
# ---
# +
# Copyright 2016 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Tests for model_deploy."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import numpy as np
import tensorflow as tf
from deployment import model_deploy
slim = tf.contrib.slim
class DeploymentConfigTest(tf.test.TestCase):
  """Checks device/scope placement decisions made by model_deploy.DeploymentConfig."""

  def testDefaults(self):
    # Default config: no PS, one local clone; vars/inputs/optimizer on CPU.
    deploy_config = model_deploy.DeploymentConfig()
    self.assertEqual(slim.get_variables(), [])
    self.assertEqual(deploy_config.caching_device(), None)
    self.assertDeviceEqual(deploy_config.clone_device(0), '')
    self.assertEqual(deploy_config.clone_scope(0), '')
    self.assertDeviceEqual(deploy_config.optimizer_device(), 'CPU:0')
    self.assertDeviceEqual(deploy_config.inputs_device(), 'CPU:0')
    self.assertDeviceEqual(deploy_config.variables_device(), 'CPU:0')

  def testCPUonly(self):
    # clone_on_cpu pins the clone itself to CPU as well.
    deploy_config = model_deploy.DeploymentConfig(clone_on_cpu=True)
    self.assertEqual(deploy_config.caching_device(), None)
    self.assertDeviceEqual(deploy_config.clone_device(0), 'CPU:0')
    self.assertEqual(deploy_config.clone_scope(0), '')
    self.assertDeviceEqual(deploy_config.optimizer_device(), 'CPU:0')
    self.assertDeviceEqual(deploy_config.inputs_device(), 'CPU:0')
    self.assertDeviceEqual(deploy_config.variables_device(), 'CPU:0')

  def testMultiGPU(self):
    # With num_clones > 1, clone i goes to GPU:i under scope clone_i.
    deploy_config = model_deploy.DeploymentConfig(num_clones=2)
    self.assertEqual(deploy_config.caching_device(), None)
    self.assertDeviceEqual(deploy_config.clone_device(0), 'GPU:0')
    self.assertDeviceEqual(deploy_config.clone_device(1), 'GPU:1')
    self.assertEqual(deploy_config.clone_scope(0), 'clone_0')
    self.assertEqual(deploy_config.clone_scope(1), 'clone_1')
    self.assertDeviceEqual(deploy_config.optimizer_device(), 'CPU:0')
    self.assertDeviceEqual(deploy_config.inputs_device(), 'CPU:0')
    self.assertDeviceEqual(deploy_config.variables_device(), 'CPU:0')

  def testPS(self):
    # One parameter server: variables land on /job:ps, compute on /job:worker.
    deploy_config = model_deploy.DeploymentConfig(num_clones=1, num_ps_tasks=1)
    self.assertDeviceEqual(deploy_config.clone_device(0),
                           '/job:worker')
    self.assertEqual(deploy_config.clone_scope(0), '')
    self.assertDeviceEqual(deploy_config.optimizer_device(),
                           '/job:worker/device:CPU:0')
    self.assertDeviceEqual(deploy_config.inputs_device(),
                           '/job:worker/device:CPU:0')
    with tf.device(deploy_config.variables_device()):
      a = tf.Variable(0)
      b = tf.Variable(0)
      c = tf.no_op()
      d = slim.variable('a', [],
                        caching_device=deploy_config.caching_device())
    # Variables go to the (single) ps task; plain ops (c) are unplaced.
    self.assertDeviceEqual(a.device, '/job:ps/task:0/device:CPU:0')
    self.assertDeviceEqual(a.device, a.value().device)
    self.assertDeviceEqual(b.device, '/job:ps/task:0/device:CPU:0')
    self.assertDeviceEqual(b.device, b.value().device)
    self.assertDeviceEqual(c.device, '')
    self.assertDeviceEqual(d.device, '/job:ps/task:0/device:CPU:0')
    self.assertDeviceEqual(d.value().device, '')

  def testMultiGPUPS(self):
    # PS + multiple clones: caching device becomes a function (worker-local cache).
    deploy_config = model_deploy.DeploymentConfig(num_clones=2, num_ps_tasks=1)
    self.assertEqual(deploy_config.caching_device()(tf.no_op()), '')
    self.assertDeviceEqual(deploy_config.clone_device(0),
                           '/job:worker/device:GPU:0')
    self.assertDeviceEqual(deploy_config.clone_device(1),
                           '/job:worker/device:GPU:1')
    self.assertEqual(deploy_config.clone_scope(0), 'clone_0')
    self.assertEqual(deploy_config.clone_scope(1), 'clone_1')
    self.assertDeviceEqual(deploy_config.optimizer_device(),
                           '/job:worker/device:CPU:0')
    self.assertDeviceEqual(deploy_config.inputs_device(),
                           '/job:worker/device:CPU:0')

  def testReplicasPS(self):
    # Replicated training (between-graph): each replica still sees /job:worker.
    deploy_config = model_deploy.DeploymentConfig(num_replicas=2,
                                                  num_ps_tasks=2)
    self.assertDeviceEqual(deploy_config.clone_device(0),
                           '/job:worker')
    self.assertEqual(deploy_config.clone_scope(0), '')
    self.assertDeviceEqual(deploy_config.optimizer_device(),
                           '/job:worker/device:CPU:0')
    self.assertDeviceEqual(deploy_config.inputs_device(),
                           '/job:worker/device:CPU:0')

  def testReplicasMultiGPUPS(self):
    # Replicas x clones: per-replica clones still map to local GPUs.
    deploy_config = model_deploy.DeploymentConfig(num_replicas=2,
                                                  num_clones=2,
                                                  num_ps_tasks=2)
    self.assertDeviceEqual(deploy_config.clone_device(0),
                           '/job:worker/device:GPU:0')
    self.assertDeviceEqual(deploy_config.clone_device(1),
                           '/job:worker/device:GPU:1')
    self.assertEqual(deploy_config.clone_scope(0), 'clone_0')
    self.assertEqual(deploy_config.clone_scope(1), 'clone_1')
    self.assertDeviceEqual(deploy_config.optimizer_device(),
                           '/job:worker/device:CPU:0')
    self.assertDeviceEqual(deploy_config.inputs_device(),
                           '/job:worker/device:CPU:0')

  def testVariablesPS(self):
    # Two ps tasks: successive variables are round-robined across tasks 0 and 1.
    deploy_config = model_deploy.DeploymentConfig(num_ps_tasks=2)
    with tf.device(deploy_config.variables_device()):
      a = tf.Variable(0)
      b = tf.Variable(0)
      c = tf.no_op()
      d = slim.variable('a', [],
                        caching_device=deploy_config.caching_device())
    self.assertDeviceEqual(a.device, '/job:ps/task:0/device:CPU:0')
    self.assertDeviceEqual(a.device, a.value().device)
    self.assertDeviceEqual(b.device, '/job:ps/task:1/device:CPU:0')
    self.assertDeviceEqual(b.device, b.value().device)
    self.assertDeviceEqual(c.device, '')
    self.assertDeviceEqual(d.device, '/job:ps/task:0/device:CPU:0')
    self.assertDeviceEqual(d.value().device, '')
def LogisticClassifier(inputs, labels, scope=None, reuse=None):
  """Tiny test model: one sigmoid fully-connected unit plus a log loss."""
  with tf.variable_scope(scope, 'LogisticClassifier', [inputs, labels],
                         reuse=reuse):
    preds = slim.fully_connected(inputs, 1, activation_fn=tf.sigmoid,
                                 scope='fully_connected')
    # Registers the loss in slim's loss collection as a side effect.
    slim.losses.log_loss(preds, labels)
    return preds
def BatchNormClassifier(inputs, labels, scope=None, reuse=None):
  """Like LogisticClassifier but with batch norm in front, so the model
  produces UPDATE_OPS (moving mean/variance) for the deployment tests."""
  with tf.variable_scope(scope, 'BatchNormClassifier', [inputs, labels],
                         reuse=reuse):
    normed = slim.batch_norm(inputs, decay=0.1)
    preds = slim.fully_connected(normed, 1,
                                 activation_fn=tf.sigmoid,
                                 scope='fully_connected')
    # Registers the loss in slim's loss collection as a side effect.
    slim.losses.log_loss(preds, labels)
    return preds
class CreatecloneTest(tf.test.TestCase):
  """Tests model_deploy.create_clones: variable/op placement per clone."""

  def setUp(self):
    # Create an easy training set: one-hot inputs whose hot index encodes
    # the label (plus noise), so the toy models can actually learn it.
    np.random.seed(0)
    self._inputs = np.zeros((16, 4))
    self._labels = np.random.randint(0, 2, size=(16, 1)).astype(np.float32)
    self._logdir = self.get_temp_dir()
    for i in range(16):
      j = int(2 * self._labels[i] + np.random.randint(0, 2))
      self._inputs[i, j] = 1

  def testCreateLogisticClassifier(self):
    g = tf.Graph()
    with g.as_default():
      tf.set_random_seed(0)
      tf_inputs = tf.constant(self._inputs, dtype=tf.float32)
      tf_labels = tf.constant(self._labels, dtype=tf.float32)
      model_fn = LogisticClassifier
      clone_args = (tf_inputs, tf_labels)
      deploy_config = model_deploy.DeploymentConfig(num_clones=1)
      self.assertEqual(slim.get_variables(), [])
      clones = model_deploy.create_clones(deploy_config, model_fn, clone_args)
      clone = clones[0]
      # Logistic model has 2 variables (weights + bias), all on CPU.
      self.assertEqual(len(slim.get_variables()), 2)
      for v in slim.get_variables():
        self.assertDeviceEqual(v.device, 'CPU:0')
        self.assertDeviceEqual(v.value().device, 'CPU:0')
      self.assertEqual(clone.outputs.op.name,
                       'LogisticClassifier/fully_connected/Sigmoid')
      self.assertEqual(clone.scope, '')
      self.assertDeviceEqual(clone.device, '')
      self.assertEqual(len(slim.losses.get_losses()), 1)
      # No batch norm => no update ops.
      update_ops = tf.get_collection(tf.GraphKeys.UPDATE_OPS)
      self.assertEqual(update_ops, [])

  def testCreateSingleclone(self):
    g = tf.Graph()
    with g.as_default():
      tf.set_random_seed(0)
      tf_inputs = tf.constant(self._inputs, dtype=tf.float32)
      tf_labels = tf.constant(self._labels, dtype=tf.float32)
      model_fn = BatchNormClassifier
      clone_args = (tf_inputs, tf_labels)
      deploy_config = model_deploy.DeploymentConfig(num_clones=1)
      self.assertEqual(slim.get_variables(), [])
      clones = model_deploy.create_clones(deploy_config, model_fn, clone_args)
      clone = clones[0]
      # Batch-norm model: 5 variables (fc weights/bias + beta + moving stats).
      self.assertEqual(len(slim.get_variables()), 5)
      for v in slim.get_variables():
        self.assertDeviceEqual(v.device, 'CPU:0')
        self.assertDeviceEqual(v.value().device, 'CPU:0')
      self.assertEqual(clone.outputs.op.name,
                       'BatchNormClassifier/fully_connected/Sigmoid')
      self.assertEqual(clone.scope, '')
      self.assertDeviceEqual(clone.device, '')
      self.assertEqual(len(slim.losses.get_losses()), 1)
      # Batch norm contributes 2 update ops (moving mean + moving variance).
      update_ops = tf.get_collection(tf.GraphKeys.UPDATE_OPS)
      self.assertEqual(len(update_ops), 2)

  def testCreateMulticlone(self):
    g = tf.Graph()
    with g.as_default():
      tf.set_random_seed(0)
      tf_inputs = tf.constant(self._inputs, dtype=tf.float32)
      tf_labels = tf.constant(self._labels, dtype=tf.float32)
      model_fn = BatchNormClassifier
      clone_args = (tf_inputs, tf_labels)
      num_clones = 4
      deploy_config = model_deploy.DeploymentConfig(num_clones=num_clones)
      self.assertEqual(slim.get_variables(), [])
      clones = model_deploy.create_clones(deploy_config, model_fn, clone_args)
      # Variables are shared across clones (still only 5), ops are replicated.
      self.assertEqual(len(slim.get_variables()), 5)
      for v in slim.get_variables():
        self.assertDeviceEqual(v.device, 'CPU:0')
        self.assertDeviceEqual(v.value().device, 'CPU:0')
      self.assertEqual(len(clones), num_clones)
      for i, clone in enumerate(clones):
        self.assertEqual(
            clone.outputs.op.name,
            'clone_%d/BatchNormClassifier/fully_connected/Sigmoid' % i)
        # Each clone has its own pair of batch-norm update ops in its scope.
        update_ops = tf.get_collection(tf.GraphKeys.UPDATE_OPS, clone.scope)
        self.assertEqual(len(update_ops), 2)
        self.assertEqual(clone.scope, 'clone_%d/' % i)
        self.assertDeviceEqual(clone.device, 'GPU:%d' % i)

  def testCreateOnecloneWithPS(self):
    g = tf.Graph()
    with g.as_default():
      tf.set_random_seed(0)
      tf_inputs = tf.constant(self._inputs, dtype=tf.float32)
      tf_labels = tf.constant(self._labels, dtype=tf.float32)
      model_fn = BatchNormClassifier
      clone_args = (tf_inputs, tf_labels)
      deploy_config = model_deploy.DeploymentConfig(num_clones=1,
                                                    num_ps_tasks=1)
      self.assertEqual(slim.get_variables(), [])
      clones = model_deploy.create_clones(deploy_config, model_fn, clone_args)
      self.assertEqual(len(clones), 1)
      clone = clones[0]
      self.assertEqual(clone.outputs.op.name,
                       'BatchNormClassifier/fully_connected/Sigmoid')
      self.assertDeviceEqual(clone.device, '/job:worker')
      self.assertEqual(clone.scope, '')
      self.assertEqual(len(slim.get_variables()), 5)
      # All variables live on the single ps task.
      for v in slim.get_variables():
        self.assertDeviceEqual(v.device, '/job:ps/task:0/CPU:0')
        self.assertDeviceEqual(v.device, v.value().device)

  def testCreateMulticloneWithPS(self):
    g = tf.Graph()
    with g.as_default():
      tf.set_random_seed(0)
      tf_inputs = tf.constant(self._inputs, dtype=tf.float32)
      tf_labels = tf.constant(self._labels, dtype=tf.float32)
      model_fn = BatchNormClassifier
      clone_args = (tf_inputs, tf_labels)
      deploy_config = model_deploy.DeploymentConfig(num_clones=2,
                                                    num_ps_tasks=2)
      self.assertEqual(slim.get_variables(), [])
      clones = model_deploy.create_clones(deploy_config, model_fn, clone_args)
      self.assertEqual(len(slim.get_variables()), 5)
      # Variables round-robin across the two ps tasks.
      for i, v in enumerate(slim.get_variables()):
        t = i % 2
        self.assertDeviceEqual(v.device, '/job:ps/task:%d/device:CPU:0' % t)
        self.assertDeviceEqual(v.device, v.value().device)
      self.assertEqual(len(clones), 2)
      for i, clone in enumerate(clones):
        self.assertEqual(
            clone.outputs.op.name,
            'clone_%d/BatchNormClassifier/fully_connected/Sigmoid' % i)
        self.assertEqual(clone.scope, 'clone_%d/' % i)
        self.assertDeviceEqual(clone.device, '/job:worker/device:GPU:%d' % i)
class OptimizeclonesTest(tf.test.TestCase):
  """Tests model_deploy.optimize_clones: gradient aggregation and placement."""

  def setUp(self):
    # Create an easy training set: one-hot inputs whose hot index encodes
    # the label (plus noise). Same fixture as CreatecloneTest.
    np.random.seed(0)
    self._inputs = np.zeros((16, 4))
    self._labels = np.random.randint(0, 2, size=(16, 1)).astype(np.float32)
    self._logdir = self.get_temp_dir()
    for i in range(16):
      j = int(2 * self._labels[i] + np.random.randint(0, 2))
      self._inputs[i, j] = 1

  def testCreateLogisticClassifier(self):
    g = tf.Graph()
    with g.as_default():
      tf.set_random_seed(0)
      tf_inputs = tf.constant(self._inputs, dtype=tf.float32)
      tf_labels = tf.constant(self._labels, dtype=tf.float32)
      model_fn = LogisticClassifier
      clone_args = (tf_inputs, tf_labels)
      deploy_config = model_deploy.DeploymentConfig(num_clones=1)
      self.assertEqual(slim.get_variables(), [])
      clones = model_deploy.create_clones(deploy_config, model_fn, clone_args)
      self.assertEqual(len(slim.get_variables()), 2)
      update_ops = tf.get_collection(tf.GraphKeys.UPDATE_OPS)
      self.assertEqual(update_ops, [])
      optimizer = tf.train.GradientDescentOptimizer(learning_rate=1.0)
      total_loss, grads_and_vars = model_deploy.optimize_clones(clones,
                                                                optimizer)
      # One gradient per trainable variable; summed loss op named total_loss.
      self.assertEqual(len(grads_and_vars), len(tf.trainable_variables()))
      self.assertEqual(total_loss.op.name, 'total_loss')
      for g, v in grads_and_vars:
        self.assertDeviceEqual(g.device, '')
        self.assertDeviceEqual(v.device, 'CPU:0')

  def testCreateSingleclone(self):
    g = tf.Graph()
    with g.as_default():
      tf.set_random_seed(0)
      tf_inputs = tf.constant(self._inputs, dtype=tf.float32)
      tf_labels = tf.constant(self._labels, dtype=tf.float32)
      model_fn = BatchNormClassifier
      clone_args = (tf_inputs, tf_labels)
      deploy_config = model_deploy.DeploymentConfig(num_clones=1)
      self.assertEqual(slim.get_variables(), [])
      clones = model_deploy.create_clones(deploy_config, model_fn, clone_args)
      self.assertEqual(len(slim.get_variables()), 5)
      update_ops = tf.get_collection(tf.GraphKeys.UPDATE_OPS)
      self.assertEqual(len(update_ops), 2)
      optimizer = tf.train.GradientDescentOptimizer(learning_rate=1.0)
      total_loss, grads_and_vars = model_deploy.optimize_clones(clones,
                                                                optimizer)
      self.assertEqual(len(grads_and_vars), len(tf.trainable_variables()))
      self.assertEqual(total_loss.op.name, 'total_loss')
      for g, v in grads_and_vars:
        self.assertDeviceEqual(g.device, '')
        self.assertDeviceEqual(v.device, 'CPU:0')

  def testCreateMulticlone(self):
    g = tf.Graph()
    with g.as_default():
      tf.set_random_seed(0)
      tf_inputs = tf.constant(self._inputs, dtype=tf.float32)
      tf_labels = tf.constant(self._labels, dtype=tf.float32)
      model_fn = BatchNormClassifier
      clone_args = (tf_inputs, tf_labels)
      num_clones = 4
      deploy_config = model_deploy.DeploymentConfig(num_clones=num_clones)
      self.assertEqual(slim.get_variables(), [])
      clones = model_deploy.create_clones(deploy_config, model_fn, clone_args)
      self.assertEqual(len(slim.get_variables()), 5)
      # 2 batch-norm update ops per clone.
      update_ops = tf.get_collection(tf.GraphKeys.UPDATE_OPS)
      self.assertEqual(len(update_ops), num_clones * 2)
      optimizer = tf.train.GradientDescentOptimizer(learning_rate=1.0)
      total_loss, grads_and_vars = model_deploy.optimize_clones(clones,
                                                                optimizer)
      self.assertEqual(len(grads_and_vars), len(tf.trainable_variables()))
      self.assertEqual(total_loss.op.name, 'total_loss')
      for g, v in grads_and_vars:
        self.assertDeviceEqual(g.device, '')
        self.assertDeviceEqual(v.device, 'CPU:0')

  def testCreateMulticloneCPU(self):
    g = tf.Graph()
    with g.as_default():
      tf.set_random_seed(0)
      tf_inputs = tf.constant(self._inputs, dtype=tf.float32)
      tf_labels = tf.constant(self._labels, dtype=tf.float32)
      model_fn = BatchNormClassifier
      model_args = (tf_inputs, tf_labels)
      num_clones = 4
      deploy_config = model_deploy.DeploymentConfig(num_clones=num_clones,
                                                    clone_on_cpu=True)
      self.assertEqual(slim.get_variables(), [])
      clones = model_deploy.create_clones(deploy_config, model_fn, model_args)
      self.assertEqual(len(slim.get_variables()), 5)
      update_ops = tf.get_collection(tf.GraphKeys.UPDATE_OPS)
      self.assertEqual(len(update_ops), num_clones * 2)
      optimizer = tf.train.GradientDescentOptimizer(learning_rate=1.0)
      total_loss, grads_and_vars = model_deploy.optimize_clones(clones,
                                                                optimizer)
      self.assertEqual(len(grads_and_vars), len(tf.trainable_variables()))
      self.assertEqual(total_loss.op.name, 'total_loss')
      for g, v in grads_and_vars:
        self.assertDeviceEqual(g.device, '')
        self.assertDeviceEqual(v.device, 'CPU:0')

  def testCreateOnecloneWithPS(self):
    g = tf.Graph()
    with g.as_default():
      tf.set_random_seed(0)
      tf_inputs = tf.constant(self._inputs, dtype=tf.float32)
      tf_labels = tf.constant(self._labels, dtype=tf.float32)
      model_fn = BatchNormClassifier
      model_args = (tf_inputs, tf_labels)
      deploy_config = model_deploy.DeploymentConfig(num_clones=1,
                                                    num_ps_tasks=1)
      self.assertEqual(slim.get_variables(), [])
      clones = model_deploy.create_clones(deploy_config, model_fn, model_args)
      self.assertEqual(len(slim.get_variables()), 5)
      update_ops = tf.get_collection(tf.GraphKeys.UPDATE_OPS)
      self.assertEqual(len(update_ops), 2)
      optimizer = tf.train.GradientDescentOptimizer(learning_rate=1.0)
      total_loss, grads_and_vars = model_deploy.optimize_clones(clones,
                                                                optimizer)
      self.assertEqual(len(grads_and_vars), len(tf.trainable_variables()))
      self.assertEqual(total_loss.op.name, 'total_loss')
      # Gradients are computed on the worker; variables live on the ps task.
      for g, v in grads_and_vars:
        self.assertDeviceEqual(g.device, '/job:worker')
        self.assertDeviceEqual(v.device, '/job:ps/task:0/CPU:0')
class DeployTest(tf.test.TestCase):
  """End-to-end tests for model_deploy.deploy (train op, summaries)."""

  def setUp(self):
    # Create an easy training set: one-hot inputs whose hot index encodes
    # the label (plus noise). Same fixture as the other test classes.
    np.random.seed(0)
    self._inputs = np.zeros((16, 4))
    self._labels = np.random.randint(0, 2, size=(16, 1)).astype(np.float32)
    self._logdir = self.get_temp_dir()
    for i in range(16):
      j = int(2 * self._labels[i] + np.random.randint(0, 2))
      self._inputs[i, j] = 1

  def testLocalTrainOp(self):
    # Deploys 2 CPU clones and actually trains: loss should drop 10x and
    # the batch-norm moving statistics should move off their init values.
    g = tf.Graph()
    with g.as_default():
      tf.set_random_seed(0)
      tf_inputs = tf.constant(self._inputs, dtype=tf.float32)
      tf_labels = tf.constant(self._labels, dtype=tf.float32)
      model_fn = BatchNormClassifier
      model_args = (tf_inputs, tf_labels)
      deploy_config = model_deploy.DeploymentConfig(num_clones=2,
                                                    clone_on_cpu=True)
      optimizer = tf.train.GradientDescentOptimizer(learning_rate=1.0)
      self.assertEqual(slim.get_variables(), [])
      model = model_deploy.deploy(deploy_config, model_fn, model_args,
                                  optimizer=optimizer)
      update_ops = tf.get_collection(tf.GraphKeys.UPDATE_OPS)
      self.assertEqual(len(update_ops), 4)
      self.assertEqual(len(model.clones), 2)
      self.assertEqual(model.total_loss.op.name, 'total_loss')
      self.assertEqual(model.summary_op.op.name, 'summary_op/summary_op')
      self.assertEqual(model.train_op.op.name, 'train_op')
      with tf.Session() as sess:
        sess.run(tf.global_variables_initializer())
        moving_mean = tf.contrib.framework.get_variables_by_name(
            'moving_mean')[0]
        moving_variance = tf.contrib.framework.get_variables_by_name(
            'moving_variance')[0]
        initial_loss = sess.run(model.total_loss)
        initial_mean, initial_variance = sess.run([moving_mean,
                                                   moving_variance])
        # Batch-norm stats start at mean 0 / variance 1.
        self.assertAllClose(initial_mean, [0.0, 0.0, 0.0, 0.0])
        self.assertAllClose(initial_variance, [1.0, 1.0, 1.0, 1.0])
        for _ in range(10):
          sess.run(model.train_op)
        final_loss = sess.run(model.total_loss)
        self.assertLess(final_loss, initial_loss / 10.0)
        final_mean, final_variance = sess.run([moving_mean,
                                               moving_variance])
        # Expected values for this fixed seed and fixture.
        self.assertAllClose(final_mean, [0.125, 0.25, 0.375, 0.25])
        self.assertAllClose(final_variance, [0.109375, 0.1875,
                                             0.234375, 0.1875])

  def testNoSummariesOnGPU(self):
    with tf.Graph().as_default():
      deploy_config = model_deploy.DeploymentConfig(num_clones=2)
      # clone function creates a fully_connected layer with a regularizer loss.
      def ModelFn():
        inputs = tf.constant(1.0, shape=(10, 20), dtype=tf.float32)
        reg = tf.contrib.layers.l2_regularizer(0.001)
        tf.contrib.layers.fully_connected(inputs, 30, weights_regularizer=reg)
      model = model_deploy.deploy(
          deploy_config, ModelFn,
          optimizer=tf.train.GradientDescentOptimizer(1.0))
      # The model summary op should have a few summary inputs and all of them
      # should be on the CPU.
      self.assertTrue(model.summary_op.op.inputs)
      for inp in model.summary_op.op.inputs:
        self.assertEqual('/device:CPU:0', inp.device)

  def testNoSummariesOnGPUForEvals(self):
    with tf.Graph().as_default():
      deploy_config = model_deploy.DeploymentConfig(num_clones=2)
      # clone function creates a fully_connected layer with a regularizer loss.
      def ModelFn():
        inputs = tf.constant(1.0, shape=(10, 20), dtype=tf.float32)
        reg = tf.contrib.layers.l2_regularizer(0.001)
        tf.contrib.layers.fully_connected(inputs, 30, weights_regularizer=reg)
      # No optimizer here, it's an eval.
      model = model_deploy.deploy(deploy_config, ModelFn)
      # The model summary op should have a few summary inputs and all of them
      # should be on the CPU.
      self.assertTrue(model.summary_op.op.inputs)
      for inp in model.summary_op.op.inputs:
        self.assertEqual('/device:CPU:0', inp.device)
# Run all TestCase classes in this module when executed as a script.
if __name__ == '__main__':
  tf.test.main()
| deployment/model_deploy_test.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# language: python
# name: python3
# ---
# # Using Regex Combined with Pandas
# +
import pandas as pd

# The CSV has no header row, so assign column names by position
# (the first two columns are unidentified and dropped later).
tweets = pd.read_csv("tweets.csv", header=None, names=["unknown", "unknown2", "date", "topic", "handle", "text"])
# -
tweets.head()
# get rid of some columns we don't care about (modifies tweets in place)
tweets.drop(columns=["unknown", "unknown2"], inplace=True)
# preview the data
tweets.head()
# get length of tweets in characters (vectorized str accessor)
tweets["text"].str.len()
tweets.head()
# count number of times Obama appears in tweets (boolean mask summed)
tweets["text"].str.contains("Obama").sum()
# find all the @-mentions; \B = non-word boundary, since @ is not a word char
tweets["ats"]=tweets["text"].str.findall(r'\B@\w+')
# +
# Mon May 11 03:17:40 UTC 2009  <- date format being parsed
# get the weekday of tweet
# Fixed the character classes: `[a-zA-z]` is a charset bug -- the A-z range
# also matches the punctuation [ \ ] ^ _ ` between 'Z' and 'a'. Use [a-zA-Z].
tweets["weekday"] = tweets["date"].str.extract(r'([a-zA-Z]{3})\s[a-zA-Z]{3}',expand=False)
# get the month of the tweet (second 3-letter token)
tweets["months"] = tweets["date"].str.extract(r'[a-zA-Z]{3}\s([a-zA-Z]{3})',expand=False)
# get the year of the tweet
# NOTE(review): 20[0-1][1-9] never matches years ending in 0 (2010, 2020) -- confirm intent.
tweets["date"].str.extract(r'[A-Z]{3}\s(20[0-1][1-9])$')
# -
# ### Exercises ( minutes)
# 1. Identify the list of email addresses for your security administrator to blacklist from your company's email servers.
# 2. Identify any IP addresses that should be blacklisted (an IPv4 address goes from **1.1.1.1 to 255.255.255.255**)
# 3. Find all hashtags mentioned in the tweets dataset. Store it as a separate column called **hashtags**.
# 4. Find the subject headings for these emails.
#
# Load the scam-email corpus once; `with` guarantees the file handle is
# closed (the original `open(...).read()` leaked the handle).
with open("fraudulent_emails.txt") as _f:
    email_text = _f.read()
# 1. Identify the list of email addresses for your security administrator to blacklist
# from your company's email servers.
import re
set(re.findall(r'\w+@\w+\.\w{2,6}', email_text)) # this matches only basic emails (<EMAIL>)
set(re.findall(r'\w+@\w+\.[\w+\.]{1,}',email_text)) # this matches more complicated emails with multiple domain names)
# +
# 2. Identify IP addresses that should be blacklisted.
# NOTE(review): the comment in the original cell was copy-pasted from
# exercise 1 (emails); this pattern actually extracts IPv4-like strings,
# and it also admits octets > 255 (e.g. 299.1.1.1) -- confirm if that matters.
re.findall(r'[1-2]?[0-9]{1,2}\.[0-9]{1,3}\.[0-9]{1,3}\.[0-9]{1,3}', email_text)
# +
# 3. Find all hashtags mentioned in the tweets dataset. Store it as a separate column called hashtags.
# The \B is "not a word boundary", and it matches because the # (and the @ in the previous example) is not
# considered part of a word. Therefore, you need the opposite of a word boundary (a non-word boundary) to match
# the case where it begins with a non-word character.
# This will correctly NOT match text like she#he, sometext#someothertext
tweets["text"].str.findall(r'\B(#\w+)\b')
# -
# 4. Find the subject headings for these emails (capture the rest of each "Subject:" line).
re.findall(r'Subject:\s(.+)', email_text)
| week4/Regex Part II.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# language: python
# name: python3
# ---
# +
# %matplotlib inline
import matplotlib
import numpy as np
import pandas as pd
import matplotlib.pyplot as plt
# -
wine=pd.read_csv("wineQualityReds.csv")
wine.head(n=7)
wine=wine.drop('Unnamed: 0', axis=1)#drop the stray index column the CSV writer saved
wine.head()
X_wine= wine.drop("quality", axis=1) #predictors: every column except the target
#Y = wine["quality"] #response
| section5/.ipynb_checkpoints/gmm1-checkpoint.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3 (ipykernel)
# language: python
# name: python3
# ---
# # Time traveling
#
# We will import the required modules
import numpy as np
import matplotlib.pyplot as plt
import shutil
import tiledb
# +
# clean up previous runs
array_uri = "arrays/timeseries"
# ignore_errors=True covers the first run, when the array directory does not
# exist yet (replaces the bare `except: pass`, which also swallowed
# unrelated errors such as KeyboardInterrupt).
shutil.rmtree(array_uri, ignore_errors=True)
# -
# We will create the array schema and populate the data using the `timestamp` parameter
# +
# 13x13 dense array with two int32 dimensions and a single ubyte attribute.
dom = tiledb.Domain(
    tiledb.Dim(name="x", domain=(0, 12), tile=5, dtype=np.int32),
    tiledb.Dim(name="y", domain=(0, 12), tile=5, dtype=np.int32))
att = tiledb.Attr(name="val", dtype=np.ubyte)
schema = tiledb.ArraySchema(domain=dom, attrs=(att,))
tiledb.DenseArray.create(array_uri, schema)
# Write four fragments at timestamps 1..4; each fills a 3x3 block marching
# down the diagonal, so time-travel reads show the array "growing".
for fragment_idx in range(4):
    ts = fragment_idx + 1
    with tiledb.open(array_uri, mode="w", timestamp=ts) as arr:
        x_start = fragment_idx * 3
        y_start = fragment_idx * 3
        x_end = x_start + 3
        y_end = y_start + 3
        arr[x_start:x_end, y_start:y_end] = 1
# -
# Lets visualize this data by opening the arrays at different timestamps, starting with the last timestamp, we will get all the data
# +
# No timestamp: read the latest consolidated view (all four fragments).
with tiledb.open(array_uri) as arr:
    data = arr[:, :]
plt.imshow(data["val"], cmap="Greys")
# -
# Or we query for the data at the actual timestamp
# +
# timestamp=(4, 4): only the fragment written at timestamp 4.
with tiledb.open(array_uri, timestamp=(4,4)) as arr:
    data = arr[:, :]
plt.imshow(data["val"], cmap="Greys")
# -
# Or any interval thereof
# +
# A scalar timestamp reads everything written up to and including it.
with tiledb.open(array_uri, timestamp=2) as arr:
    data = arr[:, :]
plt.imshow(data["val"], cmap="Greys")
# -
| notebooks/2c-quickstart-timetraveling.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# language: python
# name: python3
# ---
import os
from bs4 import BeautifulSoup
import pandas as pd
import numpy as n
import time
import datetime
import re
from tqdm import tqdm
# Making list of htmls
lst = os.listdir("html/")
len(lst)
# Making chunks
#Splitting list into number of chunks. I need this for the tags in the html code
# Create a function called "chunks" with two arguments, l and n:
def chunks(l, n):
    """Yield successive n-sized slices of *l*; the final slice may be shorter."""
    for start in range(0, len(l), n):
        yield l[start:start + n]
# Visiting each html
# +
resultlst = []
errors = []
for elem in tqdm(lst):
    # Read each saved HTML page; `with` guarantees the handle is closed
    # (the original leaked one open file per iteration).
    with open('html/' + elem, 'r') as file:
        text = file.read()
    html = BeautifulSoup(text, 'html.parser')
    html = html.find_all('td')
    # Each record spans 5 consecutive <td> cells.
    taglist = list(chunks(html, 5))
    for tags in taglist:
        try:
            mini_dict = {'ID': tags[0].text,
                         'URL': tags[1].text.split('added')[0],
                         'Date': tags[1].text.split('added')[1],
                         'Valid Phish': tags[3].text}
            resultlst.append(mini_dict)
        except (IndexError, AttributeError):
            # Malformed row (short final chunk or missing 'added' marker):
            # remember which file produced it instead of silently ignoring
            # every possible error as the bare `except:` did.
            errors.append(elem)
# -
# Making DataFrame
# Collect the scraped records into a DataFrame for inspection and export.
phishlist = pd.DataFrame(resultlst)
phishlist.info()

# Saving off the files
phishlist.to_csv('d/goverCERTphishes.csv', index=False)
| 02 Opening HTMLs.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3 (ipykernel)
# language: python
# name: python3
# ---
# # <div style="color:#303030;font-family:'arial blACK', sans-serif,monospace; text-align: center; padding: 50px 0; vertical-align:middle;" > <img src="https://github.com/PIA-Group/ScientIST-notebooks/blob/master/_Resources/Images/Lightbulb.png?raw=true" style=" background:#00a0e4;border-radius:10px;width:150px;text-align:left; margin-left:10%" /> <span style="position:relative; bottom:70px; margin-left:5%"> Analog to Digital Conversion - ADC </span> </div>
# ## <span style="color:#00a0e4;"> Keywords: </span>
# ``` Analog to Digital Conversion (ADC)```, ```Digital Signal Processing (DSP)```, ```Embedded Systems```
# # I. Introduction
# <br>
# <div class="title"style="width:100%; background:#00a0e4;font-family:'arial black',monospace; text-align: center; padding: 7px 0; border-radius: 5px 50px;margin-top:-15px" > </div>
# ## <div style="color:#00a0e4"> 1. Background </div>
# ### <div style="color:#00a0e4"> The Analog World</div>
#
# Any physical quantity that varies with time, space or any independent variable, can be defined as a signal. As they exist in the world, signals have a representation in any infinitely small portion of time, hence being designated as continuous.
#
# However, computational systems (either embedded - like the Arduino - or not) are only capable of dealing with binary representations (e.g. does a pin have voltage or not?). These are defined as digital signals; as you've seen in the previous lab while working with digital ports, the microcontroller translates 0V to binary 0 and 5V to binary 1. But how can one represent a voltage of 2.5V (e.g.)?
#
# The process of reducing a continuous signal to a discrete signal is defined as sampling, and has the goal of retrieving a digitally manageable numerical representation of the underlying process with a given numerical precision.
#
# This session aims at further understanding this fundamental process.
# #### <div style="color:#00a0e4"> What is the ADC?</div>
#
# The Analog to Digital Converter (ADC) of an embedded system is a peripheral capable of converting an analog voltage applied as an input to a pin, to a digital representation. This is done by mapping the voltage to a set of digital codes represented using a finite number of bits.
#
# In the example of the digital port (1-bit) we represent the input voltage at the pin with a set of two discrete levels $[0, 1]$. Expanding the number of bits, also increases our representation space. For example using two bits, if we consider all the possible states ($00$, $01$, $11$, $10$) we have a representation space of four discrete levels, i.e. $[0, 1, 2, 3]$).
#
# In a more general form, the representation space will have $2^n$ discrete levels, with $n$ being the resolution of the ADC (typically a fixed setting).
#
# ADCs can vary greatly between microcontrollers. The Arduino has an $n=10$-bit ADC, meaning it has the ability to map the analog input voltage to $1024$ ($2^{10}$) discrete levels. Other microcontrollers have $12$-bit ADCs ($2^{12} = 4096$ discrete levels) and some have $16$-bit ADCs ($2^{16} = 65536$ discrete levels).
#
# On the Seeeduino board, the ADC pins have an `A` as a prefix of their label ($A0$ through $A7$), to indicate these pins can read analog voltages.
# <img src="https://botland.store/img/art/inne/14993_2.jpg" width="500" border="0">
# <div style="width: 210px; height: 50px; outline: 5px solid #00ff00; position: absolute; bottom:12%; left:45%;"/>
# #### <div style="color:#00a0e4"> Relating ADC Value to Voltage</div>
#
# The ADC reports a ratiometric value. This means that the ADC assumes 5V is 1023 ($2^n-1$, given that discrete levels start at zero) and anything less than 5V will be a ratio between 5V and 1023, given by:
#
# $\frac{2^n-1}{V_{dd}}=\frac{ADC_i}{V_i}$
#
# where $V_{dd}$ is the operating voltage of the circuit, $V_i$ is the input voltage, and $ADC_i$ is the discrete level assigned by the ADC to the input voltage. Assuming a $10$-bit ADC and a system voltage of 5V, it becomes:
#
# $\frac{1023}{5}=\frac{ADC_i}{V_i}$
#
# <!--img src="https://cdn.sparkfun.com/assets/3/9/0/b/6/51140300ce395f777e000002.png" width="500" border="0"-->
#
# <!--img src="https://cdn.sparkfun.com/assets/4/0/2/7/9/5114021dce395f827d000002.png" width="400" border="0"-->
#
# This way, you can easily correspond the ADC reading obtained with the Arduino ($ADC_i$) to the input analog voltage ($V_i$).
# To read an analog voltage on the A0 pin of the Arduino board, you can use the default example, `Examples > 01.Basics > ReadAnalogVoltage`:
#
# ```cpp
# // the setup routine runs once when you press reset:
# void setup() {
# // initialize serial communication at 9600 bits per second:
# Serial.begin(9600);
# }
#
# // the loop routine runs over and over again forever:
# void loop() {
# // read the input on analog pin 0:
# int sensorValue = analogRead(A0);
# // Convert the analog reading (which goes from 0 - 1023) to a voltage (0 - 5V):
# float voltage = sensorValue * (5.0 / 1023.0);
# // print out the value you read:
# Serial.println(voltage);
# }
# ```
# <div style="background:#62d321;font-family:'arial', monospace; text-align: center; padding: 10px 0; border-radius:10px; width:70%; margin:auto " >
# <span style="font-size:20px;position:relative;color:white; "> Explore </span> <br>
# <div style="background:#c5e8b0;font-size:12px">
# You can learn more about ADCs at:
# <br><a href="https://learn.sparkfun.com/tutorials/analog-to-digital-conversion">https://learn.sparkfun.com/tutorials/analog-to-digital-conversion</a>
# </div>
# ### <div style="color:#00a0e4">Serial Communication</div>
#
# Embedded systems often communicate with other digital circuits (e.g. computers or other embedded systems). For those individual circuits to exchange information between one another, they must communicate in a way that both can understand, i.e. use what is known as a communication protocol.
#
# The Universal Asynchronous Receiver/Transmitter (UART), also known as "serial", is a common communication protocol found in embedded systems. It has been designed with a number of built-in mechanisms that help ensure robust data transfer between two digital systems, namely:
#
# - Baud rate
# - Data bits
# - Synchronization bits
# - Parity bits
# #### <div style="color:#00a0e4">Baud Rate</div>
#
# The baud rate specifies how fast data is sent over a serial line. It is usually expressed in units of bits-per-second (bps). If you invert the baud rate, you can find out just how long it takes to transmit a single bit.
#
# "Standard" baud are 1200, 2400, 4800, 19200, 38400, 57600, and 115200 bps, however, other speeds can be used. The only requirement to ensure communication, is that both devices are configured to operate at the same rate. The higher the baud rate, the faster data is sent/received
# <div style="background:#fe9b29; font-family:'arial', monospace;text-align: center; padding: 10px 0; border-radius:10px; width:70%; margin:auto " >
# <span style="font-size:20px;position:relative;color:white; "> Caution! </span> <br>
# <div style="background:#ffdab0;padding-left: 5px; font-size:12px">
# The baud rate is an important setting when two devices are communicating using the serial protocol. If a receiver is not decoding data at the same rate it was sent by the transmitter, generally the data will be garbled. If you don't see any data on the receiver or the decoded data is not what you expect, check to make sure that the baud rates match up.
# </div>
# #### <div style="color:#00a0e4">Framing the Data</div>
#
# Each block of data transmitted from your Arduino is actually sent in a frame, or sequence of bits (usually a byte). To enable a receiver to segment the transmitted data, frames (also known as packets) are delimited by appending synchronization and parity bits of known value to the transmitted data.
#
# <img src="https://cdn.sparkfun.com/r/700-700/assets/f/9/c/0/2/50d2066fce395fc43b000000.png" width="500" border="0">
#
# Let's explore each of these frame pieces:
#
# #### <div style="color:#00a0e4">Data Chunk</div>
#
# Block corresponding to the data to be transmitted. The amount of data in each packet can be configured to a number of bits ranging from 5 to 9.
#
#
# #### <div style="color:#00a0e4">Synchronization bits</div>
#
# The synchronization bits are two or three special bits transferred with each chunk of data. They are the start bit and the stop bit(s). True to their name, these bits mark the beginning and end of a packet. There's always only one start bit, but the number of stop bits is configurable to either one or two (though it's commonly left at one).
#
#
# #### <div style="color:#00a0e4">Parity bits</div>
#
# Parity is a form of very simple, low-level error checking. It comes in two flavors: odd or even. To produce the parity bit, all 5-9 bits of the data byte are added up, and the evenness of the sum decides whether the bit is set or not.
# <div style="background:#62d321;font-family:'arial', monospace; text-align: center; padding: 10px 0; border-radius:10px; width:70%; margin:auto " >
# <span style="font-size:20px;position:relative;color:white; "> Explore </span> <br>
# <div style="background:#c5e8b0;font-size:12px">
# Learn more about Serial Communication at:
# <br><a href="https://learn.sparkfun.com/tutorials/serial-communication">https://learn.sparkfun.com/tutorials/serial-communication</a>
# </div>
# ## <div style="color:#00a0e4"> 2. Objectives</div>
# * Understand the process of converting an analog signal to a digital representation
# * Analyze the effect of data types in the throughput of your embedded system
# * Learn about profiling and accurate time keeping in devices with limited resources
#
# ## <div style="color:#00a0e4"> 3. Materials </div>
# * Arduino IDE
# * 1x Breadboard
# * 2x 10kOhm resistors
# * 1x TMP36 temperature (TMP) sensor
# * 1x Arduino (or analogous device)
# * 1x USB cable
# <div style="background:#00bfc2;font-family:'arial', monospace; text-align: center; padding: 10px 0; border-radius:10px; width:70%; margin:auto " >
# <span style="font-size:20px;position:relative;color:white; "> Note </span> <br>
# <div style="background:#9eddde;font-size:12px">
# Supplementary data files are provided here:
# <br>
# <a href="https://drive.google.com/drive/folders/1Xju0-vw1S6gjKG7wXGeIR4XMT48tFMum?usp=sharing">https://drive.google.com/drive/folders/1Xju0-vw1S6gjKG7wXGeIR4XMT48tFMum?usp=sharing</a>
# <br>
# Use them <b>only</b> if you're not able to gather your own data.
# </div>
# # II. Experimental
# <br>
# <div style="width:100%; background:#00a0e4;color:#282828;font-family:'arial black'; text-align: center; padding: 7px 0; border-radius: 5px 50px; margin-top:-15px" > </div>
# Consider the Resistive Voltage Divider represented in the following figure. Assemble the circuit on a breadboard, powering it from your Arduino, and connect the analog output of the circuit to an analog input of your choice.
#
# <img src="https://github.com/PIA-Group/ScientIST-notebooks/blob/master/_Resources/Images/A.Signal_Acquisition_IMG/a013/Resistive_Divider.png?raw=true" width="400" border="0">
#
# The following firmware allows to sample the analog inputs at a configurable sampling rate and stream them through the serial port, together with the elapsed time since the board began running the program. A formatted sequence of comma-separated values (CSV) is used (e.g. <TIME>, <A>).
#
# Data streaming can be started and stopped on demand, by a command sent to the firmware through the serial port. Confirm the overall operation of the setup using the Arduino IDE Serial Monitor.
#
# **Commands:**
#
# - `S` - starts and stops the acquisition.
#
# - `F100` - sets the sampling frequency to 100 Hz. Another natural number value can be used.
# ```cpp
# bool state=false;
# int f=100, dt=0;
# char command;
# unsigned long t=0, lt=0;
#
# void setup() {
# // initialize the serial communication:
# Serial.begin(9600);
# dt=int(1000*1/(float)f);
# }
#
# void loop() {
# if (Serial.available()) {
# command = Serial.read();
# switch (command) {
# case 'S':
#         state=!state;
# break;
# case 'F':
# f=Serial.parseInt();
# Serial.println(f);
# dt=int(1000*1/(float)f);
# break;
# }
# lt=millis();
# }
# if (state) {
# t=millis();
# if ((t-lt)>=dt) {
# Serial.print(t);
# Serial.print(",");
# Serial.print(analogRead(A0));
# Serial.print("\n");
# lt=t;
# }
# }
# }
# ```
# # III. Explore
# <br>
# <div class='h1' style="width:100%; background:#00a0e4;color:#282828;font-family:'arial black'; text-align: center; padding: 7px 0; border-radius: 5px 50px;margin-top:-15px" > </div>
# ## <div style="color:#00a0e4"> 1. Quizz </div>
# **1.** Explain what the following blocks of code do:
# a)
# ```cpp
# bool state=false;
# int f=100, dt=0;
# char command;
# unsigned long t=0, lt=0;
# ```
# b)
# ```cpp
# void setup() {
# Serial.begin(9600);
# dt=int(1000*1/(float)f);
# }
# ```
# c)
# ```cpp
# void loop() {
# if (Serial.available()) {
# command = Serial.read();
# switch (command) {
# case 'S':
#         state=!state;
# break;
# case 'F':
# f=Serial.parseInt();
# Serial.println(f);
# dt=int(1000*1/(float)f);
# break;
# }
# lt=millis();
# }
# ...
# }
# ```
# d)
# ```cpp
# void loop() {
# ...
# if (state) {
# t=millis();
# if ((t-lt)>=dt) {
# Serial.print(t);
# Serial.print(",");
# Serial.print(analogRead(A0));
# Serial.print("\n");
# lt=t;
# }
# }
# }
# ```
# **2.** Considering the operating voltage of your Arduino and the resolution of the Analog-to-Digital Converter (ADC), determine the **expected range of quantization steps** produced by the ADC and the corresponding **mV/step**, as well as the **discrete level assigned to the analog output** of the voltage divider. Using the provided firmware, **analyze** the output of the **experimental circuit**; describe and comment your experimental findings in light of your theoretical calculations.
# <br>
# +
# Example: 1 Hz
import pylab as pl
import matplotlib.pyplot as plt

# Load the logged acquisition. CSV columns: elapsed time (ms), ADC code.
fname = './data/L2.1-1Hz-9600bps.csv'
raw = pl.loadtxt(fname, delimiter = ',')
col_1 = raw[:, 0]  # timestamps in milliseconds
col_2 = raw[:, 1]  # raw ADC readings
plt.title('Example - 1 Hz', {'size':14})
plt.xlabel('Time (ms)', color = "#00a0e4")
plt.ylabel('ADC', color = "#00a0e4")
plt.plot(col_1, col_2, color = "#00a0e4")
plt.show()
# -
# **3.** Configure the sampling rate on your firmware to 1Hz and the serial port baud rate to 9600bps, start the data acquisition and stop it after a couple of seconds. Derive the sampling period from the experimental data, and comment the results using statistical metrics to support your discussion. Repeat the experiment for sampling rates of 10Hz, 100Hz, and 1000Hz. You can use any software/programming language of your preference to analyze your results.
# <br>
#
# **TIP:** To facilitate the analysis, you can select all the content of the Arduino IDE Serial Monitor, copy it and paste the copied data to a spreadsheet (e.g. Excel); if the data is not automatically split into columns, save the data as a plaintext CSV file and load it.
# <br>
# +
# Example: 1 Hz
fname = './data/L2.2-1Hz-9600bps.csv'
raw = pl.loadtxt(fname, delimiter = ',')
col_1 = raw[:, 0]
col_2 = raw[:, 1]
plt.title('Example - 1 Hz', {'size':14})
plt.xlabel('Time (ms)', color = "#00a0e4")
plt.ylabel('ADC', color = "#00a0e4")
plt.plot(col_1, col_2, color = "#00a0e4")
plt.show()
# +
# Example: 10 Hz
fname = './data/L2.2-10Hz-9600bps.csv'
raw = pl.loadtxt(fname, delimiter = ',')
col_1 = raw[:, 0]
col_2 = raw[:, 1]
plt.title('Example - 10 Hz', {'size':14})
plt.xlabel('Time (ms)', color = "#00a0e4")
plt.ylabel('ADC', color = "#00a0e4")
plt.plot(col_1, col_2, color = "#00a0e4")
plt.show()
# +
# Example: 100 Hz
fname = './data/L2.2-100Hz-9600bps.csv'
raw = pl.loadtxt(fname, delimiter = ',')
col_1 = raw[:, 0]
col_2 = raw[:, 1]
plt.title('Example - 100 Hz', {'size':14})
plt.xlabel('Time (ms)', color = "#00a0e4")
plt.ylabel('ADC', color = "#00a0e4")
plt.plot(col_1, col_2, color = "#00a0e4")
plt.show()
# +
# Example: 1000 Hz
fname = './data/L2.2-1000Hz-9600bps.csv'
raw = pl.loadtxt(fname, delimiter = ',')
col_1 = raw[:, 0]
col_2 = raw[:, 1]
plt.title('Example - 1000 Hz', {'size':14})
plt.xlabel('Time (ms)', color = "#00a0e4")
plt.ylabel('ADC', color = "#00a0e4")
plt.plot(col_1, col_2, color = "#00a0e4")
plt.show()
# -
# **4.** With the sampling rate on your firmware set to 100Hz and the serial port baud rate to 300bps, start the data acquisition and stop it after a couple of seconds. Derive the sampling period from the experimental data, and comment the results using statistical metrics to support your discussion. Repeat the experiment for a baud rate of 115200bps.
#
# +
# Example: 100 Hz; baud rate 300bps
fname = './data/L2.3-100Hz-300bps.csv'
raw = pl.loadtxt(fname, delimiter = ',')
col_1 = raw[:, 0]
col_2 = raw[:, 1]
plt.title('Example - baud rate 300bps', {'size':14})
plt.xlabel('Time (ms)', color = "#00a0e4")
plt.ylabel('ADC', color = "#00a0e4")
plt.plot(col_1, col_2, color = "#00a0e4")
plt.show()
# +
# Example: 100 Hz; baud rate 1200bps
fname = './data/L2.3-100Hz-1200bps.csv'
raw = pl.loadtxt(fname, delimiter = ',')
col_1 = raw[:, 0]
col_2 = raw[:, 1]
plt.title('Example - baud rate 1200bps', {'size':14})
plt.xlabel('Time (ms)', color = "#00a0e4")
plt.ylabel('ADC', color = "#00a0e4")
plt.plot(col_1, col_2, color = "#00a0e4")
plt.show()
# +
# Example: 100 Hz; baud rate 9600bps
fname = './data/L2.3-100Hz-9600bps.csv'
raw = pl.loadtxt(fname, delimiter = ',')
col_1 = raw[:, 0]
col_2 = raw[:, 1]
plt.title('Example - baud rate 9600bps', {'size':14})
plt.xlabel('Time (ms)', color = "#00a0e4")
plt.ylabel('ADC', color = "#00a0e4")
plt.plot(col_1, col_2, color = "#00a0e4")
plt.show()
# +
# Example: 100 Hz; baud rate 115200bps
fname = './data/L2.3-100Hz-115200bps.csv'
raw = pl.loadtxt(fname, delimiter = ',')
col_1 = raw[:, 0]
col_2 = raw[:, 1]
plt.title('Example - baud rate 115200bps', {'size':14})
plt.xlabel('Time (ms)', color = "#00a0e4")
plt.ylabel('ADC', color = "#00a0e4")
plt.plot(col_1, col_2, color = "#00a0e4")
plt.show()
# -
# ## <div style="color:#00a0e4;"> 2. Beyond the Lab </div>
#
# This section is not mandatory and will not influence the evaluation of the lab. These are suplementary exercises designed for those who wish to explore and learn more about Arduino-based systems.
#
# **1.** Consider the Analog Devices TMP36 temperature (TMP) sensor represented in the following figure. Assemble the circuit on a breadboard, powering it from your Arduino, and connect the analog output to an analog input of your choice. Expand the firmware to, in addition to the data already sent, sample and stream also the TMP sensor data. With the sampling rate on your firmware set to 100Hz and the baud rate set to 9600bps, compare how sending the TMP data as raw ADC digital codes and as ºC affects the execution time.
#
# The measurement range for the Analog Devices TMP36 sensor used in this assembly is [-40; 125]ºC, has an offset of 0.5V (the sensor outputs 0.5V when it's at 0ºC), and the scale factor is 10mV/ºC [1].
#
# <img src="https://github.com/PIA-Group/ScientIST-notebooks/blob/master/_Resources/Images/A.Signal_Acquisition_IMG/a013/Setup TMP36.png?raw=true" width="600" border="0">
#
# TMP36 sensor. GND: 0V; 3V3: 3.3V power supply; A: Analog output.
#
# <div style="background:#fe9b29; font-family:'arial', monospace;text-align: center; padding: 10px 0; border-radius:10px; width:70%; margin:auto " >
# <span style="font-size:20px;position:relative;color:white; "> Caution! </span> <br>
# <div style="background:#ffdab0;font-size:12px">
# BE CAREFUL when connecting the temperature sensor to the breadboard; it has a specific position and CAN BE DAMAGED IF WRONGLY CONNECTED. <b>Triple check</b> your wiring before powering up the system, and check if the sensor is overheating.
# </div>
# </div>
# +
# Example: 100 Hz; baud rate 9600bps; RAW data
fname = './data/L2.4-100Hz-9600bps-raw.csv'
raw = pl.loadtxt(fname, delimiter = ',')
col_1 = raw[:, 0]
col_2 = raw[:, 1]
plt.title('Example - Raw data', {'size':14})
plt.xlabel('Time (ms)', color = "#00a0e4")
plt.ylabel('ADC', color = "#00a0e4")
plt.plot(col_1, col_2, color = "#00a0e4")
plt.show()
# +
# Example: 100 Hz; baud rate 9600bps; converted data
fname = './data/L2.4-100Hz-9600bps-converted.csv'
raw = pl.loadtxt(fname, delimiter = ',')
col_1 = raw[:, 0]
col_2 = raw[:, 1]
plt.title('Example - Converted data', {'size':14})
plt.xlabel('Time (ms)', color = "#00a0e4")
plt.ylabel('$\degree$C', color = "#00a0e4")
plt.plot(col_1, col_2, color = "#00a0e4")
plt.show()
# -
# ## <div style="color:#00a0e4"> 3. References </div>
#
# 1. https://www.tinkercad.com/dashboard?type=circuits&collection=designs
# 2. https://www.analog.com/media/en/technical-documentation/data-sheets/TMP35_36_37.pdf
# 3. https://www.arduino.cc/reference/tr/language/functions/communication/serial/begin/
#
# <div style="width: 100%; ">
# <div style="background:#00a0e4;color:white;font-family:'arial', monospace; text-align: center; padding: 50px 0; border-radius:10px; height:10px; width:100%; float:left " >
# <span style="font-size:12px;position:relative; top:-25px"> Please provide us your feedback <span style="font-size:14px;position:relative;COLOR:WHITE"> <a href="https://forms.gle/C8TdLQUAS9r8BNJM8">here</a>.</span></span>
# <br>
# <span style="font-size:17px;position:relative; top:-20px"> Suggestions are welcome! </span>
# </div>
# ```Contributors: Prof. <NAME>; <NAME>; <NAME>```
| A.Signal_Acquisition/L2. Analog-to-Digital Conversion (ADC).ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# language: python
# name: python3
# ---
# +
# -*- coding: utf-8 -*-
import random
import torch
import math
class DynamicNet(torch.nn.Module):
    def __init__(self):
        """
        In the constructor we instantiate five parameters and assign them
        as member variables.
        """
        super().__init__()
        self.a = torch.nn.Parameter(torch.randn(()))
        self.b = torch.nn.Parameter(torch.randn(()))
        self.c = torch.nn.Parameter(torch.randn(()))
        self.d = torch.nn.Parameter(torch.randn(()))
        self.e = torch.nn.Parameter(torch.randn(()))

    def forward(self, x):
        """
        For the forward pass of the model, we randomly choose either 4 or 5 and
        reuse the parameter e to compute the contribution of these orders.

        Since each forward pass builds a dynamic computation graph, we can use
        normal Python control-flow operators like loops or conditionals when
        defining the forward pass of the model.

        Here we also see that it is perfectly safe to reuse the same parameter
        many times when defining a computational graph.
        """
        y = self.a + self.b * x + self.c * x ** 2 + self.d * x ** 3
        for exp in range(4, random.randint(4, 6)):
            y = y + self.e * x ** exp
        return y

    def string(self):
        """
        Just like any class in Python, you can also define a custom method on
        PyTorch modules.
        """
        return f'y = {self.a.item()} + {self.b.item()} x + {self.c.item()} x^2 + {self.d.item()} x^3 + {self.e.item()} x^4 ? + {self.e.item()} x^5 ?'
# Create Tensors to hold the inputs and outputs.
x = torch.linspace(-math.pi, math.pi, 2000)
y = torch.sin(x)

# Construct our model by instantiating the class defined above.
model = DynamicNet()

# Construct a loss function and an optimizer. Training this strange model with
# vanilla stochastic gradient descent (SGD) is hard, so we use momentum.
criterion = torch.nn.MSELoss(reduction='sum')
optimizer = torch.optim.SGD(model.parameters(), lr=1e-8, momentum=0.9)

for t in range(30000):
    # Forward pass: compute predicted y by passing x to the model.
    y_pred = model(x)

    # Compute and (periodically) print the loss.
    loss = criterion(y_pred, y)
    if t % 2000 == 1999:
        print(t, loss.item())

    # Zero the gradients, perform a backward pass, and update the weights.
    optimizer.zero_grad()
    loss.backward()
    optimizer.step()

print(f'Result: {model.string()}')
| gaze_project/pythons/torch_examples/.ipynb_checkpoints/torch_ex8-checkpoint.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# name: python3
# ---
# + [markdown] id="tRLe8VENOQM9" colab_type="text"
# # MSE loss
#
# The current mlpack implementation has not been used here. <br>
# Instead I have used the standardised format which I am using for other loss functions.
# + [markdown] id="yPWU9O3LO94x" colab_type="text"
# ### Imports and installation of mlpack
# + id="ZoQR5XuyPAb9" colab_type="code" colab={}
# %%capture
# !sudo apt-get install libmlpack-dev
import torch
import torch.nn as nn
# + [markdown] colab_type="text" id="y6sYXvFs4H2k"
# ### PyTorch
# + [markdown] colab_type="text" id="YBHd3wIt4H2l"
# #### None Reduction
#
# + colab_type="code" id="tbvKldxa4H2m" outputId="87414d14-f4a6-4c34-e86f-234702f2305f" colab={"base_uri": "https://localhost:8080/", "height": 381}
loss = torch.nn.MSELoss(reduction='none')
input = torch.tensor([[-0.0494, 1.6028, 0.9639],
[-1.1958, 0.0737, 0.9648],
[-1.0486, -0.7091, 0.0745],
[-0.2121, 0.8612, 0.5924]], requires_grad=True)
target = torch.tensor([[ 0.4316, 0.5106, 0.7059],
[ 0.0164, 0.9255, -0.8288],
[-0.4478, 0.5571, -0.0231],
[ 1.1452, 0.0864, -1.0526]])
output = loss(input, target)
output.backward(torch.ones(input.shape))
print("Input : ")
print(input)
print("Target : ")
print(target)
print("FORWARD : ")
print("Loss : ")
print(output)
print("BACKWARD : ")
print(input.grad)
# + [markdown] colab_type="text" id="2eAofEln4H2p"
# #### Sum Reduction
# + colab_type="code" id="alko8BnL4H2p" outputId="1463791a-faa1-4ee7-ff1c-60ebaabb3a11" colab={"base_uri": "https://localhost:8080/", "height": 329}
loss = torch.nn.MSELoss(reduction='sum')
input = torch.tensor([[-0.0494, 1.6028, 0.9639],
[-1.1958, 0.0737, 0.9648],
[-1.0486, -0.7091, 0.0745],
[-0.2121, 0.8612, 0.5924]], requires_grad=True)
target = torch.tensor([[ 0.4316, 0.5106, 0.7059],
[ 0.0164, 0.9255, -0.8288],
[-0.4478, 0.5571, -0.0231],
[ 1.1452, 0.0864, -1.0526]])
output = loss(input, target)
output.backward()
print("Input : ")
print(input)
print("Target : ")
print(target)
print("FORWARD : ")
print("Loss : ")
print(output)
print("BACKWARD : ")
print(input.grad)
# + [markdown] colab_type="text" id="vJukNLu24H2s"
# #### Mean reduction
# + colab_type="code" id="_nlOMbXC4H2t" outputId="8d70b683-3dad-4c0f-bf6b-ee415bdd6631" colab={"base_uri": "https://localhost:8080/", "height": 329}
loss = torch.nn.MSELoss(reduction='mean')
input = torch.tensor([[-0.0494, 1.6028, 0.9639],
[-1.1958, 0.0737, 0.9648],
[-1.0486, -0.7091, 0.0745],
[-0.2121, 0.8612, 0.5924]], requires_grad=True)
target = torch.tensor([[ 0.4316, 0.5106, 0.7059],
[ 0.0164, 0.9255, -0.8288],
[-0.4478, 0.5571, -0.0231],
[ 1.1452, 0.0864, -1.0526]])
output = loss(input, target)
output.backward()
print("Input : ")
print(input)
print("Target : ")
print(target)
print("FORWARD : ")
print("Loss : ")
print(output)
print("BACKWARD : ")
print(input.grad)
# + [markdown] colab_type="text" id="h7fZHZDk4H2w"
# ### mlpack
#
# + [markdown] colab_type="text" id="20ckFsa14H2z"
# #### FORWARD AND BACKWARD - NEW FORMAT
#
# + colab_type="code" id="cFlCk9a64H20" colab={}
# %%capture
# %%writefile test.cpp
#include <iostream>
#include <armadillo>
using namespace std;
using namespace arma;
int main()
{
// Constructor
arma::mat x,y;
arma::mat weight;
x << -0.0494 << 1.6028 << 0.9639 << endr
<< -1.1958 << 0.0737 << 0.9648 << endr
<< -1.0486 << -0.7091 << 0.0745 << endr
<< -0.2121 << 0.8612 << 0.5924 << endr;
y << 0.4316 << 0.5106 << 0.7059 << endr
<< 0.0164 << 0.9255 << -0.8288 << endr
<< -0.4478 << 0.5571 << -0.0231 << endr
<< 1.1452 << 0.0864 << -1.0526 << endr;
// Forward
arma::mat loss_none = arma::square(x - y);
double loss_sum = arma::accu(loss_none);
double loss_mean = loss_sum / x.n_elem;
// Backward
arma::mat output;
output = 2 * (x - y);
// Display
cout << "------------------------------------------------------------------" << endl;
cout << "USER-PROVIDED MATRICES : " << endl;
cout << "------------------------------------------------------------------" << endl;
cout << "Input shape : "<< x.n_rows << " " << x.n_cols << endl;
cout << "Input : " << endl << x << endl;
cout << "Target shape : "<< y.n_rows << " " << y.n_cols << endl;
cout << "Target : " << endl << y << endl;
cout << "------------------------------------------------------------------" << endl;
cout << "SUM " << endl;
cout << "------------------------------------------------------------------" << endl;
cout << "FORWARD : " << endl;
cout << "Loss : \n" << loss_none << '\n';
cout << "Loss (sum):\n" << loss_sum << '\n';
cout << "BACKWARD : " << endl;
cout << "Output shape : "<< output.n_rows << " " << output.n_cols << endl;
cout << "Output (sum) : " << endl << output << endl;
cout << "Sum of all values in this matrix : " << arma::as_scalar(arma::accu(output)) << endl;
cout << "------------------------------------------------------------------" << endl;
cout << "MEAN " << endl;
cout << "------------------------------------------------------------------" << endl;
cout << "FORWARD : " << endl;
cout << "Loss (mean):\n" << loss_mean << '\n';
cout << "BACKWARD : " << endl;
cout << "Output shape : "<< output.n_rows << " " << output.n_cols << endl;
cout << "Output (mean) : " << endl << output / x.n_elem << endl;
cout << "Sum of all values in this matrix : " << arma::as_scalar(arma::accu(output / x.n_elem)) << endl;
cout << "------------------------------------------------------------------" << endl;
return 0;
}
# + id="xHT34eorQquz" colab_type="code" outputId="607dfdc5-0d70-4371-b905-8d5388543577" colab={"base_uri": "https://localhost:8080/", "height": 953} magic_args="bash" language="script"
# g++ test.cpp -o test -larmadillo && ./test
| loss_functions/MSE_Loss.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# language: python
# name: python3
# ---
# +
# Copyright 2020 Google Inc.
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
# http://www.apache.org/licenses/LICENSE-2.0
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# -
# # ScaNN Demo with GloVe Dataset
# +
import numpy as np
import h5py
import os
import requests
import tempfile
import time
import scann
# -
# ### Download dataset
# Download the GloVe benchmark into a temporary directory and open it with h5py.
with tempfile.TemporaryDirectory() as tmp:
    response = requests.get("http://ann-benchmarks.com/glove-100-angular.hdf5")
    response.raise_for_status()  # fail fast on a bad download instead of writing garbage
    loc = os.path.join(tmp, "glove.hdf5")
    with open(loc, 'wb') as f:
        f.write(response.content)
    # Explicit read-only mode: the no-mode form is ambiguous/deprecated across
    # h5py versions (older defaults were 'a').
    # NOTE(review): the file handle outlives the temporary directory; this
    # relies on POSIX unlink semantics and would fail on Windows -- confirm.
    glove_h5py = h5py.File(loc, 'r')
list(glove_h5py.keys())
dataset = glove_h5py['train']
queries = glove_h5py['test']
print(dataset.shape)
print(queries.shape)
# ### Create ScaNN searcher
# Normalize every row to unit length so that dot product == cosine similarity.
normalized_dataset = dataset / np.linalg.norm(dataset, axis=1)[:, np.newaxis]

# configure ScaNN as a tree - asymmetric hash hybrid with reordering
# anisotropic quantization as described in the paper; see README
searcher = scann.ScannBuilder(normalized_dataset, 10, "dot_product").tree(
    num_leaves=2000, num_leaves_to_search=100, training_sample_size=250000).score_ah(
    2, anisotropic_quantization_threshold=0.2).reorder(100).create_pybind()
def compute_recall(neighbors, true_neighbors):
    """Return the fraction of ground-truth neighbors recovered (recall@k)."""
    hits = sum(
        np.intersect1d(gt_row, found_row).shape[0]
        for gt_row, found_row in zip(true_neighbors, neighbors)
    )
    return hits / true_neighbors.size
# ### ScaNN interface features
# +
# this will search the top 100 of the 2000 leaves, and compute
# the exact dot products of the top 100 candidates from asymmetric
# hashing to get the final top 10 candidates.
start = time.time()
neighbors, distances = searcher.search_batched(queries)
end = time.time()
# we are given top 100 neighbors in the ground truth, so select top 10
print("Recall:", compute_recall(neighbors, glove_h5py['neighbors'][:, :10]))
print("Time:", end - start)
# +
# increasing the leaves to search increases recall at the cost of speed
start = time.time()
neighbors, distances = searcher.search_batched(queries, leaves_to_search=150)
end = time.time()
print("Recall:", compute_recall(neighbors, glove_h5py['neighbors'][:, :10]))
print("Time:", end - start)
# +
# increasing reordering (the exact scoring of top AH candidates) has a similar effect.
start = time.time()
neighbors, distances = searcher.search_batched(queries, leaves_to_search=150, pre_reorder_num_neighbors=250)
end = time.time()
print("Recall:", compute_recall(neighbors, glove_h5py['neighbors'][:, :10]))
print("Time:", end - start)
# +
# we can also dynamically configure the number of neighbors returned
# currently returns 10 as configured in ScannBuilder()
neighbors, distances = searcher.search_batched(queries)
print(neighbors.shape, distances.shape)
# now returns 20
neighbors, distances = searcher.search_batched(queries, final_num_neighbors=20)
print(neighbors.shape, distances.shape)
# +
# we have been exclusively calling batch search so far; the single-query call has the same API
start = time.time()
neighbors, distances = searcher.search(queries[0], final_num_neighbors=5)
end = time.time()
print(neighbors)
print(distances)
print("Latency (ms):", 1000*(end - start))
| scann/docs/example.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# language: python
# name: python3
# ---
# # Lasso Regression with PySpark
# This notebook creates and measures a LASSO regression model using Spark MLlib.
#
# * Method: LASSO regression
# * Dataset: MLlib Ridge Data
# ## Imports
# +
# Python core libs
from os import getlogin, path, environ
import numpy as np
# Set SPARK_HOME
environ["SPARK_HOME"] = "/home/students/spark-2.2.0"
# Findspark
import findspark
findspark.init()
# PySpark and PySpark SQL
from pyspark import SparkContext
from pyspark.sql import SQLContext
from pyspark.sql.types import FloatType
import pyspark.sql.functions as F
# PySpark MLlib
from pyspark.mllib.regression import LabeledPoint, LinearRegressionWithSGD
import matplotlib.pyplot as plt
# %matplotlib inline
import warnings
warnings.filterwarnings('ignore')
# -
# ## Get Some Context
# Create a SparkContext and a SQLContext context to use
# Create one SparkContext per JVM and wrap it in a SQLContext for DataFrame work
sc = SparkContext(appName="LASSO Regression with Spark")
sqlContext = SQLContext(sc)
# ## Load and Prepare the Data
# Data
# Each line of lpsa.data is "label,f1 f2 ... f8" (prostate-cancer dataset shipped with Spark)
DATA_FILE = "/home/students/data/mllib/ridge-data/lpsa.data"
# +
def parse_point(line):
    """Convert one 'label,f1 f2 ...' text record into a LabeledPoint.

    A labeled point is a local vector, either dense or sparse,
    associated with a label/response.
    """
    normalized = line.replace(',', ' ')
    numbers = [float(token) for token in normalized.split(' ')]
    label, features = numbers[0], numbers[1:]
    return LabeledPoint(label, features)
# Read the raw text file as an RDD and parse each line into a LabeledPoint
data = sc.textFile(DATA_FILE)
parsed_data = data.map(parse_point)
# -
# Show a single record: LabeledPoint(label, [features])
parsed_data.take(1)
# ## Fit a Linear Regression Model with LASSO
#
# Train a linear regression model using [Stochastic Gradient Descent (SGD)](https://en.wikipedia.org/wiki/Stochastic_gradient_descent) and L1 (LASSO) regularization.
#
# * iterations: number of iterations. (default: 100)
# * step: step parameter used in SGD. (default: 1.0)
# * regParam: The regularizer parameter. (default: 0.0)
# * regType: the type of regularizer used for training our model. Supported values:
# * `l1` for using L1 regularization
# * `l2` for using L2 regularization
# * None for no regularization (default)
# Create an instance of a LinearRegressionWithSGD and train it on the RDD of LabeledPoints
# Train a linear regression with SGD and L1 (LASSO) regularization.
# NOTE(review): step=1e-8 is an unusually tiny learning rate - presumably chosen
# to keep SGD stable on this un-scaled data; confirm before reusing elsewhere.
model = LinearRegressionWithSGD.train(parsed_data, iterations=100, step=0.00000001, regType='l1')

# Intercept for the model
print('Estimated intercept coefficient: {}'.format(model.intercept))

# ## Create Predictions
# Pair each record's actual label with the model's prediction
values_and_predictions = parsed_data.map(lambda p: (p.label, model.predict(p.features)))
values_and_predictions.take(1)

# +
# Create a plot to compare the actuals (values) and predictions
vp_list = values_and_predictions.collect()
vp_list_prepared = [(x[0], float(x[1])) for x in vp_list]
values_predictions_df = sqlContext.createDataFrame(vp_list_prepared, ["actual", "predicted"])

actuals = values_predictions_df.rdd.map(lambda r: r.actual).collect()
predictions = values_predictions_df.rdd.map(lambda r: r.predicted).collect()

# Summary statistics for both series
print("Min actual: {}".format(min(actuals)))
print("Mean actual: {}".format(np.mean(actuals)))
print("Max actual: {}\n".format(max(actuals)))
print("Min prediction: {}".format(min(predictions)))
print("Mean prediction: {}".format(np.mean(predictions)))
print("Max prediction: {}".format(max(predictions)))

fig = plt.figure(figsize=(20,10))
plt.scatter(actuals, predictions)
plt.xlabel("Actuals")
plt.ylabel("Predictions")
# Fixed typo in the displayed title (was "Predictiions")
plt.title("Actuals vs. Predictions")
plt.show()
# -
# ## Model Evaluation
# ### Mean Squared Error
#
# * A measure of the average magnitude of the errors without consideration for their direction; measures accuracy for continuous variables.
# * Always non-negative
# * Values closer to zero (0) are better
# +
# Calculate the Mean Squared Error: mean of squared (actual - predicted)
MSE = values_and_predictions \
    .map(lambda vp: (vp[0] - vp[1])**2) \
    .reduce(lambda x, y: x + y) / values_and_predictions.count()
print("Mean Squared Error = " + str(MSE))
# -
# ## Cleanup
# Release the SparkContext so another one can be created in this JVM
sc.stop()
| code/day_5/4 - Lasso Regression with PySpark.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# language: python
# name: python3
# ---
# ## Scrape Singapore venues
# +
import time
import requests
import simplejson as json
import pandas as pd
import numpy as np
import glob
import config ## Python file including FourSquare client ID and Secret ##
import os
# -
dir1 = './Q2_Downloaded_JSON_Files'
dir2 = './Q2_Downloaded_JSON_Files/per_venues/'
# ### If the directory does not exist, obtain venue list that matches the criteria by Explore Endpoint, and save as JSON files (1 JSON file for up to 50 venues)
# ### Reference: https://developer.foursquare.com/docs/api/venues/explore
# ###
#
# +
# Download venue lists from the Foursquare "explore" endpoint, one JSON file
# per (section, offset) page of up to 50 venues.
if not os.path.exists(dir1):
    os.makedirs(dir1)

params_explore = {
    'client_id': config.client_id
    , 'client_secret': config.client_secret
    #, 'll': '1.332592,103.84755399999995' # location of NS19
    #, 'radius': '400' # radius 400 meters
    , 'near': 'Singapore'
    #, 'llAcc': '20'
    , 'limit': '50'
    #, 'intend': 'match' # Not available for explore
    #, 'intent': 'match'
    , 'v': '20180201'
}
params_explore_str = '&'.join(['='.join(i) for i in params_explore.items()])

section_list = ['food', 'drinks', 'coffee', 'shops', 'arts', 'outdoors', 'sights', 'trending', 'nextVenues', 'topPicks'] ## All
#section_list = ['food'] ## To extract only restaurants

url_base = 'https://api.foursquare.com/v2/venues/explore?' + params_explore_str

for section in section_list:
    i = 0
    time.sleep(3)  # throttle to stay under the API rate limit
    # BUGFIX: the original URL used '§ion=' - a mojibake of '&section='
    # ('&sect' is the HTML entity for the section sign), so the 'section'
    # query parameter was never actually sent to the API.
    req = requests.get(url_base + '&section=' + section + '&offset=' + str(i))
    jdata = json.loads(req.text)
    with open('./Q2_Downloaded_JSON_Files/explore_' + section + '_' + str(i)+'.json', 'w') as outfile:
        json.dump(jdata, outfile)
    if 'meta' in jdata:
        print(jdata['meta'])
    if 'response' in jdata:
        if 'totalResults' in jdata['response']:
            num_venue = jdata['response']['totalResults']
            print('total number of venues of', section, ': ', num_venue)
            # Page through the remaining results, 50 venues per request;
            # offset 0 was already fetched above.
            while i < num_venue:
                if i > 0:
                    time.sleep(3)
                    req = requests.get(url_base + '&section=' + section + '&offset=' + str(i))
                    jdata = json.loads(req.text)
                    with open('./Q2_Downloaded_JSON_Files/explore_' + section + '_' + str(i)+'.json', 'w') as outfile:
                        json.dump(jdata, outfile)
                i += 50
# -
# ### Extract venue info from the downloaded JSON files, and store in a Data Frame. ###
# Sanity check: list the section name embedded in each downloaded file name.
# Index 35 skips './Q2_Downloaded_JSON_Files/explore_' and -5 drops '.json'.
for json_file in glob.glob('./Q2_Downloaded_JSON_Files/explore_*.json'):
    section = json_file[35:-5].split('_')[0]
    print(section)
# +
# Build one DataFrame row per venue (keyed by Foursquare venue id) from all
# downloaded explore_*.json pages. Every field is guarded with an 'in' check
# because the API omits keys that have no value.
df = pd.DataFrame()
df.index.name = 'Venue ID'

for json_file in glob.glob('./Q2_Downloaded_JSON_Files/explore_*.json'):
    # Recover the section name from the file name (see slicing note above)
    section = json_file[35:-5].split('_')[0]
    #print(section)
    with open(json_file,'r') as json_data:
        jdata = json.load(json_data)
    if 'response' in jdata:
        #print(json_file)
        if 'groups' in jdata['response']:
            groups = jdata['response']['groups']
            for group in groups:
                if 'items' in group:
                    items = group['items']
                    for item in items:
                        if 'venue' in item:
                            venue = item['venue']
                            if 'id' in venue:
                                venue_id = venue['id']
                                #print(venue_id)
                                if 'name' in venue:
                                    df.loc[venue_id, 'Venue Name'] = venue['name']
                                df.loc[venue_id, 'Section'] = section
                                if 'categories' in venue:
                                    venue_categories = venue['categories']
                                    venue_category_list = []
                                    for venue_category in venue_categories:
                                        venue_category_list.append(venue_category['name'])
                                        #df.loc[venue_id, 'Primary Category'] = ''
                                        # Record the category Foursquare flags as primary
                                        if 'primary' in venue_category and venue_category['primary']:
                                            df.loc[venue_id, 'Primary Category'] = venue_category['name']
                                    df.loc[venue_id, 'Categories'] = '|'.join(venue_category_list)
                                    #df.loc[venue_id, 'Venue Category'] = venue_categories[0]['name']
                                if 'rating' in venue:
                                    df.loc[venue_id, 'Rating'] = venue['rating']
                                if 'ratingSignals' in venue:
                                    df.loc[venue_id, 'Rating Signals'] = venue['ratingSignals']
                                if 'stats' in venue:
                                    if 'checkinsCount' in venue['stats']:
                                        df.loc[venue_id, 'Checkins Count'] = venue['stats']['checkinsCount']
                                    if 'usersCount' in venue['stats']:
                                        df.loc[venue_id, 'Users Count'] = venue['stats']['usersCount']
                                    if 'tipCount' in venue['stats']:
                                        df.loc[venue_id, 'Comment Count'] = venue['stats']['tipCount']
                                if 'photos' in venue:
                                    if 'count' in venue['photos']:
                                        df.loc[venue_id, 'Photos Count'] = venue['photos']['count']
                                if 'location' in venue:
                                    if 'address' in venue['location']:
                                        df.loc[venue_id, 'Address'] = venue['location']['address']
                                    if 'lat' in venue['location']:
                                        df.loc[venue_id, 'Lat'] = venue['location']['lat']
                                    if 'lng' in venue['location']:
                                        df.loc[venue_id, 'Lng'] = venue['location']['lng']
                                    if 'postalCode' in venue['location']:
                                        df.loc[venue_id, 'Postal Code'] = venue['location']['postalCode']
                                if 'contact' in venue:
                                    if 'twitter' in venue['contact']:
                                        df.loc[venue_id, 'Twitter'] = venue['contact']['twitter']
                                    if 'facebookUsername' in venue['contact']:
                                        df.loc[venue_id, 'Facebook Username'] = venue['contact']['facebookUsername']
                                if 'url' in venue:
                                    df.loc[venue_id, 'URL'] = venue['url']
                                if 'verified' in venue:
                                    df.loc[venue_id, 'Verified'] = venue['verified']

print('Number of venues: ', len(df))
display(df)
# -
# ### If the directory does not exist, send request of "Tips" (add "/tips" before "?" in the URL) to obtain tips .
# ### Reference: https://developer.foursquare.com/docs/api/venues/tips
# ###
# Download the tips (user comments) for every venue collected above,
# one JSON file per venue, via the venues/{id}/tips endpoint.
if not os.path.exists(dir2):
    os.makedirs(dir2)

params_tips = {
    'client_id': config.client_id
    , 'client_secret': config.client_secret
    , 'v': '20180201'
    , 'sort': 'recent'
    , 'limit': '500'
}
params_tips_str = '&'.join(['='.join(i) for i in params_tips.items()])

for venue_id in df.index:
    time.sleep(0.75)  # throttle requests
    #req = requests.get('https://api.foursquare.com/v2/venues/' + venue_id + '?' + params_tips_str)
    req = requests.get('https://api.foursquare.com/v2/venues/' + venue_id + '/tips' + '?' + params_tips_str)
    jdata = json.loads(req.text)
    with open('./Q2_Downloaded_JSON_Files/per_venues/tips_venue_id_'+ venue_id +'.json', 'w') as outfile:
        json.dump(jdata, outfile)
    if 'meta' in jdata:
        print('Venue ID: ', venue_id, jdata['meta'])
# ### Read the downloaded JSON files for each venue and Tips Count and Tips to Data Frame ###
# +
# Build one DataFrame row per tip (keyed by tip id) from the per-venue JSON
# files, and record each venue's comment count back into df.
tips_df = pd.DataFrame()
tips_df.index.name = 'Tip ID'

for json_file in glob.glob('./Q2_Downloaded_JSON_Files/per_venues/tips_venue_id_*.json'):
    # Index 52 skips the directory + 'tips_venue_id_' prefix; -5 drops '.json'
    venue_id = json_file[52:-5]
    #print(venue_id)
    with open(json_file,'r') as json_data:
        jdata = json.load(json_data)
    comment_list = []
    if 'response' in jdata:
        if 'tips' in jdata['response']:
            tips = jdata['response']['tips']
            if 'count' in tips:
                comments_count = tips['count']
                df.loc[venue_id,'Comments Count'] = comments_count
            if 'items' in tips:
                for item in tips['items']:
                    if 'id' in item:
                        tip_id = item['id']
                        tips_df.loc[tip_id, 'Venue ID'] = venue_id
                        if 'createdAt' in item:
                            # NOTE(review): presumably a Unix timestamp - confirm
                            tips_df.loc[tip_id, 'Created At'] = item['createdAt']
                        if 'text' in item:
                            tips_df.loc[tip_id, 'Comment'] = item['text']
                            comment_list.append(item['text'])
                        if 'agreeCount' in item:
                            tips_df.loc[tip_id, 'Agree Count'] = item['agreeCount']
                        if 'disagreeCount' in item:
                            tips_df.loc[tip_id, 'Disagree Count'] = item['disagreeCount']
                        if 'likes' in item:
                            if 'count' in item['likes']:
                                tips_df.loc[tip_id, 'Likes Count'] = item['likes']['count']
                        if 'user' in item:
                            user = item['user']
                            if 'id' in user:
                                tips_df.loc[tip_id, 'User ID'] = user['id']
                            if 'firstName' in user:
                                tips_df.loc[tip_id, 'User First Name'] = user['firstName']
                            if 'lastName' in user:
                                tips_df.loc[tip_id, 'User Last Name'] = user['lastName']
                            if 'gender' in user:
                                tips_df.loc[tip_id, 'User Gender'] = user['gender']
    #df.loc[venue_id,'Comments'] = ' ||| '.join(comment_list)

display(tips_df)
# + active=""
# display(df)
# -
# ### Calculate Sentiment Score for the venue comments by VADER Sentiment Analysis ###
# + active=""
# from nltk.sentiment.vader import SentimentIntensityAnalyzer
#
# senti = SentimentIntensityAnalyzer()
# df.loc[:,'Compound Score Based on Comments'] = df.loc[:,'Comments'].apply(lambda x: senti.polarity_scores(x)['compound'])
# + active=""
# df
# -
# ### (1) What are the 5 most commented venues in Singapore?
df.sort_values(by='Comments Count', ascending=False)[:5]
# ### (2) What are the 5 best venues in Singapore, by looking at the overall ratings?
#
# + active=""
# df.sort_values(by='Rating', ascending=False)[:5]
# -
# ### (3) What are the 5 best venues in Singapore, by judging from the comments?
# + active=""
# df.sort_values(by='Compound Score Based on Comments', ascending=False)[:5]
# -
# ### Save the Data Frames as csv files.
# +
tips_df.index.name = 'Comment ID'
df.to_csv('./Venues.csv')
tips_df.to_csv('./User_Comments.csv')
# -
# w = pd.ExcelWriter('./Foursquare.xlsx')
# sheetname = 'Venues'
# df.to_excel(w, sheetname)
# sheetname = 'User_Comments'
# tips_df.to_excel(w, sheetname)
# w.save()
# +
tips_df.index.name = 'Comment ID'
df.to_csv('./Venues.tsv', sep = '\t')
tips_df.to_csv('./User_Comments.tsv', sep = '\t')
# -
| examples/Scrape_Foursquare.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# language: python
# name: python3
# ---
#hide
# %load_ext autoreload
# %autoreload 2
# +
# default_exp define_models
# -
# # Define Models
#
# > This module contains helper functions to define a DGLM, DCMM, or DBCM. They use the first several observations of a time series to initialize the model by treating them as static generalized linear models. These functions are called automatically when running a PyBATS analysis.
# +
#hide
#exporti
import numpy as np
from pybats_nbdev.dbcm import dbcm
from pybats_nbdev.dcmm import dcmm
from pybats_nbdev.dglm import dlm, pois_dglm, bern_dglm, bin_dglm
import statsmodels.api as sm
# -
# ## Define a DGLM
#export
def define_dglm(Y, X, family="normal", n=None,
                ntrend=1, nlf=0, nhol=0,
                seasPeriods=[7], seasHarmComponents = [[1, 2, 3]],
                deltrend = .995, delregn =.995, delseas = .999, dellf=.999, delVar = 0.999, delhol=1,
                n0 = 1, s0 = 1,
                a0=None, R0=None,
                adapt_discount='info', discount_forecast=False,
                prior_length=None, return_aR=False,
                **kwargs):
    """
    A helper function to define a DGLM.
    This function is especially useful if you do not know how to specify a prior mean and variance (a0, R0) for the state vector.

    When a0/R0 are not supplied, the prior is learned by fitting a static GLM
    of the chosen family to the first `prior_length` observations.
    NOTE(review): `seasPeriods`/`seasHarmComponents` are mutable default
    arguments; they are only read here, but callers should not mutate them.
    """
    if a0 is None or R0 is None:
        # Inferring the number of observations to use for the prior
        if prior_length is not None:
            n_obs = prior_length
        else:
            n_obs = len(Y)

        # Adding an intercept to the X matrix, if it doesn't already exist
        if X is None and ntrend >= 1:
            X_withintercept = np.ones([n_obs, 1])
        elif ntrend >= 1:
            if len(X.shape) == 1:
                X = X.reshape(-1,1)
            if not np.all(X[:,0] == 1):
                X_withintercept = np.c_[np.ones([n_obs, 1]), X[:n_obs]]
            else:
                X_withintercept = X[:n_obs]

        # Selecting only the correct number of observations (relevant if prior_length is given)
        Y = Y[:n_obs]
        if n is not None:
            n = n[:n_obs]

        # Infer the number of regression and holiday components
        nregn = ncol(X_withintercept) - nhol - 1
        nseas = 2 * sum(map(len, seasHarmComponents))

        # Learn a prior based on the first 'prior_length' observations
        # by fitting a static GLM of the matching family
        if family == "normal":
            prior_mean, prior_cov, p = define_dlm_params(Y, X_withintercept)
        elif family == "poisson":
            prior_mean, prior_cov, p = define_pois_params(Y, X_withintercept)
        elif family == "bernoulli":
            prior_mean, prior_cov, p = define_bern_params(Y, X_withintercept)
        elif family == "binomial":
            prior_mean, prior_cov, p = define_bin_params(Y, n, X_withintercept)

        # Define a standard prior - setting latent factor priors at 1
        # Unless prior mean (a0) and prior variance (R0) are supplied as arguments
        # Layout: [intercept, extra trend, regression, seasonal, latent factors]
        prior = [[prior_mean[0]], [0] * (ntrend - 1), [*prior_mean[1:]], [0] * nseas, [1] * nlf]
        if a0 is None:
            a0 = np.array([m for ms in prior for m in ms]).reshape(-1, 1)
        if R0 is None:
            R0 = np.identity(a0.shape[0])
            # Place the learned covariance over the intercept + regression entries
            idx = [i for i in range(p + ntrend - 1)]
            for j in range(1, ntrend):
                idx.pop(j)
            R0[np.ix_(idx, idx)] = prior_cov

            # Add variance to holiday indicators - few observations, may be significantly different than other days
            ihol = range(ntrend + nregn, ntrend + nregn + nhol)
            for idx in ihol:
                R0[idx, idx] = R0[idx, idx] * 2
    else:
        # Infer the number of regression and holiday components
        # from the supplied prior mean vector
        p = len(a0)
        nseas = 2 * sum(map(len, seasHarmComponents))
        nregn = p - ntrend - nhol - nseas - nlf

    if return_aR:
        # Caller only wants the prior, not a constructed model
        return a0, R0, nregn

    # Optional random-effects extension factor (Poisson/Bernoulli/Binomial only)
    if kwargs.get('rho') is not None:
        rho = kwargs.get('rho')
    else:
        rho = 1

    # Construct the DGLM of the requested family with a shared set of
    # trend/regression/seasonal/latent-factor components and discount factors
    if family == "normal":
        mod = dlm(a0=a0, R0=R0,
                  nregn=nregn,
                  ntrend=ntrend,
                  nlf=nlf,
                  nhol=nhol,
                  seasPeriods=seasPeriods,
                  seasHarmComponents=seasHarmComponents,
                  deltrend=deltrend, delregn=delregn,
                  delseas=delseas, delhol=delhol,
                  dellf=dellf,
                  n0=n0, s0=s0, delVar=delVar,
                  adapt_discount=adapt_discount,
                  discount_forecast = discount_forecast)
    elif family == "poisson":
        mod = pois_dglm(a0=a0, R0=R0,
                        nregn=nregn,
                        ntrend=ntrend,
                        nlf=nlf,
                        nhol=nhol,
                        seasPeriods=seasPeriods,
                        seasHarmComponents=seasHarmComponents,
                        deltrend=deltrend, delregn=delregn,
                        delseas=delseas, delhol=delhol,
                        dellf=dellf,
                        adapt_discount=adapt_discount,
                        discount_forecast = discount_forecast,
                        rho = rho)
    elif family == "bernoulli":
        mod = bern_dglm(a0=a0, R0=R0,
                        nregn=nregn,
                        ntrend=ntrend,
                        nlf=nlf,
                        nhol=nhol,
                        seasPeriods=seasPeriods,
                        seasHarmComponents=seasHarmComponents,
                        deltrend=deltrend, delregn=delregn,
                        delseas=delseas, delhol=delhol,
                        dellf=dellf,
                        adapt_discount=adapt_discount,
                        discount_forecast = discount_forecast,
                        rho=rho)
    elif family == "binomial":
        mod = bin_dglm(a0=a0, R0=R0,
                       nregn=nregn,
                       ntrend=ntrend,
                       nlf=nlf,
                       nhol=nhol,
                       seasPeriods=seasPeriods,
                       seasHarmComponents=seasHarmComponents,
                       deltrend=deltrend, delregn=delregn,
                       delseas=delseas, delhol=delhol,
                       dellf=dellf,
                       adapt_discount=adapt_discount,
                       discount_forecast = discount_forecast,
                       rho=rho)

    return mod
# `define_dglm` is a simple helper function to initialize a model. The primary arguments to pass in to `define_dglm` are:
#
# - `Y` and `X`: The observations and predictors
# - `family`: The observation family, which can be 'poisson', 'bernoulli', 'normal', or 'binomial'
# - `prior_length`: The number of observations to use for defining the model
#
# You can pass in many other arguments, including anything accepted by `dglm.__init__`, such as the number of trend components, the seasonal components, and the number of holidays. The number of regression predictors, `nregn`, is automatically inferred from the number of columns in `X`.
#
# Below is a simple example of using `define_dglm` to initialize a Poisson DGLM. In most PyBATS use cases, this function is called indirectly through `analysis`. However, it can be useful to call directly to gain more control over customizing the model definition.
# +
import numpy as np
import pandas as pd

from pybats_nbdev.shared import load_sales_example
from pybats_nbdev.define_models import define_dglm

# Example: initialize a Poisson DGLM from the first 21 days of the
# bundled sales dataset, with a weekly seasonal pattern.
data = load_sales_example()
Y = data['Sales'].values
X = data['Advertising'].values

mod = define_dglm(Y, X,
                  family='poisson',
                  prior_length=21,
                  ntrend=1,
                  nhol=0,
                  seasPeriods=[7],
                  seasHarmComponents=[[1,2,3]]
                  )

# Show the learned prior coefficients
mod.get_coef()
# -
#export
def define_dlm_params(Y, X=None):
    """Fit a static OLS regression to set the prior mean/covariance of a normal DLM."""
    n_obs = len(Y)
    p = ncol(X)
    # Zellner-style g-prior shrinkage factor, at least 2
    g = max(2, int(n_obs / 2))
    ols_fit = sm.OLS(Y, X).fit()
    mean = ols_fit.params
    cov = fill_diag((g / (1 + g)) * ols_fit.cov_params())
    return mean, cov, p
#export
def define_bern_params(Y, X=None):
    """Fit a static logistic regression to set the Bernoulli DGLM prior.

    Y is treated as a binary indicator of Y != 0. Falls back to an
    intercept-only prior when the GLM fit fails (e.g. perfect separation
    or a singular design matrix).
    """
    n = len(Y)
    p = ncol(X)

    nonzeros = Y.nonzero()[0]
    # g-prior style shrinkage factor
    g = max(2, int(n/2))
    try:
        # Encode Y as (success, failure) indicator columns, as sm.GLM expects
        Y_bern = np.c_[np.zeros([n, 1]), np.ones([n, 1])]
        Y_bern[Y.nonzero()[0], 0] = 1
        Y_bern[Y.nonzero()[0], 1] = 0
        bern_mod = sm.GLM(endog=Y_bern, exog=X, family=sm.families.Binomial()).fit()
        bern_params = bern_mod.params
        bern_cov = fill_diag((g/(1+g))*bern_mod.cov_params())
    except Exception:
        # Was a bare `except:`, which also swallowed KeyboardInterrupt/SystemExit.
        # Fall back to a prior centered on the empirical logit of P(Y != 0).
        if len(nonzeros) > 0:
            bernmean = len(nonzeros) / (n + 1)
            bernmean = np.log(bernmean / (1 - bernmean))
            bern_params = np.zeros(p)
            bern_params[0] = bernmean
        else:
            bern_params = np.zeros(p)
        bern_cov = np.identity(p)

    return bern_params, bern_cov, p
#export
def define_bin_params(Y, n, X=None):
    """Fit a static binomial regression (Y successes of n trials) to set the binomial DGLM prior.

    Falls back to an intercept-only prior when the GLM fit fails.
    """
    n_obs = len(Y)
    p = ncol(X)
    # g-prior style shrinkage factor
    g = max(2, int(n_obs / 2))
    try:
        bin_mod = sm.GLM(endog=np.c_[Y, n], exog=X, family=sm.families.Binomial()).fit()
        bin_params = bin_mod.params
        bin_cov = fill_diag((g/(1+g))*bin_mod.cov_params())
    except Exception:
        # Was a bare `except:`, which also swallowed KeyboardInterrupt/SystemExit.
        # Fall back to the empirical logit of the overall success rate.
        if np.sum(Y) > 0:
            binmean = np.sum(Y) / np.sum(n)
            binmean = np.log(binmean / (1 - binmean))
            bin_params = np.zeros(p)
            bin_params[0] = binmean
        else:
            # No successes observed: bound the intercept at -3 on the logit scale
            bin_params = np.zeros(p)
            bin_params[0] = np.max([-3, -np.sum(n)])
        bin_cov = np.identity(p)

    return bin_params, bin_cov, p
#export
def define_pois_params(Y, X=None):
    """Fit a static Poisson regression to set the Poisson DGLM prior.

    Falls back to a zero-mean, identity-covariance prior when the GLM fit fails.
    """
    n = len(Y)
    p = ncol(X)
    # g-prior style shrinkage factor
    g = max(2, int(n/2))
    try:
        pois_mod = sm.GLM(Y, X,
                          family=sm.families.Poisson()).fit()
        pois_params = pois_mod.params
        pois_cov = fill_diag((g/(1+g))*pois_mod.cov_params())
    except Exception:
        # Was a bare `except:`, which also swallowed KeyboardInterrupt/SystemExit.
        pois_params = np.zeros(p)
        pois_cov = np.identity(p)

    return pois_params, pois_cov, p
# These functions are the core of `define_dglm`. They take the first `prior_length` data points and fit a static, non-dynamic, generalized linear model (GLM), to initialize the DGLM.
#exporti
def ncol(x):
    """Number of columns in *x*: 0 for None, 1 for 1-D input, else the second dimension."""
    if x is None:
        return 0
    shape = np.shape(x)
    return 1 if len(shape) == 1 else shape[1]
#exporti
def fill_diag(cov):
    """Replace zero diagonal entries of *cov* with 1, in place, and return it."""
    new_diag = np.where(cov.diagonal() == 0, 1, cov.diagonal())
    np.fill_diagonal(cov, new_diag)
    return cov
# ## Define Combinations of DGLMs
#export
def define_dcmm(Y, X,
                ntrend=1, nlf=0, nhol = 0, rho=1,
                seasPeriods = [7], seasHarmComponents = [[1,2,3]],
                deltrend_bern=.995, delregn_bern=.995, delseas_bern=.995, dellf_bern=.999, delhol_bern=1,
                deltrend_pois=.998, delregn_pois=.995, delseas_pois=.995, dellf_pois=.999, delhol_pois=1,
                a0_bern = None, R0_bern = None, a0_pois = None, R0_pois = None,
                interpolate=True, adapt_discount=False, prior_length = None,
                **kwargs):
    """
    A helper function to define a DCMM.

    A DCMM couples a Bernoulli DGLM (is the count zero or not?) with a
    shifted Poisson DGLM (the count minus 1, given it is nonzero).
    Priors for both parts are learned via define_dglm unless supplied.
    """
    # The Poisson part models Y - 1 on the nonzero observations only
    nonzeros = Y.nonzero()[0]
    pois_mod = define_dglm(Y[nonzeros] - 1, X[nonzeros], family="poisson", ntrend=ntrend, nlf=nlf, nhol=nhol,
                           seasPeriods=seasPeriods, seasHarmComponents=seasHarmComponents,
                           a0=a0_pois, R0=R0_pois, prior_length=prior_length)
    # The Bernoulli part models the zero / nonzero indicator on all observations
    bern_mod = define_dglm(Y, X, family="bernoulli", ntrend=ntrend, nlf=nlf, nhol=nhol,
                           seasPeriods=seasPeriods, seasHarmComponents=seasHarmComponents,
                           a0=a0_bern, R0=R0_bern, prior_length=prior_length)

    # Assemble the coupled model, passing each part's learned prior and
    # structure through to the dcmm constructor
    mod = dcmm(a0_bern = bern_mod.a, R0_bern = bern_mod.R,
               nregn_bern = bern_mod.nregn_exhol,
               ntrend_bern = bern_mod.ntrend,
               nlf_bern= bern_mod.nlf,
               nhol_bern=bern_mod.nhol,
               seasPeriods_bern = bern_mod.seasPeriods,
               seasHarmComponents_bern = bern_mod.seasHarmComponents,
               deltrend_bern = deltrend_bern, delregn_bern = delregn_bern,
               delseas_bern = delseas_bern,
               dellf_bern=dellf_bern,
               delhol_bern = delhol_bern,
               a0_pois = pois_mod.a, R0_pois = pois_mod.R,
               nregn_pois = pois_mod.nregn_exhol,
               ntrend_pois = pois_mod.ntrend,
               nlf_pois=pois_mod.nlf,
               nhol_pois=pois_mod.nhol,
               seasPeriods_pois = pois_mod.seasPeriods,
               seasHarmComponents_pois = pois_mod.seasHarmComponents,
               deltrend_pois = deltrend_pois, delregn_pois = delregn_pois,
               delseas_pois = delseas_pois,
               dellf_pois=dellf_pois,
               delhol_pois = delhol_pois,
               rho = rho,
               interpolate=interpolate,
               adapt_discount=adapt_discount
               )

    return mod
# Dynamic Count Mixture Models are the combination of a Bernoulli and Poisson DGLM. `define_dcmm` is a convenient wrapper to help initialize both components of the `dcmm` together, and is called automatically by `analysis_dcmm`.
#export
def define_dbcm(Y_transaction, X_transaction=None, Y_cascade=None, X_cascade=None, excess_baskets=[], excess_values=[],
                ntrend=1, nlf=0, nhol=0, rho=1,
                seasPeriods=[7], seasHarmComponents=[[1, 2, 3]],
                deltrend_bern=.995, delregn_bern=.995, delseas_bern=.995, dellf_bern=.999, delhol_bern=1,
                deltrend_pois=.998, delregn_pois=.995, delseas_pois=.995, dellf_pois=.999, delhol_pois=1,
                deltrend_cascade=.999, delregn_cascade=1., delseas_cascade=.999, dellf_cascade=.999, delhol_cascade=1.,
                a0_bern=None, R0_bern=None, a0_pois=None, R0_pois=None, a0_cascade=None, R0_cascade=None,
                interpolate=True, adapt_discount=False, prior_length=None,
                **kwargs):
    """
    A helper function to define a DBCM.

    A DBCM = a DCMM on transaction counts + a cascade of binomial DGLMs on
    basket sizes, with an empirical distribution for excess (large) baskets.
    NOTE(review): if `prior_length` is given but <= 0, the cascade variables
    are never initialized and the dbcm() call below would fail - confirm
    callers always pass a positive prior_length or None.
    """
    # Define the dcmm on the transaction counts
    mod_dcmm = define_dcmm(Y = Y_transaction, X = X_transaction,
                           ntrend=ntrend, nlf=nlf, nhol=nhol, rho=rho,
                           seasPeriods=seasPeriods, seasHarmComponents=seasHarmComponents,
                           deltrend_bern=deltrend_bern, delregn_bern=delregn_bern, delseas_bern=delseas_bern,
                           dellf_bern=dellf_bern, delhol_bern = delhol_bern,
                           deltrend_pois=deltrend_pois, delregn_pois=delregn_pois, delseas_pois=delseas_pois,
                           dellf_pois=dellf_pois, delhol_pois=delhol_pois,
                           a0_bern=a0_bern, R0_bern=R0_bern, a0_pois=a0_pois, R0_pois=R0_pois,
                           interpolate=interpolate, adapt_discount=adapt_discount, prior_length=prior_length)

    # Calculate the prior means for the Cascade
    def cascade_prior_mean(alpha, beta):
        # +1 on both counts acts as a Beta(1,1) smoothing prior
        alpha += 1
        beta += 1
        mean = alpha / (alpha + beta)
        logit_mean = np.log(mean / (1 - mean))
        return logit_mean

    # Calculate the prior means for the cascades
    if prior_length is not None:
        if prior_length > 0:
            ncascade = Y_cascade.shape[1]
            nregn_cascade = ncol(X_cascade)
            ntrend_cascade = 1
            pcascade = nregn_cascade + ntrend_cascade

            # Aggregate counts over the prior window, dropping rows with NaNs,
            # then set each cascade level's intercept to its empirical logit
            Yc = np.c_[Y_transaction, Y_cascade]
            nonan = ~np.any(np.isnan(Yc), axis=1)
            Yc = np.sum(Yc[:prior_length][nonan[:prior_length]], axis=0)
            means = [cascade_prior_mean(Yc[i + 1], Yc[i] - Yc[i + 1]) for i in range(ncascade)]
            a0_cascade = [np.zeros(pcascade).reshape(-1, 1) for i in range(ncascade)]
            for i, m in enumerate(means):
                a0_cascade[i][0] = m
            R0_cascade = [0.1 * np.identity(pcascade) for i in range(ncascade)]

            # Initialize empirically observed excess baskets
            excess = []
            if len(excess_values) == 0 and len(excess_baskets) > 0:
                # Convert cumulative basket-size counts into per-size counts
                counts = np.sum(excess_baskets[:prior_length, :], axis=0)
                counts[:len(counts) - 1] = counts[:len(counts) - 1] - counts[1:]
                for val, count in enumerate(counts):
                    excess.extend([val + ncascade + 1 for c in range(count)])
            else:
                for e in excess_values[:prior_length]:
                    excess.extend(e)
    else:
        # No prior window: use zero-mean priors, inferring dimensions from
        # the arguments or from explicit 'pcascade'/'ncascade' kwargs
        if a0_cascade is None:
            if kwargs.get('pcascade') is None:
                nregn_cascade = ncol(X_cascade)
                ntrend_cascade = 1
                pcascade = nregn_cascade + ntrend_cascade
            else:
                pcascade = kwargs.get('pcascade')
            if kwargs.get('ncascade') is None:
                ncascade = Y_cascade.shape[1]
            else:
                ncascade = kwargs.get('ncascade')
            a0_cascade = [np.zeros(pcascade).reshape(-1, 1) for i in range(ncascade)]
        else:
            nregn_cascade = len(a0_cascade) - 1
            ntrend_cascade = 1

        if R0_cascade is None:
            if kwargs.get('pcascade') is None:
                nregn_cascade = ncol(X_cascade)
                ntrend_cascade = 1
                pcascade = nregn_cascade + ntrend_cascade
            else:
                pcascade = kwargs.get('pcascade')
            if kwargs.get('ncascade') is None:
                ncascade = Y_cascade.shape[1]
            else:
                ncascade = kwargs.get('ncascade')
            R0_cascade = [0.1 * np.identity(pcascade) for i in range(ncascade)]

        excess = []

    # Define the model
    mod = dbcm(mod_dcmm=mod_dcmm,
               ncascade=ncascade,
               a0_cascade=a0_cascade,  # List of length ncascade
               R0_cascade=R0_cascade,  # List of length ncascade
               nregn_cascade=nregn_cascade,
               ntrend_cascade=1,
               nlf_cascade=0,
               seasPeriods_cascade=[],
               seasHarmComponents_cascade=[],
               deltrend_cascade=deltrend_cascade, delregn_cascade=delregn_cascade,
               delseas_cascade=delseas_cascade, dellf_cascade=dellf_cascade, delhol_cascade=delhol_cascade,
               excess=excess)

    return mod
# Dynamic Binary Cascade Models are the combination of a DCMM and a cascade of binomial DGLMs. `define_dbcm` is a convenient wrapper to help initialize all of the components of a `dbcm` together, and is called automatically by `analysis_dbcm`.
#hide
from nbdev.export import notebook2script
notebook2script()
| nbs/03_define_models.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# name: python3
# ---
# + [markdown] id="view-in-github" colab_type="text"
# <a href="https://colab.research.google.com/github/vigneshwaran-dev/CV-research-timeline/blob/main/VGG/vgg19.ipynb" target="_parent"><img src="https://colab.research.google.com/assets/colab-badge.svg" alt="Open In Colab"/></a>
# + id="shQIYIt6JbcA"
import tensorflow as tf
from tensorflow.keras import Sequential
from tensorflow.keras.layers import Dense, Activation, Dropout, Flatten, Conv2D, MaxPooling2D
from tensorflow.keras.losses import categorical_crossentropy
from tensorflow.keras.optimizers import SGD
# + [markdown] id="nMVPxhNGl-MI"
# Defining the Model as per the Original Paper
# + id="wNHRgxhVKRJ_"
# Build VGG19 (configuration E: 16 conv layers in blocks of 2/2/4/4/4,
# plus 3 fully-connected layers), following the original paper.
model = Sequential();

# 1st Convolutional Block
# 2 x conv(64, 3x3, same padding) + 2x2 max-pool; input is 224x224 RGB
model.add(Conv2D(input_shape=(224, 224, 3), filters=64, kernel_size=(3, 3), padding='same'))
model.add(Activation('relu'))
model.add(Conv2D(filters=64, kernel_size=(3, 3), padding='same'))
model.add(Activation('relu'))
model.add(MaxPooling2D(pool_size=(2, 2), strides=(2, 2), padding='same'))

# 2nd Convolutional Block
# 2 x conv(128) + pool
model.add(Conv2D(filters=128, kernel_size=(3, 3), padding='same'))
model.add(Activation('relu'))
model.add(Conv2D(filters=128, kernel_size=(3, 3), padding='same'))
model.add(Activation('relu'))
model.add(MaxPooling2D(pool_size=(2, 2), strides=(2, 2), padding='same'))

# 3rd Convolutional Block
# 4 x conv(256) + pool (VGG19 has four convs here, vs. VGG16's three)
model.add(Conv2D(filters=256, kernel_size=(3, 3), padding='same'))
model.add(Activation('relu'))
model.add(Conv2D(filters=256, kernel_size=(3, 3), padding='same'))
model.add(Activation('relu'))
model.add(Conv2D(filters=256, kernel_size=(3, 3), padding='same'))
model.add(Activation('relu'))
model.add(Conv2D(filters=256, kernel_size=(3, 3), padding='same'))
model.add(Activation('relu'))
model.add(MaxPooling2D(pool_size=(2, 2), strides=(2, 2), padding='same'))

# 4th Convolutional Block
# 4 x conv(512) + pool
model.add(Conv2D(filters=512, kernel_size=(3, 3), padding='same'))
model.add(Activation('relu'))
model.add(Conv2D(filters=512, kernel_size=(3, 3), padding='same'))
model.add(Activation('relu'))
model.add(Conv2D(filters=512, kernel_size=(3, 3), padding='same'))
model.add(Activation('relu'))
model.add(Conv2D(filters=512, kernel_size=(3, 3), padding='same'))
model.add(Activation('relu'))
model.add(MaxPooling2D(pool_size=(2, 2), strides=(2, 2), padding='same'))

# 5th Convolutional Block
# 4 x conv(512) + pool
model.add(Conv2D(filters=512, kernel_size=(3, 3), padding='same'))
model.add(Activation('relu'))
model.add(Conv2D(filters=512, kernel_size=(3, 3), padding='same'))
model.add(Activation('relu'))
model.add(Conv2D(filters=512, kernel_size=(3, 3), padding='same'))
model.add(Activation('relu'))
model.add(Conv2D(filters=512, kernel_size=(3, 3), padding='same'))
model.add(Activation('relu'))
model.add(MaxPooling2D(pool_size=(2, 2), strides=(2, 2), padding='same'))

# 1st Dense Layer
# Flatten + fc(4096) with dropout 0.5, as in the paper
model.add(Flatten())
model.add(Dense(4096))
model.add(Activation('relu'))
model.add(Dropout(0.5))

# 2nd Dense Layer
model.add(Dense(4096))
model.add(Activation('relu'))
model.add(Dropout(0.5))

# Output layer: 1000-way softmax (ImageNet classes)
model.add(Dense(1000))
model.add(Activation('softmax'))
# + colab={"base_uri": "https://localhost:8080/"} id="dgmKxaerN-OP" outputId="08148014-baf0-46b3-f89e-4850b6515288"
model.summary()
# + id="mPJ6al6pLn5k"
# Multi-class setup from the paper: cross-entropy loss with plain SGD (lr=0.01)
model.compile(loss=categorical_crossentropy,
              optimizer=SGD(learning_rate=0.01),
              metrics=['accuracy'])
# + [markdown] id="etzsyGSKmGn7"
# Considering the data to be present in the TRAIN_DATA_LOCATION and VALIDATION_DATA_LOCATION directories, and running it through data generators to perform live data augmentation during the training process
# + id="BSvuN4XSdzNI"
from tensorflow.keras.preprocessing.image import ImageDataGenerator

# flow_from_directory expects one sub-folder per class under each directory.
train_dir = 'TRAIN_DATA_LOCATION'
valid_dir = 'VALIDATION_DATA_LOCATION'
BATCH_SIZE = 32

# On-the-fly augmentation for the training set only; validation images are
# merely rescaled so that evaluation reflects the raw data.
train_datagen = ImageDataGenerator(rescale=1./255,
                                   rotation_range=10,
                                   width_shift_range=0.1,
                                   height_shift_range=0.1,
                                   shear_range=0.1,
                                   zoom_range=0.1)
train_generator = train_datagen.flow_from_directory(train_dir,
                                                    target_size=(224, 224),
                                                    color_mode='rgb',
                                                    batch_size=BATCH_SIZE,
                                                    seed=1,
                                                    shuffle=True,
                                                    class_mode='categorical')
valid_datagen = ImageDataGenerator(rescale=1.0/255.0)
valid_generator = valid_datagen.flow_from_directory(valid_dir,
                                                    target_size=(224, 224),
                                                    color_mode='rgb',
                                                    batch_size=BATCH_SIZE,
                                                    seed=7,
                                                    shuffle=True,
                                                    class_mode='categorical')
# Sample counts are used later to derive steps_per_epoch / validation_steps.
train_num = train_generator.samples
# BUG FIX: `valid_num` is consumed by model.fit(validation_steps=...) further
# down, but was never defined anywhere — that raised a NameError at fit time.
valid_num = valid_generator.samples
# + [markdown] id="vCkprKt6mbm3"
# Training the Model
# + id="QlUCsMp-kzXJ"
import datetime

# Unique TensorBoard log directory per run, e.g. logs/fit/20240101-120000.
log_dir = 'logs/fit/' + datetime.datetime.now().strftime('%Y%m%d-%H%M%S')
tensorboard_callback = tf.keras.callbacks.TensorBoard(log_dir=log_dir)
callback_list = [tensorboard_callback]

# BUG FIX: the original passed an undefined name `valid_num` to
# validation_steps (NameError); derive the validation sample count directly
# from the generator instead.
valid_num = valid_generator.samples

model.fit(train_generator,
          epochs=1,
          steps_per_epoch=train_num // BATCH_SIZE,
          validation_data=valid_generator,
          validation_steps=valid_num // BATCH_SIZE,
          callbacks=callback_list,
          verbose=1)

# Persist architecture + weights in a single HDF5 file.
model.save('vgg19.h5')
# + [markdown] id="5Bj7xKU4md-y"
# Visualizing the performance using Tensorboard
# + id="W6pvAAeylQiC"
# %load_ext tensorboard
# %tensorboard --logdir logs/fit
# + [markdown] id="QFxjcbSdmjtK"
# Prediction
# + id="MEhIFtiEmjMl"
# Grab one batch from the validation generator and predict its class indices.
x_valid, label_batch = next(iter(valid_generator))
# FIX: Sequential.predict_classes() was deprecated and then removed in
# TensorFlow 2.6; taking the argmax over the softmax probabilities returned
# by predict() yields the same class indices.
prediction_values = model.predict(x_valid).argmax(axis=-1)
print(prediction_values)
| VGG/vgg19.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# language: python
# name: python3
# ---
# + colab={"base_uri": "https://localhost:8080/"} id="quHhEHDwJJay" outputId="fb3fcdbb-610e-49c3-eb6b-99100bfb151e"
from urllib.request import urlopen
# Fetch a simple static page and dump its raw HTML bytes.
html=urlopen('http://pythonscraping.com/pages/page1.html')
print(html.read())
# + colab={"base_uri": "https://localhost:8080/"} id="msVnqcLVPE6B" outputId="148a0ef6-ee4c-474b-cd60-76915798be04"
from bs4 import BeautifulSoup
html = urlopen("http://www.pythonscraping.com/pages/page1.html")
# NOTE(review): no explicit parser is passed here, so BeautifulSoup picks the
# "best available" one and newer versions emit a GuessedAtParserWarning —
# consider BeautifulSoup(html.read(), 'html.parser') for reproducible output.
bs = BeautifulSoup(html.read())
print(bs)
# -
# Dotted tag access returns the FIRST matching descendant; all three of the
# lines below resolve to the same <h1> element.
print(bs.h1)
print(bs.html.body.h1)
print(bs.body.h1)
# Re-fetch: the previous response object was already consumed by .read().
html = urlopen("http://www.pythonscraping.com/pages/page1.html")
# lxml is more tolerant when parsing messy HTML (missing body/head sections,
# unclosed tags) and is faster than the built-in parser.
bs = BeautifulSoup(html.read(),'lxml')
print(bs)
# ## Sometimes we need to understand what is really going on when scraping and handle those failure cases by hand
html = urlopen("http://www.pythonscraping.com/pages/page1.html")
# Two things can go wrong when opening a URL:
#   1. the page is not found (HTTP error status)  -> HTTPError
#   2. the server cannot be reached at all        -> URLError
print(html)
# To handle the errors explicitly:
from urllib.error import HTTPError
from urllib.error import URLError
try:
    html = urlopen("http://www.pythonscraping.com/pages/page1.html")
except HTTPError as e:
    # HTTPError must be caught first: it is a subclass of URLError.
    print(e)
except URLError as Ur:
    print("Server could not be found")
else :
    bs = BeautifulSoup(html.read(),"lxml")
    try:
        # A tag that does not exist resolves to None; asking for .h1 on that
        # None raises AttributeError, which is trapped here.
        badContent = bs.nonExistingTag.h1
    except AttributeError as ae :
        print("Tag Was not Found")
    else :
        print(badContent)
# +
from urllib.request import urlopen
from urllib.error import HTTPError
from bs4 import BeautifulSoup
def getTitle(url):
    """Return the page's <body><h1> Tag, or None on any fetch/parse failure.

    Failures handled:
      * HTTPError / URLError from urlopen (bad status, unreachable server)
      * AttributeError while drilling into the parse tree (missing body/h1)
    """
    try:
        html = urlopen(url)
    except (HTTPError, URLError):
        # BUG FIX: the original caught only HTTPError, so an unreachable
        # server raised URLError straight through to the caller, defeating
        # the "return None on failure" contract this function advertises.
        return None
    try:
        bs = BeautifulSoup(html.read(), 'lxml')
        title = bs.body.h1
    except AttributeError:
        # Page had no <body> or no <h1>.
        return None
    return title
# Exercise the helper: prints the <h1> title or a fallback message.
title = getTitle("http://www.pythonscraping.com/pages/page1.html")
if (title == None):
    print("Title Could not found")
else :
    print(title)
# -
# BeautifulSoup also accepts the response object itself (not only .read()).
html = urlopen("http://www.pythonscraping.com/pages/page1.html")
bs = BeautifulSoup(html, "lxml")
print(bs)
# + colab={"base_uri": "https://localhost:8080/"} id="VgHA4a9nH77U" outputId="3baaaf93-c44e-4022-e1ab-d580d097f63c"
from urllib.request import urlopen
from bs4 import BeautifulSoup
html = urlopen("http://www.pythonscraping.com/pages/warandpeace.html")
bs = BeautifulSoup(html, "lxml")
# Filter <span> tags by CSS class: "green" holds character names, "red" holds
# spoken dialogue in this sample page.
nameListGreen = bs.findAll("span", {"class" : ["green"]})
storyListRed = bs.findAll("span" , {"class" : ["red"]})
for name in nameListGreen:
    print(name.get_text())
for name in storyListRed:
    print(name.get_text())
# -
# Collect the set of distinct tag names present anywhere in the document.
alltags = set(tag.name for tag in bs.findAll())
print(alltags)
# +
# Passing a list of tag names matches any of them.
# NOTE(review): the local name `random` shadows the stdlib `random` module.
random = bs.findAll(["body","div"])
for r in random:
    print(r.getText()) # Text content only, without tags
    print(r) # Full markup, with tags
    break
# -
hlist = bs.findAll(["h1","h2"])
for h in hlist:
    print(h)
# + colab={"base_uri": "https://localhost:8080/"} id="vV4JqP2mLjG7" outputId="3be4010e-638c-47d0-f212-144e4422d861"
# Match every heading level in one call.
hList = bs.findAll(["h1","h2","h3","h4","h5","h6"])
for h in hList:
    print(h)
# + colab={"base_uri": "https://localhost:8080/"} id="FFDixErCMvIW" outputId="27650dd5-bedf-4a73-9190-293fa87a8b46"
# Match <span> tags whose text is exactly "the prince".
# NOTE(review): the `text` argument was renamed to `string` in bs4 4.4;
# `text=` still works but is deprecated on newer releases.
nameList =bs.findAll(["span"],text="the prince")
for name in nameList:
    print(name)
# find() returns only the FIRST match (a single Tag); findAll() returns a list.
nameList = bs.find(class_="red")
nameList2 = bs.findAll("span",{"class":"green"})
print(nameList)
print(nameList2)
# + [markdown] colab={"base_uri": "https://localhost:8080/"} id="Ju9jY0KiN4oo" outputId="5c4d2ead-d42b-44cb-e5ef-664234e0fe4c"
# Objects on Beautiful soup
# - BeautifulSoup
# - Tag Objects  List or Single value depends on (findAll and find)
# - NavigableString objs < Represent text within tags >
# - Comment obj < Represent the Comments>
# + colab={"base_uri": "https://localhost:8080/"} id="aRjkKxckOrIv" outputId="8d73860d-2096-4b29-ea9b-e1918c4e265e"
# Navigating trees
html = urlopen("http://www.pythonscraping.com/pages/page3.html")
bs = BeautifulSoup(html,"lxml")
print(bs.html.head)
# -
# In BeautifulSoup every tag below another tag is treated as a *descendant* —
# basically like a grandchild and everything further down the family line.
# Example: a father named Badang has a son Rino; Rino marries and has a child
# named Sambir — Sambir is Badang's descendant.
#
# bs.findAll also searches through descendants.
# Descendant examples:
print(bs.h1)
print(bs.html.h1)
print(bs.body.h1)
print(bs.findAll('img'))
# .children yields only DIRECT children (one level down), not all descendants.
for child in bs.find("table", {"id" : "giftList"}).children:
    print(child)
# +
# Dealing with siblings:
# start from the first <tr> of the table, then iterate over every sibling row
# that follows it (the first row itself is not included in next_siblings).
for sibling in bs.find("table", {"id" : "giftList"}).tr.next_siblings:
    print(sibling)
    print("\n\n")
# -
# Dealing with parents:
# read it as: take the parent of the matched <img> tag, then navigate around
# it via previous_sibling / next_sibling before iterating the result.
for parent in bs.find("img",{"src" : "../img/gifts/img1.jpg"}).parent.previous_sibling.previous_sibling.next_sibling:
    print(parent)
| .ipynb_checkpoints/WebScrapingBasicToAdvanced-checkpoint.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# language: python
# name: python3
# ---
# + [markdown] pycharm={"name": "#%% md\n"}
# **Introduction to model validation**
# ___
# - What is model validation?
# - Model validation consists of:
# - ensuring your model performs as expected on new data
# - testing model performance on holdout datasets
# - selecting the best model, parameters, and accuracy metrics
# - achieving the best accuracy for the data given
# - scikit-learn modeling review
# - Basic modeling steps
#     - model = RandomForestRegressor(n_estimators=500, random_state=1111)
# - model.fit(X=X_train, y=y_train)
# - predictions = model.predict(X_test)
# - print("{0:.2f}".format(mae(y_true=y_test, y_pred=predictions)))
# - e.g., "10.84"
# - Mean Absolute Error
# - (sum |y true - y pred|) / n
#     - this course uses 538's ultimate Halloween Candy Power ranking dataset
# - Seen vs. unseen data
# - training data = seen data
# - testing data = unseen data
# ___
# + pycharm={"name": "#%%\n"}
#Seen vs. unseen data
#Model's tend to have higher accuracy on observations they have seen
#before. In the candy dataset, predicting the popularity of Skittles
#will likely have higher accuracy than predicting the popularity of
#Andes Mints; Skittles is in the dataset, and Andes Mints is not.
#You've built a model based on 50 candies using the dataset X_train
#and need to report how accurate the model is at predicting the
#popularity of the 50 candies the model was built on, and the 35
#candies (X_test) it has never seen. You will use the mean absolute
#error, mae(), as the accuracy metric.
# The model is fit using X_train and y_train
#model.fit(X_train, y_train)
# Create vectors of predictions
#train_predictions = model.predict(X_train)
#test_predictions = model.predict(X_test)
# Train/Test Errors
#train_error = mae(y_true=y_train, y_pred=train_predictions)
#test_error = mae(y_true=y_test, y_pred=test_predictions)
# Print the accuracy for seen and unseen data
#print("Model error on seen data: {0:.2f}.".format(train_error))
#print("Model error on unseen data: {0:.2f}.".format(test_error))
#################################################
#<script.py> output:
# Model error on seen data: 3.28.
# Model error on unseen data: 11.07.
#################################################
#When models perform differently on training and testing data, you
#should look to model validation to ensure you have the best performing
#model. In the next lesson, you will start building models to validate.
# -
# **Regression models**
# ___
# - Random forests in scikit-learn
# - decision trees
# - mean prediction of decision trees = final value for observation
# - parameters
# - n_estimators: the number of trees in the forest
#         - max_depth: the maximum depth of the trees
# - random_state: random seed for reproducibility
# - feature importance
#         - .feature_importances_
# ___
# + pycharm={"name": "#%%\n"}
#Set parameters and fit a model
#Predictive tasks fall into one of two categories: regression or
#classification. In the candy dataset, the outcome is a continuous
#variable describing how often the candy was chosen over another
#candy in a series of 1-on-1 match-ups. To predict this value (the
#win-percentage), you will use a regression model.
#In this exercise, you will specify a few parameters using a random
#forest regression model rfr.
# Set the number of trees
#rfr.n_estimators = 100
# Add a maximum depth
#rfr.max_depth = 6
# Set the random state
#rfr.random_state = 1111
# Fit the model
#rfr.fit(X_train, y_train)
#################################################
#You have updated parameters after the model was initialized. This
#approach is helpful when you need to update parameters. Before
#making predictions, let's see which candy characteristics were most
#important to the model.
# + pycharm={"name": "#%%\n"}
#Feature importances
#Although some candy attributes, such as chocolate, may be extremely
#popular, it doesn't mean they will be important to model prediction.
#After a random forest model has been fit, you can review the model's
#attribute, .feature_importances_, to see which variables had the
#biggest impact. You can check how important each variable was in the
#model by looping over the feature importance array using enumerate().
#If you are unfamiliar with Python's enumerate() function, it can loop
#over a list while also creating an automatic counter.
# Fit the model using X and y
#rfr.fit(X_train, y_train)
# Print how important each column is to the model
#for i, item in enumerate(rfr.feature_importances_):
# Use i and item to print out the feature importance of each column
# print("{0:s}: {1:.2f}".format(X_train.columns[i], item))
#################################################
#<script.py> output:
# chocolate: 0.44
# fruity: 0.03
# caramel: 0.02
# peanutyalmondy: 0.05
# nougat: 0.01
# crispedricewafer: 0.03
# hard: 0.01
# bar: 0.02
# pluribus: 0.02
# sugarpercent: 0.17
# pricepercent: 0.19
#################################################
#No surprise here - chocolate is the most important variable.
#.feature_importances_ is a great way to see which variables were
#important to your random forest model.
# -
# **Classification models**
# ___
# - Categorical Responses
# - Tic-Tac-Toe dataset
# - .predict()
# - sparse array
# - .predict_proba()
# - .get_params()
# - .score(X_test, y_test)
# ___
# + pycharm={"name": "#%%\n"}
#Classification predictions
#In model validation, it is often important to know more about the
#predictions than just the final classification. When predicting
#who will win a game, most people are also interested in how likely
#it is a team will win.
#Probability Prediction Meaning
#0 < .50 0 Team Loses
#.50 + 1 Team Wins
#In this exercise, you look at the methods, .predict() and
#.predict_proba() using the tic_tac_toe dataset. The first method
#will give a prediction of whether Player One will win the game, and
#the second method will provide the probability of Player One winning.
#Use rfc as the random forest classification model.
# Fit the rfc model.
#rfc.fit(X_train, y_train)
# Create arrays of predictions
#classification_predictions = rfc.predict(X_test)
#probability_predictions = rfc.predict_proba(X_test)
# Print out count of binary predictions
#print(pd.Series(classification_predictions).value_counts())
# Print the first value from probability_predictions
#print('The first predicted probabilities are: {}'.format(probability_predictions[0]))
#################################################
#<script.py> output:
# 1 563
# 0 204
# dtype: int64
# The first predicted probabilities are: [0.26524423 0.73475577]
#################################################
#You can see there were 563 observations where Player One was
#predicted to win the Tic-Tac-Toe game. Also, note that the
#predicted_probabilities array contains lists with only two values
#because you only have two possible responses (win or lose). Remember
#these two methods, as you will use them a lot throughout this course.
# + pycharm={"name": "#%%\n"}
#Reusing model parameters
#Replicating model performance is vital in model validation. Replication
#is also important when sharing models with co-workers, reusing models
#on new data or asking questions on a website such as Stack Overflow.
#You might use such a site to ask other coders about model errors,
#output, or performance. The best way to do this is to replicate your
#work by reusing model parameters.
#In this exercise, you use various methods to recall which parameters
#were used in a model.
#rfc = RandomForestClassifier(n_estimators=50, max_depth=6, random_state=1111)
# Print the classification model
#print(rfc)
# Print the classification model's random state parameter
#print('The random state is: {}'.format(rfc.random_state))
# Print all parameters
#print('Printing the parameters dictionary: {}'.format(rfc.get_params()))
#################################################
#<script.py> output:
# RandomForestClassifier(bootstrap=True, class_weight=None, criterion='gini',
# max_depth=6, max_features='auto', max_leaf_nodes=None,
# min_impurity_decrease=0.0, min_impurity_split=None,
# min_samples_leaf=1, min_samples_split=2,
# min_weight_fraction_leaf=0.0, n_estimators=50, n_jobs=None,
# oob_score=False, random_state=1111, verbose=0,
# warm_start=False)
# The random state is: 1111
# Printing the parameters dictionary: {'bootstrap': True, 'class_weight': None, 'criterion': 'gini', 'max_depth': 6, 'max_features': 'auto', 'max_leaf_nodes': None, 'min_impurity_decrease': 0.0, 'min_impurity_split': None, 'min_samples_leaf': 1, 'min_samples_split': 2, 'min_weight_fraction_leaf': 0.0, 'n_estimators': 50, 'n_jobs': None, 'oob_score': False, 'random_state': 1111, 'verbose': 0, 'warm_start': False}
#################################################
#Recalling which parameters were used will be helpful going forward.
#Model validation and performance rely heavily on which parameters
#were used, and there is no way to replicate a model without keeping
#track of the parameters used!
# + pycharm={"name": "#%%\n"}
#Random forest classifier
#This exercise reviews the four modeling steps discussed throughout
#this chapter using a random forest classification model. You will:
#Create a random forest classification model.
#Fit the model using the tic_tac_toe dataset.
#Make predictions on whether Player One will win (1) or lose (0) the current game.
#Finally, you will evaluate the overall accuracy of the model.
#Let's get started!
#from sklearn.ensemble import RandomForestClassifier
# Create a random forest classifier
#rfc = RandomForestClassifier(n_estimators=50, max_depth=6, random_state=1111)
# Fit rfc using X_train and y_train
#rfc.fit(X_train, y_train)
# Create predictions on X_test
#predictions = rfc.predict(X_test)
#print(predictions[0:5])
#################################################
#<script.py> output:
# [1 1 1 1 1]
#################################################
# Print model accuracy using score() and the testing data
#print(rfc.score(X_test, y_test))
#################################################
#<script.py> output:
# 0.817470664928292
#################################################
#Notice the first five predictions were all 1, indicating that
#Player One is predicted to win all five of those games. You also
#see the model accuracy was only 82%.
#Let's move on to Chapter 2 and increase our model validation toolbox
#by learning about splitting datasets, standard accuracy metrics, and
#the bias-variance tradeoff.
# -
# **Creating train, test, and validation datasets**
# ___
# - Ratio Examples
# - 80:20
# - 90:10
# - used when we have little data
# - 70:30
# - used when model is computationally expensive
# - test_size
# - train_size
# - random_state
# - validation samples are used when testing different parameters
# - it is a holdout sample taken from the training sample
# ___
# + pycharm={"name": "#%%\n"}
#Create one holdout set
#Your boss has asked you to create a simple random forest model on the
#tic_tac_toe dataset. She doesn't want you to spend much time selecting
#parameters; rather she wants to know how well the model will perform
#on future data. For future Tic-Tac-Toe games, it would be nice to know
#if your model can predict which player will win.
#The dataset tic_tac_toe has been loaded for your use.
#Note that in Python, =\ indicates the code was too long for one line
#and has been split across two lines.
# Create dummy variables using pandas
#X = pd.get_dummies(tic_tac_toe.iloc[:, 0:9])
#y = tic_tac_toe.iloc[:, 9]
# Create training and testing datasets. Use 10% for the test set
#X_train, X_test, y_train, y_test = train_test_split(X, y, test_size=0.10, random_state=1111)
#################################################
#Remember, without the holdout set, you cannot truly validate a model.
#Let's move on to creating two holdout sets.
# + pycharm={"name": "#%%\n"}
#Create two holdout sets
#You recently created a simple random forest model to predict Tic-Tac-Toe
#game wins for your boss, and at her request, you did not do any
#parameter tuning. Unfortunately, the overall model accuracy was too
#low for her standards. This time around, she has asked you to focus
#on model performance.
#Before you start testing different models and parameter sets, you
#will need to split the data into training, validation, and testing
#datasets. Remember that after splitting the data into training and
#testing datasets, the validation dataset is created by splitting the
#training dataset.
#The datasets X and y have been loaded for your use.
# Create temporary training and final testing datasets
#X_temp, X_test, y_temp, y_test =\
# train_test_split(X, y, test_size=0.20, random_state=1111)
# Create the final training and validation datasets
#X_train, X_val, y_train, y_val =\
# train_test_split(X_temp, y_temp, test_size=0.25, random_state=1111)
#################################################
#You now have training, validation, and testing datasets, but do you
#know when you need both validation and testing datasets?
#When testing parameters, tuning hyper-parameters, or anytime you are
#frequently evaluating model performance.
# -
# **Accuracy metrics: regression models**
# ___
# - Mean Absolute Error (MAE)
# - most intuitive
# - Mean Squared Error (MSE)
# - same as MAE except the difference term is squared
# - allows outlier errors to contribute more to the overall error
# - most widely used
# - MAE vs MSE
# - accuracy metrics are always application specific
# - MAE and MSE error terms are in different units and should not be compared
# ___
# + pycharm={"name": "#%%\n"}
#Mean absolute error
#Communicating modeling results can be difficult. However, most clients
#understand that on average, a predictive model was off by some number.
#This makes explaining the mean absolute error easy. For example, when
#predicting the number of wins for a basketball team, if you predict 42,
#and they end up with 40, you can easily explain that the error was
#two wins.
#In this exercise, you are interviewing for a new position and are
#provided with two arrays. y_test, the true number of wins for all
#30 NBA teams in 2017 and predictions, which contains a prediction
#for each team. To test your understanding, you are asked to both
#manually calculate the MAE and use sklearn.
#from sklearn.metrics import mean_absolute_error
# Manually calculate the MAE
#n = len(predictions)
#mae_one = sum(abs(y_test - predictions)) / n
#print('With a manual calculation, the error is {}'.format(mae_one))
# Use scikit-learn to calculate the MAE
#mae_two = mean_absolute_error(y_test, predictions)
#print('Using scikit-learn, the error is {}'.format(mae_two))
#################################################
#<script.py> output:
#    With a manual calculation, the error is 5.9
#    Using scikit-learn, the error is 5.9
#################################################
#These predictions were about six wins off on average. This isn't
#too bad considering NBA teams play 82 games a year. Let's see how
#these errors would look if you used the mean squared error instead.
# + pycharm={"name": "#%%\n"}
#Mean squared error
#Let's focus on the 2017 NBA predictions again. Every year, there are
#at least a couple of NBA teams that win way more games than expected.
#If you use the MAE, this accuracy metric does not reflect the bad
#predictions as much as if you use the MSE. Squaring the large errors
#from bad predictions will make the accuracy look worse.
#In this example, NBA executives want to better predict team wins.
#You will use the mean squared error to calculate the prediction
#error. The actual wins are loaded as y_test and the predictions as
#predictions.
#from sklearn.metrics import mean_squared_error
#n = len(predictions)
# Finish the manual calculation of the MSE
#mse_one = sum((y_test - predictions)**2) / n
#print('With a manual calculation, the error is {}'.format(mse_one))
# Use the scikit-learn function to calculate MSE
#mse_two = mean_squared_error(y_test, predictions)
#print('Using scikit-learn, the error is {}'.format(mse_two))
#################################################
#<script.py> output:
# With a manual calculation, the error is 49.1
# Using scikit-learn, the error is 49.1
#################################################
# If you run any additional models, you will try to beat an MSE of 49.1,
#which is the average squared error of using your model. Although the
#MSE is not as interpretable as the MAE, it will help us select a
#model that has fewer 'large' errors.
# + pycharm={"name": "#%%\n"}
#Performance on data subsets
#In professional basketball, there are two conferences, the East and
#the West. Coaches and fans often only care about how teams in their
#own conference will do this year.
#You have been working on an NBA prediction model and would like to
#determine if the predictions were better for the East or West
#conference. You added a third array to your data called labels,
#which contains an "E" for the East teams, and a "W" for the West.
#y_test and predictions have again been loaded for your use.
# Find the East conference teams
#east_teams = labels == "E"
# Create arrays for the true and predicted values
#true_east = y_test[east_teams]
#preds_east = predictions[east_teams]
# Print the accuracy metrics
#print('The MAE for East teams is {}'.format(
# mae(true_east, preds_east)))
#################################################
#<script.py> output:
# The MAE for East teams is 6.733333333333333
#################################################
# Print the West accuracy
#print('The MAE for West conference is {}'.format(west_error))
#################################################
#<script.py> output:
# <script.py> output:
# The MAE for West conference is 5.01
#################################################
#It looks like the Western conference predictions were about two
#games better on average. Over the past few seasons, the Western teams
#have generally won the same number of games as the experts have
#predicted. Teams in the East are just not as predictable as those
#in the West.
# -
# **Classification metrics**
# ___
# - Examples
# - **precision**
# - **recall**/sensitivity
# - **accuracy**
# - specificity
# - f-1 score and its variations
# - etc...
#
# - Confusion Matrix
# - confusion_matrix
# 
#
# - Accuracy
# - overall ability of a model to predict correct classification
# 
#
# - Precision
# - when we don't want to overpredict positive values
# - e.g., the number of invited interviewees who accept a position
# 
#
# - Recall
# - when we can't afford to miss any positive values
# - e.g., even if a patient has a small chance of having cancer
# 
# ___
# + pycharm={"name": "#%%\n"}
#Confusion matrices
#Confusion matrices are a great way to start exploring your model's
#accuracy. They provide the values needed to calculate a wide range
#of metrics, including sensitivity, specificity, and the F1-score.
#You have built a classification model to predict if a person has a
#broken arm based on an X-ray image. On the testing set, you have the
#following confusion matrix:
# Prediction: 0 Prediction: 1
#Actual: 0 324 (TN) 15 (FP)
#Actual: 1 123 (FN) 491 (TP)
# Calculate and print the accuracy
#accuracy = (491 + 324) / (953)
#print("The overall accuracy is {0: 0.2f}".format(accuracy))
# Calculate and print the precision
#precision = (491) / (491 + 15)
#print("The precision is {0: 0.2f}".format(precision))
# Calculate and print the recall
#recall = (491) / (491 + 123)
#print("The recall is {0: 0.2f}".format(recall))
#################################################
#<script.py> output:
# The overall accuracy is 0.86
# The precision is 0.97
# The recall is 0.80
#################################################
#In this case, a true positive is a picture of an actual broken arm
#that was also predicted to be broken. Doctors are okay with a few
#additional false positives (predicted broken, not actually broken),
#as long as you don't miss anyone who needs immediate medical attention.
# + pycharm={"name": "#%%\n"}
#Confusion matrices, again
#Creating a confusion matrix in Python is simple. The biggest challenge
#will be making sure you understand the orientation of the matrix. This
#exercise makes sure you understand the sklearn implementation of
#confusion matrices. Here, you have created a random forest model
#using the tic_tac_toe dataset rfc to predict outcomes of 0 (loss) or
#1 (a win) for Player One.
#Note: If you read about confusion matrices on another website or for
#another programming language, the values might be reversed.
#from sklearn.metrics import confusion_matrix
# Create predictions
#test_predictions = rfc.predict(X_test)
# Create and print the confusion matrix
#cm = confusion_matrix(y_test, test_predictions)
#print(cm)
# Print the true positives (actual 1s that were predicted 1s)
#print("The number of true positives is: {}".format(cm[1, 1]))
#################################################
#<script.py> output:
# [[177 123]
# [ 92 471]]
# The number of true positives is: 471
#################################################
#Row 1, column 1 represents the number of actual 1s that were predicted
#1s (the true positives). Always make sure you understand the orientation
#of the confusion matrix before you start using it!
# + pycharm={"name": "#%%\n"}
#Precision vs. recall
#The accuracy metrics you use to evaluate your model should always be
#based on the specific application. For this example, let's assume
#you are a really sore loser when it comes to playing Tic-Tac-Toe,
#but only when you are certain that you are going to win.
#Choose the most appropriate accuracy metric, either precision or
#recall, to complete this example. But remember, if you think you
#are going to win, you better win!
#Use rfc, which is a random forest classification model built on the
#tic_tac_toe dataset.
#from sklearn.metrics import precision_score
#test_predictions = rfc.predict(X_test)
# Create precision or recall score based on the metric you imported
#score = precision_score(y_test, test_predictions)
# Print the final result
#print("The precision value is {0:.2f}".format(score))
#################################################
#<script.py> output:
# The precision value is 0.79
#################################################
#Precision is the correct metric here. Sore-losers can't stand losing
#when they are certain they will win! For that reason, our model needs
#to be as precise as possible. With a precision of only 79%, you may
#need to try some other modeling techniques to improve this score.
# -
# **The bias-variance tradeoff**
# ___
# - Variance
# - following the training data too closely
# - fails to generalize to test data
# - low training error but high testing error
# - occurs when models are overfit and have high complexity
# 
#
# - Bias
# - failing to find the relationship between the data and the response
# - high training and testing error
# - occurs when models are underfit
# - e.g., not enough trees, trees not deep enough
# 
#
# - Optimal performance
# 
#
# - Parameters causing over/underfitting
# - max_depth
# - max_features
# ___
# + pycharm={"name": "#%%\n"}
#Error due to under/over-fitting
#The candy dataset is prime for overfitting. With only 85 observations,
#if you use 20% for the testing dataset, you are losing a lot of vital
#data that could be used for modeling. Imagine the scenario where most
#of the chocolate candies ended up in the training data and very few
#in the holdout sample. Our model might only see that chocolate is a
#vital factor, but fail to find that other attributes are also
#important. In this exercise, you'll explore how using too many features
#(columns) in a random forest model can lead to overfitting.
#A feature represents which columns of the data are used in a
#decision tree. The parameter max_features limits the number of
#features available.
# Update the rfr model
#rfr = RandomForestRegressor(n_estimators=25,
# random_state=1111,
# max_features=2)
#rfr.fit(X_train, y_train)
# Print the training and testing accuracies
#print('The training error is {0:.2f}'.format(
# mae(y_train, rfr.predict(X_train))))
#print('The testing error is {0:.2f}'.format(
# mae(y_test, rfr.predict(X_test))))
#################################################
#<script.py> output:
# The training error is 3.88
# The testing error is 9.15
#################################################
# Update the rfr model
#rfr = RandomForestRegressor(n_estimators=25,
# random_state=1111,
# max_features=11)
#rfr.fit(X_train, y_train)
# Print the training and testing accuracies
#print('The training error is {0:.2f}'.format(
# mae(y_train, rfr.predict(X_train))))
#print('The testing error is {0:.2f}'.format(
# mae(y_test, rfr.predict(X_test))))
#################################################
#<script.py> output:
# The training error is 3.57
# The testing error is 10.05
#################################################
# Update the rfr model
#rfr = RandomForestRegressor(n_estimators=25,
# random_state=1111,
# max_features=4)
#rfr.fit(X_train, y_train)
# Print the training and testing accuracies
#print('The training error is {0:.2f}'.format(
# mae(y_train, rfr.predict(X_train))))
#print('The testing error is {0:.2f}'.format(
# mae(y_test, rfr.predict(X_test))))
#################################################
#<script.py> output:
# The training error is 3.60
# The testing error is 8.79
#################################################
#The chart below shows the performance at various max feature values.
#Sometimes, setting parameter values can make a huge difference in
#model performance.
# -
# 
# + pycharm={"name": "#%%\n"}
#Am I underfitting?
#You are creating a random forest model to predict if you will win a
#future game of Tic-Tac-Toe. Using the tic_tac_toe dataset, you have
#created training and testing datasets, X_train, X_test, y_train, and
#y_test.
#You have decided to create a bunch of random forest models with
#varying amounts of trees (1, 2, 3, 4, 5, 10, 20, and 50). The more
#trees you use, the longer your random forest model will take to run.
#However, if you don't use enough trees, you risk underfitting. You
#have created a for loop to test your model at the different number
#of trees.
#from sklearn.metrics import accuracy_score
#test_scores, train_scores = [], []
#for i in [1, 2, 3, 4, 5, 10, 20, 50]:
# rfc = RandomForestClassifier(n_estimators=i, random_state=1111)
# rfc.fit(X_train, y_train)
# Create predictions for the X_train and X_test datasets.
# train_predictions = rfc.predict(X_train)
# test_predictions = rfc.predict(X_test)
# Append the accuracy score for the test and train predictions.
# train_scores.append(round(accuracy_score(y_train, train_predictions), 2))
# test_scores.append(round(accuracy_score(y_test, test_predictions), 2))
# Print the train and test scores.
#print("The training scores were: {}".format(train_scores))
#print("The testing scores were: {}".format(test_scores))
#################################################
#<script.py> output:
# The training scores were: [0.94, 0.93, 0.98, 0.97, 0.99, 1.0, 1.0, 1.0]
# The testing scores were: [0.83, 0.79, 0.89, 0.91, 0.91, 0.93, 0.97, 0.98]
#################################################
#Notice that with only one tree, both the train and test scores are
#low. As you add more trees, both errors improve. Even at 50 trees,
#this still might not be enough. Every time you use more trees, you
#achieve higher accuracy. At some point though, more trees increase
#training time, but do not decrease testing error.
# -
# **The problems with holdout sets**
# ___
# - the split matters
# - sampling error for train/test samples
# ___
# + pycharm={"name": "#%%\n"}
#Two samples
#After building several classification models based on the tic_tac_toe
#dataset, you realize that some models do not generalize as well as
#others. You have created training and testing splits just as you
#have been taught, so you are curious why your validation process is
#not working.
#After trying a different training, test split, you noticed differing
#accuracies for your machine learning model. Before getting too frustrated
#with the varying results, you have decided to see what else could be
#going on.
# Create two different samples of 200 observations
#sample1 = tic_tac_toe.sample(200, random_state=1111)
#sample2 = tic_tac_toe.sample(200, random_state=1171)
# Print the number of common observations
#print(len([index for index in sample1.index if index in sample2.index]))
#################################################
#<script.py> output:
# 40
#################################################
# Print the number of observations in the Class column for both samples
#print(sample1['Class'].value_counts())
#print(sample2['Class'].value_counts())
#################################################
#<script.py> output:
# positive 134
# negative 66
# Name: Class, dtype: int64
# positive 123
# negative 77
# Name: Class, dtype: int64
#################################################
#Notice that there are a varying number of positive observations for
#both sample test sets. Sometimes creating a single test holdout
#sample is not enough to achieve the high levels of model validation
#you want. You need to use something more robust.
#
# If our models are not generalizing well or if we have limited data,
#we should be careful using a single training/validation split. You
#should use the next lesson's topic: cross-validation.
# -
# **Cross-validation**
# ___
# 
# 
# - KFold in sklearn.model_selection
# - n_splits: number of cross-validation splits
# - shuffle: boolean indicating to shuffle data before splitting
# - random_state: random seed
# - .split(X): indices of splits
# ___
# + pycharm={"name": "#%%\n"}
#scikit-learn's KFold()
#You just finished running a colleague's code that creates a random
#forest model and calculates an out-of-sample accuracy. You noticed
#that your colleague's code did not have a random state, and the
#errors you found were completely different than the errors your
#colleague reported.
#To get a better estimate for how accurate this random forest model
#will be on new data, you have decided to generate some indices to
#use for KFold cross-validation.
#from sklearn.model_selection import KFold
# Use KFold
#kf = KFold(n_splits=5, shuffle=True, random_state=1111)
# Create splits
#splits = kf.split(X)
# Print the number of indices
#for train_index, val_index in splits:
# print("Number of training indices: %s" % len(train_index))
# print("Number of validation indices: %s" % len(val_index))
#################################################
#<script.py> output:
# Number of training indices: 68
# Number of validation indices: 17
# Number of training indices: 68
# Number of validation indices: 17
# Number of training indices: 68
# Number of validation indices: 17
# Number of training indices: 68
# Number of validation indices: 17
# Number of training indices: 68
# Number of validation indices: 17
#################################################
#This dataset has 85 rows. You have created five splits - each
#containing 68 training and 17 validation indices. You can use these
#indices to complete 5-fold cross-validation.
# + pycharm={"name": "#%%\n"}
#Using KFold indices
#You have already created splits, which contains indices for the
#candy-data dataset to complete 5-fold cross-validation. To get a
#better estimate for how well a colleague's random forest model will
#perform on a new data, you want to run this model on the five
#different training and validation indices you just created.
#In this exercise, you will use these indices to check the accuracy
#of this model using the five different splits. A for loop has been
#provided to assist with this process.
#from sklearn.ensemble import RandomForestRegressor
#from sklearn.metrics import mean_squared_error
#rfc = RandomForestRegressor(n_estimators=25, random_state=1111)
# Access the training and validation indices of splits
#for train_index, val_index in splits:
# Setup the training and validation data
# X_train, y_train = X[train_index], y[train_index]
# X_val, y_val = X[val_index], y[val_index]
# Fit the random forest model
# rfc.fit(X_train, y_train)
# Make predictions, and print the accuracy
# predictions = rfc.predict(X_val)
# print("Split accuracy: " + str(mean_squared_error(y_val, predictions)))
#################################################
#<script.py> output:
# Split accuracy: 178.75586448813047
# Split accuracy: 98.29560208158634
# Split accuracy: 86.2673010849621
# Split accuracy: 217.4185114496197
# Split accuracy: 140.5437661156536
#################################################
#KFold() is a great method for accessing individual indices when
#completing cross-validation. One drawback is needing a for loop to
#work through the indices though. In the next lesson, you will look
#at an automated method for cross-validation using sklearn
# -
# **sklearn's cross_val_score()**
# ___
# - cross_val_score()
# - estimator: the model to use
# - X, y: predictor and response arrays
# - cv: the number of cross-validation splits
# - scoring: see make_scorer below
# - make_scorer from sklearn.metrics to create a scorer for cross_val_score()
# - mean of errors is usually reported with std to indicate variance of errors
# ___
# + pycharm={"name": "#%%\n"}
#scikit-learn's methods
#You have decided to build a regression model to predict the number
#of new employees your company will successfully hire next month. You
#open up a new Python script to get started, but you quickly realize
#that sklearn has a lot of different modules. Let's make sure you
#understand the names of the modules, the methods, and which module
#contains which method.
#Follow the instructions below to load in all of the necessary methods
#for completing cross-validation using sklearn. You will use modules:
#metrics
#model_selection
#ensemble
# Instruction 1: Load the cross-validation method
from sklearn.model_selection import cross_val_score
# Instruction 2: Load the random forest regression model
from sklearn.ensemble import RandomForestRegressor
# Instruction 3: Load the mean squared error method
# Instruction 4: Load the function for creating a scorer
from sklearn.metrics import mean_squared_error, make_scorer
#################################################
#It is easy to see how all of the methods can get mixed up, but it
#is important to know the names of the methods you need. You can
#always review the scikit-learn documentation should you need any help
#https://scikit-learn.org/stable/documentation.html
# + pycharm={"name": "#%%\n"}
#Implement cross_val_score()
#Your company has created several new candies to sell, but they are
#not sure if they should release all five of them. To predict the
#popularity of these new candies, you have been asked to build a
#regression model using the candy dataset. Remember that the response
#value is a head-to-head win-percentage against other candies.
#Before you begin trying different regression models, you have
#decided to run cross-validation on a simple random forest model to
#get a baseline error to compare with any future results.
#rfc = RandomForestRegressor(n_estimators=25, random_state=1111)
#mse = make_scorer(mean_squared_error)
# Set up cross_val_score
#cv = cross_val_score(estimator=rfc,
# X=X_train,
# y=y_train,
# cv=10,
# scoring=make_scorer(mean_squared_error))
# Print the mean error
#print(cv.mean())
#################################################
#<script.py> output:
# 155.55845080026586
#################################################
#You now have a baseline score to build on. If you decide to build
#additional models or try new techniques, you should try to get an
#error lower than 155.56. Lower errors indicate that your popularity
#predictions are improving
# -
# **Leave-one-out-cross-validation (LOOCV)**
# ___
# - n models for n observations
# 
# - when to use LOOCV
# - the amount of training data is limited
# - you want the absolute best error estimate for new data
# - this method is very computationally expensive
# ___
# + pycharm={"name": "#%%\n"}
#Leave-one-out-cross-validation
#Let's assume your favorite candy is not in the candy dataset, and
#that you are interested in the popularity of this candy. Using 5-fold
#cross-validation will train on only 80% of the data at a time. The
#candy dataset only has 85 rows though, and leaving out 20% of the
#data could hinder our model. However, using leave-one-out-cross-validation
#allows us to make the most out of our limited dataset and will give
#you the best estimate for your favorite candy's popularity!
#In this exercise, you will use cross_val_score() to perform LOOCV.
#from sklearn.metrics import mean_absolute_error, make_scorer
# Create scorer
#mae_scorer = make_scorer(mean_absolute_error)
#rfr = RandomForestRegressor(n_estimators=15, random_state=1111)
# Implement LOOCV
#scores = cross_val_score(rfr, X=X, y=y, cv=y.shape[0], scoring=mae_scorer)
# Print the mean and standard deviation
#print("The mean of the errors is: %s." % np.mean(scores))
#print("The standard deviation of the errors is: %s." % np.std(scores))
#################################################
#<script.py> output:
# The mean of the errors is: 9.464989603398694.
# The standard deviation of the errors is: 7.265762094853885.
#################################################
#You have come along way with model validation techniques. The final
#chapter will wrap up model validation by discussing how to select
#the best model and give an introduction to parameter tuning
# -
# **Introduction to hyperparameter tuning**
# ___
# - model parameters are:
# - learned or estimated from the data
# - the result of fitting a model
# - used when making future predictions
# - not manually set
# - linear regression parameters
# - coefficients and intercepts
# - .coef_, .intercept_
# - model hyperparameters
# - manually set *before* the training occurs
# - specify how the training is supposed to happen
# - random forest hyperparameters
# - n_estimators
# - max_depth
# - max_features
# - min_samples_split
# - the minimum number of samples required to make a split
# - hyperparameter tuning
# - select hyperparameters
# - run a single model type at different value sets
# - create ranges of possible values to select from
# - specify a single accuracy metric
# - specifying ranges
# - .get_params_
# - start with the basics
# - read through the documentation
# - test practical ranges
# ___
# + pycharm={"name": "#%%\n"}
#Creating Hyperparameters
#For a school assignment, your professor has asked your class to
#create a random forest model to predict the average test score for
#the final exam.
#After developing an initial random forest model, you are unsatisfied
#with the overall accuracy. You realize that there are too many
#hyperparameters to choose from, and each one has a lot of possible
#values. You have decided to make a list of possible ranges for the
#hyperparameters you might use in your next model.
#Your professor has provided de-identified data for the last ten
#quizzes to act as the training data. There are 30 students in your
#class.
# Review the parameters of rfr
#print(rfr.get_params())
#################################################
#<script.py> output:
# {'bootstrap': True, 'criterion': 'mse', 'max_depth': None, 'max_features': 'auto', 'max_leaf_nodes': None, 'min_impurity_decrease': 0.0, 'min_impurity_split': None, 'min_samples_leaf': 1, 'min_samples_split': 2, 'min_weight_fraction_leaf': 0.0, 'n_estimators': 'warn', 'n_jobs': None, 'oob_score': False, 'random_state': 1111, 'verbose': 0, 'warm_start': False}
#################################################
# Maximum Depth
#max_depth = [4, 8, 12]
# Minimum samples for a split
#min_samples_split = [2, 5, 10]
# Max features
#max_features = [4, 6, 8, 10]
#################################################
#Hyperparameter tuning requires selecting parameters to tune, as
#well as the possible values these parameters can be set to.
# + pycharm={"name": "#%%\n"}
#Running a model using ranges
#You have just finished creating a list of hyperparameters and ranges
#to use when tuning a predictive model for an assignment. You have
#used max_depth, min_samples_split, and max_features as your range
#variable names.
#from sklearn.ensemble import RandomForestRegressor
# Fill in rfr using your variables
#rfr = RandomForestRegressor(
# n_estimators=100,
# max_depth=random.choice(max_depth),
# min_samples_split=random.choice(min_samples_split),
# max_features=random.choice(max_features))
# Print out the parameters
#print(rfr.get_params())
#################################################
#<script.py> output:
# {'bootstrap': True, 'criterion': 'mse', 'max_depth': 4, 'max_features': 10, 'max_leaf_nodes': None, 'min_impurity_decrease': 0.0, 'min_impurity_split': None, 'min_samples_leaf': 1, 'min_samples_split': 2, 'min_weight_fraction_leaf': 0.0, 'n_estimators': 100, 'n_jobs': None, 'oob_score': False, 'random_state': None, 'verbose': 0, 'warm_start': False}
#################################################
#Notice that min_samples_split was randomly set to 2. Since you
#specified a random state, min_samples_split will always be set to
#2 if you only run this model one time.
# -
# **RandomizedSearchCV**
# ___
# - Grid Searching hyperparameters
# 
# - tests every possible combination
# - each additional hyperparameter added increases computation time exponentially
# - better methods
# - random searching using RandomizedSearch CV in sklearn.model_selection
# - bayesian optimization
# - random search parameters
# - estimator: the model to use
# - param_distributions: dictionary containing hyperparameters and possible values
# - n_iter: number of iterations
# - scoring: scoring method to use
# ___
# + pycharm={"name": "#%%\n"}
#Preparing for RandomizedSearch
#Last semester your professor challenged your class to build a
#predictive model to predict final exam test scores. You tried
#running a few different models by randomly selecting hyperparameters.
#However, running each model required you to code it individually.
#After learning about RandomizedSearchCV(), you're revisiting your
#professor's challenge to build the best model. In this exercise, you
#will prepare the three necessary inputs for completing a random
#search.
#from sklearn.ensemble import RandomForestRegressor
#from sklearn.metrics import make_scorer, mean_squared_error
# Finish the dictionary by adding the max_depth parameter
#param_dist = {"max_depth": [2, 4, 6, 8],
# "max_features": [2, 4, 6, 8, 10],
# "min_samples_split": [2, 4, 8, 16]}
# Create a random forest regression model
#rfr = RandomForestRegressor(n_estimators=10, random_state=1111)
# Create a scorer to use (use the mean squared error)
#scorer = make_scorer(mean_squared_error)
#################################################
#To use RandomizedSearchCV(), you need a distribution dictionary,
#an estimator, and a scorer—once you've got these, you can run a
#random search to find the best parameters for your model.
# + pycharm={"name": "#%%\n"}
#Implementing RandomizedSearchCV
#You are hoping that using a random search algorithm will help you
#improve predictions for a class assignment. Your professor has
#challenged your class to predict the overall final exam average
#score.
#In preparation for completing a random search, you have created:
#param_dist: the hyperparameter distributions
#rfr: a random forest regression model
#scorer: a scoring method to use
# Import the method for random search
#from sklearn.model_selection import RandomizedSearchCV
# Build a random search using param_dist, rfr, and scorer
#random_search =\
# RandomizedSearchCV(
# estimator=rfr,
# param_distributions=param_dist,
# n_iter=10,
# cv=5,
# scoring=scorer)
#################################################
#Although it takes a lot of steps, hyperparameter tuning with
#random search is well worth it and can improve the accuracy of
#your models. Plus, you are already using cross-validation to
#validate your best model.
# -
# **Selecting your final model**
# ___
# - .best_score_
# - .best_params_
# - .best_estimator_
# - best model
# - other attributes
# - .cv_results_
# - ['mean_test_score']
# - ['params']
# - ['max_depth']
# - save model for use later
# - from sklearn.externals import joblib
# - joblib.dump(model, 'model_best_<date>.pkl')
# ___
# + pycharm={"name": "#%%\n"}
#Selecting the best precision model
#Your boss has offered to pay for you to see three sports games this
#year. Of the 41 home games your favorite team plays, you want to
#ensure you go to three home games that they will definitely win.
#You build a model to decide which games your team will win.
#To do this, you will build a random search algorithm and focus on
#model precision (to ensure your team wins). You also want to keep
#track of your best model and best parameters, so that you can use
#them again next year (if the model does well, of course). You have
#already decided on using the random forest classification model rfc
#and generated a parameter distribution param_dist.
#from sklearn.metrics import precision_score, make_scorer
# Create a precision scorer
#precision = make_scorer(precision_score)
# Finalize the random search
#rs = RandomizedSearchCV(
# estimator=rfc, param_distributions=param_dist,
# scoring = precision,
# cv=5, n_iter=10, random_state=1111)
#rs.fit(X, y)
# print the mean test scores:
#print('The precision for each run was: {}.'.format(rs.cv_results_['mean_test_score']))
# print the best model score:
#print('The best precision for a single model was: {}'.format(rs.best_score_))
#################################################
#<script.py> output:
# The precision for each run was: [0.86446668 0.75302055 0.67570816 0.88459939 0.88381178 0.86917588
# 0.68014695 0.81721906 0.87895856 0.92917474].
# The best precision for a single model was: 0.9291747446879924
#################################################
#Your model's precision was 93%! The best model accurately predicts
#a winning game 93% of the time. If you look at the mean test scores,
#you can tell some of the other parameter sets did really poorly.
#Also, since you used cross-validation, you can be confident in your
#predictions. Well done!
# -
# **Course completed!**
# ___
# - Some topics covered:
# - accuracy/evaluation metrics
# - splitting data into train, validation, and test sets
# - cross-validation and LOOCV
# - hyperparameter tuning
# - Next steps
# - check out kaggle
# - DataCamp courses
# - hyperparameter tuning in Python
# - deep learning in Python
# ___
#
| datacamp/model validation in Python.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: 'Python 3.8.5 64-bit (''tensor'': conda)'
# name: python_defaultSpec_1599484525539
# ---
# +
import scipy
import tensorflow as tf
from keras.datasets import mnist
from keras_contrib.layers.normalization.instancenormalization import InstanceNormalization
from keras.layers import Input, Dense, Reshape, Flatten, Dropout, Concatenate, Lambda
from keras.layers import BatchNormalization, Activation, ZeroPadding2D, Add
from keras.layers.advanced_activations import PReLU, LeakyReLU
from keras.layers.convolutional import UpSampling2D, Conv2D
from keras.layers.experimental.preprocessing import Resizing
from keras.applications import VGG19
from keras.models import Sequential, Model
from keras.optimizers import Adam
import datetime
import matplotlib.pyplot as plt
import sys
import numpy as np
import os
from glob import glob
import keras.backend as K
# -
class SRGAN():
    """Super-Resolution GAN (SRGAN, Ledig et al., CVPR 2017).

    Upscales 64x64 RGB images by a factor of 4 to 256x256 using:
      * a generator with 16 residual blocks and two 2x upsampling stages,
      * a PatchGAN-style discriminator,
      * a frozen VGG19 network providing a perceptual (feature) loss.
    """

    def __init__(self):
        # Input shapes: low-res 64x64x3, high-res is the 4x upscaled version.
        self.channels = 3
        self.lr_height = 64
        self.lr_width = 64
        self.lr_shape = (self.lr_height, self.lr_width, self.channels)
        self.hr_height = self.lr_height * 4
        self.hr_width = self.lr_width * 4
        self.hr_shape = (self.hr_height, self.hr_width, self.channels)
        # Number of residual blocks in the generator
        self.n_residual_blocks = 16
        # Shared optimizer (lr=2e-4, beta_1=0.5 -- the usual GAN settings)
        optimizer = Adam(0.0002, 0.5)
        # Frozen VGG19 used as a fixed feature extractor for the perceptual loss
        self.vgg = self.build_vgg()
        self.vgg.trainable = False
        self.vgg.compile(loss='mse', optimizer=optimizer, metrics=['accuracy'])
        # Calculate output shape of D (PatchGAN): one validity score per patch
        patch = int(self.hr_height / 2**4)
        self.disc_patch = (patch, patch, 1)
        # Number of filters in the first layer of G and D
        self.gf = 64
        self.df = 64
        # BUGFIX: train() reads self.data_loader, which was never assigned.
        # DataLoader is defined at module level in this file; the HR target
        # resolution must match the generator output shape.
        self.data_loader = DataLoader(dataset_name='dataset_06',
                                      img_res=(self.hr_height, self.hr_width))
        # Build and compile the discriminator
        self.discriminator = self.build_discriminator()
        self.discriminator.compile(loss='mse', optimizer=optimizer, metrics=['accuracy'])
        # Build the generator
        self.generator = self.build_generator()
        # High res. and low res. image inputs
        img_hr = Input(shape=self.hr_shape)
        img_lr = Input(shape=self.lr_shape)
        # Generate a high res. version from the low res. input
        fake_hr = self.generator(img_lr)
        # Extract VGG features of the generated image
        fake_features = self.vgg(fake_hr)
        # For the combined model we will only train the generator
        self.discriminator.trainable = False
        # Discriminator determines validity of generated high res. images
        validity = self.discriminator(fake_hr)
        # Combined model: adversarial loss (weighted 1e-3) + perceptual loss
        self.combined = Model([img_lr, img_hr], [validity, fake_features])
        self.combined.compile(loss=['binary_crossentropy', 'mse'],
                              loss_weights=[1e-3, 1],
                              optimizer=optimizer)

    def build_vgg(self):
        """Return a frozen VGG19 sub-model that outputs mid-level features.

        The output of layer 9 is compared between real and generated images
        to form the perceptual loss (VGG is a feature extractor here, not a
        discriminator).
        """
        img_vgg = Input(shape=self.hr_shape)
        vgg = VGG19(weights="imagenet", include_top=False, input_tensor=img_vgg)
        return Model(inputs=vgg.input, outputs=vgg.layers[9].output)

    def build_generator(self):
        """Build the SRResNet-style generator (low-res in, 4x high-res out)."""
        def residual_block(layer_input, filters):
            # conv -> relu -> BN -> conv -> BN with an identity skip connection
            d = Conv2D(filters, kernel_size=3, strides=1, padding='same')(layer_input)
            d = Activation('relu')(d)
            d = BatchNormalization(momentum=0.8)(d)
            d = Conv2D(filters, kernel_size=3, strides=1, padding='same')(d)
            d = BatchNormalization(momentum=0.8)(d)
            d = Add()([d, layer_input])
            return d

        def deconv2d(layer_input):
            # 2x upsampling followed by a conv + relu
            u = UpSampling2D(size=2)(layer_input)
            u = Conv2D(256, kernel_size=3, strides=1, padding='same')(u)
            u = Activation('relu')(u)
            return u

        # Low resolution image input
        img_lr = Input(shape=self.lr_shape)
        # Pre-residual block
        c1 = Conv2D(64, kernel_size=9, strides=1, padding='same')(img_lr)
        c1 = Activation('relu')(c1)
        # Propagate through the residual blocks
        r = residual_block(c1, self.gf)
        for _ in range(self.n_residual_blocks - 1):
            r = residual_block(r, self.gf)
        # Post-residual block with a long skip connection from c1
        c2 = Conv2D(64, kernel_size=3, strides=1, padding='same')(r)
        c2 = BatchNormalization(momentum=0.8)(c2)
        c2 = Add()([c2, c1])
        # Two 2x upsampling stages (4x total)
        u1 = deconv2d(c2)
        u2 = deconv2d(u1)
        # Generate the high resolution output in [-1, 1] (tanh)
        gen_hr = Conv2D(self.channels, kernel_size=9, strides=1, padding='same', activation='tanh')(u2)
        return Model(img_lr, gen_hr)

    def build_discriminator(self):
        """Build the discriminator (stacked conv blocks -> patch validity map)."""
        def d_block(layer_input, filters, strides=1, bn=True):
            # conv -> LeakyReLU (-> BN) building block
            d = Conv2D(filters, kernel_size=3, strides=strides, padding='same')(layer_input)
            d = LeakyReLU(alpha=0.2)(d)
            if bn:
                d = BatchNormalization(momentum=0.8)(d)
            return d

        # Input image (high resolution, real or generated)
        d0 = Input(shape=self.hr_shape)
        d1 = d_block(d0, self.df, bn=False)
        d2 = d_block(d1, self.df, strides=2)
        d3 = d_block(d2, self.df*2)
        d4 = d_block(d3, self.df*2, strides=2)
        d5 = d_block(d4, self.df*4)
        d6 = d_block(d5, self.df*4, strides=2)
        d7 = d_block(d6, self.df*8)
        d8 = d_block(d7, self.df*8, strides=2)
        d9 = Dense(self.df*16)(d8)
        d10 = LeakyReLU(alpha=0.2)(d9)
        validity = Dense(1, activation='sigmoid')(d10)
        return Model(d0, validity)

    def train(self, epochs, batch_size=1, sample_interval=50):
        """Alternately train the discriminator and the generator.

        BUGFIX: the original signature took no arguments, yet the body and
        the callers in this file use epochs/batch_size/sample_interval as
        keyword arguments.

        Parameters
        ----------
        epochs : int
            Number of training iterations (one batch per iteration).
        batch_size : int
            Number of image pairs per batch.
        sample_interval : int
            Save generated image samples every this many epochs.
        """
        start_time = datetime.datetime.now()
        for epoch in range(epochs):
            # ----------------------
            #  Train Discriminator
            # ----------------------
            # Sample images and their low-res counterparts
            imgs_hr, imgs_lr = self.data_loader.load_data(batch_size)
            # From the low res. images generate high res. versions
            fake_hr = self.generator.predict(imgs_lr)
            valid = np.ones((batch_size,) + self.disc_patch)
            fake = np.zeros((batch_size,) + self.disc_patch)
            # Train the discriminator (original images = real / generated = fake)
            d_loss_real = self.discriminator.train_on_batch(imgs_hr, valid)
            d_loss_fake = self.discriminator.train_on_batch(fake_hr, fake)
            d_loss = 0.5 * np.add(d_loss_real, d_loss_fake)
            # ------------------
            #  Train Generator
            # ------------------
            # Sample a fresh batch
            imgs_hr, imgs_lr = self.data_loader.load_data(batch_size)
            # The generator wants the discriminator to label generated images as real
            valid = np.ones((batch_size,) + self.disc_patch)
            # Extract ground truth image features using the pre-trained VGG19 model
            image_features = self.vgg.predict(imgs_hr)
            # Train the generator on adversarial + perceptual targets
            g_loss = self.combined.train_on_batch([imgs_lr, imgs_hr], [valid, image_features])
            elapsed_time = datetime.datetime.now() - start_time
            # Plot the progress
            print ("%d time: %s" % (epoch, elapsed_time))
            # If at save interval => save generated image samples
            if epoch % sample_interval == 0:
                # NOTE(review): sample_images() is not defined on this class --
                # implement it or remove this call before running. TODO confirm
                self.sample_images(epoch)
class DataLoader():
    """Load random batches of (high-res, low-res) image pairs for SRGAN.

    Images are scaled to [-1, 1] to match the generator's tanh output;
    low-res images are a 4x downscale of the high-res target.
    """

    def __init__(self, dataset_name, img_res=(136, 180)):
        # dataset_name: dataset identifier (currently unused -- the data
        # path in load_data() is hard-coded; TODO confirm intended layout).
        # img_res: (height, width) of the high-resolution target images.
        self.dataset_name = dataset_name
        self.img_res = img_res

    def load_data(self, batch_size=1, is_testing=False):
        """Return (imgs_hr, imgs_lr) arrays of `batch_size` random images."""
        # NOTE: data_type is computed but not used in the hard-coded path below.
        data_type = "train" if not is_testing else "test"
        # BUGFIX: the original pattern ('/datasets/dataset_06/half/') matched
        # only the directory itself; match the image files inside it instead.
        path = glob('/datasets/dataset_06/half/*')
        batch_images = np.random.choice(path, size=batch_size)
        imgs_hr = []
        imgs_lr = []
        for img_path in batch_images:
            img = self.imread(img_path)
            h, w = self.img_res
            low_h, low_w = int(h / 4), int(w / 4)
            # NOTE(review): scipy.misc.imresize/imread were removed in
            # SciPy >= 1.3 -- port to imageio/PIL when upgrading SciPy.
            img_hr = scipy.misc.imresize(img, self.img_res)
            img_lr = scipy.misc.imresize(img, (low_h, low_w))
            # If training => random horizontal flip for data augmentation
            if not is_testing and np.random.random() < 0.5:
                img_hr = np.fliplr(img_hr)
                img_lr = np.fliplr(img_lr)
            imgs_hr.append(img_hr)
            imgs_lr.append(img_lr)
        # Scale pixel values from [0, 255] to [-1, 1]
        imgs_hr = np.array(imgs_hr) / 127.5 - 1.
        imgs_lr = np.array(imgs_lr) / 127.5 - 1.
        return imgs_hr, imgs_lr

    def imread(self, path):
        """Read an image file as an RGB float array."""
        # BUGFIX: np.float was removed in NumPy 1.24; use the builtin float.
        return scipy.misc.imread(path, mode='RGB').astype(float)
# + tags=[]
# Build the SRGAN (compiles the VGG feature extractor, the discriminator
# and the combined generator model) and show the discriminator architecture.
gan = SRGAN()
gan.discriminator.summary()
# NOTE(review): as written above, SRGAN.train() is declared without
# parameters but is called with keyword arguments here -- make sure the
# signature accepts (epochs, batch_size, sample_interval) before running.
gan.train(epochs=30000, batch_size=1, sample_interval=50)
# -
# Duplicate training call -- presumably left over from re-running the cell.
gan.train(epochs=30000, batch_size=1, sample_interval=50)
| model/srgan/srgan.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# language: python
# name: python3
# ---
# # Implementing Logistic Regression
# steps
#
# 1) Collecting data
#
#
# 2) Analysing Data
#
#
# 3) Data Wrangling
#
#
# 4) Train & test
#
#
# 5) Accuracy check
# # Step1 - collecting the data
# +
import pandas as pd
import numpy as np
import seaborn as sns
# BUGFIX: bind pyplot (not the bare matplotlib package) to the conventional
# `plt` alias so `plt.show()` etc. work if used later.
import matplotlib.pyplot as plt
# %matplotlib inline
import math
# Step 1: load the Titanic passenger data and preview the first rows.
titanic_data=pd.read_csv('Titanic.csv')
titanic_data.head(10)
# -
print("total no. of passenger in data set :"+str(len(titanic_data)))
print(titanic_data.shape)
# # Step 2: Analysing the data
# Survival counts, overall and broken down by sex and passenger class.
sns.countplot(x='Survived' , data=titanic_data)
sns.countplot(x='Survived' , hue='Sex' , data=titanic_data)
sns.countplot(x='Survived' , hue='Pclass' , data=titanic_data)
# Distributions of age and fare.
titanic_data['Age'].plot.hist()
titanic_data['Fare'].plot.hist(bins=20,figsize=(15,5))
titanic_data.info()
sns.countplot(x='SibSp' , data=titanic_data)
# # Step3-data wrangling or cleaning the data
titanic_data.isnull()
titanic_data.isnull().sum()
# Visualise missingness: bright cells mark nulls.
sns.heatmap(titanic_data.isnull(),yticklabels=False,cmap="viridis")
# The bright cells show that the data has many null values in the Age and
# Cabin columns, and a few in the Embarked column.
sns.boxenplot(x="Pclass" , y="Age" , data=titanic_data)
# The blue box shows 1st class passengers are mostly aged 30-50 years.
#
# The orange box shows 2nd class passengers are mostly aged 25-35 years.
#
# The green box shows 3rd class passengers are mostly aged 20-30 years.
titanic_data.head()
# Drop Cabin (too many nulls to be useful).
titanic_data.drop("Cabin" , axis=1,inplace=True)
titanic_data.head()
# +
# Drop all remaining rows with NaN values using the dropna method.
titanic_data.dropna(inplace=True)
# -
sns.heatmap(titanic_data.isnull(),yticklabels=False,cmap="viridis")
# No null values remain, so the data is cleaned completely.
titanic_data.isnull().sum()
titanic_data.head()
# One-hot encode categorical columns (drop_first avoids collinearity).
sex=pd.get_dummies(titanic_data['Sex'],drop_first=True)
sex.head()
embarked=pd.get_dummies(titanic_data['Embarked'],drop_first=True)
embarked.head()
pcal=pd.get_dummies(titanic_data['Pclass'],drop_first=True)
pcal.head()
titanic_data=pd.concat([titanic_data,sex,embarked,pcal],axis=1)
titanic_data.head()
# Drop the original (now encoded or irrelevant) columns.
titanic_data.drop(['PassengerId' ,'Sex' ,'Name' , 'Ticket' , 'Embarked','Pclass'],axis=1,inplace=True)
titanic_data.head()
# # Step 4- Training and testing the data
# independent variables (features)
X=titanic_data.drop("Survived" ,axis=1)
# dependent variable (target)
y=titanic_data["Survived"]
from sklearn.model_selection import train_test_split
# Hold out 30% of the data for testing; fixed random_state for reproducibility.
X_train, X_test, y_train, y_test = train_test_split(X, y, test_size=0.3, random_state=1)
from sklearn.linear_model import LogisticRegression
logmodel=LogisticRegression()
logmodel.fit(X_train,y_train)
predictions=logmodel.predict(X_test)
# Step 5: evaluate the model on the held-out test set.
from sklearn.metrics import classification_report
classification_report(y_test,predictions)
from sklearn.metrics import confusion_matrix
confusion_matrix(y_test,predictions)
from sklearn.metrics import accuracy_score
accuracy_score(y_test,predictions)
# So here the accuracy of our model is approx 78%,
# which is quite good.
from sklearn.metrics import precision_score
precision_score(y_test,predictions)
from sklearn.metrics import f1_score
f1_score(y_test,predictions)
from sklearn.metrics import recall_score
recall_score(y_test,predictions)
| Titanic logistic reg/Logistic regression.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# language: python
# name: python3
# ---
import pandas as pd
import subprocess
import os, io, getpass
import psutil
def __MonitorNvidiaGPU():
    '''
    Function that monitors Running Processes on Nvidia GPU
    Returns a DataFrame (pid, process_name, cmdline, used_gpu_memory, utilization)
    '''
    # One-shot sample ('-c 1') of per-process memory and utilization counters.
    raw = subprocess.Popen('nvidia-smi pmon -c 1 -s mu',
                           shell=True, stdout=subprocess.PIPE).stdout.read()
    lines = raw.decode('utf-8').split('\n')
    # Drop the second line, which only describes the units of each column.
    del lines[1]
    # Strip the leading '# ' so the header becomes plain column names.
    lines[0] = lines[0].replace('# ', '')
    csv_rows = []
    for line in lines:
        # Collapse the whitespace-separated columns into CSV fields...
        fields = ','.join(line.strip().split())
        # ...and keep only the first 8 (drops the trailing command column).
        csv_rows.append(','.join(fields.split(',')[:8]))
    # Parse the assembled CSV text into a DataFrame.
    return pd.read_csv(io.StringIO('\n'.join(csv_rows)), header=0)
def __MonitorCPU(mostImportant=10):
    '''
    Snapshot this user's currently running processes via psutil.

    Returns a DataFrame (pid, cpu_percent, memory_percent, name, exe, status)
    with at most `mostImportant` rows, sorted by memory_percent descending.
    '''
    procs = []
    for proc in psutil.process_iter():
        # Skip this monitoring process itself.
        if proc.pid == os.getpid(): continue
        try:
            # BUG FIX: every psutil accessor (including status()) can raise if
            # the process exits between enumeration and inspection, so all of
            # them must live inside the try block, and NoSuchProcess /
            # ZombieProcess must be caught alongside AccessDenied.
            if proc.status() != 'running': continue
            if getpass.getuser() == proc.username():
                procs.append({'pid': proc.pid,
                              'cpu_percent': proc.cpu_percent(),
                              'memory_percent': proc.memory_percent(),
                              'name': proc.name(),
                              'exe': proc.exe(),
                              'status': proc.status()
                              })
        except (psutil.NoSuchProcess, psutil.AccessDenied, psutil.ZombieProcess):
            # Process vanished, is a zombie, or is off-limits -- ignore it.
            continue
    process_log = pd.DataFrame(procs, columns=['pid', 'cpu_percent', 'memory_percent', 'name', 'exe', 'status'])
    if not process_log.empty:
        print ('Logging Data')
        return process_log.sort_values(['memory_percent'], ascending=False)[:mostImportant]
    else:
        print ('Nothing to Log')
        return process_log
# Sample both monitors once and inspect the resulting frames.
df1 = __MonitorNvidiaGPU()
df1
df2 = __MonitorCPU()
df2
df1.pid
df2.pid
# Join GPU and CPU process info on pid (keeps processes present in both).
df1.merge(df2, on='pid', how='inner')
import datetime
from time import gmtime, strftime, sleep
# Simple console clock: overwrite the same line once per second.
while (True):
    print(strftime("%Y-%m-%d %H:%M:%S", gmtime()), end='\r', flush=True)
    # BUG FIX: only `sleep` is imported from `time` above; the original
    # `time.sleep(1)` raised NameError because the `time` module itself
    # was never imported.
    sleep(1)
| notebooks/Process_Monitor.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# language: python
# name: python3
# ---
import numpy as np
import scipy.stats
import pandas as pd
# Toy samples with deliberate outliers: ages 6 and 100, height 280 cm.
dummy_age = [20, 21, 24, 24, 28, 26, 19, 22, 26, 24, 21,
            19, 22, 28, 29, 6, 100, 25, 25, 28, 31]
dummy_height = [150, 151, 155, 153, 280, 160, 158, 157, 158, 145, 150,
               155, 155, 151, 152, 153, 160, 152, 157, 157, 160, 153]
# NOTE(review): dummy_age has 21 entries but dummy_height has 22;
# zip() silently drops the last height value when building the frame.
dummy_df = pd.DataFrame(list(zip(dummy_age, dummy_height)),
                columns =['Age', 'Height(cm)'])
dummy_df
# ## Calculate z-score using scipy.stats.zscore
scipy.stats.zscore(dummy_df['Height(cm)'])
# ### We can also use absolute values while calculating z_score
z_score_height = np.abs(scipy.stats.zscore(dummy_df['Height(cm)']))
# Flag rows whose height is more than 3 standard deviations from the mean.
dummy_df.iloc[np.where(z_score_height>3)]
# ## Let's see if Z-score works for Age as well
z_score_age = np.abs(scipy.stats.zscore(dummy_df['Age']))
dummy_df.iloc[np.where(z_score_age>3)]
| Section 3/Using Z-scores.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# language: python
# name: python3
# ---
# + [markdown] editable=true
# # Explore how Kafka Works
#
# In this section you reviewed Kafka's architecture and how it stores data. In this exercise, you
# will spend some time seeing how Kafka works.
#
# ## Topic Storage
#
# First, let's create a topic
#
# `kafka-topics --create --topic kafka-arch --partitions 1 --replication-factor 1 --zookeeper localhost:2181`
#
# ### <a name="dir"></a>Inspecting the Directory Structure
#
# Now that the topic is successfully created, let's see how Kafka stored it on disk.
#
# `ls -alh /var/lib/kafka/data | grep kafka-arch`
#
# What does the output look like?
#
# What kind of data is kept inside of the directory?
#
# `ls -alh /var/lib/kafka/data/kafka-arch*`
#
# If you try to open the file ending in `.log` is there anything in it?
#
# ### Produce Data
#
# Now that we have this topic, let's produce some data into it.
#
# `kafka-console-producer --topic "kafka-arch" --broker-list localhost:9092`
#
# Produce 5-10 messages.
#
# Once you're done, hit Ctrl+C to exit.
#
# Repeat the steps from [Inspecting the Directory Structure](#dir) and see how the results have
# changed.
#
#
# ## Topics and Partitions
#
# Now that you've seen what a topic with a single partition looks like, let's see what happens if we
# modify the number of partitions
#
# `kafka-topics --alter --topic kafka-arch --partitions 3 --zookeeper localhost:2181`
#
# Try repeating the steps from [the previous section](#dir). How many folders do you see now?
#
# Try modifying the number of partitions a few more times to see how Kafka modifies the data on disk.
#
# + [markdown] editable=true
#
| 2. Data Ingestion with Kafka & Kafka Streaming/Lesson2/startup (1).ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# language: python
# name: python3
# ---
# # ISCX2016 Dataset - Replication of Paper Results and Classifier Testing
# +
import os
import matplotlib.pyplot as plt
import numpy as np
import pandas as pd
import seaborn as sns
from sklearn import metrics
from sklearn.model_selection import train_test_split
from sklearn.ensemble import RandomForestClassifier
from sklearn.svm import SVC
# -
# # Dataset
# https://www.unb.ca/cic/datasets/url-2016.html
# +
# Dataset path
dataset_base_path = r"D:\\dev\\CyberML\\Datasets\\ISCXURL2016"
# Class-id -> name mapping (the Defacement class is excluded below).
# mapping_label = {0:"benign", 1:"spam", 2:"phishing", 3:"malware"}
mapping_label = {0: 'benign', 1: 'malware', 2: 'phishing', 3: 'spam'}
# Reverse lookup: class name -> numeric label.
mapping_label_r = dict((v, k) for k, v in mapping_label.items())
print(mapping_label)
print(mapping_label_r)
# +
urldata = pd.read_csv(os.path.join(
    dataset_base_path, "All_BestFirst.csv"), low_memory=False)
# Drop the Defacement class and encode the remaining labels as integers.
urldata = urldata[urldata["class"] != 'Defacement']
urldata["class"] = urldata["class"].apply(lambda x: mapping_label_r[x])
display(urldata)
print(urldata.info())
# +
# Split data into features X and labels Y, 70/30 train/test with fixed seed.
X = urldata.drop("class", axis=1, inplace=False)
Y = urldata["class"]
X_train, X_test, y_train, y_test = train_test_split(
    X, Y, test_size=0.30, random_state=42)
print("Done")
# Heatmap of pairwise (linear) correlations between the features.
corrmat = urldata.corr()
sns.set_style("darkgrid")
f, ax = plt.subplots(figsize=(13, 13))
sns.heatmap(corrmat, square=True, annot=True,
            annot_kws={'size': 10}, cmap='Blues')
plt.show()
# Random forest still gets high accuracy because features can be non linearly correlated
print(X_train.describe())
# -
# # Random Forest
rf = RandomForestClassifier(n_estimators=10, random_state=42)
rf.fit(X_train, y_train)
# Bar chart of impurity-based feature importances from the fitted forest.
plt.figure(figsize=(6, 4))
plt.bar([x for x in range(len(rf.feature_importances_))],
        rf.feature_importances_)
plt.xticks([i for i in range(len(X.columns))], X.columns, rotation=90)
plt.xlabel("Feature")
plt.ylabel("Feature importance")
plt.show()
# +
# Model Predictions
predicted = rf.predict(X_test)
acc = metrics.accuracy_score(y_test, predicted)
print("Accuracy = {:.2f}".format(acc))
cm = metrics.confusion_matrix(y_test, predicted)
print(cm)
print('Classification Report:')
print(metrics.classification_report(y_test, predicted, target_names=mapping_label.values()))
classes = [i for i in mapping_label.values()]
# Sorted class names happen to match label order here
# (benign, malware, phishing, spam == ids 0..3 in mapping_label).
x_axis_labels = list(sorted(mapping_label.values()))
y_axis_labels = list(sorted(mapping_label.values()))
sns.set_style("darkgrid")
plt.figure(figsize=(9, 9))
sns.set(font_scale=1.4)
# sns.heatmap(cm, annot=True, fmt="g", linewidths=.5, square = True, cmap = 'Blues_r', xticklabels=classes, yticklabels=classes);
p = sns.heatmap(cm, annot=True, fmt="g", linewidths=.5, square=True, cmap='Blues_r',
                xticklabels=x_axis_labels, yticklabels=y_axis_labels, annot_kws={
                    "size": 15},
                cbar=True, cbar_kws={"shrink": .80})
plt.ylabel('Actual label')
plt.xlabel('Predicted label')
p.set_xticklabels(x_axis_labels, size=13)
p.set_yticklabels(y_axis_labels, size=13)
all_sample_title = 'Accuracy Score: {:.2f}'.format(acc)
plt.title(all_sample_title, size=15)
plt.show()
# -
# # SVM Model
# SVC on the same train/test split as the forest above.
svm = SVC(gamma='auto', C=5)
svm.fit(X_train, y_train)
# +
# Evaluate: accuracy, confusion matrix, classification report, heatmap.
predicted = svm.predict(X_test)
acc = metrics.accuracy_score(y_test, predicted)
print("Accuracy = {:.2f}".format(acc))
cm = metrics.confusion_matrix(y_test, predicted)
print(cm)
print('Classification Report:')
print(metrics.classification_report(y_test, predicted, target_names=mapping_label.values()))
classes = [i for i in mapping_label.values()]
sns.set_style("darkgrid")
plt.figure(figsize=(9, 9))
sns.set(font_scale=1.4)
sns.heatmap(cm, annot=True, fmt="g", linewidths=.5, square=True,
            cmap='Blues_r', xticklabels=classes, yticklabels=classes)
plt.ylabel('Actual label')
plt.xlabel('Predicted label')
all_sample_title = 'Accuracy Score: {:.2f}'.format(acc)
plt.title(all_sample_title, size=15)
plt.show()
# +
# # Stop execution early
# raise KeyboardInterrupt
# +
# from sklearn.decomposition import PCA
from sklearn.manifold import TSNE
n_comp = 2
# # PCA plot
# pca = PCA(n_components=n_comp).fit(X_train)
# print(pca.explained_variance_ratio_)
# data2D_PCA = pca.transform(X_train)
# t-SNE plot: embed the training features into 2-D for visualization.
embeddings = TSNE(
    n_components=n_comp,
    # init='pca',
    # perplexity=200,
    # perplexity=10,
    # perplexity=7,
    n_jobs=6, verbose=1
)
data2D_TSNE = embeddings.fit_transform(X_train)
print("Final Embeddings KL Divergence: {}".format(embeddings.kl_divergence_))
# +
import matplotlib.cm as cm
# from mpl_toolkits.mplot3d import Axes3D
# data2D = data2D_PCA
data2D = data2D_TSNE
N = len(np.unique(y_train))
print(N, "classes")
# Combined plot
# https://stackoverflow.com/a/54944523/4017530
fig, ax = plt.subplots(figsize=(12, 8))
# fig = plt.figure(figsize = (12, 8))
# ax = plt.axes(projection='3d')
# One rainbow colour per class; scatter each class separately for the legend.
colors = cm.rainbow(np.linspace(0, 1, N))
for group, c in zip(np.unique(y_train), colors):
    ix = np.where(y_train == group)
    ax.scatter(data2D[ix, 0], data2D[ix, 1], color=c, label=mapping_label[group], alpha=0.9)
ax.legend()
plt.xlabel("t-SNE dimension 1")
plt.ylabel("t-SNE dimension 2")
plt.show()
| notebooks/supervised_features.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# ---
# <style>div.container { width: 100% }</style>
# <img style="float:left; vertical-align:text-bottom;" height="65" width="172" src="../assets/PyViz_logo_wm_line.png" />
# <div style="float:right; vertical-align:text-bottom;"><h2>Tutorial 03. Customizing Visual Appearance</h2></div>
# The previous tutorial focused on specifying elements and simple collections of them. This one explains how the visual appearance can be adjusted to bring out the most salient aspects of your data, or just to make the style match the overall theme of your document. We'll use data in Pandas, and HoloViews, Bokeh, and Matplotlib to display the results:
#
# <div style="margin: 10px">
# <a href="http://pandas.pydata.org"><img style="margin:8px; display:inline; object-fit:scale-down; max-height:140px" src="../assets/pandas.png"/></a>
# <a href="http://holoviews.org"><img style="margin:8px; display:inline; object-fit:scale-down; max-height:150px" src="../assets/holoviews.png"/></a>
# <a href="http://bokeh.pydata.org"><img style="margin:8px; display:inline; object-fit:scale-down; max-height:150px" src="../assets/bokeh.png"/></a>
# <a href="http://matplotlib.org"><img style="margin:8px; display:inline; object-fit:scale-down; max-height:150px" src="../assets/matplotlib_wm.png"/></a>
# </div>
#
# HoloViews explicitly makes the distinction between **data** and **plotting options**, which allows annotating the data with semantic metadata before deciding how to visualize the data. It also allows rendering the same object using different plotting libraries, such as Bokeh or Matplotlib.
#
#
# ## Preliminaries
#
# In the [annotating your data section](./02_Annotating_Data.ipynb), ``hv.extension('bokeh')`` was used at the start to load and activate the bokeh plotting extension. In this notebook, we will also briefly use [matplotlib](www.matplotlib.org) that will be loaded, but not yet activated, by listing it second:
import pandas as pd
import holoviews as hv
hv.extension('bokeh', 'matplotlib')
# ## Visualizing diamond data
#
# Let's find some interesting data to generate elements from, before we consider how to customize them. Here is a dataset of information about 50,000 individual diamonds (including their weight in carats and their cut, quality, and price), which provides some rich opportunities for visualization:
diamonds = pd.read_csv('../data/diamonds.csv')
diamonds.head()
# One obvious thing to look at is the relationship between the mass of the diamonds given in 'carat' and their 'price'. Since the dataset is large we will sample 1,000 random datapoints from the DataFrame and plot the 'carat' column against the 'price' as a Scatter:
hv.Scatter(diamonds.sample(5000), 'carat', 'price')
# There is clearly structure in this data, but the data is also clearly being overplotted and being squashed into a small plot. To fix the problems, we can start customizing the appearance of this object using the HoloViews [options system](http://holoviews.org/user_guide/Customizing_Plots.html). Later on in the tutorial, we will see an alternative way of avoiding overplotting using Datashader in [Working_with_Large_Datasets](./10_Working_with_Large_Datasets.ipynb).
#
# ## Types of option
#
# If we want to change the appearance of what we can already see in the plot, we're no longer focusing on the data and metadata stored in the elements, but about details of the presentation. Details specific to the final plot are stored and handled by the separate "options" system, not the element objects. HoloViews allows you to set three types of options:
#
# * **plot options**: Options that tell *HoloViews* how to *construct* the plot
# * **style options**: Options that tell the underlying *plotting library* how to *style* the plot
# * **normalization options**: Options that tell *HoloViews* how to *normalize* the various elements in the plot against each other (not covered in this tutorial)
#
#
# ### Plot options
#
# We noted that the data is too compressed in the x direction. Let us fix that by specifying the ``width`` plot option and additionally spread the data out along the y-axis by enabling a log axis using the ``logy`` option. We will also enable a Bokeh 'hover' tool letting us reveal more information about each datapoint:
# %%opts Scatter [width=600 logy=True tools=['hover']]
scatter = hv.Scatter(diamonds.sample(5000), 'carat', ['price', 'cut']).redim.label(carat='Carat (ct)',
price='Price ($)')
scatter
# Here you can see that it's still a plot of Price vs. Carat, but if you hover over a datapoint you can see that the 'cut' information is visible for each point as you visit it.
#
# The top line uses a special IPython/Jupyter syntax called the ``%%opts`` *cell magic* to specify the ``width`` plot option for all [``Scatter``](http://holoviews.org/reference/elements/bokeh/Scatter.html) objects in this cell. ``%%opts`` accepts a simple specification where we pass the ``width=600`` keyword argument to [``Scatter``](http://holoviews.org/reference/elements/bokeh/Curve.html) as a plot option (denoted by the *square brackets*).
#
# In this tutorial we will generally use this convenient, tab-completable IPython specific syntax, but you can of course set options in regular Python as well (see the [user guide](http://holoviews.org/user_guide/Customizing_Plots.html) for details):
scatter.options(width=500, height=200)
# %%opts Scatter
# Exercise: Try setting the height plot option of the Curve above.
# Note: %%opts must be on the *first* line of the cell
# Hint: the magic supports tab completion when the cursor is in the square brackets!
# Exercise: Try enabling the boolean show_grid plot option for the curve above
# #### Aside: ``hv.help``
#
# Tab completion helps discover what keywords are available, but you can get more complete help using the ``hv.help`` utility. For instance, to learn more about the options for ``hv.Scatter`` run ``hv.help(hv.Scatter)``:
# hv.help(hv.Scatter)
# ### Style options
#
# The plot options earlier instructed HoloViews to build a plot 600 pixels wide, when rendered with the Bokeh plotting extension. Now let's specify that the Bokeh glyph should be colored by the 'cut' column using the 'Set1' colormap and reduce the 'alpha' and 'size' of the points so we can see overlapping points better:
# %%opts Scatter [color_index='cut'] (alpha=0.5 cmap='Set1')
scatter
# Note how the plot options applied above to ``scatter`` are remembered! The ``%%opts`` magic is used to customize the *object* displayed as output for a particular code cell: behind the scenes HoloViews has linked the specified options to the ``scatter`` object via a hidden integer id attribute.
#
# Having used the ``%%opts`` magic on ``scatter`` again, we have now associated the 'alpha', 'size' and 'cmap' *style option* to it. In the options specification syntax, style options are the keywords in *parentheses* and are keywords defined and used by Bokeh to style the corresponding [scatter glyph](http://bokeh.pydata.org/en/latest/docs/user_guide/plotting.html#scatter-markers).
#
#
# Exercise: Display scatter without any new options to verify it stays colored
# +
# Exercise: Try setting the 'size' style options to 1
# -
# ## Switching to matplotlib
#
# Let us now view our curve with matplotlib using the ``%%output`` cell magic:
# %%output backend='matplotlib'
scatter
# All our options are gone! This is because the options are associated with the corresponding plotting extension---if you switch back to 'bokeh', the options will be applicable again. In general, options have to be specific to backends; e.g. the ``size`` style option accepted by Bokeh is called ``s`` in matplotlib:
# %%output backend='matplotlib'
# %%opts Scatter [aspect=4 fig_size=400] (color='blue' s=4 alpha=0.2)
scatter.select(carat=(0, 3))
# +
# %%output backend='matplotlib'
# Exercise: Apply the color_index and alpha options as above, but to the matplotlib plot
# -
# ### The ``%output`` line magic
#
# In the cells above we repeated ``%%output backend='matplotlib'`` to use matplotlib to render those particular cells. Instead of repeating ourselves with the cell magic, we can use a "line magic" (similar syntax to the cell magic but with one ``%``) to set things globally. Let us switch to matplotlib with a line magic and specify that we want SVG output instead of PNG:
# %output backend='matplotlib' fig='svg'
# Unlike the cell magic, the line magic doesn't need to be followed by any expression and can be used anywhere in the cell. Both the ``%output`` and ``%opts`` line magics set things globally so it is recommended you declare them at the top of your notebooks to avoid confusion when re-running notebook cells. Now let us look at the SVG matplotlib output we requested:
# %%opts Scatter [aspect=4 fig_size=400 xrotation=70] (color='green' s=10 marker='^')
scatter
# +
# Exercise: Verify for yourself that the output above is SVG and not PNG
# You can do this by right-clicking above then selecting 'Open Image in a new Tab' (Chrome) or 'View Image' (Firefox)
# -
# ## Switching back to bokeh
#
# In previous releases of HoloViews, it was typical to switch to matplotlib in order to export to PNG or SVG, because Bokeh did not support these file formats. Since [Bokeh 0.12.6](https://bokeh.github.io/blog/2017/6/13/release-0-12-6/) we can now easily use HoloViews to export Bokeh plots to a PNG file, as we will now demonstrate:
# %output backend='bokeh'
# By passing ``fig='png'`` and a ``filename='diamonds'`` to ``%output`` we can both render to PNG and save the output to file:
# %%output fig='png' filename='diamonds'
scatter
# Here we have requested PNG format using ``fig='png'`` and that the output should go to diamonds.png using ``filename='diamonds'``:
# ls *.png
# Bokeh also has some SVG support, but it is not yet exposed in HoloViews.
# ## Using ``group`` and ``label``
# The above examples showed how to customize by Element type, but HoloViews offers multiple additional levels of customization that should be sufficient to cover any purpose. For our last example, let us split our diamonds dataframe based on the clarity of the diamonds, selecting the lowest and highest clarity:
# Split the diamonds by clarity grade: I1 (lowest) vs IF (highest).
low_clarity = diamonds[diamonds.clarity=='I1']
high_clarity = diamonds[diamonds.clarity=='IF']
# We'll now introduce the [``Spikes``](http://holoviews.org/reference/elements/bokeh/Spikes.html) element, and display it with a large width, a log y-axis and some modifications to the xticks. We can specify those options for all following [``Spikes``](http://holoviews.org/reference/elements/bokeh/Spikes.html) elements using the ``%opts`` *line magic*:
# %opts Spikes [width=900 logx=True xticks=8 xrotation=90]
# This time we will visualize the same data as a dot graph by combining ``Scatter`` elements with the ``Spikes``, showing the distribution of prices between the low and high clarity groups.
#
# We can do this using the element ``group`` and ``label`` introduced in the [annotating your data](./02_Annotating_Data.ipynb) section as follows:
# %%opts Spikes.Diamonds.Low (color='blue')
# %%opts Spikes.Diamonds.High (color='red')
hv.Spikes( low_clarity, 'price', 'carat', group='Diamonds', label='Low') *\
hv.Scatter( low_clarity, 'price', 'carat', group='Diamonds', label='Low') *\
hv.Spikes( high_clarity, 'price', 'carat', group='Diamonds', label='High')*\
hv.Scatter(high_clarity, 'price', 'carat', group='Diamonds', label='High')
# Using the color option to distinguish between the two categories of data we can now see the clear difference between the two groups, showing that diamonds with a low clarity need to have much higher mass in carats to obtain the same price. Similar techniques can be used to provide arbitrarily specific customizations when needed.
# Exercise: Remove the two %%opts lines above and observe the effect
# Exercise: Give the 'Low' clarity scatter points a black 'line_color'
# +
# Optional Exercise: Try differentiating the two sets of spikes by group and not label
# -
# # Onwards
#
# We have now seen some of the ways you can customize the appearance of your visualizations. You can consult our [Customizing Plots](http://holoviews.org/user_guide/Customizing_Plots.html) user guide to learn about other approaches, including the ``hv.opts`` and ``hv.output`` utilities that let you set options per type without relying on notebook-specific syntax, and the ``.options`` method that lets you set options on particular objects. When called without any arguments, you can also use ``.opts()`` to clear any customizations that may be set on that object.
#
# You may also wish to consult the extra [A1 Exploration with Containers](./A1_Exploration_with_Containers.ipynb) tutorial, which gives examples of how the appearance of elements can be customized when viewed in containers. In the next tutorial, [Working with Tabular Data](./04_Working_with_Tabular_Data.ipynb) we will see how to use the flexibility offered by HoloViews when working with tabular data.
| examples/tutorial/03_Customizing_Visual_Appearance.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# language: python
# name: python3
# ---
import tensorflow as tf
from keras.models import Sequential
from keras.layers import Dense,Activation,Dropout,MaxPool2D,Conv2D,Flatten,ReLU
from keras.preprocessing.image import ImageDataGenerator
from keras.optimizers import Adam
from keras.utils import np_utils
import numpy as np
from keras.datasets import mnist
import matplotlib.pyplot as plt
import pandas as pd
from livelossplot.keras import PlotLossesCallback
# %matplotlib inline
help(mnist)
# Load the MNIST digit images: train and test splits of 28x28 grayscale arrays.
(x_train, y_train), (x_test, y_test)=mnist.load_data()
plt.imshow(x_train[2])
x_train[2]
y_train[2]
print( x_train.shape)
print(y_train.shape)
print(x_test.shape)
print(y_test.shape)
# Add an explicit single-channel dimension for the Conv2D layers.
x_train=x_train.reshape(x_train.shape[0],28,28,1).astype('float32')
x_test=x_test.reshape(x_test.shape[0],28,28,1).astype('float32')
print( x_train.shape)
print(x_test.shape)
# Scale pixel intensities from [0, 255] down to [0, 1].
x_train=x_train/255
x_test=x_test/255
# One-hot encode the digit labels for categorical_crossentropy.
y_train=np_utils.to_categorical(y_train)
y_test=np_utils.to_categorical(y_test)
# Number of output classes, inferred from the one-hot label width.
num_classes = y_test.shape[1]
num_classes
def myModel():
    """Build and compile the MNIST CNN classifier.

    Two conv/conv/pool/dropout stages followed by a dense head; the
    output layer width comes from the module-level `num_classes`.
    """
    layers = [
        Conv2D(32, (3, 3), input_shape=(28, 28, 1), activation='relu'),
        Conv2D(64, (3, 3), activation='relu'),
        MaxPool2D(pool_size=(2, 2)),
        Dropout(0.25),
        Conv2D(32, (3, 3), activation='relu'),
        Conv2D(64, (3, 3), activation='relu'),
        MaxPool2D(pool_size=(2, 2)),
        Dropout(0.25),
        Flatten(),
        Dense(128, activation='relu'),
        Dropout(0.25),
        Dense(256, activation='relu'),
        Dense(num_classes, activation='softmax'),
    ]
    model = Sequential(layers)
    model.compile(loss='categorical_crossentropy', optimizer='adam', metrics=['accuracy'])
    return model
model=myModel()
# Train for 12 epochs with live loss plotting; validate on the test set.
model.fit(x_train, y_train, validation_data=(x_test, y_test), epochs=12, batch_size=200,callbacks=[PlotLossesCallback()], verbose=2)
# Report the classification error (100% minus accuracy) on the test set.
scores = model.evaluate(x_test, y_test, verbose=0)
print("CNN Error: %.2f%%" % (100-scores[1]*100))
pwd
| MNIST.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# language: python
# name: python3
# ---
# +
import pandas as pd
import matplotlib.pyplot as plt
import numpy as np
from scipy.stats import linregress
import seaborn as sns
from sklearn.cluster import KMeans
# Load Data into DataFrame (ISO-8859-1 encoding for non-UTF-8 characters)
df_Avoidable_Death = pd.read_csv('Avoidable_Death.csv',encoding = 'ISO-8859-1')
df_Health_Risk_Factor = pd.read_csv('Health_Risk_Factor.csv',encoding = 'ISO-8859-1')
# Function to plot a linear regression line over the current scatter plot
# Arguments: slope(float), intercept(float)
# Return: None
def LinearRegressionLinePlot(slope, intercept):
    """Overlay the line y = intercept + slope * x on the current axes."""
    # Span the line across the current x-axis limits so it covers the plot.
    x_limits = np.array(plt.gca().get_xlim())
    plt.plot(x_limits, slope * x_limits + intercept, '-', color='black')
# Drop the ' ste_name' column, which is not needed for this analysis
df_Health_Risk_Factor = df_Health_Risk_Factor.drop(columns = [" ste_name"])
df_Avoidable_Death = df_Avoidable_Death.drop(columns = [" ste_name"])
# Rename the raw column codes to readable feature names
df_Health_Risk_Factor.rename(columns={'alchl_p_2_asr': 'RiskAlcoholConsumption_per100',\
                                      ' frt_intk_2_asr':'FruitAdequateIntake_per100',\
                                      ' lga_code': 'lga_code',\
                                      ' lga_name': 'lga_name',\
                                      ' hbld_pres_2_asr': 'HighBloodPressure_per100',\
                                      ' ovrwgt_p_2_asr': 'Overweight_per100',\
                                      ' lw_excse_2_asr':'LowExercise_per100',\
                                      ' wst_meas_p_2_asr': 'RiskWaistMeasurement_per100',\
                                      ' smkrs_p_2_asr':'Somker_per100' ,\
                                      ' psych_dstrs_2_asr': 'PsychologicalDistress_per100',\
                                      ' obese_p_2_asr': 'obese_per100'}, inplace=True)
df_Avoidable_Death.rename(columns = {'lga_name': 'lga_name',\
                                     ' lga_code': 'lga_code',\
                                     ' avoid_dths_cancer_2_asr':'cancer_per100,000',\
                                     ' avoid_dths_diab_2_asr': 'diabetes_per100,000',\
                                     ' avoid_dths_colo_2_asr':'colorectal_per100,000' ,\
                                     ' avoid_dths_pulm_2_asr': 'pulmonary _per100,000',\
                                     ' avoid_dths_cerb_2_asr': 'cerebrovascular_per100,000'}, inplace = True)
# Reorder the columns: identifiers first, then features
df_Health_Risk_Factor = df_Health_Risk_Factor[['lga_code','lga_name','FruitAdequateIntake_per100',
                                               'RiskAlcoholConsumption_per100', 'HighBloodPressure_per100',
                                               'Overweight_per100', 'LowExercise_per100',
                                               'RiskWaistMeasurement_per100', 'Somker_per100',
                                               'PsychologicalDistress_per100', 'obese_per100']]
df_Avoidable_Death = df_Avoidable_Death[['lga_code',
                                         'lga_name','diabetes_per100,000', 'cerebrovascular_per100,000',
                                         'colorectal_per100,000', 'cancer_per100,000',
                                         'pulmonary _per100,000']]
# Sort both datasets on 'lga_code' in ascending order
# NOTE(review): sort_values is not in-place here, so these two lines have no
# lasting effect; the trailing semicolons only suppress notebook output.
df_Avoidable_Death.sort_values(by = ['lga_code']);
df_Health_Risk_Factor.sort_values(by = ['lga_code']);
# Integrate the two datasets on the features 'lga_code' and 'lga_name'
# Integrated dataset of Avoidable Death and Health Risk Factor
left = df_Avoidable_Death
right = df_Health_Risk_Factor
BigDF = pd.merge(left, right, on = ['lga_code', 'lga_name'])
# Solution for Missing Values
Number_of_Null_Values_per_Row = BigDF.isnull().sum(axis = 1)
Index_of_Number_of_Null_Values_per_Row_Excess_Three = Number_of_Null_Values_per_Row\
.loc[Number_of_Null_Values_per_Row>3]
# Delete records containing more than 3 missing values (exclusive)
BigDF = BigDF.drop(Index_of_Number_of_Null_Values_per_Row_Excess_Three.index)
# Fill the remaining null values with each column's mean value
Mean_Values_All = BigDF.mean()
BigDF_Keys = BigDF.keys()
for i in BigDF_Keys[2:]:
    BigDF[i].fillna(Mean_Values_All[i], inplace=True)
# Then separate the two datasets again for further use
df_new_Avoidable_Death = BigDF.iloc[:, 2:7]
df_new_Health_Risk_Factor= BigDF.iloc[:, 7:16]
# Add a new feature: the sum of all kinds of avoidable death for every remaining area
Avoidable_Death_Total = BigDF.iloc[:, 2:7].sum(axis=1)
BigDF['Avoidable_Death_Total'] = Avoidable_Death_Total
df_new_Avoidable_Death['Avoidable_Death_Total'] = Avoidable_Death_Total
# Add a new feature: the approximate sum of people having a health risk factor
# for every remaining area; FruitAdequateIntake_per100 is excluded because it
# is a healthy feature
Health_Risk_Factor_Total = BigDF.iloc[:, 8:16].sum(axis = 1)
BigDF['Health_Risk_Factor_Total'] = Health_Risk_Factor_Total
df_new_Health_Risk_Factor['Health_Risk_Factor_Total'] = Health_Risk_Factor_Total
# Ratio = Avoidable_Death_Total over Health_Risk_Factor_Total
Ratio = BigDF['Avoidable_Death_Total']/BigDF['Health_Risk_Factor_Total']
BigDF['Ratio'] = Ratio
# Outlier Detection and Solving
# Boxplot for the total number of avoidable deaths per 100,000 for each area
plt.boxplot(BigDF['Avoidable_Death_Total'])
plt.ylabel('Population per 100, 000')
plt.xlabel('Total number of Avoidable Death')
plt.title('Total number of Avoidable Death \n in Victoria in 2015')
plt.show()
# Boxplot for the population having a health risk factor per 100
plt.boxplot(BigDF['Health_Risk_Factor_Total'])
plt.ylabel('Population per 100')
plt.xlabel('Population have health risk factor')
plt.title('Populayion have health risk factor \n per 100 in Victorial 2015')
plt.show()
# Boxplot for the ratio of the two totals
plt.boxplot(BigDF['Ratio'])
plt.ylabel('Avoidable_Death_Total_\nOver Health_Risk_Factor_Total')
plt.xlabel('Ratio')
plt.title('Avoidable_Death_Total_Over_Health_Risk_Factor_Total \n in Victorial 2015')
plt.show()
# Find the area with an extremely high number of people who died from
# avoidable death, by extracting the record with the max Avoidable_Death_Total.
# BUG FIX: sort descending -- the original ascending sort followed by head(1)
# returned the MINIMUM, contradicting the stated intent (compare the Ratio
# query below, which correctly uses ascending = False).
BigDF.sort_values(['Avoidable_Death_Total'], ascending = False).head(1)
# Find the area with an extremely high ratio
# - by extracting the record with max Ratio
BigDF.sort_values(['Ratio'], ascending = False).head(1)
# Scatter plot of total avoidable deaths against total health risk factors
plt.scatter(BigDF['Avoidable_Death_Total'], BigDF['Health_Risk_Factor_Total'], color='red')
plt.title("Total Avoidable Death per 100,000\n VS. Population have health risk factor per 100")
plt.ylabel("Population have health risk factor per 100")
plt.xlabel("Total Avoidable Death per 100,000")
# plot the linear regression line
Info = linregress(BigDF['Avoidable_Death_Total'], BigDF['Health_Risk_Factor_Total'])
LinearRegressionLinePlot(Info[0], Info[1])
plt.show()
print("Slope:%.2f," % (Info[0]), "Intercept:%.2f." % (Info[1]))
print("Pearson r is %.2f" % (BigDF['Avoidable_Death_Total'].corr(BigDF['Health_Risk_Factor_Total'])))
#compute the Pearson correlation matrix of features of avoidable death Against features of health risk factors
Corr_Matric_All = BigDF.iloc[:, 2:].corr()
Result = Corr_Matric_All[['diabetes_per100,000','cancer_per100,000',\
'colorectal_per100,000', 'pulmonary _per100,000',\
'cerebrovascular_per100,000']].loc\
[['FruitAdequateIntake_per100','obese_per100', 'RiskWaistMeasurement_per100',\
'LowExercise_per100', 'PsychologicalDistress_per100', 'Somker_per100', \
'RiskAlcoholConsumption_per100', 'HighBloodPressure_per100',
'Overweight_per100']]
# Display the correlation matrix
Result
# plot the heatmap of correlation matrix
ax = sns.heatmap(Result, cmap="bwr", square = True)
ax.xaxis.tick_top()
plt.xticks(rotation = 90)
ax.set_title('Pearson Correlation about\n Avoidable Death against Health Risk Factor',\
rotation='horizontal',x=0.5 ,y= -0.22)
plt.show()
BigDF = BigDF.drop(columns = ['RiskAlcoholConsumption_per100', 'HighBloodPressure_per100', 'Overweight_per100'])
df_new_Health_Risk_Factor = df_new_Health_Risk_Factor.drop(columns = ['RiskAlcoholConsumption_per100', 'HighBloodPressure_per100', 'Overweight_per100'])
print(df_new_Health_Risk_Factor.keys())
#compute the Pearson correlation matrix of features of avoidable death Against features of health risk factors
Corr_Matric_All = BigDF.iloc[:, 2:].corr()
Result = Corr_Matric_All[['diabetes_per100,000','cancer_per100,000',\
'colorectal_per100,000', 'pulmonary _per100,000',\
'cerebrovascular_per100,000']].loc\
[['FruitAdequateIntake_per100','obese_per100', 'RiskWaistMeasurement_per100',\
'LowExercise_per100', 'PsychologicalDistress_per100', 'Somker_per100']]
# Display the correlation matrix
Result
# plot the heatmap of correlation matrix
ax = sns.heatmap(Result, cmap="bwr", square = True)
ax.xaxis.tick_top()
plt.xticks(rotation = 90)
ax.set_title('Pearson Correlation about\n Avoidable Death against Health Risk Factor',\
rotation='horizontal',x=0.5 ,y= -0.22)
plt.show()
# Bar Chart for Avoidable Death Causes for each area
df1 = df_new_Avoidable_Death.sort_values(by = "Avoidable_Death_Total", ascending = False)
ax = df1.iloc[:, :5].plot(kind="bar", stacked=True, figsize = (20,10))
ax.set_xticklabels(BigDF['lga_name'])
ax.set_title("Bar Chart for Avoidable Death Causes for each area")
# Bar Chart for Health Risk Factores for each area
df2 = df_new_Health_Risk_Factor.sort_values(by = "Health_Risk_Factor_Total", ascending = False)
ax2 = df2.iloc[:,:6].plot(kind="bar", stacked=True, figsize = (20,10))
ax2.set_xticklabels(BigDF['lga_name'])
ax2.set_title("# Bar Chart for Avoidable Death Causes for each area")
plt.show()
df1 = df_new_Avoidable_Death.sort_values(by = "Avoidable_Death_Total", ascending = False)
df2 = df_new_Health_Risk_Factor.sort_values(by = "Health_Risk_Factor_Total", ascending = False)
Key_df1 = df1.keys()
Key_df2 = df2.keys()
ax = plt.pie(df1.sum().iloc[:5], labels = Key_df1[:5], autopct = '%1.2f%%')
plt.title("Avoidable Death in Victoria in 2015")
plt.show(ax)
ax2 =plt.pie(df2.sum().iloc[:6], labels = Key_df2[:6], autopct = '%1.2f%%')
plt.title("Health Risk Factor in Victorial in 2015")
plt.show(ax2)
import numpy as np
import math,random
from scipy.spatial.distance import pdist, squareform
import pandas as pd
import matplotlib.pyplot as plt
# %matplotlib inline
def VAT(R):
    """
    VAT algorithm adapted from matlab version:
    http://www.ece.mtu.edu/~thavens/code/VAT.m

    Builds a minimum-spanning-tree (Prim-style) ordering of the objects so
    that clusters appear as dark blocks on the diagonal of the reordered
    dissimilarity matrix.

    Args:
        R (n*n double): Dissimilarity data input
        R (n*D double): vector input (R is converted to sq. Euclidean distance)
    Returns:
        RV (n*n double): VAT-reordered dissimilarity data
        C (n int): Connection indexes of MST in [0,n)
        I (n int): Reordered indexes of R, the input data in [0,n)
    """
    R = np.array(R)
    N, M = R.shape
    # If R is not square it is treated as raw feature vectors and converted
    # to a pairwise (Euclidean) dissimilarity matrix.
    if N != M:
        R = squareform(pdist(R))
    # J holds the indices not yet placed in the ordering; I the placed ones.
    J = list(range(0, N))
    # Seed: pick one endpoint of the globally largest dissimilarity.
    y = np.max(R, axis=0)
    i = np.argmax(R, axis=0)
    j = np.argmax(y)
    y = np.max(y)
    I = i[j]
    del J[I]
    # Second vertex: the unplaced object closest to the seed.
    y = np.min(R[I,J], axis=0)
    j = np.argmin(R[I,J], axis=0)
    I = [I, J[j]]
    J = [e for e in J if e != J[j]]
    C = [1,1]
    # Greedily grow the MST: each step attaches the unplaced object that is
    # closest to any already-placed object, recording which placed object it
    # connects to (C).
    for r in range(2, N-1):
        y = np.min(R[I,:][:,J], axis=0)
        i = np.argmin(R[I,:][:,J], axis=0)
        j = np.argmin(y)
        y = np.min(y)
        I.extend([J[j]])
        J = [e for e in J if e != J[j]]
        C.extend([i[j]])
    # Attach whatever remains (the loop stops at N-1, leaving the last object).
    y = np.min(R[I,:][:,J], axis=0)
    i = np.argmin(R[I,:][:,J], axis=0)
    I.extend(J)
    C.extend(i)
    # RI is the inverse permutation of I (unused beyond this point here).
    RI = list(range(N))
    for idx, val in enumerate(I):
        RI[val] = idx
    # Reorder both rows and columns of R by the MST visit order.
    RV = R[I,:][:,I]
    return RV.tolist(), C, I
df3 = pd.DataFrame({'Avoidabel_Death_Total': BigDF['Avoidable_Death_Total']})
sns.heatmap(df3,cmap='viridis',xticklabels=True,yticklabels=False)
plt.show()
#### Visualise the dissimilarity matrix using a heatmap (without applying VAT) ####
df4 = df3.copy().to_numpy()  # fixed: DataFrame.as_matrix() was deprecated in 0.23 and removed in pandas 1.0
np.random.shuffle(df4)  # randomise the order of rows (objects)
sq = squareform(pdist(df4))  # compute the pairwise dissimilarity matrix
ax = sns.heatmap(sq, cmap='viridis', xticklabels=False, yticklabels=False)
ax.set(xlabel='Objects', ylabel='Objects')
plt.show()
#####Apply VAT Algorithm to Iris dataset and visualise using heatmap########
RV, C, I = VAT(df3)
x=sns.heatmap(RV,cmap='viridis',xticklabels=False,yticklabels=False)
x.set(xlabel='Objects', ylabel='Objects')
plt.show()
df_new_Avoidable_Death['Avoidable_Death_Total'] = BigDF['Avoidable_Death_Total']
df_new_Avoidable_Death['lga_code'] = BigDF['lga_code']
df_new_Avoidable_Death['lga_name'] = BigDF['lga_name']
df_new_Avoidable_Death.to_csv('new_avoidable.csv', sep = ',')
Avoidable_Death_Total = BigDF["Avoidable_Death_Total"]
# Function for scatter ploting and compute pearson correlation as well as linear regression
# @Argument: s1(Series) s2(Series) health_risk_factor_name(string)
# Return: None
def Scatter_and_Pearson_Corr(s2, s1, health_risk_factor_name):
    """Scatter-plot s1 (x-axis) against s2 (y-axis), overlay the least-squares
    line, and print the slope, intercept and Pearson correlation.

    NOTE: the parameter order is (s2, s1, ...) — callers pass the avoidable
    death totals first, which end up on the x-axis.

    @Argument: s2 (Series), s1 (Series), health_risk_factor_name (string)
    Return: None
    """
    # Scatter plot of the two series.
    plt.scatter(s1, s2, color='red')
    plt.xlabel("Total Avoidable Death per 100,000")
    plt.ylabel("Population "+ health_risk_factor_name)
    plt.title("Total Avoidable Death per 100,000\n VS. "+ health_risk_factor_name)
    # Fit and draw the linear-regression line, then report its coefficients.
    fit = linregress(s1, s2)
    LinearRegressionLinePlot(fit[0], fit[1])
    plt.show()
    print("Slope:%.2f," % (fit[0]), "Intercept:%.2f." % (fit[1]))
    print("Pearson r is %.2f" % (s1.corr(s2)))
Scatter_and_Pearson_Corr(Avoidable_Death_Total, BigDF['FruitAdequateIntake_per100'],\
"FruitAdequateIntake_per100")
Scatter_and_Pearson_Corr(Avoidable_Death_Total, BigDF['obese_per100'],\
"obese_per100")
Scatter_and_Pearson_Corr(Avoidable_Death_Total, BigDF['RiskWaistMeasurement_per100'],\
"RiskWaistMeasurement_per100")
Scatter_and_Pearson_Corr(Avoidable_Death_Total, BigDF['LowExercise_per100'],\
"LowExercise_per100")
Scatter_and_Pearson_Corr(Avoidable_Death_Total, BigDF['PsychologicalDistress_per100'],\
"PsychologicalDistress_per100")
Scatter_and_Pearson_Corr(Avoidable_Death_Total, BigDF['Somker_per100'],\
"Somker_per100")
# -
print(df_new_Health_Risk_Factor.keys())
print(df_new_Avoidable_Death.keys())
| phase3/Untitled.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python [default]
# language: python
# name: python3
# ---
# So the last five minutes were spent on writing this simple python script that takes any exception as input and searches for a solution on Stack Overflow.
# It was inspired by [this joke on reddit](https://www.reddit.com/r/ProgrammerHumor/comments/45oyre/the_ultimate_php_exception_handler).
# Anyway, the code is very simple:
# +
import webbrowser
def handle(err, url='http://stackoverflow.com'):
    """Open the default browser and search Stack Overflow for *err*.

    Parameters
    ----------
    err : Exception or str
        The exception (or message text) to search for.
    url : str
        Base site whose /search endpoint is queried.
    """
    from urllib.parse import quote_plus  # stdlib; kept local so the cell stays self-contained
    # fixed: err.replace(' ', '+') only handled spaces, leaving characters such
    # as '/', '&', ':' and '(' unescaped, which corrupts the query string for
    # many real exception messages; quote_plus percent-encodes everything.
    url += '/search?q=' + quote_plus('python ' + str(err))
    webbrowser.open(url)
# -
# Now an exception can be treated like this:
try:
x = 1 / 0
except Exception as e: # note the Python 3 syntax
handle(e)
# The function will construct a search query string and open the resulting url in a new tab of the default browser.
| _blog/_notebooks/2016-02-25-ultimate-exception-handler.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: 'Python 3.7.4 64-bit (''base'': conda)'
# name: python37464bitbaseconda0d4e73dbffd14058a7eacf93a44c56a7
# ---
# # Classificação de Vetores de Caracteríticas de Janelas
import numpy as np
import pandas as pd
import matplotlib.pyplot as plt
import seaborn
import os
# +
PATH = os.getcwd().replace('Notebooks','')
eeg = np.load(PATH+'vars/feature_windows_eeg.npy')
ecg = np.load(PATH+'vars/feature_windows_ecg.npy')
emg = np.load(PATH+'vars/feature_windows_emg.npy')
# -
# ## Funções Auxiliares
# +
from sklearn.model_selection import cross_val_predict
from sklearn.metrics import confusion_matrix, auc, roc_curve
from sklearn import preprocessing
def org(data):
    """Split a samples-by-features array whose LAST column is the class label.

    Returns (X, y) where X is everything but the last column and y is the
    label column run through a fitted sklearn LabelEncoder.
    """
    encoder = preprocessing.LabelEncoder()
    features = data[:, :-1]
    labels = encoder.fit(data[:, -1]).transform(data[:, -1])
    return features, labels
def conf_matrix(y, y_pred):
    """Return the confusion matrix of y vs. y_pred, normalised per true-class row."""
    raw = confusion_matrix(y, y_pred)
    row_totals = raw.sum(axis=1)[:, np.newaxis]
    return raw.astype('float') / row_totals
def plot_cm(cm):
    """Render a (row-normalised) 2x2 confusion matrix as an annotated heatmap.

    cm: 2x2 array-like; columns are labelled 'No'/'Yes'.
    """
    con_df = pd.DataFrame(data=cm, columns=['No', 'Yes'])
    # fixed: this notebook does `import seaborn` (no alias), so the original
    # `sns.heatmap(...)` raised NameError.
    seaborn.heatmap(con_df, annot=True, cmap=plt.cm.Blues)
    plt.tight_layout()
    plt.ylabel('True label')
    plt.xlabel('Predicted label')
def metrics(cm):
    """Return [accuracy, sensitivity, specificity] (percent, rounded to 4 dp).

    cm is a 2x2 confusion matrix produced by sklearn's confusion_matrix
    (optionally row-normalised), whose layout is [[TN, FP], [FN, TP]].
    """
    # fixed: the original unpacked `VP, FN, FP, VN = cm.ravel()`, i.e. it read
    # TN as TP (and vice versa), which swapped sensitivity and specificity.
    TN, FP, FN, TP = cm.ravel()
    return np.array([np.round(100*(TP+TN)/cm.sum(), decimals=4),   # accuracy
                     np.round((100*TP/(TP+FN)), decimals=4),       # sensitivity (recall)
                     np.round((100*TN/(TN+FP)), decimals=4)])      # specificity
# -
# ## Classificadores
# +
from sklearn.neighbors import KNeighborsClassifier
from sklearn.svm import SVC
from sklearn.discriminant_analysis import LinearDiscriminantAnalysis
from sklearn.discriminant_analysis import QuadraticDiscriminantAnalysis
from sklearn.naive_bayes import GaussianNB
def KNN(X,y):
    """10-fold cross-validated k-nearest-neighbours (k=5); returns [acc, sens, spec]."""
    clf = KNeighborsClassifier(n_neighbors=5)
    predictions = cross_val_predict(clf, X, y, cv=10)
    return metrics(conf_matrix(y, predictions))
def svm_rbf(X,y):
    """10-fold cross-validated SVM with an RBF kernel; returns [acc, sens, spec]."""
    clf = SVC(kernel='rbf',probability=True)
    predictions = cross_val_predict(clf, X, y, cv=10)
    return metrics(conf_matrix(y, predictions))
def svm_poly(X,y):
    """10-fold cross-validated SVM with a polynomial kernel; returns [acc, sens, spec]."""
    clf = SVC(kernel='poly',probability=True)
    predictions = cross_val_predict(clf, X, y, cv=10)
    return metrics(conf_matrix(y, predictions))
def nb(X,y):
    """10-fold cross-validated Gaussian Naive Bayes; returns [acc, sens, spec]."""
    clf = GaussianNB()
    predictions = cross_val_predict(clf, X, y, cv=10)
    return metrics(conf_matrix(y, predictions))
def lda(X,y):
    """10-fold cross-validated Linear Discriminant Analysis; returns [acc, sens, spec]."""
    clf = LinearDiscriminantAnalysis()
    predictions = cross_val_predict(clf, X, y, cv=10)
    return metrics(conf_matrix(y, predictions))
def qda(X,y):
    """10-fold cross-validated Quadratic Discriminant Analysis; returns [acc, sens, spec]."""
    clf = QuadraticDiscriminantAnalysis()
    predictions = cross_val_predict(clf, X, y, cv=10)
    return metrics(conf_matrix(y, predictions))
# -
# ## Classificação EEG
# +
X, y = org(eeg)
met = []
index_class = ["knn","svm_rbf","svm_poly","nb","lda","qda"]
col_metrics = ['ac','sens','esp']
met.append(KNN(X,y))
met.append(svm_rbf(X,y))
met.append(svm_poly(X,y))
met.append(nb(X,y))
met.append(lda(X,y))
met.append(qda(X,y))
pd.DataFrame(data=np.array(met),columns=col_metrics,index=index_class)
# -
# ## Classficação ECG
# +
X, y = org(ecg)
met = []
index_class = ["knn","svm_rbf","svm_poly","nb","lda","qda"]
col_metrics = ['ac','sens','esp']
met.append(KNN(X,y))
met.append(svm_rbf(X,y))
met.append(svm_poly(X,y))
met.append(nb(X,y))
met.append(lda(X,y))
met.append(qda(X,y))
pd.DataFrame(data=np.array(met),columns=col_metrics,index=index_class)
# -
# ## Classficação EMG
# +
X, y = org(emg)
met = []
index_class = ["knn","svm_rbf","svm_poly","nb","lda","qda"]
col_metrics = ['ac','sens','esp']
met.append(KNN(X,y))
met.append(svm_rbf(X,y))
met.append(svm_poly(X,y))
met.append(nb(X,y))
met.append(lda(X,y))
met.append(qda(X,y))
pd.DataFrame(data=np.array(met),columns=col_metrics,index=index_class)
# -
# # Combinando Sinais
# ## EEG - ECG - EMG
# +
aux = np.hstack((eeg[:,:-1],ecg[:,:-1]))
aux2 = np.hstack((aux,emg))
X, y = org(aux2)
met = []
index_class = ["knn","svm_rbf","svm_poly","nb","lda","qda"]
col_metrics = ['ac','sens','esp']
met.append(KNN(X,y))
met.append(svm_rbf(X,y))
met.append(svm_poly(X,y))
met.append(nb(X,y))
met.append(lda(X,y))
met.append(qda(X,y))
pd.DataFrame(data=np.array(met),columns=col_metrics,index=index_class)
# -
# ## ECG - EMG
# +
aux = np.hstack((ecg[:,:-1],emg))
# aux2 = np.hstack((aux,emg))
X, y = org(aux)
met = []
index_class = ["knn","svm_rbf","svm_poly","nb","lda","qda"]
col_metrics = ['ac','sens','esp']
met.append(KNN(X,y))
met.append(svm_rbf(X,y))
met.append(svm_poly(X,y))
met.append(nb(X,y))
met.append(lda(X,y))
met.append(qda(X,y))
pd.DataFrame(data=np.array(met),columns=col_metrics,index=index_class)
# -
# ## EEG - ECG
# +
aux = np.hstack((eeg[:,:-1],ecg))
# aux2 = np.hstack((aux,emg))
X, y = org(aux)
met = []
index_class = ["knn","svm_rbf","svm_poly","nb","lda","qda"]
col_metrics = ['ac','sens','esp']
met.append(KNN(X,y))
met.append(svm_rbf(X,y))
met.append(svm_poly(X,y))
met.append(nb(X,y))
met.append(lda(X,y))
met.append(qda(X,y))
pd.DataFrame(data=np.array(met),columns=col_metrics,index=index_class)
# -
# ## EEG - EMG
# +
aux = np.hstack((eeg[:,:-1],emg))
# aux2 = np.hstack((aux,emg))
X, y = org(aux)
met = []
index_class = ["knn","svm_rbf","svm_poly","nb","lda","qda"]
col_metrics = ['ac','sens','esp']
met.append(KNN(X,y))
met.append(svm_rbf(X,y))
met.append(svm_poly(X,y))
met.append(nb(X,y))
met.append(lda(X,y))
met.append(qda(X,y))
pd.DataFrame(data=np.array(met),columns=col_metrics,index=index_class)
# -
emg.shape
| Notebooks/classifiers.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3.8.3 32-bit
# language: python
# name: python38332bit9c047e7842d149818f878efc40058254
# ---
import numpy as np
import pandas as pd
import seaborn as sns
import matplotlib.pyplot as plt
import warnings
warnings.filterwarnings('ignore')
warnings.warn("this will not show")
df = pd.read_json("scout_car.json", lines=True)
pd.options.display.max_rows = 100
pd.options.display.max_columns = 100
df["Comfort&Convenience"] = df["\nComfort & Convenience\n"]
df["Entertainment&Media"] = df["\nEntertainment & Media\n"]
df["Extras"] = df["\nExtras\n"]
df["Safety&Security"] = df["\nSafety & Security\n"]
drop_columns = ["kW","prev_owner","Next Inspection","null","Body","Full Service","Non-smoking Vehicle","Electricity consumption",
"Last Service Date","Other Fuel Types", "Availability","Last Timing Belt Service Date", "Available from",
"\nComfort & Convenience\n","\nEntertainment & Media\n","\nExtras\n","\nSafety & Security\n","description",
"Body Color Original","Model Code",'short_description','url','registration','Emission Label','Country version',
'Offer Number']
df.drop(drop_columns, axis = 1, inplace = True)
df.shape
df.isnull().sum()/df.shape[0]*100
# def show_nans(df, limit):
# missing = df.isnull().sum()*100/df.shape[0]
# return missing.loc[lambda x : x > limit]
# ## make_model
# ## body_type
# ## price
# ## vat
# ## km
df["km"] = df.km.str.replace(',','').str.extract('(\d{1,8})')[0].astype('float')
# ## Previous Owners
df["Previous_Owners"] = [item[0] if type(item) == list else item for item in df["Previous Owners"]]
df["Previous_Owners"] = df["Previous_Owners"].str.strip("\n").astype('float')
df["Previous_Owners"].value_counts(dropna=False)
df.drop("Previous Owners", axis=1, inplace=True)
# ## hp
df["hp_kW"] = df.hp.str.extract('(\d{1,4})')[0].astype('float')
# +
#Alternatif yöntem
#oto.replace({"hp" : {" kW" : "", "-" : "0"}}, regex = True, inplace = True)
#oto["hp_kw"] = pd.to_numeric(oto.hp)
# -
df.drop('hp', axis=1, inplace=True)
# ## Type
df["Type"] = df.Type.str[1]
df['Type'].value_counts(dropna=False)
# ## Warranty
df["Warranty"] = df.Warranty.apply(lambda x : x[0] if type(x)==list else x)
df["Warranty"] = df.Warranty.str.strip("\n").str.extract('(\d{1,2})')[0].astype("float")
# ## Inspection new
df["Inspection_new"] = [item[0] if type(item) == list else item for item in df["Inspection new"]]
df["Inspection_new"] = df["Inspection_new"].str.strip("\n")
df.drop("Inspection new", axis=1, inplace=True)
# ## Make
df["Make"] = df.Make.str.strip('\n')
df.drop("Make", axis=1, inplace=True)
# ## Model
df["Model"] = df.Model.str[1]
df.drop("Model", axis=1, inplace=True)
# ## First Registration & age
df['First Registration'] = df['First Registration'].str[1].astype('float')
df['age'] = 2019 - df['First Registration']
df.drop("First Registration", axis=1, inplace=True)
# ## Body Color
df['Body_Color'] = df['Body Color'].str[1]
df.drop("Body Color", axis=1, inplace=True)
# ## Paint Type
df['Paint_Type'] = df['Paint Type'].str[0].str.strip('\n')
df.drop("Paint Type", axis=1, inplace=True)
# ## Upholstery
df["Upholstery"] = [item[0] if type(item) == list else item for item in df.Upholstery]
df["Upholstery"] = df.Upholstery.str.strip("\n").str.split(", ")
df["Upholstery"].value_counts(dropna=False)
u_type = ["Cloth",'Part leather', 'Full leather','Velour', 'alcantara']
df["Upholstery_type"] = df["Upholstery"].apply(lambda x : x[0] if type(x) == list and x[0] in u_type else np.nan)
df["Upholstery_type"].value_counts(dropna=False)
df["Upholstery_type"] = df["Upholstery_type"].fillna("Other")
# +
# Recognised upholstery colour names; anything else falls through to NaN.
color = ['Black', 'Grey', 'Brown', 'Beige', 'White', 'Blue', 'Red', 'Yellow', 'Orange']

def finder(x):
    """Pull the upholstery colour out of a parsed cell.

    A two-element list stores the colour second; a one-element list is taken
    as the colour only if it is a known colour name. Non-lists yield NaN.
    """
    if type(x) != list:
        return np.nan
    if len(x) == 2:
        return x[1]
    return x[0] if x[0] in color else np.nan
df['Upholstery_color'] = df.Upholstery.apply(finder)
df['Upholstery_color'] = df['Upholstery_color'].fillna('Other')
# -
df["Upholstery_color"].value_counts(dropna=False)
df.drop("Upholstery", axis=1, inplace=True)
# ## Nr. of Doors
df['Nr_of_Doors'] = df['Nr. of Doors'].str[0].str.strip('\n').astype('float')
df.drop("Nr. of Doors", axis=1, inplace=True)
# ## Nr. of Seats
df['Nr_of_Seats'] = df['Nr. of Seats'].str[0].str.strip('\n').astype('float')
df.drop("Nr. of Seats", axis=1, inplace=True)
# ## Gearing Type
df['Gearing_Type'] = df['Gearing Type'].str[1]
df.drop("Gearing Type", axis=1, inplace=True)
# ## Displacement
df["Displacement"] = df.Displacement.str[0].str.strip('\n').str.replace(',','').str.extract('(\d{1,5})')[0].astype("float")
# +
# Alternatif yöntem (\n) ları sildikten sonrası için
# df.replace({"Displacement_cc" : {"," : "", " " : "", "cc" : ""}}, regex = True, inplace = True)
# -
df["Displacement_cc"] = df["Displacement"]
df.drop("Displacement", axis=1, inplace=True)
# ## Cylinders
df["Cylinders"] = df.Cylinders.str[0].str.strip('\n').astype("float")
# ## Weight
df["Weight_kg"] = df.Weight.str[0].str.strip('\n').str.replace(',','').str.extract('(\d{1,6})')[0].astype('float')
df.drop("Weight", axis=1, inplace=True)
# ## Drive chain
df['Drive_chain'] = df['Drive chain'].str[0].str.strip('\n')
df.drop("Drive chain", axis=1, inplace=True)
# ## Fuel
df["Fuel"] = df.Fuel.str[1].str.split("/").str[0].str.strip()
df["Fuel"] = df.Fuel.str.split("(")
df["Fuel"] = df.Fuel.apply(lambda x : x[0] if "Particulate Filter)" in x else x[0]).str.strip()
# Raw fuel labels collapsed into two umbrella buckets; anything else (e.g.
# "Diesel", "Electric") passes through unchanged.
benzine = ["Gasoline", "Super 95","Regular","Super E10 95","Super Plus 98","Super Plus E10 98", "Others"]
lpg = ["LPG","Liquid petroleum gas", "CNG", "Biogas", "Domestic gas H"]

def fueltype(x):
    """Map a raw fuel label onto 'Benzine', 'LPG/CNG', or itself."""
    if x in benzine:
        return "Benzine"
    if x in lpg:
        return "LPG/CNG"
    return x
df["Fuel"] = df.Fuel.apply(fueltype)
df.Fuel.value_counts(dropna=False)
# +
#Farklı bir yöntem
#oto["fuel_new"] = oto.Fuel.str[1]
#diesel_bool = oto["fuel_new"].str.contains("diesel", case = False, regex = True)
#lpg_bool = oto["fuel_new"].str.contains("lpg|cng|bio|domestic|electric", case = False, regex = True)
#oto.loc[diesel_bool, "fuel_new"] = "Diesel"
#oto.loc[lpg_bool, "fuel_new"] = "LPG/CNG"
#benz = list(oto.fuel_new.loc[lambda x : x != "Diesel"][lambda x : x != "LPG/CNG"].index)
#oto.fuel_new.iloc[benz] = "Benzine"
# -
# ## Consumption
df["Consumption"].value_counts(dropna=False).head
# +
def parser1(x):
    """Extract the combined-consumption entry from a parsed Consumption cell.

    NaN (float) cells stay NaN; list-of-lists cells yield the first sublist's
    first element (or NaN if empty); flat lists yield the second element.
    """
    if type(x) == float:
        return np.nan
    if type(x[0]) == list:
        return x[0][0] if x[0] != [] else np.nan
    return x[1]
def parser2(x):
    """Extract the city-consumption entry from a parsed Consumption cell.

    NaN cells stay NaN; list-of-lists cells yield the second sublist's first
    element (or NaN if empty); flat lists yield the fourth element only when
    it ends with ')' (i.e. carries a '(city)' style suffix), else NaN.
    """
    if type(x) == float:
        return np.nan
    if type(x[0]) == list:
        return x[1][0] if x[1] != [] else np.nan
    if x[3].endswith(')'):
        return x[3]
    return np.nan
def parser3(x):
    """Extract the country/highway-consumption entry from a parsed Consumption cell.

    NaN cells stay NaN; list-of-lists cells yield the third sublist's first
    element (or NaN if empty); flat lists yield the sixth element when the
    fourth ends with ')' (same layout probe as parser2), else NaN.
    """
    if type(x) == float:
        return np.nan
    if type(x[0]) == list:
        return x[2][0] if x[2] != [] else np.nan
    # x[0] is guaranteed non-list here, so the original's extra check was redundant.
    if x[3].endswith(')'):
        return x[5]
    return np.nan
# -
df['cons_comb'] = df.Consumption.apply(parser1).str.extract('(\d{1,2}.\d|\d{1,3})')[0].astype("float")
df['cons_city'] = df.Consumption.apply(parser2).str.extract('(\d{1,2}.\d|\d{1,3})')[0].astype("float")
df['cons_country'] = df.Consumption.apply(parser3).str.extract('(\d{1,2}.\d|\d{1,3})')[0].astype("float")
df.drop('Consumption', axis=1,inplace=True)
# ## CO2 Emission
df["CO2_Emission"] = [item[0] if type(item) == list else item for item in df["CO2 Emission"]]
df["CO2_Emission"] = df["CO2_Emission"].str.strip("\n").str.rstrip(" g CO2/km (comb)").str.replace(",", ".").astype("float")
df.drop("CO2 Emission", axis=1, inplace=True)
# ## Emission Class
df["Emission_Class"] = [item[0] if type(item) == list else item for item in df["Emission Class"]]
df["Emission_Class"] = df["Emission_Class"].str.strip("\n")
df["Emission_Class"].value_counts(dropna=False)
df.replace({"Emission_Class" : {"Euro 6d-TEMP":"Euro 6", "Euro 6c":"Euro 6", "Euro 6d":"Euro 6"}}, regex = True, inplace = True)
df["Emission_Class"].value_counts(dropna=False)
df.drop("Emission Class", axis=1, inplace=True)
# ## Gears
df["Gears"] = df.Gears.str[0].str.strip('\n')
# ## Comfort & Convenience
df["Comfort&Convenience"] = [",".join(item) if type(item) == list else item for item in df["Comfort&Convenience"]]
# ## Entertainment & Media
df["Entertainment&Media"] = [",".join(item) if type(item) == list else item for item in df["Entertainment&Media"]]
# ## Extras
df["Extras"] = [",".join(item) if type(item) == list else item for item in df["Extras"]]
# ## Safety & Security
df["Safety&Security"] = [",".join(item) if type(item) == list else item for item in df["Safety&Security"]]
df.shape
df.info()
df.to_csv("clean_scout_20200923.csv", index=False)
| 03-DAwithPython_CarPricePrediction_EDA_project/EDA_scout_car_phase_1.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# language: python
# name: python3
# ---
# Author:<NAME><br>
# Machine Learnig
import numpy as np
from io import StringIO
import pandas as pd
data = '''\
Sunny,Warm,Normal,Strong,Warm,Same,1
Sunny,Warm,High,Strong,Warm,Same,1
Rainy,Cold,High,Strong,Warm,Change,0
Sunny,Warm,High,Strong,Cool,Change,1
'''
df = pd.read_csv(StringIO(data) , header=None)
print(df)
df.columns = ['Sky','AirTemp','Humidity','Wind','Water','Forecast','EnjoySport']
df
df['EnjoySport'].dtypes
def find_s(X, target):
    """FIND-S: return the most specific hypothesis consistent with the
    positive (label == 1) training examples.

    X is a sequence of attribute rows, target the matching 0/1 labels.
    Attributes that disagree across positive examples become '?'.
    """
    my_hypothesis = None
    # Seed the hypothesis from the first positive example.
    for example, label in zip(X, target):
        if label == 1:
            my_hypothesis = example.copy()
            break
    print('Selected hypothesis from training example:\t', my_hypothesis)
    # we got the hypothesis; generalize it using the remaining positives
    print('Generalizing hypothesis ...')
    for example, label in zip(X, target):
        if label == 1:
            for i, attr in enumerate(example):
                if my_hypothesis[i] != attr:
                    my_hypothesis[i] = '?'
    return my_hypothesis
target = df['EnjoySport']
X = df.drop(['EnjoySport'] , axis=1)
print(target,'\n\n\n',X)
Xarr = np.array(X)
Yarr = np.array(target)
print(Xarr,Yarr)
findS_hypothesis = find_s(X.values,target.values)
findS_hypothesis
test_data = '''\
'Sky','AirTemp','Humidity','Wind','Water','Forecast','EnjoySport'
Sunny,Warm,Normal,Strong,Cool,Change,1
'''
test_df = (pd.read_csv(StringIO(test_data)))
print(test_df)
x_test = np.array(['Sunny','Warm','Normal','Strong','Cool','Change'])
def predict(x, hypothesis):
    """Classify x as 1 when at least half its attributes literally equal the
    hypothesis entries, else 0.

    NOTE(review): '?' in the hypothesis is compared literally here — it does
    NOT act as a wildcard; confirm that is the intended semantics.
    """
    hits = sum(1 for i in range(len(hypothesis)) if x[i] == hypothesis[i])
    return 1 if hits / len(hypothesis) >= 0.5 else 0
predict(x_test,findS_hypothesis)
| 0. Tom Mitchell Exercise Solutions/find-Sv2.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# language: python
# name: python3
# ---
# A *Support Vector Machine* (SVM) is a powerful and versatile Machine Learning model, capable of performing linear or nonlinear classification, regression, and even outlier detection.
# You can think of an SVM classifier as fitting the widest possible street (represented by the parallel dashed lines) between the classes. This is called *large margin classification*.
# Instances on the margin lines are called support vectors.
# If we strictly impose that all instances must be off the street and on the right side, this is called *hard margin classification*.
# Hard margin classification is susceptible to outliers. To avoid these issues, use a more flexible model. The objective is to find a good balance between keeping the street as large as possible and limiting the margin violations (i.e., instances that end up in the middle of the street or even on the wrong side). This is called *soft margin classification*.
# For SVM, mathematical technique called the *kernel trick* can be applied. The kernel trick makes it possible to get the same result as if you had added many polynomial features, even with very high-degree polynomials, without actually having to add them.
# Another technique to tackle nonlinear problems is to add features computed using a *similarity function*, which measures how much each instance resembles a particular *landmark*.
# A *radial basis function* (RBF) is a real-valued function whose value depends only on the distance between the input and some fixed point, either the origin, or some other fixed point called center.
# Gaussian RBF
#
# $\phi_{\gamma}(\textbf{x}, l) = \text{exp}(-\gamma \parallel \textbf{x} - l \parallel^{2})$
# Other kernels exist but are used much more rarely. Some kernels are specialized for specific data structures. *String kernels* are sometimes used when classifying text documents or DNA sequences (e.g., using the *string subsequence kernel* or kernels based on the *Levenshtein distance*).
# An instance has *sparse features* if it has few nonzero features.
# Adding more training instances within the margin does not affect SVM regression predictions; thus, the model is said to be $\epsilon$-*insensitive*.
# Linear SVM classifier prediction
#
# $\hat y = 0 \quad \text{if} \quad \textbf{w}^{T}\textbf{x} + b < 0$
#
# Otherwise $\hat y$ is 1.
# To get a large margin, we need to minimize $\parallel \textbf{w} \parallel$
# $t^{(i)} = -1$ if $y^{(i)} = 0$. Otherwise it is 1.
# Hard margin linear SVM classifier objective
#
# minimize $\frac{1}{2}\textbf{w}^{T}\textbf{w}$
#
# subject to $t^{(i)}(\textbf{w}^{T}\textbf{x}^{(i)} + b ) \geq 1$ for $i = 1, 2, \cdots, m$
# To get the soft margin objective, we need to introduce a *slack variable* $\zeta^{(i)} \geq 0$ for each instance: $\zeta^{(i)} \geq 0$ measures how much the instance is allowed to violate the margin.
# Soft margin linear SVM classifier objective
#
# minimize $\frac{1}{2}\textbf{w}^{T}\textbf{w} + C \sum\limits_{i = 1}^{m} \zeta^{(i)}$
#
# subject to $t^{(i)}(\textbf{w}^{T}\textbf{x}^{(i)} + b ) \geq 1 - \zeta^{(i)}$ and $\zeta^{(i)} \geq 0 $ for $i = 1, 2, \cdots, m$
# The hard margin and soft margin problems are both convex quadratic optimization problems with linear constraints. Such problems are known as *Quadratic Programming* (QP) problems (Look at pg 168 for further details).
# Given a constrained optimization problem, known as the *primal problem*, it is possible to express a different but closely related problem, called its *dual problem* (Look at pg 169 for further details).
# In Machine Learning, a *kernel* is a function capable of computing the dot product of some transformation, which is a function of two different vectors, based only on the original vectors, without having to compute (or even to know about) the transformation (Look at pg 169 through 172 for further details).
# Linear SVM classifier cost function
#
# $J(\textbf{w}, b) = \frac{1}{2}\textbf{w}^{T}\textbf{w} + C \sum\limits_{i = 1}^{m} max(0, 1 - t^{(i)}(\textbf{w}^{T}\textbf{x}^{(i)} + b ))$
# The function $max(0, 1 – t)$ is called the *hinge loss* function.
| ch05.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Sparkmagic (PySpark)
# language: ''
# name: pysparkkernel
# ---
# # AWS Hail on EMR Bokeh Plotting Example
#
# This is taken from the [Hail Plotting Tutorial](https://hail.is/docs/0.2/tutorials/08-plotting.html) with adjustments for use with SageMaker Notebook instances and EMR.
# ### List EMR Master Nodes
#
# `~/SageMaker/bin/list-clusters` will output the IP of each master node in your account and check Livy connectivity.
# + language="bash"
# ~/SageMaker/bin/list-clusters
# -
# Use the Livy Endpoint above and start your session name `-s`, language `-l python`, the livy endpoint `-u`, and authentication type `-t`.
# %load_ext sparkmagic.magics
# %spark add -s aperry -l python -u http://172.21.20.228:8998 -t None
# ### Plot Retrieval from Master
#
# Set a local variable for the EMR Master IP. http requests will be sent to the Master to pull the remote Hail plots back to this notebook server. Notebook plots will be stored in a `plots` directory along side to your notebook. If the plots directory does not exist, it will be created.
#
# Once the plots have been pulled locally they will be shown inline.
# %%local
emrMasterIp = '172.21.20.228'
# +
# %%local
from IPython.display import display, HTML
import requests, os
def displayRemoteHtml(remoteFile):
    """Fetch an HTML plot from the EMR master over HTTP, mirror it under a
    matching local path, and render it inline in the notebook.

    remoteFile is an absolute path on the master (e.g. '/plots/x.html');
    relies on the module-level `emrMasterIp`.
    """
    response = requests.get('http://' + emrMasterIp + remoteFile)
    local_path = remoteFile[1:]  # drop the leading '/'
    # Recreate the remote directory structure next to the notebook.
    target_dir = "/".join(local_path.split('/')[:-1])
    if not os.path.exists(target_dir):
        os.makedirs(target_dir, exist_ok=True)
    with open(local_path, 'wb') as fh:
        fh.write(response.content)
    display(HTML(filename=local_path))
# -
# ## Plotting Tutorial
#
# The Hail plot module allows for easy plotting of data. This notebook contains examples of how to use the plotting functions in this module, many of which can also be found in the first tutorial.
# +
# If using Hail version < 0.2.24, uncomment the following
# sc.addPyFile('/opt/hail/hail-python.zip')
import hail as hl
hl.init(sc)
from bokeh.io import show, output_notebook
from bokeh.layouts import gridplot
output_notebook()
# +
hl.utils.get_1kg('data/')
mt = hl.read_matrix_table('data/1kg.mt')
table = (hl.import_table('data/1kg_annotations.txt', impute=True)
.key_by('Sample'))
mt = mt.annotate_cols(**table[mt.s])
mt = hl.sample_qc(mt)
mt.describe()
# -
# ### Histogram
#
# The `histogram()` method takes as an argument an aggregated hist expression, as well as optional arguments for the legend and title of the plot.
# +
from bokeh.plotting import figure, output_file, save
dp_hist = mt.aggregate_entries(hl.expr.aggregators.hist(mt.DP, 0, 30, 30))
p = hl.plot.histogram(dp_hist, legend='DP', title='DP Histogram')
output_file("/plots/histogram3.html")
save(p)
# -
# %%local
displayRemoteHtml('/plots/histogram3.html')
# This method, like all Hail plotting methods, also allows us to pass in fields of our data set directly. Choosing not to specify the `range` and `bins` arguments would result in a range being computed based on the largest and smallest values in the dataset and a default bins value of 50.
# ### Cumulative Histogram
#
# The `cumulative_histogram()` method works in a similar way to `histogram()`.
p = hl.plot.cumulative_histogram(mt.DP, range=(0,30), bins=30)
output_file("/plots/cumulative_histogram.html")
save(p)
# %%local
displayRemoteHtml('/plots/cumulative_histogram.html')
# ### Scatter
#
# The `scatter()` method can also take in either Python types or Hail fields as arguments for x and y.
p = hl.plot.scatter(mt.sample_qc.dp_stats.mean, mt.sample_qc.call_rate, xlabel='Mean DP', ylabel='Call Rate')
output_file("/plots/scatter.html")
save(p)
# %%local
displayRemoteHtml('/plots/scatter.html')
# We can also pass in a Hail field as a `label` argument, which determines how to color the data points.
mt = mt.filter_cols((mt.sample_qc.dp_stats.mean >= 4) & (mt.sample_qc.call_rate >= 0.97))
ab = mt.AD[1] / hl.sum(mt.AD)
filter_condition_ab = ((mt.GT.is_hom_ref() & (ab <= 0.1)) |
(mt.GT.is_het() & (ab >= 0.25) & (ab <= 0.75)) |
(mt.GT.is_hom_var() & (ab >= 0.9)))
mt = mt.filter_entries(filter_condition_ab)
mt = hl.variant_qc(mt).cache()
common_mt = mt.filter_rows(mt.variant_qc.AF[1] > 0.01)
gwas = hl.linear_regression_rows(y=common_mt.CaffeineConsumption, x=common_mt.GT.n_alt_alleles(), covariates=[1.0])
pca_eigenvalues, pca_scores, _ = hl.hwe_normalized_pca(common_mt.GT)
p = hl.plot.scatter(pca_scores.scores[0], pca_scores.scores[1],
label=common_mt.cols()[pca_scores.s].SuperPopulation,
title='PCA', xlabel='PC1', ylabel='PC2', collect_all=True)
output_file("/plots/scatter2.html")
save(p)
# %%local
displayRemoteHtml('/plots/scatter2.html')
# Hail's downsample aggregator is incorporated into the `scatter()`, `qq()`, and `manhattan()` functions. The `collect_all` parameter tells the plot function whether to collect all values or downsample. Choosing not to set this parameter results in downsampling.
# +
p = hl.plot.scatter(pca_scores.scores[0], pca_scores.scores[1],
label=common_mt.cols()[pca_scores.s].SuperPopulation,
title='PCA', xlabel='PC1', ylabel='PC2', collect_all=True)
p2 = hl.plot.scatter(pca_scores.scores[0], pca_scores.scores[1],
label=common_mt.cols()[pca_scores.s].SuperPopulation,
title='PCA (downsampled)', xlabel='PC1', ylabel='PC2', collect_all=False, n_divisions=50)
# show(gridplot([p, p2], ncols=2, plot_width=400, plot_height=400))
grid = gridplot([p, p2], ncols=2, plot_width=400, plot_height=400)
output_file("/plots/gridplot.html")
save(grid)
# -
# %%local
displayRemoteHtml('/plots/gridplot.html')
# ### 2-D histogram
#
# For visualizing relationships between variables in large datasets (where scatter plots may be less informative since they highlight outliers), the `histogram_2d()` function will create a heatmap with the number of observations in each section of a 2-d grid based on two variables.
p = hl.plot.histogram2d(pca_scores.scores[0], pca_scores.scores[1])
output_file("/plots/2d-histogram.html")
save(p)
# %%local
displayRemoteHtml('/plots/2d-histogram.html')
# ### Q-Q (Quantile-Quantile)
#
# The `qq()` function requires either a Python type or a Hail field containing p-values to be plotted. This function also allows for downsampling.
# +
p = hl.plot.qq(gwas.p_value, collect_all=True)
p2 = hl.plot.qq(gwas.p_value, n_divisions=75)
# show(gridplot([p, p2], ncols=2, plot_width=400, plot_height=400))
qq = gridplot([p, p2], ncols=2, plot_width=400, plot_height=400)
output_file("/plots/qq.html")
save(qq)
# -
# %%local
displayRemoteHtml('/plots/qq.html')
# ### Manhattan
#
# The `manhattan()` function requires a Hail field containing p-values.
p = hl.plot.manhattan(gwas.p_value)
output_file("/plots/manhattan.html")
save(p)
# %%local
displayRemoteHtml('/plots/manhattan.html')
# We can also pass in a dictionary of fields that we would like to show up as we hover over a data point, and choose not to downsample if the dataset is relatively small.
hover_fields = dict([('alleles', gwas.alleles)])
p = hl.plot.manhattan(gwas.p_value, hover_fields=hover_fields, collect_all=True)
output_file("/plots/manhattan_hover.html")
save(p)
# %%local
displayRemoteHtml('/plots/manhattan_hover.html')
# Remove the Livy notebook session
# %spark cleanup
| jupyter/common-notebooks/hail-plotting-example.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# language: python
# name: python3
# ---
# +
# From http://blog.otoro.net/2015/11/24/mixture-density-networks-with-tensorflow/
import matplotlib.pyplot as plt
import numpy as np
import tensorflow as tf
import math
from tqdm import tnrange, tqdm_notebook
# +
NHIDDEN = 24
STDEV = 0.5
KMIX = 24 # number of mixtures
NOUT = KMIX * 3 # pi, mu, stdev
NSAMPLE = 2400
# -
y_data = np.float32(np.random.uniform(-10.5, 10.5, (1, NSAMPLE))).T
r_data = np.float32(np.random.normal(size=(NSAMPLE,1))) # random noise
x_data = np.float32(np.sin(0.75*y_data)*7.0+y_data*0.5+r_data*1.0)
plt.figure(figsize=(10,10))
plt.scatter(x_data, y_data)
plt.show()
# +
x = tf.placeholder(dtype=tf.float32, shape=[None,1], name="x")
y = tf.placeholder(dtype=tf.float32, shape=[None,1], name="y")
Wh = tf.Variable(tf.random_normal([1,NHIDDEN], stddev=STDEV, dtype=tf.float32))
bh = tf.Variable(tf.random_normal([1,NHIDDEN], stddev=STDEV, dtype=tf.float32))
Wo = tf.Variable(tf.random_normal([NHIDDEN,NOUT], stddev=STDEV, dtype=tf.float32))
bo = tf.Variable(tf.random_normal([1,NOUT], stddev=STDEV, dtype=tf.float32))
# -
hidden_layer = tf.nn.tanh(tf.matmul(x, Wh) + bh)
output = tf.matmul(hidden_layer,Wo) + bo
def get_mixture_coef(output):
    """Split the network output into mixture weights, stddevs and means.

    Parameters
    ----------
    output : tf.Tensor, shape [None, NOUT]
        Raw network output; the last axis is split evenly into
        (pi, sigma, mu), each of shape [None, KMIX].

    Returns
    -------
    (out_pi, out_sigma, out_mu) : tuple of tf.Tensor
        out_pi is softmax-normalized (rows sum to 1), out_sigma is
        exponentiated so it is strictly positive, out_mu is unchanged.
    """
    # NOTE: the original code created three tf.placeholder nodes and
    # immediately overwrote them with the split below; those placeholders
    # were dead graph nodes and have been removed.
    out_pi, out_sigma, out_mu = tf.split(output, num_or_size_splits=3, axis=1)
    # Numerically stable softmax over the mixture weights:
    # subtract the row max before exponentiating.
    max_pi = tf.reduce_max(out_pi, 1, keep_dims=True)
    out_pi = tf.subtract(out_pi, max_pi)
    out_pi = tf.exp(out_pi)
    normalize_pi = tf.reciprocal(tf.reduce_sum(out_pi, 1, keep_dims=True))
    out_pi = tf.multiply(normalize_pi, out_pi)
    # Exponentiate so the standard deviations are strictly positive.
    out_sigma = tf.exp(out_sigma)
    return out_pi, out_sigma, out_mu
out_pi, out_sigma, out_mu = get_mixture_coef(output)
# +
oneDivSqrtTwoPI = 1 / math.sqrt(2*math.pi)
def tf_normal(y, mu, sigma):
    """Elementwise Gaussian density of `y` under N(mu, sigma)."""
    # Standardized residual z = (y - mu) / sigma.
    z = tf.multiply(tf.subtract(y, mu), tf.reciprocal(sigma))
    # exp(-z^2 / 2) / (sigma * sqrt(2*pi))
    kernel = tf.exp(-tf.square(z) / 2)
    return tf.multiply(kernel, tf.reciprocal(sigma)) * oneDivSqrtTwoPI
def get_lossfunc(out_pi, out_sigma, out_mu, y):
    """Mean negative log-likelihood of `y` under the mixture of Gaussians."""
    # Per-component densities weighted by the mixture coefficients.
    weighted = tf.multiply(tf_normal(y, out_mu, out_sigma), out_pi)
    # Mixture likelihood per sample, then average the negative log over the batch.
    mixture = tf.reduce_sum(weighted, 1, keep_dims=True)
    return tf.reduce_mean(-tf.log(mixture))
# -
lossfunc = get_lossfunc(out_pi, out_sigma, out_mu, y)
train_op = tf.train.AdamOptimizer().minimize(lossfunc)
# +
sess = tf.InteractiveSession()
sess.run(tf.global_variables_initializer())
NEPOCH = 1000
loss = np.zeros(NEPOCH)
for i in tnrange(NEPOCH, desc='epoch #'):
if i == 0:
one_out_pi = tf.Tensor.eval(out_pi, feed_dict={x: x_data, y: y_data})
one_out_mu = tf.Tensor.eval(out_mu, feed_dict={x: x_data, y: y_data})
one_out_sg = tf.Tensor.eval(out_sigma, feed_dict={x: x_data, y: y_data})
sess.run(train_op,feed_dict={x: x_data, y: y_data})
loss[i] = sess.run(lossfunc, feed_dict={x: x_data, y: y_data})
# +
plt.figure(figsize=(10,10))
plt.plot(one_out_pi[0], label='pi')
plt.plot(one_out_mu[0], label='mu')
plt.plot(one_out_sg[0], label='sigma')
plt.legend()
plt.show()
# -
plt.figure(figsize=(15, 5))
plt.plot(np.arange(100, NEPOCH,1), loss[100:], 'r-')
plt.show()
x_test = np.float32(np.arange(-15,15,0.1))
NTEST = x_test.size
x_test = x_test.reshape(NTEST,1) # needs to be a matrix, not a vector
x_test.shape
plt.plot(x_test[:,0])
plt.show()
# +
def get_pi_idx(x, pdf):
    """Return the index of the mixture component selected by cumulative sampling.

    Walks `pdf` and returns the first index at which the running sum reaches
    `x`; if `x` exceeds the total mass, prints a warning and returns -1.
    """
    accumulate = 0
    for idx, mass in enumerate(pdf):
        accumulate += mass
        if accumulate >= x:
            return idx
    print('error with sampling ensemble')
    return -1
def generate_ensemble(out_pi, out_mu, out_sigma, M = 10):
    """Draw M samples per test point from the predicted Gaussian mixture.

    Parameters
    ----------
    out_pi : ndarray, shape (NTEST, KMIX)
        Mixture weights per test point (rows assumed to sum to ~1).
    out_mu, out_sigma : ndarray, shape (NTEST, KMIX)
        Component means and standard deviations.
    M : int, optional
        Number of samples to draw per test point.

    Returns
    -------
    ndarray, shape (NTEST, M)
        Sampled y values.
    """
    # FIX: derive the number of test points from the prediction arrays
    # themselves; the original read the module-level global `x_test`, which
    # silently broke when predictions for a different array were passed in.
    NTEST = out_pi.shape[0]
    result = np.random.rand(NTEST, M)  # uniform draws used to pick a component
    rn = np.random.randn(NTEST, M)     # standard-normal draws for the component sample
    # Transform each uniform draw into a sample from its selected component.
    for j in range(0, M):
        for i in range(0, NTEST):
            idx = get_pi_idx(result[i, j], out_pi[i])
            mu = out_mu[i, idx]
            std = out_sigma[i, idx]
            result[i, j] = mu + rn[i, j] * std
    return result
# +
out_pi_test, out_sigma_test, out_mu_test = sess.run(get_mixture_coef(output), feed_dict={x: x_test})
y_test = generate_ensemble(out_pi_test, out_mu_test, out_sigma_test)
# -
plt.figure(figsize=(10,10))
plt.plot(x_data,y_data,'ro', x_test,y_test,'bo',alpha=0.3)
plt.show()
| nb/otoro_blog.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# language: python
# name: python3
# ---
# # Publish a public visualization
#
# This example illustrate how to publish a public visualization
#
# Read more about Maps API Keys: https://carto.com/developers/auth-api/guides/types-of-API-Keys/
#
# > Note: CARTO Account credentials are needed to reproduce this example. https://carto.com/signup
# +
from cartoframes.auth import set_default_credentials
set_default_credentials('johnsmith', '<PASSWORD>')
# +
import geopandas
df = geopandas.read_file('../files/sustainable_palm_oil_production_mills.geojson')
df.head()
# +
from cartoframes.data import Dataset
ds = Dataset(df)
ds.upload(table_name='sustainable_palm_oil_production_mills', if_exists=Dataset.IF_EXISTS_REPLACE)
ds.update_dataset_info(privacy=Dataset.PRIVACY_PUBLIC)
# +
from cartoframes.viz import Map, Layer
map_viz = Map(Layer(ds))
map_viz.publish(name='sustainable_palm_oil_production_mills_map')
| examples/publish_and_share/publish_a_public_visualization.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# language: python
# name: python3
# ---
import numpy as np
LARGE_VALUE = 100000000
def get_gini_score(branches, labels):
    """Weighted Gini impurity of a candidate split.

    Each branch is a 2-D array of samples whose last column holds the class
    label; the impurity of every non-empty branch is weighted by that
    branch's share of the total sample count.
    """
    total = sum(len(b) for b in branches)
    gini = 0.0
    for branch in branches:
        size = len(branch)
        if size == 0:
            continue  # an empty branch contributes nothing
        # Sum of squared class proportions within this branch.
        purity = 0.0
        for label in labels:
            proportion = np.count_nonzero(branch[:, -1] == label) / size
            purity += proportion ** 2
        gini += (1.0 - purity) * (size / total)
    return gini
# Returns two lists (left_set, right_set) of samples
# split the `samples` based on the feature at `feature_id` (col)
# the split_value is the splitting point
# We'll check later if this split is a good split
def try_potential_split(samples, split_value, feature_id):
    """Partition `samples` on the feature at column `feature_id`.

    Rows whose feature value is strictly below `split_value` go into the
    left branch; rows at or above it go into the right branch.
    """
    feature_column = samples[:, feature_id]
    mask_left = feature_column < split_value
    mask_right = feature_column >= split_value
    return samples[mask_left], samples[mask_right]
def select_best_split(samples):
    """Exhaustively search for the (feature, value) split minimizing Gini.

    Every observed feature value is tried as a threshold. Returns a dict
    with keys 'feature_id', 'split_value' and 'two_branches' (the left and
    right sample arrays produced by the best split). The class labels are
    read from the last column of `samples`.
    """
    class_values = np.unique(samples[:, -1])
    num_features = samples.shape[1] - 1
    best = {
        'feature_id': None,
        'split_value': None,
        'two_branches': None,
    }
    lowest_score = LARGE_VALUE
    for fid in range(num_features):
        for candidate in samples[:, fid]:
            branches = try_potential_split(samples, candidate, fid)
            score = get_gini_score(branches, class_values)
            if score < lowest_score:
                lowest_score = score
                best['feature_id'] = fid
                best['split_value'] = candidate
                best['two_branches'] = branches
    return best
# select the label given the branch of samples
# returns the most frequent label
def leaf_node_label(branch):
    """Return the most frequent class label in `branch` (ties -> smallest label)."""
    print("leafnode size:", len(branch[:, 0]))
    labels = [int(row[-1]) for row in branch]
    return np.argmax(np.bincount(labels))
def recursive_build_tree(node, min_samples_leaf, min_samples_split, max_depth, depth):
    """Recursively grow the decision tree below `node` in place.

    Parameters
    ----------
    node : dict
        Output of select_best_split(); must still contain 'two_branches'.
    min_samples_leaf : int
        A branch with at most this many samples becomes a leaf.
    min_samples_split : int
        A branch must have more than this many samples to be split further.
    max_depth : int
        Maximum tree depth; branches at this depth become leaves.
    depth : int
        Depth of `node` (the root is depth 1).
    """
    left_branch, right_branch = node['two_branches']
    del(node['two_branches'])
    # If the split produced one empty side, both children take the label
    # of the non-empty side.
    if len(left_branch) == 0 and len(right_branch) != 0:
        label = leaf_node_label(right_branch)
        node['left_label'] = label
        node['right_label'] = label
        return
    if len(left_branch) != 0 and len(right_branch) == 0:
        label = leaf_node_label(left_branch)
        node['left_label'] = label
        node['right_label'] = label
        return
    # Neither branch can be empty at this point.
    if depth >= max_depth:
        node['left_label'] = leaf_node_label(left_branch)
        node['right_label'] = leaf_node_label(right_branch)
        return
    if len(left_branch) <= min_samples_leaf:
        node['left_label'] = leaf_node_label(left_branch)
    else:
        if len(left_branch) > min_samples_split:
            node['left_label'] = select_best_split(left_branch)
            recursive_build_tree(node['left_label'], min_samples_leaf,
                                 min_samples_split, max_depth, depth + 1)
    if len(right_branch) <= min_samples_leaf:
        node['right_label'] = leaf_node_label(right_branch)
    else:
        # BUG FIX: the original tested len(left_branch) here, so whether the
        # RIGHT side was split depended on the size of the LEFT branch.
        if len(right_branch) > min_samples_split:
            node['right_label'] = select_best_split(right_branch)
            recursive_build_tree(node['right_label'], min_samples_leaf,
                                 min_samples_split, max_depth, depth + 1)
def decisionTree(data, max_depth, min_samples_leaf=1, min_samples_split=1):
    """Build a CART-style decision tree and return its root node (a dict).

    data : 2-D numpy array whose last column holds the class labels.
    max_depth : maximum depth of the tree (the root counts as depth 1).
    min_samples_leaf : branches at or below this size become leaves.
    min_samples_split : branches must exceed this size to be split further.
    """
    root = select_best_split(data)
    recursive_build_tree(root, min_samples_leaf, min_samples_split, max_depth, 1)
    return root
# +
def print_tree_helper(depth):
for _ in range(depth + 1):
print("-", end="")
def print_tree(node, depth=0):
if isinstance(node, dict):
print_tree_helper(depth)
print(('> f{:d} < {:2.3f}'.format(node['feature_id'], node['split_value'])))
if 'left_label' in node.keys():
print_tree(node['left_label'], depth + 1)
print_tree_helper(depth)
print(('> f{:d} > {:2.3f}'.format(node['feature_id'], node['split_value'])))
if 'right_label' in node.keys():
print_tree(node['right_label'], depth + 1)
else:
print_tree_helper(depth)
print('-> [', node, ']')
# +
# example data set taken from
#https://machinelearningmastery.com/implement-decision-tree-algorithm-scratch-python/
dataset = [[2.771244718,1.784783929,0],
[1.728571309,1.169761413,0],
[3.678319846,2.81281357,0],
[3.961043357,2.61995032,0],
[2.999208922,2.209014212,0],
[7.497545867,3.162953546,1],
[9.00220326,3.339047188,1],
[7.444542326,0.476683375,1],
[10.12493903,3.234550982,1],
[6.642287351,3.319983761,1]]
tree = decisionTree(np.array(dataset), 3, 1, 1)
print_tree(tree)
# +
# Generate Synthetic Data Set
dataset = 10 * np.random.rand(200, 20)
labels = []
for i in range(200):
labels.append(np.random.randint(0, 10))
dataset[:, -1] = np.array(labels).T
#Test and print decision tree
tree = decisionTree(dataset, 12, 6, 8)
print_tree(tree)
| old-notes/old-ai/exam/decision-tree/decision-tree-v2.ipynb |
# -*- coding: utf-8 -*-
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .jl
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Julia 1.6.1
# language: julia
# name: julia-1.6
# ---
using Revise
using OpticalBlochEquations
using Plots
using BenchmarkTools
using DifferentialEquations
const λ = @with_unit 626 "nm"
const Γ = @with_unit 2π * 8.3 "MHz"
const M = @with_unit 50 "u"
const E₀ = c / λ
;
# +
# Generate states
m1 = manifold(F=0, ω=0, μ=0)
m2 = manifold(F=0, ω=E₀, μ=0, Γ=Γ)
states = [m1.states..., m2.states...]
δ = -2Γ #-4Γ
# Generate lasers
x̂ = [1, 0, 0]
k = (2π / λ) * x̂
l1 = Laser(x̂, [0, 1, 0], E₀ + δ, 1.5)
l2 = Laser(-x̂, [0, 1, 0], E₀ + δ, 1.5)
lasers = [l1, l2]
d = zeros(2, 2, 3)
d[2,1,2] = d[1,2,2] = 1.0
;
# -
freq_res = 1e-4
# +
using StaticArrays
ρ0 = zeros(ComplexF64, length(states), length(states))
ρ0[1,1] = 1
(dρ, ρ, p) = obe(states, lasers, d, ρ0, freq_res=freq_res)
p.particle.v = SVector(0.0, 0, 0)
@btime ρ!($dρ, $ρ, $p, $1.0)
;
# -
saved_values = SavedValues(Float64, Float64)
cb = SavingCallback((u, t, integrator) -> force(u, integrator.p), saved_values)
;
# Compute the period
T = (2π / freq_res)
# +
t_end = 1500.0;
tspan = (0., t_end); tstops=500:0.1:t_end
prob = ODEProblem(ρ!, ρ, tspan, p, callback=AutoAbstol(false, init_curmax=0.0))
v_rounded = round_vel(0.0, λ, Γ, freq_res)
p.particle.v = SVector(2.0, 0, 0)
# @time sol = solve(prob, alg=DP5(), abstol=1e-6, reltol=1e-4, dense=false, callback=cb)
@time sol = solve(prob, alg=DP5(), saveat=tstops, abstol=1e-6, reltol=1e-3, dense=false, callback=cb)
length(sol.t)
# +
vs = -10:0.1:10
forces = []
@time begin
for v in vs
saved_values.saveval .= 0
v_rounded = round_vel(v, λ, Γ, freq_res)
p.particle.v = SVector(v_rounded, 0, 0)
sol = solve(prob, alg=DP5(), saveat=tstops, abstol=1e-6, reltol=1e-4, dense=false, callback=cb)
# sol = solve(prob, alg=DP5(), abstol=1e-6, reltol=1e-4, dense=false, callback=cb)
push!(forces, mean(saved_values.saveval))
end
end
plot(vs, forces)
# +
# sol = solve(prob, alg=DP5(), abstol=1e-6, reltol=1e-4, dense=false, callback=cb)
# ;
# +
# pl = plot()
# for i in 1:length(states)
# sol_i = [real(x[i,i]) for x in sol.u]
# plot!(pl, sol.t[1:end], sol_i[1:end])
# end
# display(pl)
# -
plot([real(u[1,1]) for u in sol.u[end-1000:end]], ylim=[0.86, 0.88])
plot!([real(u[2,2]) for u in sol.u[end-1000:end]])
| examples/2LevelSystemExample.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# language: python
# name: python3
# ---
# # Paper Figures
# +
import sys
import matplotlib.pyplot as plt
import numpy as np
import pandas as pd
import seaborn as sns
sys.path.append('../src')
import raybay
from utils import get_dose_df, get_pars_df, get_percent_diff
# -
# ## Utility Terms
x = np.linspace(23, 25)
y1 = 100*(24 - x)/24
y2 = y1.copy()
y2[x > 24] = (1 - y2[x > 24])*y2[x > 24]
plt.plot(x, y1, label='Linear utility')
plt.plot(x, y2, linestyle='--', label='Linear-quadratic utility')
plt.axvline(24, linestyle=':', c='g', label='$\gamma_2$')
plt.xlabel('D2cm MaxDose (Gy)')
plt.ylabel('Utility Term ($g_2$)')
plt.legend(bbox_to_anchor=(1, 1), frameon=False)
plt.savefig('utility.png', dpi=300, bbox_inches='tight')
# ## Dose Values
# +
def get_dose_diff(row):
    # Row-wise helper for DataFrame.apply: delegates to get_percent_diff
    # comparing the row's 'dose_val' column against its 'goal_val' column.
    return get_percent_diff(row, 'dose_val', 'goal_val')
full_df = pd.concat([get_dose_df(plan_type) for plan_type in ['clinical', 'random', 'bayes']])
full_df['dose_name'] = full_df.apply(lambda row: row['dose_name'].replace('_', ' '), axis=1)
full_df['percent_diff'] = full_df.apply(get_dose_diff, axis=1)
full_df['plan_type'] = full_df['plan_type'].str.title()
full_df['iter_type'] = 'full'
stop_df = pd.concat([get_dose_df(plan_type, stop=True) for plan_type in ['random', 'bayes']])
stop_df['dose_name'] = stop_df.apply(lambda row: row['dose_name'].replace('_', ' '), axis=1)
stop_df['percent_diff'] = stop_df.apply(get_dose_diff, axis=1)
stop_df['plan_type'] = stop_df['plan_type'].str.title()
stop_df['iter_type'] = 'stop'
compare_df = pd.concat([full_df, stop_df])
compare_df = compare_df[compare_df['plan_type'] != 'Clinical']
# -
# ### With 100 Iterations
palette = sns.color_palette('deep')
fig, ax = plt.subplots(4, 3, figsize=(16, 12))
for ii, dose in enumerate(full_df['dose_name'].unique()):
row, col = ii//3, np.mod(ii, 3)
sns.violinplot(x='plan_type', y='percent_diff', palette='deep', cut=0,
data=full_df[full_df['dose_name'] == dose], ax=ax[row][col])
if (row == 3 and col == 0) or (row == 2 and col > 0):
ax[row][col].set_xlabel('Optimal Dose Values')
else:
ax[row][col].set_xlabel('')
ax[row][col].set_xticklabels([])
if col == 0:
ax[row][col].set_ylabel('% Difference From $\gamma_i$')
else:
ax[row][col].set_ylabel('')
ax[row][col].set_title(dose)
ax[row][col].get_children()[0].set_facecolor(palette[0])
ax[row][col].get_children()[2].set_facecolor(palette[1])
ax[row][col].get_children()[4].set_facecolor(palette[2])
ax[3][1].set_visible(False)
ax[3][2].set_visible(False)
plt.savefig('dose_full.png', dpi=300, bbox_inches='tight')
# ### With Stopping Conditions
palette = sns.color_palette('deep')
fig, ax = plt.subplots(4, 3, figsize=(16, 12))
for ii, dose in enumerate(stop_df['dose_name'].unique()):
row, col = ii//3, np.mod(ii, 3)
sns.violinplot(x='plan_type', y='percent_diff', hue='iter_type', cut=0,
data=compare_df[compare_df['dose_name'] == dose], ax=ax[row][col])
ax[row][col].legend().set_visible(False)
if (row == 3 and col == 0) or (row == 2 and col > 0):
ax[row][col].set_xlabel('Optimal Dose Values')
ax[row][col].set_xticks([-0.2, 0.25, 0.8, 1.2])
ax[row][col].set_xticklabels([f"Random\n(100)", f"Random\n(stop)", f"Bayes\n(100)", f"Bayes\n(stop)"])
else:
ax[row][col].set_xlabel('')
ax[row][col].set_xticklabels([])
if col == 0:
ax[row][col].set_ylabel(f"% Difference From $\gamma_i$")
else:
ax[row][col].set_ylabel('')
ax[row][col].set_title(dose)
ax[row][col].get_children()[0].set_facecolor(palette[1])
ax[row][col].get_children()[2].set_facecolor(palette[3])
ax[row][col].get_children()[4].set_facecolor(palette[2])
ax[row][col].get_children()[6].set_facecolor(palette[4])
ax[3][1].set_visible(False)
ax[3][2].set_visible(False)
plt.savefig('dose_stop.png', dpi=300, bbox_inches='tight')
# ## Parameter Values
# +
def get_par_diff(row):
    # Row-wise helper for DataFrame.apply: delegates to get_percent_diff
    # comparing the row's 'par_val' column against its 'goal_val' column.
    return get_percent_diff(row, 'par_val', 'goal_val')
pars_df = pd.concat([get_pars_df(plan_type) for plan_type in ['random', 'bayes']])
pars_df['par_name'] = pars_df.apply(lambda row: row['par_name'].replace('_', ' '), axis=1)
pars_df['percent_diff'] = pars_df.apply(get_par_diff, axis=1)
pars_df['plan_type'] = pars_df['plan_type'].str.title()
# -
# ### With 100 Iterations
fig, ax = plt.subplots(4, 3, figsize=(16, 12))
for ii, par in enumerate(pars_df['par_name'].unique()):
row, col = ii//3, np.mod(ii, 3)
sns.violinplot(x='plan_type', y='percent_diff', palette={'Random': palette[1], 'Bayes': palette[2]}, cut=0,
data=pars_df[pars_df['par_name'] == par], ax=ax[row][col])
if (row == 3 and col == 0) or (row == 2 and col > 0):
ax[row][col].set_xlabel('Optimal Dose Parameters')
else:
ax[row][col].set_xlabel('')
ax[row][col].set_xticklabels([])
if col == 0:
ax[row][col].set_ylabel('% Difference From $\gamma_i$')
else:
ax[row][col].set_ylabel('')
ax[row][col].set_title(par)
ax[row][col].get_children()[0].set_facecolor(palette[1])
ax[row][col].get_children()[2].set_facecolor(palette[2])
ax[3][1].set_visible(False)
ax[3][2].set_visible(False)
plt.savefig('parameters.png', dpi=300, bbox_inches='tight')
| results/paper_figures.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# language: python
# name: python3
# ---
# +
import pandas as pd
import matplotlib.pyplot as plt
from matplotlib import style
style.use('ggplot')
import numpy as np
web_stats={'Day':[1,2,3,4,5,6],
'Visitors':[43,53,34,45,64,34],
'Bounce_Rate':[65,72,62,64,54,66]}
df=pd.DataFrame(web_stats)
#print(df) #df stands for data frame
#print(df.head()) #prints the first 5 rows
#print(df.tail()) #prints the last 5 rows
#Specifying the number in the parentheses gives that number of rows
#print(df.head(2))
#print(df.tail(2))
#df=df.set_index('Day')
#OR you can do this:
df.set_index('Day', inplace=True)
print(df)
#print (df['Visitors']) #prints specific column OR
print (df.Visitors)
#referencing multiple columns
print (df[['Bounce_Rate','Visitors']])
#making a list out of a column; this only works with one column because more than one would
#treat the dictionary like an array, which it isn't
print (df.Visitors.tolist())
#to make it an array
print (np.array(df[['Bounce_Rate','Visitors']]))
# -
| doc/Programs/JupyterFiles/Examples/Youtube Tutorials/Pandas Tutorial Sentdex.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python [default]
# language: python
# name: python3
# ---
# ## Prerequisite
#
# Make sure you ran `snakemake` in this directory and have obtained all the files (stored under `output/`).
# ### Config
# +
import kipoi
import pandas as pd
import numpy as np
import matplotlib.pyplot as plt
import snakemake
# Get all models and interval_sets for which we computed the predictions
models, interval_sets = list(zip(*list(zip(*snakemake.utils.listfiles("output/{model}/{interval_set}.tsv")))[1]))
models = kipoi.utils.unique_list(models)
interval_sets = kipoi.utils.unique_list(interval_sets)
# -
# ## Loading predictions from the tsv output
# +
merge_cols = ['metadata/ranges/chr', 'metadata/ranges/center', 'metadata/ranges/id', 'interval_set']
def read_preds_tsv(model, interval_set, output_dir):
    """Load one model/interval_set prediction TSV into an indexed DataFrame.

    The metadata start/end columns are collapsed into a single 'center'
    column, the frame is indexed by the module-level `merge_cols`, and the
    'preds' column prefix is replaced by the model name (plus per-task
    labels for multi-task models, when the Kipoi model description is
    available).
    """
    df = pd.read_table("{}/{}/{}.tsv".format(output_dir, model, interval_set))
    df['interval_set'] = interval_set
    # Collapse the interval to its midpoint; start/end/strand are dropped.
    df['metadata/ranges/center'] = (df['metadata/ranges/start'] + df['metadata/ranges/end']) //2
    del df['metadata/ranges/start']
    del df['metadata/ranges/end']
    del df['metadata/ranges/strand']
    df.set_index(merge_cols, inplace=True)
    # After indexing, only prediction columns should remain.
    assert df.columns.str.startswith("preds").all()
    df.columns = df.columns.str.replace("preds", model)
    if len(df.columns) > 1:
        # Multi-task model: best-effort renaming with the task labels.
        try:
            descr = kipoi.get_model_descr(model)
            df.columns = model + "/" + pd.Series(descr.schema.targets.column_labels)
        except Exception:
            # FIX: a bare `except:` also swallowed KeyboardInterrupt and
            # SystemExit; keep the best-effort behavior for ordinary errors only.
            pass
    return df
# -
def logit(df):
    """Logit-transform probabilities; pass non-probability data through unchanged.

    If any value lies outside [0, 1] the input is assumed to already be on
    an unbounded scale and is returned as-is. Otherwise log(p / (1 - p)) is
    returned, with a small epsilon guarding both the p == 0 and p == 1
    boundaries.
    """
    if np.any(df < 0) or np.any(df > 1):
        # Values are not probabilities; nothing to transform.
        return df
    eps = 1e-10
    # BUG FIX: the original expression `np.log(df + 1e-10/ (1-df))` parsed
    # as df + (eps / (1 - df)) ~= df, i.e. it computed log(p) rather than
    # the intended log-odds log(p / (1 - p)).
    return np.log((df + eps) / (1 - df + eps))
df = pd.concat([logit(pd.concat([read_preds_tsv(m, interval_set, output_dir='output')
for interval_set in interval_sets], axis=0))
for m in models], axis=1)
df.min()
df.head()
# ### Average prediction per interval_set per model
df_agg = df.groupby(df.index.get_level_values("interval_set")).agg('mean').T
df_agg.plot(figsize=(20,4))
plt.xlabel("Model")
plt.ylabel("Average prediction");
# +
# See the sorted list
# print((df_agg['enhancer-regions'] - df_agg['random']).sort_values(ascending=False)[:200].to_string())
# -
# ## Loading predictions from the hdf5 output
from kipoi.readers import HDF5Reader
# Quick intro to HDF5Reader
f = "output/Basset/random.h5"
r = HDF5Reader(f)
r.open()
r.ls()
# load the values
d = HDF5Reader.load(f, unflatten=False)
d.keys()
# +
def df2index(df):
    """Turn every column of `df` into one level of a pandas MultiIndex."""
    # Pull the columns out of the 2-D values block so mixed dtypes are
    # handled exactly as pandas' .values upcasting dictates.
    level_arrays = [df.values[:, col] for col in range(df.shape[1])]
    return pd.MultiIndex.from_arrays(level_arrays, names=list(df.columns))
def read_preds_h5(model, interval_set, output_dir):
    """Load one model/interval_set prediction HDF5 file into an indexed DataFrame.

    Mirrors read_preds_tsv(): collapses start/end into a 'center' column,
    builds a MultiIndex from `merge_cols`, and names prediction columns
    "<model>/<task>" (task labels from the Kipoi model description for
    multi-task models, a numeric suffix otherwise).
    """
    d = HDF5Reader.load("{}/{}/{}.h5".format(output_dir, model, interval_set), unflatten=False)
    preds = d.pop("/preds")
    # Promote single-task 1-D outputs to a column vector.
    if preds.ndim == 1:
        preds = preds[:, np.newaxis]
    assert preds.ndim == 2 # this example works only with 2 dim output arrays
    # Remaining entries in `d` are the metadata columns (keys start with '/').
    metadata_table = pd.DataFrame(d)
    metadata_table.columns = metadata_table.columns.str.replace("^/", "")
    metadata_table['interval_set'] = interval_set
    # Collapse the interval to its midpoint; start/end/strand are dropped.
    metadata_table['metadata/ranges/center'] = (metadata_table['metadata/ranges/start'] + metadata_table['metadata/ranges/end']) //2
    del metadata_table['metadata/ranges/start']
    del metadata_table['metadata/ranges/end']
    del metadata_table['metadata/ranges/strand']
    metadata_table = metadata_table[merge_cols] # re-order to match merge_cols
    if preds.shape[1] > 1:
        # Multi-task model: use the task labels from the model description.
        descr = kipoi.get_model_descr(model)
        return pd.DataFrame(preds,
                            columns=model + "/" + pd.Series(descr.schema.targets.column_labels),
                            index=df2index(metadata_table))
    else:
        # Single-task model: fall back to a numeric column suffix.
        return pd.DataFrame(preds, columns = model + "/" + pd.Series(np.arange(preds.shape[1]), dtype=str),
                            index=df2index(metadata_table))
# -
df = pd.concat([logit(pd.concat([read_preds_h5(m, interval_set, output_dir='output')
for interval_set in interval_sets], axis=0))
for m in models], axis=1)
df.head()
| 1-predict/load-visualize.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 2
# language: python
# name: python2
# ---
# # Matplotlib 基础
# 在使用**Numpy**之前,需要了解一些画图的基础。
#
# **Matplotlib**是一个类似**Matlab**的工具包,主页地址为
#
# http://matplotlib.org
#
# 导入 `matplotlib` 和 `numpy`:
# %pylab
# ## plot 二维图
# ```python
# plot(y)
# plot(x, y)
# plot(x, y, format_string)
# ```
#
# 只给定 `y` 值,默认以下标为 `x` 轴:
# %matplotlib inline
x = linspace(0, 2 * pi, 50)
plot(sin(x))
# 给定 `x` 和 `y` 值:
plot(x, sin(x))
# 多条数据线:
plot(x, sin(x),
x, sin(2 * x))
# 使用字符串,给定线条参数:
plot(x, sin(x), 'r-^')
# 多线条:
plot(x, sin(x), 'b-o',
x, sin(2 * x), 'r-^')
# 更多参数设置,请查阅帮助。事实上,字符串使用的格式与**Matlab**相同。
# ## scatter 散点图
# ```python
# scatter(x, y)
# scatter(x, y, size)
# scatter(x, y, size, color)
# ```
#
# 假设我们想画二维散点图:
plot(x, sin(x), 'bo')
# 可以使用 `scatter` 达到同样的效果:
scatter(x, sin(x))
# 事实上,scatter函数与**Matlab**的用法相同,还可以指定它的大小,颜色等参数:
x = rand(200)
y = rand(200)
size = rand(200) * 30
color = rand(200)
scatter(x, y, size, color)
# 显示颜色条
colorbar()
# ## 多图
# 使用figure()命令产生新的图像:
t = linspace(0, 2*pi, 50)
x = sin(t)
y = cos(t)
figure()
plot(x)
figure()
plot(y)
# 或者使用 `subplot` 在一幅图中画多幅子图:
#
# subplot(row, column, index)
subplot(1, 2, 1)
plot(x)
subplot(1, 2, 2)
plot(y)
# ## 向图中添加数据
# 默认多次 `plot` 会叠加:
plot(x)
plot(y)
# 可以跟**Matlab**类似用 hold(False)关掉,这样新图会将原图覆盖:
plot(x)
hold(False)
plot(y)
# 恢复原来设定
hold(True)
# ## 标签
# 可以在 `plot` 中加入 `label` ,使用 `legend` 加上图例:
plot(x, label='sin')
plot(y, label='cos')
legend()
# 或者直接在 `legend`中加入:
plot(x)
plot(y)
legend(['sin', 'cos'])
# ## 坐标轴,标题,网格
# 可以设置坐标轴的标签和标题:
plot(x, sin(x))
xlabel('radians')
# 可以设置字体大小
ylabel('amplitude', fontsize='large')
title('Sin(x)')
# 用 'grid()' 来显示网格:
plot(x, sin(x))
xlabel('radians')
ylabel('amplitude', fontsize='large')
title('Sin(x)')
grid()
# ## 清除、关闭图像
# 清除已有的图像使用:
#
# clf()
#
# 关闭当前图像:
#
# close()
#
# 关闭所有图像:
#
# close('all')
# ## imshow 显示图片
# 灰度图片可以看成二维数组:
# 导入lena图片
from scipy.misc import lena
img = lena()
img
# 我们可以用 `imshow()` 来显示图片数据:
imshow(img,
# 设置坐标范围
extent = [-25, 25, -25, 25],
# 设置colormap
cmap = cm.bone)
colorbar()
# 更多参数和用法可以参阅帮助。
# 这里 `cm` 表示 `colormap`,可以看它的种类:
dir(cm)
# 使用不同的 `colormap` 会有不同的显示效果。
imshow(img, cmap=cm.RdGy_r)
# ## 从脚本中运行
# 在脚本中使用 `plot` 时,通常图像是不会直接显示的,需要增加 `show()` 选项,只有在遇到 `show()` 命令之后,图像才会显示。
# ## 直方图
# 从高斯分布随机生成1000个点得到的直方图:
hist(randn(1000))
# 更多例子请参考下列网站:
#
# http://matplotlib.org/gallery.html
| lijin-THU:notes-python/03-numpy/03.02-matplotlib-basics.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# language: python
# name: python3
# ---
# + [markdown] deletable=true editable=true
# # 机器学习工程师纳米学位
# ## 深度学习
# ## 项目:搭建一个数字识别项目
#
# 在此文件中,我们提供给你了一个模板,以便于你根据项目的要求一步步实现要求的功能,进而完成整个项目。如果你认为需要导入另外的一些代码,请确保你正确导入了他们,并且包含在你的提交文件中。以**'练习'**开始的标题表示接下来你将开始实现你的项目。注意有一些练习是可选的,并且用**'可选'**标记出来了。
#
# 在此文件中,有些示例代码已经提供给你,但你还需要实现更多的功能让项目成功运行。除非有明确要求,你无须修改任何已给出的代码。以'练习'开始的标题表示接下来的代码部分中有你必须要实现的功能。每一部分都会有详细的指导,需要实现的部分也会在注释中以'TODO'标出。请仔细阅读所有的提示!
#
# 除了实现代码外,你还必须回答一些与项目和你的实现有关的问题。每一个需要你回答的问题都会以**'问题 X'**为标题。请仔细阅读每个问题,并且在问题后的**'回答'**文字框中写出完整的答案。我们将根据你对问题的回答和撰写代码所实现的功能来对你提交的项目进行评分。
#
# >**注意:** Code 和 Markdown 区域可通过 **Shift + Enter** 快捷键运行。此外,Markdown可以通过双击进入编辑模式。
# + [markdown] deletable=true editable=true
# ## 连接 mnist 的字符来合成数据
#
# 你可以通过连接[MNIST](http://yann.lecun.com/exdb/mnist/)的字符来合成数据来训练这个模型。为了快速导入数据集,我们可以使用 [Keras Datasets](https://keras.io/datasets/#mnist-database-of-handwritten-digits) [中文文档](http://keras-cn.readthedocs.io/en/latest/other/datasets/#mnist)。
# + [markdown] deletable=true editable=true
# ### 载入 mnist
# + deletable=true editable=true
from keras.datasets import mnist
(X_raw, y_raw), (X_raw_test, y_raw_test) = mnist.load_data()
n_train, n_test = X_raw.shape[0], X_raw_test.shape[0]
# + [markdown] deletable=true editable=true
# ### 可视化 mnist
#
# 我们可以通过 matplotlib 来可视化我们的原始数据集。
# + deletable=true editable=true
import matplotlib.pyplot as plt
import random
# %matplotlib inline
# %config InlineBackend.figure_format = 'retina'
for i in range(15):
plt.subplot(3, 5, i+1)
index = random.randint(0, n_train-1)
plt.title(str(y_raw[index]))
plt.imshow(X_raw[index], cmap='gray')
plt.axis('off')
# + [markdown] deletable=true editable=true
# ### 练习:合成数据
#
# 你需要随机取随机张图片,然后将它们拼接成新的图片。
#
# + deletable=true editable=true
import numpy as np
n_class, n_len, width, height = 11, 5, 28, 28
def generate_dataset(X, y):
    """Synthesize n_len-digit sequence images by tiling randomly chosen
    samples of X side by side; any slot may instead stay blank (class 10).

    Returns (images, labels): images of shape (N, height, width*n_len, 1),
    plus one one-hot label array of shape (N, n_class) per digit slot.
    """
    n_samples = X.shape[0]
    images = np.zeros((n_samples, height, width * n_len, 1), dtype=np.uint8)
    labels = [np.zeros((n_samples, n_class), dtype=np.uint8) for _ in range(n_len)]
    for row in range(n_samples):
        for slot in range(n_len):
            # With probability 2/12, leave this slot blank (extra class 10).
            if 9 < random.randint(0, 11):
                labels[slot][row][10] = 1
            else:
                pick = random.randint(0, n_samples - 1)
                images[row, :, slot * width:(slot + 1) * width, 0] = X[pick]
                labels[slot][row][y[pick]] = 1
    return images, labels
X_train, y_train = generate_dataset(X_raw, y_raw)
X_test, y_test = generate_dataset(X_raw_test, y_raw_test)
print(y_train[0].shape)
# + deletable=true editable=true
# 显示生成的图片
for i in range(15):
plt.subplot(5, 3, i+1)
index = random.randint(0, n_test-1)
title = ''
for j in range(n_len):
title += str(np.argmax(y_test[j][index])) + ','
plt.title(title)
plt.imshow(X_test[index][:,:,0], cmap='gray')
plt.axis('off')
# + [markdown] deletable=true editable=true
# ### 问题 1
# _你是如何合成数据集的?_
#
# **回答:**
# 通过两个循环
# 1. 外循环遍历了输入的的数据,如
# ```
# X_train, y_train = generate_dataset(X_raw, y_raw)
# ```
# 这里的数据的大小就是len(X_raw),外循环的次数也是这个。
# 2. 内循环的大小是5,因为根据题目,需要使用1-5个数字拼接合成一个图片。所以通过5次循环,每一次随机取一张图片
# 最后图片的矩阵由28x28变为为28x140。
# + [markdown] deletable=true editable=true
# ### 练习:设计并测试一个模型架构
#
# 设计并实现一个能够识别数字序列的深度学习模型。为了产生用于测试的合成数字序列,你可以进行如下的设置:比如,你可以限制一个数据序列最多五个数字,并在你的深度网络上使用五个分类器。同时,你有必要准备一个额外的“空白”的字符,以处理相对较短的数字序列。
#
# 在思考这个问题的时候有很多方面可以考虑:
#
# - 你的模型可以基于深度神经网络或者是卷积神经网络。
# - 你可以尝试是否在每个分类器间共享权值。
# - 你还可以在深度神经网络中使用循环网络来替换其中的分类层,并且将数字序列里的数字一个一个地输出。
#
# 在使用 Keras 搭建模型的时候,你可以使用 [泛型模型](http://keras-cn.readthedocs.io/en/latest/models/model/) 的方式来搭建多输出模型。
# + deletable=true editable=true
# 导入所需的模型
from keras.layers import Convolution2D
from keras.layers import MaxPooling2D
from keras.layers import Flatten
from keras.layers import Dense
from keras.layers.core import Dropout
from keras.optimizers import SGD
from keras.models import Model
from keras.layers import Input, Dense
# + deletable=true editable=true
def train_model(X_train, y_train):
    """Build, compile and fit a CNN with five softmax heads, one per digit slot.

    NOTE(review): this uses the Keras 1.x API (Convolution2D(32, 3, 3),
    Model(input=..., output=...), nb_epoch=); Keras 2 renamed these to
    Conv2D(32, (3, 3)), Model(inputs=..., outputs=...) and epochs= --
    confirm the installed Keras version before running.
    """
    main_input = Input(shape=(28, 140, 1), name='main_input')
    # Convolutional feature extractor
    x = Convolution2D(32, 3, 3, activation="relu")(main_input)
    x = MaxPooling2D(pool_size = (2, 2))(x)
    x = Convolution2D(32, 3, 3, activation="relu")(x)
    x = MaxPooling2D(pool_size=(2, 2))(x)
    x = Convolution2D(32, 3, 3 ,activation="relu")(x)
    x = Flatten()(x)
    x = Dropout(0.4)(x)
    # One independent 11-way classifier head per digit position
    y_out = [Dense(11, activation='softmax')(x) for i in range(n_len)]
    # Functional (multi-output) model emitting the 5-digit sequence
    model = Model(input=main_input, output=y_out)
    model.compile(optimizer='rmsprop', loss="categorical_crossentropy", metrics=["accuracy"])
    model.fit(X_train, y_train, batch_size=1024, nb_epoch=8, shuffle=True, verbose=1, validation_split=0.2)
    return model
model = train_model(X_train, y_train)
# + [markdown] deletable=true editable=true
# ### 问题 2
# _你为解决这个问题采取了什么技术?请详细介绍你使用的技术。_
#
# **回答:**
# 1. 我的模型构造上,采用了卷积神经网络作为训练,最后一层全连接层作为分类输出
# 2. 为何使用卷积神经网络:首先这是图像识别方面的,如果是使用全连接的深度神经网络,这需要大量的权重进行训练,耗时而且容易过拟合。
# 卷积神经网络通过使用卷积核这个方法,让同一个卷积核内的图像共享权重,这样不仅减少了权重的数量,而且容易在每一层生成图像的某种模式如轮廓等
# 3. 为何使用max pool:我们可以使用增大stride来增大卷积核移动的步伐从而减少feature map,但是这样会丢失很多信息。在保持小的stride的前提下可以使用maxpool的技术进行优化,一样能减少feature map
# 4. 为何使用dropout:使用dropout,在前一层到后面一层之间(这里是训练的最后一层卷积和全连接层的输入)随机选择一部分设置为0,在实践中这种方法很好的防止过拟合
# + [markdown] deletable=true editable=true
# ### 可视化你的网络模型
#
# 参考链接:[visualization](http://keras-cn.readthedocs.io/en/latest/other/visualization/)
#
# 可以是 PNG 格式,也可以是 SVG 格式。
# + deletable=true editable=true
from keras.utils.visualize_util import plot, model_to_dot
from IPython.display import Image, SVG
# 可视化网络模型
SVG(model_to_dot(model, show_shapes=True).create(prog='dot', format='svg'))
# + [markdown] deletable=true editable=true
# ### 问题 3
# _你最终的模型架构是什么样的?(什么类型的模型,层数,大小, 如何连接等)_
#
# **回答:**
# 1. 使用了卷积神经网络结合全连接的神经网络
# 2. 层数总共10层。4层卷积,maxpool 2层,input,dropout,output和flaten各一层
# 3. 连接顺序是input-cnn-maxpool-cnn-maxpool-cnn-cnn-dropout-flaten。然后分别5个1层的dense层,连接着flaten层
# + [markdown] deletable=true editable=true
# ### 练习:训练你的网络模型
#
# 你需要设置20%的数据作为验证集,以保证模型没有过拟合。
# + [markdown] deletable=true editable=true
# ### 练习:计算你的模型准确率
#
# 我们刚才得到了模型每个数字的准确率,现在让我们来计算整体准确率,按照完全预测正确数字序列的标准来计算。
#
# 比如 1,2,3,10,10 预测成了 1,2,10,10,10 算错,而不是算对了80%。
# + deletable=true editable=true
# 测试模型
prediction = model.predict(X_test)
# + deletable=true editable=true
# Sequence-level accuracy on the test set: a sequence only counts as
# correct when all five digits match (no partial credit).
def evaluate(model):
    """Return the fraction of the first 1000 test sequences predicted
    exactly right.

    NOTE(review): the `model` argument is unused -- the function reads the
    module-level `prediction` and `y_test` arrays instead, and only scores
    the first 1000 samples. Confirm whether it should call model.predict
    itself and iterate over the full test set.
    """
    right = 0
    error = 0
    for i in range (1000):
        right_num = 0
        for j in range(5):
            y_pred_num = np.argmax(prediction[j][i])
            y_test_num = np.argmax(y_test[j][i])
            if y_pred_num == y_test_num:
                # Count matching digit positions within this sequence
                right_num += 1
        if right_num == 5:
            right += 1
        else:
            error += 1
    return float(right)/(right+error)
evaluate(model)
# + [markdown] deletable=true editable=true
# ### 问题 4
#
# _你的模型准确率有多少?你觉得你的模型足以解决问题吗?_
#
# **回答:**
# 模型的准确率达到0.945。我觉得还是足以解决问题的。
# + [markdown] deletable=true editable=true
# ### 预测值可视化
#
# 我们将模型的预测结果和真实值画出来,观察真实效果。
# + deletable=true editable=true
def get_result(result):
    """Decode a list of one-hot vectors into a comma-terminated digit string."""
    return ''.join(str(np.argmax(result[k])) + ',' for k in range(n_len))
index = random.randint(0, n_test-1)
y_pred = model.predict(X_test[index].reshape(1, height, width*n_len, 1))
plt.title('real: %s\npred:%s'%(get_result([y_test[x][index] for x in range(n_len)]), get_result(y_pred)))
plt.imshow(X_test[index,:,:,0], cmap='gray')
plt.axis('off')
# + [markdown] deletable=true editable=true
# ### 保存模型
#
# 模型达到满意的效果以后,我们需要将模型的权值和结构保存,以便下次调用。
# + deletable=true editable=true
model.save_weights('model.h5')
with open('model.json', 'w') as f:
f.write(model.to_json())
| digit_recognition.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# language: python
# name: python3
# ---
# +
import pandas as pd
import numpy as np
# Sample exam records: one entry per student, with two missing scores.
exam_data = {
    'name': ['Anastasia', 'Dima', 'Katherine', 'James', 'Emily',
             'Michael', 'Matthew', 'Laura', 'Kevin', 'Jonas'],
    'score': [12.5, 9, 16.5, np.nan, 9, 20, 14.5, np.nan, 8, 19],
    'attempts': [1, 3, 2, 3, 2, 3, 1, 1, 2, 1],
    'qualify': ['yes', 'no', 'yes', 'no', 'no', 'yes', 'yes', 'no', 'no', 'yes'],
}
labels = ['a', 'b', 'c', 'd', 'e', 'f', 'g', 'h', 'i', 'j']
# Build the DataFrame, indexing the rows by the letter labels.
df = pd.DataFrame(exam_data, index=labels)
# -
# ### 0. Visualize Dataframe
# Print out the `head` of the newly created DataFrame
# +
# Code goes here!
# -
# ### 1. Series
# Write a Python program to add, subtract, multiple and divide two Pandas Series.
#
# Sample Series: [2, 4, 6, 8, 10], [1, 3, 5, 7, 9]
# +
# Code goes here!
# -
# ### 2. DataFrame: Column Selection
# Write a Python program to select the 'name' and 'score' columns from the following DataFrame.
# +
# Code goes here!
# -
# ### 3. DataFrame: Filtering by row value
# Write a Python program to select the rows where the score is greater than 10
# +
# Code goes here!
# -
# ### 4. DataFrame: Update value
# Write a Python program to change the score in row 'd' to 11.5.
# +
# Code goes here!
# -
# ### 5. DataFrame: Append row
# Write a Python program to append a new row `k` to data frame with given values for each column.
# +
# Code goes here!
# -
# Now delete the new row and return the original DataFrame.
# +
# Code goes here!
# -
# ### 6. DataFrame: Create new column
# Write a Python program to create a novel column containing the product of ``score`` and ``attempts``
# +
# Code goes here!
# -
# ### 7. Grouping
#
# Group records w.r.t. the ``qualify`` feature, then compute the mean of
# - min, max, mean of the remaining features
# +
# Code goes here!
# -
# ### 8. Subsets
# Select the records indexed as ``a`` and ``b`` filtering in only th columns ``score`` and ``qualify``
# +
# Code goes here!
| Notebooks/Exercises - Pandas.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# language: python
# name: python3
# ---
# # Programming and Database Fundamentals for Data Scientists - EAS503
# While Python provides many (about 69) built-in functions for programmers to use, we will look at a few important ones.
#
# ### Math
# `abs`, `complex`,`divmod`, `hex`, `max`, `min`, `oct`, `pow`, `round`, etc.
#
# ### Type Conversion/Handling
# `bin`, `bool`, `chr`, `dict`, `float`, `frozenset`, `int`, `list`, `long`, `set`, etc.
#
# ### Handling sequences
# `del`,`all`,`any`,`enumerate`,`next`,`sorted`
#
# ### Functional operations
# `filter`, `map`, `reduce`
#
# See here for a [full list](https://docs.python.org/3/library/functions.html)
# all - returns True if all elements in an iterable are true
x = [3,5,4,1]
x1 = [_x > 1 for _x in x]
print(x1)
print(all(x1))
# any - returns True if at least one element in an iterable is true
x = [3,5,4,1]
x1 = [_x > 6 for _x in x]
print(x1)
print(any(x1))
# ## Filter and map
# These built-in functions do not provide any performance benefits, but do make the code cleaner
# I want to remove all entries in a list that contain a certain pattern
l = ['Washington','Adams','Jefferson','Madison','Monroe','Adams','<NAME>']
# get all entries that have substring 'on' in them
#option 1:
newl = []
for _l in l:
if 'on' in _l:
newl.append(_l)
print(newl)
# ### Using filter
# First define an appropriate function
def myfun(x):
    # filter() predicate: True when the name contains the substring 'on'.
    return True if 'on' in x else False
#equivalently one can say
#return 'on' in x
def myfun(x):
    # Same predicate in one line: does 'on' occur anywhere in x?
    return x.find('on') != -1
newl = list(filter(myfun,l))
print(newl)
newl = list(filter(lambda x: 'on' in x,l))
print(newl)
#note that the above statement is same as writing
newl = [_x for _x in l if myfun(_x)]
print(newl)
#the implementation is a little cleaner with filter
# #### Using lambda functions
# Often defining a function using def requires too much coding
# Instead one can define inline functions using lambda keyword
newl = list(filter(lambda x: 'on' in x ,l))
print(newl)
# The `lambda` function is only valid for the statement within which it is defined.
# ### Using map
# The `map` function is useful when you want to apply one function to every item in a sequence
# capitalize every entry in a list
l = ['washington','adams','jefferson','madison','monroe','adams','<NAME>']
newl = list(map(lambda x: x.capitalize(), l))
print(newl)
#map can be applied to multiple lists
l1 = ['washington','adams','jefferson','madison','monroe','adams','<NAME>']
l2 = ['george','john','thomas','james','james','john','martin']
#connect the first and last names and capitalize accordingly
newl = list(map(lambda x,y: y.capitalize()+" "+x.capitalize(), l1,l2))
print(newl)
# ### Using zip
# Another useful function to make simultaneously iterating over multiple lists easy
l1 = ['washington','adams','jefferson','madison','monroe','adams','<NAME>']
l2 = ['george','john','thomas','james','james','john','martin']
l3 = ['First','Second','Third','Four','Five','Six','Seven']
newl = []
for i,j,k in zip(l2,l1,l3):
newl.append(i.capitalize()+' '+j.capitalize()+' ('+k+')')
print(newl)
| notebooks/PythonBuiltinFunctions.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# language: python
# name: python3
# ---
# # Cost of rigor
#
# The goal of science is inference.
# One major goal of medicine is also inference.
# Inference is the process by which we "figure out what's going on out there".
#
import numpy as np
import scipy
import matplotlib.pyplot as plt
from ipywidgets import interact, interactive, fixed, interact_manual
import ipywidgets as widgets
import scipy.stats as stats
# +
def experiment(samples,p=0.5):
    """Overlay the sampling distributions of head counts for a fair coin
    (green, p=0.5) and a possibly biased coin (red, probability `p`).

    Each histogram summarizes 1000 repetitions of `samples` flips.
    """
    fair_coin = np.random.binomial(samples,0.5,1000)
    our_coin = np.random.binomial(samples,p,1000)
    #aggr = np.sum(fair_coin)
    plt.figure()
    plt.hist(fair_coin,color='green')
    plt.hist(our_coin,color='red')
    # Fixed axis limits so the interactive sliders do not rescale the plot.
    plt.xlim((0,100))
    plt.ylim((0,1000))
# Interactive sliders: number of flips (1-100) and bias probability (0-1).
interact(experiment,samples=(1,100,1),p=(0.0,1.0,0.1))
# -
# # Cost to enlightenment
# We can calculate how many flips it takes to achieve a $p<0.05$ but this also costs us \\$X dollars.
# Let's say we don't necessarily care to be sure to $p<0.05$ but $p<0.10$ is sufficient.
# But even this is arbitrary; can we find a rigorous way to maximize certainty with minimizing cost?
# This process is called optimization.
# # Priors
# The last thing we'll talk about are *priors*.
# This is a piece of the puzzle where we *explicitly* incorporate previous knowledge about what we're studying.
# For example, we already *know* that the person flipping the coin is someone who does some shady things because our friend was here yesterday and said as much.
#
| inprogress/ML_Med_cost_coin.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# language: python
# name: python3
# ---
# # Signal conditioning
# We are now going to look at the chain used to condition signals before either data acquisition or data analysis. Conditioning means, for example, correcting the signals for noise such as electromagnetic interference.
# ## Review of electronics
# Let's start with passive devices first.
#
# Most of the signals of interest for us as AC (ie time dependent).
#
# The instantaneous power of an electrical signal is $P(t) = V(t) I(t)$. The average power becomes:
# \begin{align*}
# <P> & = \frac{1}{T} \int_0^T V(t') I(t') dt' \\
# \end{align*}
#
# Energy stored in an electrical component:
# \begin{align*}
# E = \int_{0}^t V(t') I(t') dt'
# \end{align*}
#
#
#
# ### Resistor
# \begin{align*}
# V = RI
# \end{align*}
# _Averaged power dissipated by a resistor_
# \begin{align*}
# <P> & = \frac{1}{T} \int_0^t V(t') I(t') dt' \\
# & = \frac{V_{RMS}^2}{R} \\
# & = R I_{RMS}^2
# \end{align*}
#
# __RMS defintion__
# \begin{align*}
# V_{RMS} & = \sqrt{\frac{1}{T} \int_0^t V(t')^2 dt'}\\
# \end{align*}
#
# We now know from Fourier analysis that a well-behaved signal can be decomposed as a sum of sinusoidal waves. It is therefore convenient to introduce a new definition of the amplitude of a signal that is not related to the actual amplitude of the signal, but to the average power carried by the harmonic, the RMS amplitude.
#
# RMS of a sinusoid $V(t) = V_0 \cos (\omega t + \phi )$:
# \begin{align*}
# V_{RMS}^2 & = \frac{1}{T} \int_0^t \left(V_0 \cos (\omega t' + \phi) \right)^2 dt'\\
# & = V_0^2 \frac{1}{2\pi} \int_0^{2\pi} \cos^2(x) dx\\
# & = \frac{V_0^2}{2} \\
# V_{RMS} & = \frac{V_0}{\sqrt{2}}
# \end{align*}
#
# $V_0$ is the amplitude of the sinusoidal signal, $V_{RMS}$ is the equivalent amplitude to the average power of the signal, $V_{pp} = 2 V_0$ is the peak to peak amplitude.
#
# ### Capacitor
# <img src="img/Capacitor.png" width="240">
# \begin{align*}
# I = C \frac{dV}{dt}
# \end{align*}
# _Energy stored in a capacitor_ $E = 1/2 CV^2$
#
#
# ### Inductor
# <img src="img/Inductor.png" width="240">
# \begin{align*}
# V = L \frac{dI}{dt}
# \end{align*}
# _Energy stored in an inductor_ $E = 1/2 LI^2$
#
# ### Impedance
# \begin{align*}
# V = Z I
# \end{align*}
# > resistor:
# \begin{align*}
# Z=R
# \end{align*}
#
# > capacitor:
# \begin{align*}
# Z=\frac{1}{i\omega C}
# \end{align*}
#
# > inductor:
# \begin{align*}
# Z=i \omega L
# \end{align*}
#
# ### Diode
# It allows current to flow only in one direction (forward biased), by impeding the current to flow in the other direction (reverse biased).
#
# __Half-Wave rectifier__
#
# <img src="img/HalfWaveRectifier.png" width="240">
#
# <img src="img/TimeResponseHalfWaveRectifier.png" width="240">
#
#
# ### Examples of circuits
# __Optimal load__
#
# ## Filters
# Filters aim to remove part of the signal/voltage (ie the harmonic(s)) that is unwanted.
#
# We are going to start with passive filters, ie they do not involve a powered circuit, and then we will see active filters, such as those built with operational amplifiers.
#
# The __gain__ of a filter is defined as the ratio of output to input powers. In electronics, remember that $P\approx V^2$, so the gain is also expressed as the ratio of output to input amplitudes:
# \begin{align*}
# G = \frac{|V_{out}|}{|V_{in}|}
# \end{align*}
# For a filter, it ranges between 0 and 1. It is commonly plotted in log scale (Bode plots). The gain is best plotted as gain vs frequency. Here is a diagram for an ideal filter:
#
# <img src="img/IdealLPFilter.png" width="240">
#
# The __cutoff frequency__, $f_{cutoff}$, is the dividing frequency over which the filter changes behavior.
#
# Due to the typically broad range of frequencies of interest and the need to better evaluate attenuation, filter diagrams are best drawn as Bode plots, which are log-log instead of linear-linear.
#
# There are four types of filters:
# > Low-pass
#
# > High-pass
#
# > Band-pass
#
# > Band-stop
#
# Let's start with circuits for passive filters and let's analyse them. Passive filters are made only of resistors, capacitors, and inductors.
#
# ### First order passive low-pass filter
# This simple circuit is created with just a resistor and an capacitor.
#
# <img src="img/LPRCFilter.png" width="240">
#
# \begin{align*}
# V_{out} & = \left( \frac{1/(i \omega C)}{R + 1/(i \omega C)} \right) V_{in} \\
# & = \left( \frac{1}{1 + i \omega RC} \right) V_{in}
# \end{align*}
# The cutoff frequency is:
# \begin{align*}
# \omega_{cutoff} & = \frac{1}{RC}\\
# f_{cutoff} = & \frac{\omega_{cutoff}}{2 \pi} = \frac{1}{2\pi RC}
# \end{align*}
#
# This equation looks very similar to the equation for the sinusoidal response of a first order dynamic system, just using complex notation.
#
# Gain:
# \begin{align*}
# G & = \frac{|V_{out}|}{|V_{in}|}\\
# & = \frac{1}{\sqrt{1+ \left( \frac{\omega}{\omega_{cutoff}}\right)^2 }} = \frac{1}{\sqrt{1+ \left( \frac{f}{f_{cutoff}}\right)^2 }}
# \end{align*}
#
# Phase shift:
# \begin{align*}
# \phi & = \arctan \left( \frac{\text{imaginary component}}{\text{real component}} \right) \\
# & = -\arctan \left( \frac{\omega}{\omega_{cutoff}} \right)
# \end{align*}
#
# <img src="img/1stLPFBodePlot.png" width="480">
#
# Output lags the input.
#
# _exercise_
#
# >Noise at 1000 Hz is superimposed on a “carrier” frequency of 10 Hz. It is desired to apply a first-order passive low-pass filter to remove the noise so that only the carrier signal remains.
#
# >>(a) Choose the cutoff frequency of the low-pass filter.
# +
import numpy
# First-order low-pass design for the exercise above.
# NOTE(review): the exercise states noise at 1,000 Hz, but f_noise is set
# to 100,000 Hz here -- confirm which value is intended.
f_carrier = 10.
f_noise = 100000.
# Chosen cutoff: one decade above the 10 Hz carrier.
f_c = 100.
# First-order gains: G = 1/sqrt(1 + (f/f_c)^2); phase = -atan(f/f_c).
G_noise = 1/numpy.sqrt(1+(f_noise/f_c)**2)
G_carrier = 1/numpy.sqrt(1+(f_carrier/f_c)**2)
phi_carrier = - numpy.arctan(f_carrier/f_c) *180/numpy.pi
print('Gain noise', G_noise)
print('Gain carrier', G_carrier)
print( 'phase carrier', phi_carrier, ' deg')
# -
# >>(b) If a capacitor with capacitance of 0.10 $\mu$F is available, what resistor should be used?
R = 1/(2*numpy.pi*0.1E-6*f_c)
print('R ', R, 'Ohm')
# >>(c) How much of the noise is reduced by this filter?
print(1/G_noise)
# The gain is commonly expressed in dB, $G_{dB} = 20 \log_{10}\left( \frac{|V_{out}|}{|V_{in}|} \right) = 20 \log_{10}(G)$.
#
# ### First order passive high-pass filter
#
# <img src="img/HPRCFilter.png" width="240">
#
#
# \begin{align*}
# \frac{V_{out}}{V_{in}} = \frac{i \omega RC}{1 + i \omega RC}
# \end{align*}
#
# \begin{align*}
# f_{cutoff} = \frac{\omega_{cutoff}}{2 \pi} = \frac{1}{2\pi RC}
# \end{align*}
#
# Gain:
# \begin{align*}
# G = \frac{1}{\sqrt{1+ \left( \frac{f_{cutoff}}{f}\right)^2 }}
# \end{align*}
#
# Phase:
# \begin{align*}
# \phi = \arctan \left( \frac{\omega_{cutoff}}{\omega} \right)
# \end{align*}
#
# <img src="img/1stHPFBodePlot.png" width="480">
#
# Output leads the input.
#
# ### Higher order low-pass filter
#
# With passive devices, one can easily build a $2^{nd}$ order filter ($RLC$ components). Higher order filters are built using op-amps, they are active filters.
#
# In general for a Butterworth low-pass filter of order $n$, the gain is:
# \begin{align*}
# G = \frac{1}{\sqrt{1+ \left( \frac{f}{f_{cutoff}}\right)^{2n} }}
# \end{align*}
#
# <img src="img/HighOrderLPFilter.png" width="480">
#
# Higher order filters have a much higher roll-off rate: the higher frequencies get attenuated much faster. A first-order filter has an attenuation rate of 20 dB/decade (or 6 dB/octave). A filter of order $n$ has an attenuation rate of $20 \times n$ dB/decade.
# _exercise_
#
# >One is interested in filtering 1 000 Hz noise with filter with cutoff frequency of $f_{cutoff} = 50$ Hz. What would be the attenuation of this harmonic with Butterworth filters of $1^{st}, 2^{nd}, \text{and } 4^{th}$ orders. Express the results in percentage and in decibel.
# Butterworth low-pass gain of order n: G = 1/sqrt(1 + (f/f_c)^(2n)).
# NOTE(review): the exercise text specifies f_cutoff = 50 Hz, but 100 Hz
# is used here -- confirm which value is intended.
f = 1000
f_c = 100
G_1 = 1/numpy.sqrt(1+(f/f_c)**(2*1))
G_2 = 1/numpy.sqrt(1+(f/f_c)**(2*2))
G_4 = 1/numpy.sqrt(1+(f/f_c)**(2*4))
print('1st order ', G_1)
print('2nd order ', G_2)
print('4th order ', G_4)
# > A signal has a carrier frequency of 10 Hz and $A_{carrier}$ = 1 V, and a noise at frequency 1,000 Hz and amplitdue $A_{noise} = 0.1 $V. Your digital acquisition system has range $V_{Max} = 10$ V and $V_{min} = -10 $V, with a 12 bit ADC.
#
# > Design a signal conditioning block to make the best use of your hardware.
# ### Higher order high-pass filter
# \begin{align*}
# G = \frac{1}{\sqrt{1+\left( \frac{f_{cutoff}}{f} \right)^{2n} }}
# \end{align*}
# ## Operational Amplifiers
#
# <img src="img/Op-Amp_General.png" width="240">
#
# ### Open-loop operation
# \begin{align*}
# V_{out} = A \left( V_{in}(+) - V_{in}(-) \right)
# \end{align*}
# $A$: open-loop gain
#
# _exercise_
#
# >The open-loop gain of an op-amp is $A = 1 \times 10^6$. The high supply voltage $V^+_{supply} = 15.0\text{ V}$. The op-amp saturates at 13.9 V. Calculate the input voltage difference (Vp − Vn) that will cause saturation when the op-amp is operated in an open-loop configuration.
# ### Closed-loop/feedback operation
# In practice, to avoid saturation, most op-amps are used in closed-loop operation. For an ideal op-amp, this forces $V_{in}(+) \approx V_{in}(-)$. We will see more on this later.
#
# __Real Op-Amp__
# \begin{align*}
# A_0 & \sim 10^6\\
# f_0 & \sim 10^6 \text{ Hz}\\
# R_{in} & \sim 10^6 \Omega \\
# R_{out} & \sim 1 \Omega \\
# \end{align*}
#
# __Ideal Op-Amp Rules__
# \begin{align*}
# A & = \infty\\
# R_{in} & = \infty \\
# R_{out} & = 0 \\
# I_{in} & =0 \\
# V_{in}(+) & = V_{in}(-)
# \end{align*}
#
#
# ### $\times 1$ Buffer
#
# <img src="img/Buffer.png" width="240">
#
# \begin{align*}
# V_{out} & = V_{in} \\
# R_{out} & = 0 \\
# R_{in} & = \infty
# \end{align*}
#
# ### Inverting Amplifier
#
# <img src="img/InvertingAmp.png" width="240">
#
# \begin{align*}
# V_{out} = - \frac{R_2}{R_1} V_{in}
# \end{align*}
#
# - Input impedance: $R_{in} = R_1$
# - Output impedance: $R_{out} = 0 \, \Omega$
#
# _exercise_
#
# > You have access to op-amps and many $10\text{ k}\Omega$. Show how these components can be used to double the voltage of an input signal. Use inverting circuits, and draw the circuit diagram.
#
#
# ### Non-Inverting Amplifier
#
# <img src="img/Non-InvertingAmp.png" width="240">
#
# \begin{align*}
# V_{out} = & \left( 1+ \frac{R_2}{R_1} \right) V_{in}
# \end{align*}
#
# - Input impedance: $R_{in} = \infty$
# - Output impedance: $R_{out} = 0 \, \Omega$
#
#
# > You have access to op-amps and many $10\text{ k}\Omega$. Show how these components can be used to double the voltage of an input signal. Use non-inverting circuits, and draw the circuit diagram.
#
# ### Differencing Amplifier
#
# <img src="img/DifferencingAmp.png" width="240">
#
# \begin{align*}
# V_{out} = \frac{R_2}{R_1} \left( V_{in}(+) - V_{in}(-) \right)
# \end{align*}
#
# ### First-order, active, low-pass, inverting filter
#
# <img src="img/ActiveLPFilter.png" width="240">
# \begin{align*}
# \frac{V_{out}}{V_{in}} = & - \frac{R_2}{R_1} \frac{1}{1+i \omega R_2 C} \\
# = & - \frac{R_2}{R_1} \frac{1}{1+i \frac{f}{f_{cutoff}}} \\
# \text{with } f_{cutoff} = \frac{1}{2\pi R_2 C} \\
# \end{align*}
#
# ### Non-inverting Schmitt Trigger
# We saw this circuit in the module on digital data acquisition. It has a hysteresis by design which is accomplished with the positive feedback loop.
#
# <img src="img/SchmittTrigger_ciruit.png" width="240">
#
# <img src="img/SchmittTrigger.png" width="240">
#
# <img src="img/TTL_withNoise.png" width="360">
#
# ### Real op-amps effects
# #### Common Mode Rejection Ratio
#
# #### Gain-Bandwidth Product, GBP
# Internal cutoff frequency: $f_c$ or bandwidth
# $G_{theoretical}$: gain of the circuit under consideration
#
# GBP is constant: $GBP \sim 1$ MHz.
# \begin{align*}
# GBP & = G_{theoretical} \times f_c \\
# f_c & = \frac{GBP}{G_{theoretical}}
# \end{align*}
#
# At very high frequency op-amp acts like a low pass filter.
#
#
# <img src="img/GBP-741.png" width="240">
#
#
GBP = 1e6 # Hz
G_th = 10 # dimensionless
f_c = GBP/G_th
print('max frequency I can amplify : ', f_c, ' Hz')
# Hearing: 20 -20 000 Hz
# $f_{sampling} > 40000$ Hz
# MP3: f_s = 44.1 kHz
# take f_s = 40 kHz
# amplify by 100: can I use a single Op-amp circuit? ie single inverting amplifier stage if GBP = 1 MHz?
G_th = 100
GBP = 1e6
f_c = GBP/G_th
print(f_c , ' Hz')
# Solution:
# Use 2 inverting op-amp circuit in series of gain G = 10
# f_c of each stage is 100 kHz > 40 kHz!!!
# I can amplify my sound!
# In practice: $G_{max}$ < 30: because of GBP, and also this would force resistances ($R_1$ and $R_2$) to be either too low or too high.
# _exercise_
#
# >One wishes to measure a pressure pulse with a very sensitive pressure transducer. Unfortunately, the voltage output of the pressure transducer is on the order of $\pm 5$ mV and the smallest range of our Data Acquisition board is $\pm 100$ mV.
#
# >>To utilize the most of our DAQ board dynamic range, and hence increase our resolution, you wish to use an amplifier. What would be the gain you would select?
#
# >Because experiments are never easy, we have high-power electrical components just next to the DAQ system, which create a lot of electromagnetic noise.
#
# >>Will you choose an inverting or a non-inverting amplifier in this experiment? Explain why? (hint: here you probably want to maximize your signal to noise ratio, SNR).
#
# > One wishes to measure an impulse loading created by a detonation (i.e. a fast transient) with a pressure transducer. The signal needs to be amplified by a factor of 100 to measure this phenomenon of frequency 20,000 Hz. The gain-bandwidth product is $GBP = 1$ MHz for the 741 op-amps we have available in the lab.
#
# >>What kind of circuit will you use, i.e. how many amplifiers? Explain why?
#
# >> Will you use inverting or non-inverting amplifiers? Justify why?
#
#
# > A signal contains a frequency component at 100 Hz with amplitude 3.50 V, but there is also some undesired
# noise in the signal at 15,200 Hz, amplitude 0.20 V. The DC offset is negligible. You are not aware of the
# noise, so you sample the signal at a sampling frequency of 512 Hz and collect 4,096 data points with an 16-bit DAQ system with a range of $\pm 5$ V.
#
# >What is the frequency resolution of this signal?
#
# > What is the folding frequency of the resulting frequency spectrum?
#
# > You wish to add an anti-aliasing filter to the digital data acquisition system. The anti-aliasing filter is based on a first order low-pass filter. Select the cutoff frequency of the filter, justify why.
#
# > Estimate the amplitude of the filtered noise, as well as phase and amplitude of the original signal
#
# > How does this amplitude compare to the DAQ quantization error?
#
# > Ideally, one wishes to make the noise on the same order than the quantization error. What would be the order of the filter necessary to do so?
# +
# Quantization step of the DAQ. 20/2**17 equals 10 V span / 2**16 codes,
# consistent with the stated 16-bit, +/-5 V system.
Q = 20/2**17
print('Q = ', Q)
A_noise = 0.2 #V
# Attenuation needed to push the noise down to one quantization step.
G_filter = Q/A_noise
print('G_filter = ', G_filter)
# NOTE(review): the exercise above uses a 100 Hz carrier and 15,200 Hz
# noise, but 20 Hz / 10,000 Hz are used here -- confirm which values apply.
f_carrier = 20.
f_noise = 10000.
f_c = 100.
# Noise gain uses exponent 4, i.e. a second-order (n=2) Butterworth filter.
G_noise = 1/numpy.sqrt(1+(f_noise/f_c)**4)
G_carrier = 1/numpy.sqrt(1+(f_carrier/f_c)**2)
phi_carrier = - numpy.arctan(f_carrier/f_c) *180/numpy.pi
print('Gain noise', G_noise, G_noise/G_filter)
print('Gain carrier', G_carrier)
print( 'phase carrier', phi_carrier, ' deg')
# -
# >>What is the frequency resolution of this signal?
#
# >> What is the folding frequency of the resulting frequency spectrum?
#
# >> You wish to add an anti-aliasing filter to the digital data acquisition system. The anti-aliasing filter is based on a first order low-pass filter. Select the cutoff frequency of the filter, justify why.
#
# >> Estimate the amplitude of the filtered noise, as well as phase and amplitude of the original signal
#
# >> How does this amplitude compare to the DAQ quantization error?
#
# >> Ideally, one wishes to make the noise on the same order than the quantization error. What would be the order of the filter necessary to do so?
#
#
# <img src="img/Op-Amp_Feedback.png" width="240">
#
# \begin{align*}
# V_{out} & = A(\omega) \left( V_{in} - f V_{out} \right)\\
# \frac{V_{out}}{V_{in}} & = \frac{A(\omega)}{1+f A(\omega)}\\
# \text{typically}\\
# A(\omega) & = \frac{A_0}{1+i\frac{\omega}{\omega_0}}
# \end{align*}
#
| Lectures/05_SignalConditioning.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 2
# language: python
# name: python2
# ---
import numpy as np
# %pylab inline
def AofT(time, T, ai, taui):
    """Decaying amplitude a_i * exp(-t/tau_i), normalized by (1 + exp(-T/(2*tau_i)))."""
    decay = np.exp(-time / taui)
    normalization = 1. + np.exp(-T / (2 * taui))
    return ai * decay / normalization
from SimPEG import *
from simpegem1d.Waveform import CausalConv
import sys
sys.path.append("./DoubleLog/")
from plotting import mapDat
# +
class LinearSurvey(Survey.BaseSurvey):
    """Survey for a linear problem: the data are the fields sampled at `time`."""
    # Number of observed data; overwritten per-instance in __init__.
    nD = None
    def __init__(self, time, **kwargs):
        # NOTE(review): kwargs are accepted but never forwarded, and
        # Survey.BaseSurvey.__init__ is not called -- confirm this is intended.
        self.time = time
        self.nD = time.size
    def projectFields(self, u):
        # Identity projection: the fields already are the predicted data.
        return u
class LinearProblem(Problem.BaseProblem):
    """Linear forward problem d = G m with an explicit dense matrix G."""
    surveyPair = LinearSurvey
    def __init__(self, mesh, G, **kwargs):
        Problem.BaseProblem.__init__(self, mesh, **kwargs)
        # Forward operator mapping a model vector to predicted data.
        self.G = G
    def fields(self, m, u=None):
        # Forward model: predicted data G m.
        return self.G.dot(m)
    def Jvec(self, m, v, u=None):
        # Sensitivity times a vector; J = G since the problem is linear.
        return self.G.dot(v)
    def Jtvec(self, m, v, u=None):
        # Adjoint sensitivity times a vector: G^T v.
        return self.G.T.dot(v)
# -
# # Simple exponential basis
#
# $$ \mathbf{A}\mathbf{\alpha} = \mathbf{d}$$
tind = 12
time = np.load('./exampledata/timevtem.npy')[4:]
obs = np.load('./exampledata/souding.npy')[4:]
wave = np.loadtxt('/Users/sgkang/Dropbox/Shared/SeogiDikun/Milligan/Data/7042_106_wform.xyz', skiprows=7)
M = 81
tau = np.logspace(-5, -2, M)
twave = (np.arange(10000)+1)*1e-5
indstart = 4439
indend = 6000
t0_wave = twave[indstart:indend].min()
time_conv= twave[indstart:indend]-t0_wave
time_conv.max()
currentderiv = wave[indstart:indend]
currentderiv[time_conv>4.4e-3] = 0.
current = CausalConv(wave[indstart:indend], np.ones_like(wave[indstart:indend]), time_conv)
time_conv
# +
# figsize(6, 4)
plt.plot(time_conv, wave[indstart:indend], 'b.', lw=2, ms=4)
plt.plot(time_conv, wave[indstart:indend]*0., 'k--')
plt.plot(np.r_[4.4000000e-03, 4.4000000e-03], np.r_[-4.5, 4.5], 'k:')
plt.ylim(-4.5, 4.5)
# plt.xlim(-2e-4, 5.5e-3)
# -
current.min()
figsize(6, 4)
plt.plot(time_conv, current/current.max(), 'k-')
# plt.plot(time_conv, wave[indstart:indend]*0., 'k--')
plt.plot(np.r_[4.4000000e-03, 4.4000000e-03], np.r_[1e-6, 1e-2], 'k:')
plt.plot(time, np.zeros_like(time), 'r.')
plt.ylim(0, 1.)
from SimPEG import Mesh
from simpegem1d.Waveform import SineFun, SineFunDeriv, CausalConv
dt = 1e-5
t0 = 4.4000000e-03
ntime = time_conv.size
meshtime = Mesh.TensorMesh([dt*np.ones(ntime)], x0=[-dt/2.])
P = meshtime.getInterpolationMat(time+t0, 'CC')
# time_conv = meshtime.gridN
# currentderiv = SineFunDeriv(time_conv, t0)
# current = SineFun(time_conv, t0)
temp = np.exp(-time_conv/1e-2)/1e-2
out = CausalConv(temp, currentderiv, time_conv)
# plt.plot(time_conv, currentderiv)
plt.plot(time_conv, out)
time_conv.min(), time_conv.max()
P.shape
N = time.size
A = np.zeros((N, M))
for j in range(M):
A[:,j] = P*(CausalConv(1./tau[j]*np.exp(-time_conv/tau[j]), -currentderiv, time_conv))
mtrue = np.zeros(M)
np.random.seed(1)
inds = np.random.random_integers(0, 41, size=5)
mtrue[inds] = np.r_[0.1, 2, 1, 4, 5]
out = np.dot(A,mtrue)
# +
fig = plt.figure(figsize=(5,4))
ax = plt.subplot(111)
# for i, ind in enumerate(inds):
# temp, dum, dum = mapDat(mtrue[inds][i]*np.exp(-time/tau[ind]), 1e-5, stretch=2)
# plt.semilogx(time, temp, 'k', alpha = 0.5)
outmap, ticks, tickLabels = mapDat(obs,1e-3, stretch=3)
ax.plot(time*1e6, outmap, 'k', lw=2)
ax.plot(np.r_[-0.002, 0.]*1e6, np.zeros(2), 'k', lw=2)
ax.plot(np.zeros(2), np.r_[ticks.min(), ticks.max()], 'k--', lw=1)
ax.set_yticks(ticks)
ax.set_yticklabels(tickLabels)
ax.set_ylim(ticks.min(), ticks.max())
ax.plot(np.r_[-0.002, time.max()]*1e6, np.zeros(2), 'k--')
ax.set_xlim(-0.002*1e6, time.max()*1e6)
ax.set_xlabel("Time (micro-s)", fontsize = 16)
ax.set_ylabel("$db_z/dt (pV/A$-$m^4)$ ", fontsize = 16)
# ax.grid(True)
# +
# from pymatsolver import MumpsSolver
# -
ip = obs[obs<0.]
print obs[obs>0.]/abs(ip).max()
from SimPEG import Maps
weight = np.sqrt(np.diag(np.dot(A.T, A)))
mesh = Mesh.TensorMesh([M])
wmap = Maps.Weighting(mesh, weights=weight)
prob = LinearProblem(mesh, A)
survey = LinearSurvey(time)
survey.pair(prob)
# survey.makeSyntheticData(mtrue, std=0.01)
# survey.dobs = out
survey.dobs = obs
reg = Regularization.BaseRegularization(mesh, mapping=wmap)
dmis = DataMisfit.l2_DataMisfit(survey)
# uncert = 0.01*(abs(survey.dobs)+abs(ip).max())
uncert = 0.05*(abs(survey.dobs)+abs(ip).max())
# uncert = 0.05*(abs(survey.dobs))
dmis.Wd = 1./(uncert)
opt = Optimization.ProjectedGNCG(maxIter=10)
# opt = Optimization.InexactGaussNewton(maxIter=20)
# opt.lower = -1e-10
opt.upper = 1e-10
invProb = InvProblem.BaseInvProblem(dmis, reg, opt)
invProb.beta = 1e-5
beta = Directives.BetaSchedule()
beta.coolingFactor = 1
target = Directives.TargetMisfit()
inv = Inversion.BaseInversion(invProb, directiveList=[beta, target])
m0 = np.zeros(M)
opt.tolG = 1e-10
opt.tolG = 1e-10
reg.mref = np.zeros_like(M)
mrec = inv.run(m0)
plt.semilogx(tau, weight)
plt.semilogx(tau, mrec, '.')
mfund = mrec.copy()
mip = mrec.copy()
mfund[mfund<0.] = 0.
mip[mip>0.] = 0.
fund = np.dot(A, mfund)
ip = obs-invProb.dpred
tind = 7
print obs[tind], invProb.dpred[tind], ip[tind]
fig = plt.figure(figsize=(7,4.5))
ax = plt.subplot(111)
ax.plot(time, obs, 'k.-', lw=2)
# ax.set_ylim(1e-4, 1e0)
ax.set_xscale('linear')
ax.set_yscale('linear')
ax.set_xlim(time.min(), time.max())
ax.grid(True)
fig = plt.figure(figsize=(7,4.5))
ax = plt.subplot(111)
ax.plot(time, obs, 'k.-', lw=2)
ax.plot(time, -obs, 'k--', lw=2)
ax.plot(time, -invProb.dpred, 'b-', lw=2)
ax.plot(time, -ip, 'r.-', lw=2, ms=10)
ax.plot(time, uncert, 'g.-', lw=2, ms=10)
ax.set_ylim(1e-4, 1e0)
ax.set_xscale('log')
ax.set_yscale('log')
ax.set_xlim(time.min(), time.max())
ax.grid(True)
predmap, ticks, tickLabels = mapDat(invProb.dpred,1e-3, stretch=3)
# +
fig = plt.figure(figsize=(5,4))
ax = plt.subplot(111)
# for i, ind in enumerate(inds):
# temp, dum, dum = mapDat(mtrue[inds][i]*np.exp(-time/tau[ind]), 1e-5, stretch=2)
# plt.semilogx(time, temp, 'k', alpha = 0.5)
outmap, ticks, tickLabels = mapDat(obs,1e-3, stretch=3)
ax.plot(time*1e6, outmap, 'k', lw=2)
ax.plot(time*1e6, predmap, 'b', lw=2)
ax.plot(time[7:]*1e6, outmap[7:]-predmap[7:], 'r', lw=2)
ax.plot(np.r_[-0.002, 0.]*1e6, np.zeros(2), 'k', lw=2)
ax.plot(np.zeros(2), np.r_[ticks.min(), ticks.max()], 'k--', lw=1)
ax.set_yticks(ticks)
ax.set_yticklabels(tickLabels)
ax.set_ylim(ticks.min(), ticks.max())
ax.plot(np.r_[-0.002, time.max()]*1e6, np.zeros(2), 'k--')
ax.set_xlim(-0.002*1e6, time.max()*1e6)
ax.set_xlabel("Time (micro-s)", fontsize = 16)
ax.set_ylabel("$db_z/dt (pV/A$-$m^4)$ ", fontsize = 16)
# ax.grid(True)
# -
weight_d = np.sqrt(np.diag(np.dot(np.dot(np.diag(1./uncert), A), (np.dot(np.diag(1./uncert), A)).T)))
# weight_d = np.sqrt(np.diag( np.dot(A, A.T)))
plt.semilogx(time, weight_d)
| notebook/.ipynb_checkpoints/Milligan_ex_single-checkpoint.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# language: python
# name: python3
# ---
from defense import *
from utils import *
# +
# with more output than the original RGD code
# for demo only
def remap(img, index):
    """Resample ``img`` through a per-pixel coordinate lookup table.

    ``index[i][j]`` holds the (row, col) source coordinate whose pixel is
    written to output position ``[j, i]`` -- note the transposed write, so
    callers transpose the result back afterwards, and the image must be
    square for the indexing to stay in bounds.
    """
    rows, cols = img.shape[0], img.shape[1]
    if np.ndim(img) > 2:
        out = np.zeros((rows, cols, img.shape[2]))
    else:
        out = np.zeros((rows, cols))
    for r in range(rows):
        for c in range(cols):
            # Transposed write: destination [c, r] takes the looked-up pixel.
            out[c, r] = img[tuple(index[r][c])]
    return out
def demoGD(img, distort_limit=0.25):
    """Random grid distortion (RGD) demo with extra diagnostic outputs.

    Applies a random piecewise-linear grid distortion to ``img`` (the
    defense transform) and additionally overlays an orange grid on a copy
    of the input so the distortion can be visualised.

    Parameters
    ----------
    img : ndarray
        Input image, H x W x C. The grid overlay assumes an RGB float
        image in [0, 1].
    distort_limit : float, optional
        Maximum relative stretch/shrink applied to each grid cell.

    Returns
    -------
    tuple
        ``(outgridimg, gridimg, outimg)``: the distorted grid overlay,
        the undistorted grid overlay, and the distorted input image.
    """
    num_steps = 10
    # Random per-cell stretch factors along each axis.
    xsteps = [1 + random.uniform(-distort_limit, distort_limit) for i in range(num_steps + 1)]
    ysteps = [1 + random.uniform(-distort_limit, distort_limit) for i in range(num_steps + 1)]
    height, width = img.shape[:2]
    x_step = width // num_steps
    y_step = height // num_steps
    # Overlay a 2-pixel-wide orange grid on a copy of the *input* image.
    # (Was `np.copy(orig)`, which ignored the argument and depended on a
    # module-level global defined after this function.)
    gridimg = np.copy(img)
    xind = np.arange(0, width, 40)[1:]
    yind = np.arange(0, height, 40)[1:]
    gridimg[xind] = [1.0, 0.38, 0.0]
    gridimg[xind + 1] = [1.0, 0.38, 0.0]
    gridimg[:, yind] = [1.0, 0.38, 0.0]
    gridimg[:, yind + 1] = [1.0, 0.38, 0.0]
    # Build a monotone source-coordinate map along x: each grid cell is
    # stretched/shrunk by its random factor, the last cell is pinned to
    # the image edge.
    xx = np.zeros(width, np.float32)
    prev = 0
    xcur = []
    for idx, x in enumerate(range(0, width, x_step)):
        start = x
        end = x + x_step
        if end > width:
            end = width
            cur = width
        else:
            cur = prev + x_step * xsteps[idx]
        xx[start:end] = np.linspace(prev, cur, end - start)
        prev = cur
        xcur.append(cur)
    # ... and the same along y.
    yy = np.zeros(height, np.float32)
    prev = 0
    ycur = []
    for idx, y in enumerate(range(0, height, y_step)):
        start = y
        end = y + y_step
        if end > height:
            end = height
            cur = height
        else:
            cur = prev + y_step * ysteps[idx]
        yy[start:end] = np.linspace(prev, cur, end - start)
        prev = cur
        ycur.append(cur)
    # Round to integer pixel coordinates and clamp to the image bounds.
    # (Was hard-coded to 299/298, i.e. 299x299 inputs only.)
    xx = np.round(xx).astype(int)
    yy = np.round(yy).astype(int)
    xx[xx >= width] = width - 1
    yy[yy >= height] = height - 1
    map_x, map_y = np.meshgrid(xx, yy)
    index = np.dstack((map_y, map_x))
    # remap writes transposed; undo the transpose for display.
    outgridimg = remap(gridimg, index)
    if np.ndim(img) > 2:
        outgridimg = outgridimg.transpose(1, 0, 2)
    else:
        outgridimg = outgridimg.T
    outimg = remap(img, index)
    if np.ndim(img) > 2:
        outimg = outimg.transpose(1, 0, 2)
    else:
        outimg = outimg.T
    return outgridimg, gridimg, outimg
# -
orig = load_image('cat.jpg')
gdedgrid,grid,gdedorig = demoGD(orig)
# +
#visionary demo
plt.rcParams['figure.figsize'] = (8.0, 8.0)
plt.axis('off')
plt.imshow(orig)
plt.show()
plt.axis('off')
plt.imshow(gdedorig)
plt.show()
# +
plt.axis('off')
plt.imshow(grid)
plt.show()
plt.axis('off')
plt.imshow(gdedgrid)
plt.show()
| remove_code/sotas/Advanced-Gradient-Obfuscating-master/RDG_visual_demo.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# language: python
# name: python3
# ---
# +
import os
os.environ['CUDA_VISIBLE_DEVICES'] = '1'
# +
import sys
SOURCE_DIR = os.path.dirname(os.path.dirname(os.path.abspath(__name__)))
sys.path.insert(0, SOURCE_DIR)
# -
import malaya_speech
import malaya_speech.config
import malaya_speech.train.model.resnest_enhancement as unet
from malaya_speech.train.model import enhancement
import tensorflow as tf
tf.reset_default_graph()
x = tf.placeholder(tf.float32, (None, 1))
partitioned_x = malaya_speech.tf_featurization.pad_and_partition(x, 4096)
model = unet.Model(partitioned_x, channels_interval = 24)
model.logits
logits = tf.reshape(model.logits, (-1, 1))
logits = logits[:tf.shape(x)[0]]
logits
snr = enhancement.loss.snr(model.logits, partitioned_x)
sdr = enhancement.loss.sdr(model.logits, partitioned_x)
sess = tf.InteractiveSession()
sess.run(tf.global_variables_initializer())
import numpy as np
y, sr = malaya_speech.load('../speech/record/savewav_2020-09-06_21-48-43_174842.wav', sr = 22050)
y_, snr_, sdr_ = sess.run([logits, snr, sdr], feed_dict = {x: np.expand_dims(y, -1)})
y_.shape
# +
import IPython.display as ipd
ipd.Audio(y_.reshape((-1)), rate = 22050)
# -
snr_, sdr_
y
y_[:,0]
saver = tf.train.Saver()
saver.save(sess, 'test/model.ckpt')
# !ls -lh test
# !rm -rf test
| test/test-resnest-enhancement.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# language: python
# name: python3
# ---
# #%matplotlib inline
import matplotlib.pyplot as plt
from matplotlib import animation
from IPython.display import HTML
import pymunk
from pymunk.vec2d import Vec2d
import pymunk.matplotlib_util
def setup_space():
    """Create the simulation space: strong downward gravity, slight damping."""
    sim = pymunk.Space()
    sim.gravity = (0, -9820)
    sim.damping = 0.99
    return sim
def setup_balls(space):
    """Add five pinned balls (a Newton's cradle) to *space*.

    Each ball is a 25px circle hanging from a pin joint anchored 250px
    above it, the row centred on the middle of a 600x600 scene.
    """
    width = 600
    height = 600
    offset_y = height / 2
    mass = 10
    radius = 25
    for dx in range(-100, 150, 50):
        x = dx + width / 2
        moment = pymunk.moment_for_circle(mass, 0, radius, (0, 0))
        body = pymunk.Body(mass, moment)
        body.position = x, -125 + offset_y
        # Remember the rest position (used by some pymunk examples/utils).
        body.start_position = Vec2d(*body.position)
        ball = pymunk.Circle(body, radius)
        # Near-perfect elasticity so momentum transfers cleanly.
        ball.elasticity = 0.9999999
        space.add(body, ball)
        joint = pymunk.PinJoint(space.static_body, body, (x, 125 + offset_y), (0, 0))
        space.add(joint)
# We reuse the debug drawing code from pymunk.matplotlib_util. Currently this is not optimal since it doesn't reuse the artists added to the axes; instead we have to clear the axes each frame and redraw everything, which is quite expensive. Hopefully a future version of pymunk will support a better way to do this.
#
# For now if you want to draw a more complicated scene I recommend that you use pymunk.matplotlib_util as inspiration but write the drawing code yourself to better optimize the animation.
# +
fig = plt.figure()
ax = plt.axes(xlim=(0, 600), ylim=(0, 600))
ax.set_aspect("equal")
space = setup_space()
setup_balls(space)
o = pymunk.matplotlib_util.DrawOptions(ax)
space.shapes[1].body.apply_impulse_at_local_point((-12000,0))
def init():
    # FuncAnimation init callback: draw the initial state of the space once.
    # `space` and the draw options `o` come from the surrounding notebook cell.
    space.debug_draw(o)
    return []
def animate(dt):
    #we run the animation with half speed intentionally to make it a little nicer to look at
    # Sub-step the physics 10x per frame for stability; the trailing /2
    # halves simulation speed relative to the 50 fps frame rate.
    for x in range(10):
        space.step(1/50/10/2)
    # debug_draw does not reuse artists, so the axes must be cleared and
    # fully redrawn each frame (see the note above this cell).
    ax.clear()
    space.debug_draw(o)
    return []
frames = 105
anim = animation.FuncAnimation(fig, animate, init_func=init,
frames=frames, interval=20, blit=False)
HTML(anim.to_html5_video())
# -
| examples/newtons_cradle.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# language: python
# name: python3
# ---
# + [markdown] button=false deletable=true new_sheet=false run_control={"read_only": false} slideshow={"slide_type": "slide"}
# # Inferring parameters of SDEs using a Euler-Maruyama scheme
#
# _This notebook is derived from a presentation prepared for the Theoretical Neuroscience Group, Institute of Systems Neuroscience at Aix-Marseille University._
# + button=false deletable=true new_sheet=false run_control={"read_only": false} slideshow={"slide_type": "-"}
# %pylab inline
import pymc3 as pm
import theano.tensor as tt
import scipy
from pymc3.distributions.timeseries import EulerMaruyama
# + [markdown] button=false deletable=true nbpresent={"id": "2325c7f9-37bd-4a65-aade-86bee1bff5e3"} new_sheet=false run_control={"read_only": false} slideshow={"slide_type": "slide"}
# ## Toy model 1
#
# Here's a scalar linear SDE in symbolic form
#
# $ dX_t = \lambda X_t + \sigma^2 dW_t $
#
# discretized with the Euler-Maruyama scheme
# + button=false deletable=true new_sheet=false run_control={"read_only": false}
# parameters
λ = -0.78
σ2 = 5e-3
N = 200
dt = 1e-1
# time series
x = 0.1
x_t = []
# simulate
for i in range(N):
x += dt * λ * x + sqrt(dt) * σ2 * randn()
x_t.append(x)
x_t = array(x_t)
# z_t noisy observation
z_t = x_t + randn(x_t.size) * 5e-3
# + button=false deletable=true nbpresent={"id": "0994bfef-45dc-48da-b6bf-c7b38d62bf11"} new_sheet=false run_control={"read_only": false} slideshow={"slide_type": "subslide"}
figure(figsize=(10, 3))
subplot(121)
plot(x_t[:30], 'k', label='$x(t)$', alpha=0.5), plot(z_t[:30], 'r', label='$z(t)$', alpha=0.5)
title('Transient'), legend()
subplot(122)
plot(x_t[30:], 'k', label='$x(t)$', alpha=0.5), plot(z_t[30:], 'r', label='$z(t)$', alpha=0.5)
title('All time');
tight_layout()
# + [markdown] button=false deletable=true new_sheet=false run_control={"read_only": false}
# What is the inference we want to make? Since we've made a noisy observation of the generated time series, we need to estimate both $x(t)$ and $\lambda$.
# + [markdown] button=false deletable=true new_sheet=false run_control={"read_only": false} slideshow={"slide_type": "subslide"}
# First, we rewrite our SDE as a function returning a tuple of the drift and diffusion coefficients
# + button=false deletable=true new_sheet=false run_control={"read_only": false}
def lin_sde(x, lam):
    # Drift and diffusion coefficients of the linear SDE dX = lam*X dt + σ2 dW,
    # in the (drift, diffusion) tuple form EulerMaruyama expects.
    # σ2 is read from the enclosing notebook scope.
    return lam * x, σ2
# + [markdown] button=false deletable=true new_sheet=false run_control={"read_only": false} slideshow={"slide_type": "subslide"}
# Next, we describe the probability model as a set of three stochastic variables, `lam`, `xh`, and `zh`:
# + button=false deletable=true nbpresent={"id": "4f90230d-f303-4b3b-a69e-304a632c6407"} new_sheet=false run_control={"read_only": false} slideshow={"slide_type": "-"}
with pm.Model() as model:
# uniform prior, but we know it must be negative
lam = pm.Flat('lam')
# "hidden states" following a linear SDE distribution
# parametrized by time step (det. variable) and lam (random variable)
xh = EulerMaruyama('xh', dt, lin_sde, (lam, ), shape=N, testval=x_t)
# predicted observation
zh = pm.Normal('zh', mu=xh, sd=5e-3, observed=z_t)
# + [markdown] button=false deletable=true nbpresent={"id": "287d10b5-0193-4ffe-92a7-362993c4b72e"} new_sheet=false run_control={"read_only": false} slideshow={"slide_type": "subslide"}
# Once the model is constructed, we perform inference, i.e. sample from the posterior distribution, in the following steps:
# + button=false deletable=true new_sheet=false run_control={"read_only": false}
with model:
# optimize to find the mode of the posterior as starting point for prob. mass
start = pm.find_MAP(vars=[xh], fmin=scipy.optimize.fmin_l_bfgs_b)
# "warm up" to transition from mode to prob. mass
step = pm.NUTS(scaling=start)
trace = pm.sample(1000, step, progressbar=True)
# sample from the prob. mass
step = pm.NUTS(scaling=trace[-1], gamma=.25)
trace = pm.sample(2000, step, start=trace[-1], progressbar=True)
# + [markdown] button=false deletable=true new_sheet=false run_control={"read_only": false} slideshow={"slide_type": "subslide"}
# Next, we plot some basic statistics on the samples from the posterior,
# + button=false deletable=true nbpresent={"id": "925f1829-24cb-4c28-9b6b-7e9c9e86f2fd"} new_sheet=false run_control={"read_only": false}
figure(figsize=(10, 3))
subplot(121)
plot(percentile(trace[xh], [2.5, 97.5], axis=0).T, 'k', label='$\hat{x}_{95\%}(t)$')
plot(x_t, 'r', label='$x(t)$')
legend()
subplot(122)
hist(trace[lam], 30, label='$\hat{\lambda}$', alpha=0.5)
axvline(λ, color='r', label='$\lambda$', alpha=0.5)
legend();
# + [markdown] button=false deletable=true new_sheet=false run_control={"read_only": false} slideshow={"slide_type": "subslide"}
# A model can fit the data precisely and still be wrong; we need to use _posterior predictive checks_ to assess whether, under our fitted model, the data are likely.
#
# In other words, we
# - assume the model is correct
# - simulate new observations
# - check that the new observations fit with the original data
# + button=false deletable=true new_sheet=false run_control={"read_only": false}
# generate trace from posterior
ppc_trace = pm.sample_ppc(trace, model=model)
# plot with data
figure(figsize=(10, 3))
plot(percentile(ppc_trace['zh'], [2.5, 97.5], axis=0).T, 'k', label=r'$z_{95\% PP}(t)$')
plot(z_t, 'r', label='$z(t)$')
legend()
# + [markdown] button=false deletable=true new_sheet=false run_control={"read_only": false}
# Note that
#
# - inference also estimates the initial conditions
# - the observed data $z(t)$ lies fully within the 95% interval of the PPC.
# - there are many other ways of evaluating fit
# + [markdown] button=false deletable=true new_sheet=false run_control={"read_only": false} slideshow={"slide_type": "slide"}
# ### Toy model 2
#
# As the next model, let's use a 2D deterministic oscillator,
# \begin{align}
# \dot{x} &= \tau (x - x^3/3 + y) \\
# \dot{y} &= \frac{1}{\tau} (a - x)
# \end{align}
#
# with noisy observation $z(t) = m x + (1 - m) y + N(0, 0.05)$.
# + button=false deletable=true new_sheet=false run_control={"read_only": false}
N, τ, a, m, σ2 = 200, 3.0, 1.05, 0.2, 1e-1
xs, ys = [0.0], [1.0]
for i in range(N):
x, y = xs[-1], ys[-1]
dx = τ * (x - x**3.0/3.0 + y)
dy = (1.0 / τ) * (a - x)
xs.append(x + dt * dx + sqrt(dt) * σ2 * randn())
ys.append(y + dt * dy + sqrt(dt) * σ2 * randn())
xs, ys = array(xs), array(ys)
zs = m * xs + (1 - m) * ys + randn(xs.size) * 0.1
figure(figsize=(10, 2))
plot(xs, label='$x(t)$')
plot(ys, label='$y(t)$')
plot(zs, label='$z(t)$')
legend()
# + [markdown] button=false deletable=true new_sheet=false run_control={"read_only": false} slideshow={"slide_type": "subslide"}
# Now, estimate the hidden states $x(t)$ and $y(t)$, as well as parameters $\tau$, $a$ and $m$.
#
# As before, we rewrite our SDE as a function returning the drift & diffusion coefficients:
# + button=false deletable=true new_sheet=false run_control={"read_only": false}
def osc_sde(xy, τ, a):
    # Drift and diffusion for the 2-D stochastic oscillator (FitzHugh-Nagumo
    # style), written symbolically with theano. xy is an (N, 2) state tensor;
    # σ2 is read from the enclosing notebook scope.
    x, y = xy[:, 0], xy[:, 1]
    dx = τ * (x - x**3.0/3.0 + y)
    dy = (1.0 / τ) * (a - x)
    # Stack the per-component drifts back into an (N, 2) tensor.
    dxy = tt.stack([dx, dy], axis=0).T
    return dxy, σ2
# + [markdown] button=false deletable=true new_sheet=false run_control={"read_only": false}
# As before, the Euler-Maruyama discretization of the SDE is written as a prediction of the state at step $i+1$ based on the state at step $i$.
# + [markdown] button=false deletable=true new_sheet=false run_control={"read_only": false} slideshow={"slide_type": "subslide"}
# We can now write our statistical model as before, with uninformative priors on $\tau$, $a$ and $m$:
# + button=false deletable=true new_sheet=false run_control={"read_only": false}
xys = c_[xs, ys]
with pm.Model() as model:
τh = pm.Uniform('τh', lower=0.1, upper=5.0)
ah = pm.Uniform('ah', lower=0.5, upper=1.5)
mh = pm.Uniform('mh', lower=0.0, upper=1.0)
xyh = EulerMaruyama('xyh', dt, osc_sde, (τh, ah), shape=xys.shape, testval=xys)
zh = pm.Normal('zh', mu=mh * xyh[:, 0] + (1 - mh) * xyh[:, 1], sd=0.1, observed=zs)
# + [markdown] button=false deletable=true new_sheet=false run_control={"read_only": false} slideshow={"slide_type": "subslide"}
# As with the linear SDE, we 1) find a MAP estimate, 2) warm up and 3) sample from the probability mass:
# + button=false deletable=true new_sheet=false run_control={"read_only": false}
with model:
# optimize to find the mode of the posterior as starting point for prob. mass
start = pm.find_MAP(vars=[xyh], fmin=scipy.optimize.fmin_l_bfgs_b)
# "warm up" to transition from mode to prob. mass
step = pm.NUTS(scaling=start)
trace = pm.sample(100, step, progressbar=True)
# sample from the prob. mass
step = pm.NUTS(scaling=trace[-1], gamma=.25)
trace = pm.sample(2000, step, start=trace[-1], progressbar=True)
# + [markdown] button=false deletable=true new_sheet=false run_control={"read_only": false} slideshow={"slide_type": "subslide"}
# Again, the result is a set of samples from the posterior, including our parameters of interest but also the hidden states
# + button=false deletable=true new_sheet=false run_control={"read_only": false}
figure(figsize=(10, 6))
subplot(211)
plot(percentile(trace[xyh][..., 0], [2.5, 97.5], axis=0).T, 'k', label='$\hat{x}_{95\%}(t)$')
plot(xs, 'r', label='$x(t)$')
legend(loc=0)
subplot(234), hist(trace['τh']), axvline(τ), xlim([1.0, 4.0]), title('τ')
subplot(235), hist(trace['ah']), axvline(a), xlim([0, 2.0]), title('a')
subplot(236), hist(trace['mh']), axvline(m), xlim([0, 1]), title('m')
tight_layout()
# + [markdown] button=false deletable=true new_sheet=false run_control={"read_only": false} slideshow={"slide_type": "subslide"}
# Again, we can perform a posterior predictive check, that our data are likely given the fit model
# + button=false deletable=true new_sheet=false run_control={"read_only": false}
# generate trace from posterior
ppc_trace = pm.sample_ppc(trace, model=model)
# plot with data
figure(figsize=(10, 3))
plot(percentile(ppc_trace['zh'], [2.5, 97.5], axis=0).T, 'k', label=r'$z_{95\% PP}(t)$')
plot(zs, 'r', label='$z(t)$')
legend()
| docs/source/notebooks/Euler-Maruyama and SDEs.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: jupyterlab-debugger
# language: python
# name: jupyterlab-debugger
# ---
# +
import os
import pandas
from functools import reduce
import seaborn as sns
# https://pythonspeed.com/memory/
# https://pythonspeed.com/articles/chunking-pandas/
# https://stackoverflow.com/questions/38531195/writing-large-pandas-dataframes-to-csv-file-in-chunks
iris = sns.load_dataset('iris')
iris.to_csv('iris.csv')
try:
os.remove('out.csv')
except OSError:
pass
first_reduce = True
def reformat_data(chunk):
    """Map step: project a DataFrame chunk down to the two length columns."""
    return chunk[['sepal_length', 'petal_length']]
def add(previous_result, new_result):
    # Reduce step: stream each processed chunk to out.csv (the first chunk
    # carries the header; every later chunk is appended headerless) while
    # also accumulating an element-wise sum across chunks.
    global first_reduce
    if first_reduce:
        # First reduction only: the very first chunk has not been written yet.
        previous_result.to_csv('out.csv', header=first_reduce, mode='a')
        first_reduce = False
    new_result.to_csv('out.csv', header=first_reduce, mode='a')
    # fill_value=0 so chunks of different lengths/indices still sum cleanly.
    return previous_result.add(new_result, fill_value=0)
# MapReduce structure:
# Stream the CSV in 70-row chunks so the whole file never sits in memory,
# project each chunk to the columns of interest (map), then fold the chunks
# into a running element-wise sum while `add` also appends them to out.csv
# (reduce).
chunks = pandas.read_csv('iris.csv', chunksize=70)
processed_chunks = map(reformat_data, chunks)
result = reduce(add, processed_chunks)
print(result)
# -
| code/pandas/chunking_csv.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# language: python
# name: python3
# ---
# + [markdown] slideshow={"slide_type": "slide"}
# # Obsah dnesnej prednasky
#
# ## 1. Nemenne objekty a
#
# ## 2. Funkcie vyssej urovne
# + [markdown] slideshow={"slide_type": "slide"}
# # Sutaz
#
# ## [Project Euler](https://projecteuler.net/)
#
# * Vyriesit co najviac uloh funkcionalne
# * Najlepsi dostanu **plny pocet bodov** z Python casti zaverecnej skusky
# + [markdown] slideshow={"slide_type": "slide"}
# # Elm
#
# Mali ste mat jeden test na prednaske a jeden na cviceni
#
# Minimalne jeden sa nahradi malym projektom.
# + [markdown] slideshow={"slide_type": "slide"}
# # 1. Nemenné (Immutable) objekty
# ----------------------------------------------------
# + [markdown] slideshow={"slide_type": "slide"}
# # Nemenný objekt sa po vytvorení už nemôže meniť
# -
x = 'foo'
print(id(x))
print(id(x.upper()))
print(id(x + 'bar'))
# + [markdown] slideshow={"slide_type": "slide"}
# # Neznamena to, ze referencia na objekt sa nemoze menit
# v cisto funkcionalnom jazyku by sa nemalo diat ani to
# -
x = 'foo'
y = x
print(x, id(x))
x = 'bar'
print(x, id(x))# objekt foo sa nezmenil, to len x uz smeruje na iny objekt
print(y, id(y))
# + [markdown] slideshow={"slide_type": "slide"}
# # Nie je to to iste ako klucove slovo *final* v Jave
#
# Final premenna po vytvoreni nemoze smerovat na iny objekt
#
# Objekt samtny ale moze byt zmeneny
# -
# -- JAVA --
final List<Integer> list = new ArrayList<Integer>();
list = new ArrayList<Integer>(); // toto sa neskompiluje
# -- JAVA --
final List<Integer> list = new ArrayList<Integer>();
list.add(1); //toto prejde bez problemov
# -- JAVA --
final List<Integer> list = Collections.unmodifiableList(new ArrayList<Integer>(...)); //toto je immutable list
# + [markdown] slideshow={"slide_type": "slide"}
# # Immutable znamena, ze hocijaka operacia nad objektom vytvori novy objekt namiesto toho aby zmenila ten povodny
# -
# retazec je immutable
x = 'foo'
y = x
print(x) # foo
y += 'bar'
print(x) # foo
print(y)
print(id(x))
print(id(y))
# + slideshow={"slide_type": "slide"}
# zoznam je mutable
x = [1, 2, 3]
y = x
print(x)
y += [3, 2, 1]
print(x)
# -
print(id(x))
print(id(y))
# + [markdown] slideshow={"slide_type": "slide"}
# # Pozor, v Pythone sa parametre funkcie predavaju referenciou
# Pri mutable objektoch to moze sposobit necakane veci ak neviete, co sa vo funkcii deje
# +
def func(val):
val += 'bar'
x = 'foo' # retazec je immutable, objekt sa nezmeni
print(x)
func(x)
print(x)
# +
def func(val):
val += [3, 2, 1]
x = [1, 2, 3] # zoznam je mutable, zmeni sa premenna mimo bloku funkcie
print(x)
func(x)
print(x)
# + [markdown] slideshow={"slide_type": "slide"}
# # Ak predate immutable objekt funkcii, tak vam ho funkcia urcite nezmeni
# + [markdown] slideshow={"slide_type": "slide"}
# # String je immutable
# Podobne ako vsetky zakladne typy
# -
a = 'text'
print(a)
print('Adresa je: {}'.format(id(a)))
# Znamena to, ze neviem menit hodnotu
a[0] = 'T'
print(a)
print('Adresa je: {}'.format(id(a)))
# + [markdown] slideshow={"slide_type": "slide"}
# # List je mutable
# -
a = [1,2,3,4,5]
print(a)
print('Adresa je: {}'.format(id(a)))
# Znamena to, ze neviem menit hodnotu
a[0] = 'T'
print(a)
print('Adresa je: {}'.format(id(a)))
# + [markdown] slideshow={"slide_type": "slide"}
# # Tuple je immutable
# -
t1 = (1, 2, 3, 4, 5)
t1
t1[1]
t1[1]=3
# + [markdown] slideshow={"slide_type": "slide"}
# # Nemennost moze komplikovat pracu s objektami
# -
t1 = (1, 2, 3, 4, 5)
# Ked chceme update, treba vyrobit novy objekt
t2 = t1[:2] + (17, ) + t1[3:]
t2
# alebo
l1 = list(t1)
l1[2] = 17
t2 = tuple(l1)
t2
# vs.
a = [1,2,3,4,5]
a[2] = 17
a
# + [markdown] slideshow={"slide_type": "slide"}
# # Preco je nemennost dobra
# + [markdown] slideshow={"slide_type": "slide"}
# # Netreba pocitat s tym, ze sa vam moze objekt zmenit
#
# * Je to bezpecnejsie.
# * vznika menej chyb
# * Lahsie sa debuguje
# + [markdown] slideshow={"slide_type": "slide"}
# # Lahsie sa testuje
#
# * staci test na jednu funkciu a nie celu skupinu objektov
# * Ak testujete fuknciu, ktora meni objekty, tak moze vzniknut viacero testovych pachov
#
# 
# + [markdown] slideshow={"slide_type": "slide"}
# 
# + [markdown] slideshow={"slide_type": "slide"}
# # Toto je dovod, preco ma Test Driven Development (TDD) taky uspech
#
# * Testy sa píšu ešte pred kódom
# * Zamýšľate sa ako napísať kód tak, aby bol testovateľný
# * Bez toho aby ste o tom vedeli odstraňujete vedľajšie efekty
# * Snazite sa o to, aby na sebe funkcie co najmenej zaviseli
# * Pripravovanie objektov je pre vas zbytocnou komplikaciou
# * Zmena stavu objektu sposobuje, ze musite pisat velmi vela testov aby ste osetrili mnozstvo hranicnych stavov. A kedze sme tvroy lenive, tak nas to prirodzene vedie k tomu, aby sme pisali kod, ktory sa lahko testuje a nepouziva komplikovane obejkty a zavislosti od stavu.
#
# + [markdown] slideshow={"slide_type": "slide"}
# # Da sa lahsie zdielat medzi vlaknami a procesmi
# * netreba synchronizovat pristup k objektom a stavu
# + [markdown] slideshow={"slide_type": "slide"}
# # Da sa hashovat
#
# * ak pouzijete premenlivy obejkt ako kluc a zmenite jeho stav, tak aj hodnota hashovacej funkkcie spocitanej z tohto objektu sa zmeni. To znamena, ze by ste po zmene obejktu uz nenasli povodny zaznam v hashovacej tabulke.
# * ak pouzijete nemenny objekt ako kluc, tak sa urcite nezmeni a ani hodnota hashovacej funkcie sa urcite nezmeni
# * hashovacia funkcia nad nim vzdy vrati rovnaku hodnotu
# + [markdown] slideshow={"slide_type": "slide"}
# # Objekty mozu byt mensie. Zaberaju menej miesta v pamati a operacie nad nimi mozu byt rychlejsie.
#
# * nepotrebujete reziu na to, aby ste umoznili velke mnzostvo transformacii.
# + [markdown] slideshow={"slide_type": "slide"}
# # Ale!!!
# * Je treba vytvarat velmi vela objektov.
# * Garbage collector sa narobi.
# +
# inspirovane https://www.youtube.com/watch?v=5qQQ3yzbKp8
employees = ['Jozo', 'Eva', 'Fero', 'Miro', 'Anna', 'Kristina']
output = '<ul>\n'
for employee in employees:
output += '\t<li>{}</li>\n'.format(employee)
# print('Adresa outputu je: {}'.format(id(output)))
output += '</ul>'
print(output)
# -
# Postupne vytvarame retazec, ktoreho velkost stale rastie a zakazdym sa vytvori novy a novy objekt. Kazdy docasny obejkt sa musi potom odstranit pomocou garbage collectoru. Zostane zachovana referencia len na ten posledny objekt.
# + [markdown] slideshow={"slide_type": "slide"}
# # Ako zabezpecit nemennost objektov?
#
# * konvencia
# * vynutit si ju
# + [markdown] slideshow={"slide_type": "slide"}
# # S vela vecami si mozeme pomoct kniznicou Pyrsistent
# -
import pyrsistent as ps
# + [markdown] slideshow={"slide_type": "slide"}
# # List / Vektor
# -
v1 = ps.pvector([1, 2, 3, 4])
v1 == ps.v(1, 2, 3, 4)
v1[1]
v1[1:3]
# + slideshow={"slide_type": "slide"}
v1[1] = 3
# -
v3 = v1.set(1, 5)
print(v3, id(v3))
print(v1, id(v1))
# + [markdown] slideshow={"slide_type": "slide"}
# # Map / dict
# -
m1 = ps.pmap({'a':1, 'b':2})
m1 == ps.m(a=1, b=2)
m1['a']
m1.b # toto s dict nejde
# + slideshow={"slide_type": "slide"}
print(m1.set('a', 3))
print(m1)
# -
print(id(m1), id(m1.set('a', 3)))
# + [markdown] slideshow={"slide_type": "slide"}
# ### Transformacia mutable <=> immutable
# -
ps.freeze([1, {'a': 3}])
ps.thaw(ps.v(1, ps.m(a=3)))
# + [markdown] slideshow={"slide_type": "slide"}
# # ... a dalsie immutable struktury
# https://github.com/tobgu/pyrsistent
#
# * PVector, similar to a python list
# * PMap, similar to dict
# * PSet, similar to set
# * PRecord, a PMap on steroids with fixed fields, optional type and invariant checking and much more
# * PClass, a Python class fixed fields, optional type and invariant checking and much more
# * Checked collections, PVector, PMap and PSet with optional type and invariance checks and more
# * PBag, similar to collections.Counter
# * PList, a classic singly linked list
# * PDeque, similar to collections.deque
# * Immutable object type (immutable) built on the named tuple
# * freeze and thaw functions to convert between pythons standard collections and pyrsistent collections.
# * Flexible transformations of arbitrarily complex structures built from PMaps and PVectors.
# + [markdown] slideshow={"slide_type": "slide"}
# # Da sa nieco spravit s tou spotrebou pamati?
# + [markdown] slideshow={"slide_type": "slide"}
# # Po niektorych operaciach sa objekty dost podobaju
# + slideshow={"slide_type": "-"}
v1 = ps.v(0, 1, 2, 3, 4, 5, 6, 7, 8)
print(v1)
v2 = v1.set(5, 'beef')
print(v2)
# + [markdown] slideshow={"slide_type": "slide"}
# # Zdielanie casti datovej struktury
#
# pvector([0, 1, 2, 3, 4, 5, 6, 7, 8])
#
# pvector([0, 1, 2, 3, 4, 'beef', 6, 7, 8])
# 
# http://hypirion.com/musings/understanding-persistent-vector-pt-1
# + [markdown] slideshow={"slide_type": "slide"}
# # Nanestastie, Python toto nepodporuje
#
# Niektore funkcionalne jazyky ako napriklad Clojure ale ano.
# + [markdown] slideshow={"slide_type": "slide"}
# # 2. Higher order functions
# + [markdown] slideshow={"slide_type": "slide"}
# # Funkcional v LISPe (a inych funkcionalnych jazykoch) je funkcia, ktora ma ako argument funkciu alebo funkciu vracia
# * FUNCALL - vykonanie funkcie s argumentami
# * MAPCAR - zobrazenie
# * REMOVE-IF/REMOVE-IF-NOT - filter
# * REDUCE - redukcia
# * ...
# + [markdown] slideshow={"slide_type": "slide"}
# # V Pythone a inych jazykoch
# * **Funkcia vyssej urovne** (Higher order function) - je funkcia, ktora dostava funkciu ako parameter
# * **Generator** - je funkcia, ktora vracia funkciu
# + [markdown] slideshow={"slide_type": "slide"}
# # Funkcie vyssej urovne sa daju velmi dobre pouzit na spracovanie zoznamu
#
# Minimalne je to ich najcastejsie pouzitie. Casto sa ale pouzivaju aj na ine struktury: napr.: strom
#
# Najcastejsie operacie so zoznamom:
# * zobrazenie
# * filter
# * redukcia
# + [markdown] slideshow={"slide_type": "slide"}
# # Zobrazenie
#
# Aplikovanie funkcie/transformacie na vsetky prvky zoznamu a vytvorenie noveho zoznamu z transformovanych prvkov
# -
def process_item(x):
    """Per-element transformation: square the item."""
    return x * x


item_list = [1, 2, 3, 4, 5, 6]

# Imperative version of a map: build the result list element by element
collection = []
for element in item_list:
    collection.append(process_item(element))
collection
# + slideshow={"slide_type": "slide"}
# C-style version of a map: preallocate, then fill by index
collection = [0] * len(item_list)  # stands in for malloc
i = 0
while i < len(item_list):
    collection[i] = process_item(item_list[i])
    i += 1
collection
# -
# # Zobrazenie je tak casta operacia, ze ma zmysel spravit nejaku abstrakciu, aby som to nemusel implementovat stale odznova.
# + [markdown] slideshow={"slide_type": "slide"}
# # Zobrazenie pomocou funkcie vyssej urovne je prehladnejsie
# -
def process_item(x):
    """Per-element transformation: square the item."""
    return x * x


item_list = [1, 2, 3, 4, 5, 6]

# Functional version: map applies the transformation lazily; list() forces it
collection = map(process_item, item_list)
list(collection)
# + [markdown] slideshow={"slide_type": "slide"}
# Nezaujima ma ako je `map` implementovane.
#
# Funkcia `map` predstavuje abstrakciu. Ak niekto zmeni implementaciu map, tak ma to nijako neovplyvni. Ak map a ani `process_item` nema ziadne vedlajsie vplyvy (su to ciste funkcie), tak su na sebe uplne nezavisle a mozem ich menit bez toho aby som menil zvysok kodu.
#
# Viem co chcem dosiahnut a nezaujima ma ako sa to vykona. Deklarujem co chcem dostat a nemusim imperativne hovorit ako to chcem dostat.
#
#
# + [markdown] slideshow={"slide_type": "slide"}
# # Dalsi priklad pouzitia funkcie map
# +
def fahrenheit(T):
    """Convert a Celsius temperature to Fahrenheit."""
    return (9 / 5) * T + 32


def celsius(T):
    """Convert a Fahrenheit temperature to Celsius."""
    return (5 / 9) * (T - 32)


temperatures = (36.5, 37, 37.5, 38, 39)

F = [fahrenheit(t) for t in temperatures]
# Converting there and back should recover the original temperatures
C = [celsius(fahrenheit(t)) for t in temperatures]
print(F)
print(C)
# + [markdown] slideshow={"slide_type": "slide"}
# # Alebo este iny
# -
# map works on any iterable, e.g. the lines of an open file:
list(map(len, open('data/morho.txt')))    # length of every line
list(map(print, open('data/morho.txt')))  # print returns None, so this builds a list of Nones as a side effect
# + [markdown] slideshow={"slide_type": "slide"}
# # Funkcia *map* odstranuje potrebu udrzovat si stav
#
# * nepotrebujem ziadnu kolekciu, ktora je v nejakom case ciastocne naplnena
# * nepotrebujem ziadny index, ktory sa inkrementuje
# * nestaram sa o to, ako map funguje
# * iterativne, rekurziou, paralelne, distribuovane, pomocou indexu?
# * nestaram sa o vnutornu strukturu kolekcie
# * staci aby sa cez nu dalo iterovat (o tomto si povieme viac nabuduce)
# + [markdown] slideshow={"slide_type": "slide"}
# # Funkcia map by mohla byt implementovana napriklad takto
# -
def my_map(f, seq):
    """List-returning map, as in Python 2 (Python 3's map yields an iterator)."""
    return [f(x) for x in seq]
# + [markdown] slideshow={"slide_type": "slide"}
# # Filter
#
# Dalsia velmi casta operacia
#
# Zo zoznamu sa vytvara novy zoznam s tymi prvkami, ktore splnaju podmienku
# -
item_list = [1, 2, 3, 4, 5, 6]


def condition(x):
    """Predicate: keep only even numbers."""
    return x % 2 == 0


# Imperative filter: copy across the elements that pass the predicate
collection = []
for value in item_list:
    if condition(value):
        collection.append(value)
collection
# + [markdown] slideshow={"slide_type": "slide"}
# # Filter pomocou funkcie vyssej urovne
# -
item_list = [1, 2, 3, 4, 5, 6]


def condition(x):
    """Predicate: True for even numbers."""
    return x % 2 == 0


# filter defers the work; list() forces evaluation
collection = filter(condition, item_list)
list(collection)
# + [markdown] slideshow={"slide_type": "slide"}
# # Dalsi priklad pouzitia funkcie *Filter*
# +
fibonacci = [0, 1, 1, 2, 3, 5, 8, 13, 21, 34, 55]


def is_even(x):
    """True when x is divisible by two."""
    return not x % 2


list(filter(is_even, fibonacci))
# + [markdown] slideshow={"slide_type": "slide"}
# # Redukcia
#
# reduce(func, seq, init)
#
# func(a, b)
#
# Opakovane aplikuje funkciu na sekvenciu.
#
# *func* prijma dva argumenty: hodnotu akumulatora a jeden prvok mnoziny
#
# Atributom *func* moze byt prvok sekvencie alebo navratova hodnota inej *func*
#
# 
# + [markdown] slideshow={"slide_type": "slide"}
# # Typicky priklad je suma prvkov zoznamu
# -
item_list = [47, 11, 42, 13]


def add(a, b):
    """Two-argument sum, usable as a reduction step."""
    return a + b

# +
from functools import reduce

# Folds the list left-to-right: add(add(add(47, 11), 42), 13)
reduce(add, item_list)
# + [markdown] slideshow={"slide_type": "slide"}
# 
# -
# Imperative equivalent of reduce: accumulate by hand
total = 0
for value in item_list:
    total = add(total, value)
total
# + [markdown] slideshow={"slide_type": "slide"}
# # Dalsi priklad - nasobenie prvkov zoznamu
# +
from functools import reduce


def mul(a, b):
    """Two-argument product, usable as a reduction step."""
    return a * b


# Product of the whole list: 5! = 120
reduce(mul, [1, 2, 3, 4, 5])
# + [markdown] slideshow={"slide_type": "slide"}
# # Vela funkcii uz je predpripravenych
# -
from operator import add
from operator import mul
# + [markdown] slideshow={"slide_type": "slide"}
# # Da sa spracovavat aj nieco ine ako cisla
# +
from functools import reduce
from operator import add
# Strings support +, so reducing the file's lines with add concatenates them into one string
print(reduce(add, open('data/morho.txt')))
# + [markdown] slideshow={"slide_type": "slide"}
# # Da sa napriklad pracovat s mnozinami
# -
from operator import or_, and_

# Folding the sets with | yields their union ...
reduce(or_, ({1}, {1, 2}, {1, 3}))
# ... and folding with & yields their intersection
reduce(and_, ({1}, {1, 2}, {1, 3}))
# + [markdown] slideshow={"slide_type": "slide"}
# # Lambda funkcia
#
# anonymna funkcia
# -
# An anonymous (lambda) function bound to a name
my_sum = lambda a, b: a + b
my_sum(1, 2)
# + [markdown] slideshow={"slide_type": "-"}
# * obmedzenie na jediny riadok
# * nepotrebuje return
# + [markdown] slideshow={"slide_type": "slide"}
# # Lambda je celkom prakticka ako parameter funkcie vyssej urovne
# -
# Lambdas are handy as throwaway arguments to higher-order functions
item_list = [1, 2, 3, 4, 5]
print(list(map(lambda n: n ** 2, item_list)))
item_list = ["auto", "macka", "traktor"]
list(map(lambda word: word.upper(), item_list))
# + [markdown] slideshow={"slide_type": "slide"}
# # Spracovanie zoznamu (list comprehension)
# -
# Each pair prints the same result: map/filter form vs. the comprehension form
print(list(map(lambda n: n ** 2, [1, 2, 3, 4, 5])))
print([n ** 2 for n in [1, 2, 3, 4, 5]])
print(set(map(lambda n: n ** 2, [1, 2, 3, 4, 5])))
print({n ** 2 for n in [1, 2, 3, 4, 5]})
print(list(map(lambda n: (n, n ** 3), filter(lambda n: n % 2 == 0, [1, 2, 3, 4, 5]))))
print([(n, n ** 3) for n in [1, 2, 3, 4, 5] if n % 2 == 0])
# + [markdown] slideshow={"slide_type": "slide"}
# # Na co je to cele dobre - MapReduce
#
# * je programovací model (framework) vyvinutý a patentovaný spoločnosťou Google, Inc. v roku 2004
# * hlavným cieľom jeho vývoja bolo uľahčiť programátorom vytváranie distribuovaných aplikácií, ktoré spracovávajú veľké objemy dát
# * zložité výpočty nad veľkým objemom dát musia byť vykonávané paralelne a distribuovane, niekedy až na stovkách alebo tisíckach počítačov súčasne
# * pri takomto spracovaní sa treba okrem samotného výpočtu sústrediť napríklad aj na
# * rovnomerné rozdelenie záťaže všetkým dostupným počítačom
# * kontrolovanie výpadkov a porúch spolu s ich následným riešením
# * MapReduce prináša ďalšiu vrstvu abstrakcie medzi výpočet, ktorý sa má realizovať paralelne a jeho vykonanie na konkrétnom hardvéri
# * Keď napíšem program správne, tak sa nemusím starať na koľkých počítačoch bude bežať
# + [markdown] slideshow={"slide_type": "slide"}
# 
# + [markdown] slideshow={"slide_type": "slide"}
# # GOTO priklad z netu
# Celkom pekny priklad na jednoduchu MapReduce ulohu v Pythone.
#
# Klasicky Word count priklad
#
# http://www.michael-noll.com/tutorials/writing-an-hadoop-mapreduce-program-in-python/
# + [markdown] slideshow={"slide_type": "slide"}
# --- pseudokod ---
# function map(String name, String document):
#
# // name: document name
#
# // document: document contents
#
# for each word w in document:
#
# emit (w, 1)
#
#
#
# function reduce(String word, Iterator partialCounts):
#
# // word: a word
#
# // partialCounts: a list of aggregated partial counts
#
# sum = 0
#
# for each pc in partialCounts:
#
# sum += pc
#
# emit (word, sum)
# + [markdown] slideshow={"slide_type": "slide"}
# # GOTO Spark
# + [markdown] slideshow={"slide_type": "slide"}
# # Nieco na dalsie studium
#
# * Balicek Operator - https://docs.python.org/3/library/operator.html
# * Balicek Itertools - https://docs.python.org/3/library/itertools.html
# * Balicek Functools - https://docs.python.org/3/library/functools.html
| 6/prednaska/Immutable a Higher order functions.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# language: python
# name: python3
# ---
# + id="a5fA5qAm5Afg"
# NOTE(review): the `# !...` lines below are Jupyter shell magics that jupytext
# serializes as comments; some `if` bodies therefore contain no executable
# statements, so this .py form only round-trips through jupytext and is not
# runnable as a plain script.
if 'google.colab' in str(get_ipython()):
    # !pip install -q condacolab
    import condacolab
    condacolab.install()
# + id="x0DJqotopcyb"
"""
You can run either this notebook locally (if you have all the dependencies and a GPU) or on Google Colab.
Instructions for setting up Colab are as follows:
1. Open a new Python 3 notebook.
2. Import this notebook from GitHub (File -> Upload Notebook -> "GITHUB" tab -> copy/paste GitHub URL)
3. Connect to an instance with a GPU (Runtime -> Change runtime type -> select "GPU" for hardware accelerator)
4. Run this cell to set up dependencies.
"""
# If you're using Google Colab and not running locally, run this cell
# install NeMo
BRANCH = 'main'
if 'google.colab' in str(get_ipython()):
    # !python -m pip install git+https://github.com/NVIDIA/NeMo.git@$BRANCH#egg=nemo_toolkit[all]
# + id="nYsp3SH24Tj_"
if 'google.colab' in str(get_ipython()):
    # ! conda install -c conda-forge pynini=2.1.3
    # ! mkdir images
    # ! wget https://github.com/NVIDIA/NeMo/blob/$BRANCH/tutorials/text_processing/images/deployment.png -O images/deployment.png
    # ! wget https://github.com/NVIDIA/NeMo/blob/$BRANCH/tutorials/text_processing/images/pipeline.png -O images/pipeline.png
# + id="CH7yR7cSwPKr"
import os
import wget
import pynini
import nemo_text_processing
# + [markdown] id="F-IrnmXMTevr"
# # Task Description
#
# Text normalization (TN) is a part of the Text-To-Speech (TTS) pre-processing pipeline. It could also be used for pre-processing Automatic Speech Recognition (ASR) training transcripts.
#
# TN is the task of converting text in written form to its spoken form to improve TTS. For example, `10:00` should be changed to `ten o'clock` and `10kg` to `ten kilograms`.
# + [markdown] id="xXRARM8XtK_g"
# # NeMo Text Normalization
#
# NeMo TN is based on weighted finite-state
# transducer (WFST) grammars. The tool uses [`Pynini`](https://github.com/kylebgorman/pynini) to construct WFSTs, and the created grammars can be exported and integrated into [`Sparrowhawk`](https://github.com/google/sparrowhawk) (an open-source version of [The Kestrel TTS text normalization system](https://www.cambridge.org/core/journals/natural-language-engineering/article/abs/kestrel-tts-text-normalization-system/F0C18A3F596B75D83B75C479E23795DA)) for production. The NeMo TN tool can be seen as a Python extension of `Sparrowhawk`.
#
# Currently, NeMo TN provides support for English and the following semiotic classes from the [Google Text normalization dataset](https://www.kaggle.com/richardwilliamsproat/text-normalization-for-english-russian-and-polish):
# DATE, CARDINAL, MEASURE, DECIMAL, ORDINAL, MONEY, TIME, TELEPHONE, ELECTRONIC, PLAIN. We additionally added the class `WHITELIST` for all whitelisted tokens whose verbalizations are directly looked up from a user-defined list.
#
# The toolkit is modular, easily extendable, and can be adapted to other languages and tasks like [inverse text normalization](https://github.com/NVIDIA/NeMo/blob/stable/tutorials/text_processing/Inverse_Text_Normalization.ipynb). The Python environment enables an easy combination of text covering grammars with NNs.
#
# The rule-based system is divided into a classifier and a verbalizer following [Google's Kestrel](https://www.researchgate.net/profile/Richard_Sproat/publication/277932107_The_Kestrel_TTS_text_normalization_system/links/57308b1108aeaae23f5cc8c4/The-Kestrel-TTS-text-normalization-system.pdf) design: the classifier is responsible for detecting and classifying semiotic classes in the underlying text, the verbalizer then verbalizes the detected text segment.
# In the example `The alarm goes off at 10:30 a.m.`, the tagger for TIME detects `10:30 a.m.` as a valid time data with `hour=10`, `minutes=30`, `suffix=a.m.`, the verbalizer then turns this into `ten thirty a m`.
#
# The overall NeMo TN pipeline from development in `Pynini` to deployment in `Sparrowhawk` is shown below (example for ITN):
# 
#
#
# + [markdown] id="-IT1Xr9iW2Xr"
# # Quick Start
#
# ## Add TN to your Python TTS pre-processing workflow
#
# TN is a part of the `nemo_text_processing` package which is installed with `nemo_toolkit`. Installation instructions could be found [here](https://github.com/NVIDIA/NeMo/tree/main/README.rst).
# + id="Bfs7fa9lXDDh"
from nemo_text_processing.text_normalization.normalize import Normalizer

# creates a normalizer object for cased input (note: input_case='cased', not lower-cased)
normalizer = Normalizer(input_case='cased')

raw_text = "We paid $123 for this desk."
# verbose=False suppresses per-token debug output
normalizer.normalize(raw_text, verbose=False)
# + [markdown] id="w5sX0SXbXoZp"
# In the above cell, `$123` would be converted to `one hundred twenty three dollars`, and the rest of the words remain the same.
#
# ## Run Text Normalization on an input from a file
#
# Use `run_predict.py` to convert a spoken text from a file `INPUT_FILE` to a written format and save the output to `OUTPUT_FILE`. Under the hood, `run_predict.py` is calling `normalize()` (see the above section).
# + id="UD-OuFmEOX3T"
# If you're running the notebook locally, update the NEMO_TEXT_PROCESSING_PATH below
# In Colab, a few required scripts will be downloaded from NeMo github
NEMO_TOOLS_PATH = '<UPDATE_PATH_TO_NeMo_root>/nemo_text_processing/text_normalization'
DATA_DIR = 'data_dir'
os.makedirs(DATA_DIR, exist_ok=True)
if 'google.colab' in str(get_ipython()):
NEMO_TOOLS_PATH = '.'
required_files = ['run_predict.py',
'run_evaluate.py']
for file in required_files:
if not os.path.exists(file):
file_path = 'https://raw.githubusercontent.com/NVIDIA/NeMo/' + BRANCH + '/nemo_text_processing/text_normalization/' + file
print(file_path)
wget.download(file_path)
elif not os.path.exists(NEMO_TOOLS_PATH):
raise ValueError(f'update path to NeMo root directory')
# + id="d4T0gXHwY3JZ"
INPUT_FILE = f'{DATA_DIR}/test.txt'
OUTPUT_FILE = f'{DATA_DIR}/test_tn.txt'
# ! echo "The alarm went off at 10:00." > $DATA_DIR/test.txt
# ! cat $INPUT_FILE
# ! python $NEMO_TOOLS_PATH/run_predict.py --input=$INPUT_FILE --output=$OUTPUT_FILE
# + id="F5wSJTI8ZFRg"
# check that the raw text was converted to the spoken form
# ! cat $OUTPUT_FILE
# + [markdown] id="RMT5lkPYzZHK"
# ## Run evaluation
#
# [Google Text normalization dataset](https://www.kaggle.com/richardwilliamsproat/text-normalization-for-english-russian-and-polish) consists of 1.1 billion words of English text from Wikipedia, divided across 100 files. The normalized text is obtained with the Kestrel TTS text normalization system.
#
# To run evaluation, the input file should follow the Google Text normalization dataset format. That is, every line of the file needs to have the format `<semiotic class>\t<unnormalized text>\t<self>` if it's trivial class or `<semiotic class>\t<unnormalized text>\t<normalized text>` in case of a semiotic class.
#
#
# Example evaluation run:
#
#
# `python run_evaluate.py \
# --input=./en_with_types/output-00001-of-00100 \
# [--input_case INPUT_CASE] \
# [--cat CATEGORY]`
#
# Use `--cat` to specify a `CATEGORY` to run evaluation on (all other categories are going to be excluded from evaluation). The option `--input_case` tells the algorithm that the input is either lower cased or cased.
#
#
#
#
# + id="u4zjeVVv-UXR"
# Sample evaluation data in the Google Text-normalization dataset format:
# <semiotic class>\t<unnormalized text>\t(<self> for trivial tokens or the normalized text),
# terminated by an <eos> row.
eval_text = """PLAIN\ton\t<self>
DATE\t22 july 2012\tthe twenty second of july twenty twelve
PLAIN\tthey\t<self>
PLAIN\tworked\t<self>
PLAIN\tuntil\t<self>
TIME\t12:00\ttwelve o'clock
<eos>\t<eos>
"""

INPUT_FILE_EVAL = f"{DATA_DIR}/test_eval.txt"
# write the sample to disk so run_evaluate.py can consume it
with open(INPUT_FILE_EVAL, 'w') as fp:
    fp.write(eval_text)
# ! cat $INPUT_FILE_EVAL
# + id="G7_5oXpObizP"
# ! python $NEMO_TOOLS_PATH/run_evaluate.py --input=$INPUT_FILE_EVAL
# + [markdown] id="bIvKBwRcH_9W"
# `run_evaluate.py` call will output both **sentence level** and **token level** accuracies.
# For our example, the expected output is the following:
#
# ```
# Loading training data: data_dir/test_eval.txt
# Sentence level evaluation...
# - Data: 1 sentences
# 100% 1/1 [00:00<00:00, 14.24it/s]
# - Normalized. Evaluating...
# - Accuracy: 1.0
# Token level evaluation...
# - Token type: PLAIN
# - Data: 4 tokens
# 100% 4/4 [00:00<00:00, 239.56it/s]
# - Denormalized. Evaluating...
# - Accuracy: 1.0
# - Token type: DATE
# - Data: 1 tokens
# 100% 1/1 [00:00<00:00, 33.69it/s]
# - Denormalized. Evaluating...
# - Accuracy: 1.0
# - Token type: TIME
# - Data: 1 tokens
# 100% 1/1 [00:00<00:00, 94.84it/s]
# - Denormalized. Evaluating...
# - Accuracy: 1.0
# - Accuracy: 1.0
# - Total: 6
#
# - Total: 6
#
# Class | Num Tokens | Normalization
# sent level | 1 | 1.0
# PLAIN | 4 | 1.0
# DATE | 1 | 1.0
# CARDINAL | 0 | 0
# LETTERS | 0 | 0
# VERBATIM | 0 | 0
# MEASURE | 0 | 0
# DECIMAL | 0 | 0
# ORDINAL | 0 | 0
# DIGIT | 0 | 0
# MONEY | 0 | 0
# TELEPHONE | 0 | 0
# ELECTRONIC | 0 | 0
# FRACTION | 0 | 0
# TIME | 1 | 1.0
# ADDRESS | 0 | 0
#
# ```
#
# + [markdown] id="L85ZaUJ_4TkF"
# # C++ deployment
#
# The instructions on how to export `Pynini` grammars and to run them with `Sparrowhawk`, could be found at [NeMo/tools/text_processing_deployment](https://github.com/NVIDIA/NeMo/tree/main/tools/text_processing_deployment).
# + [markdown] id="ENMDNl9C4TkF"
# # WFST and Common Pynini Operations
#
# See [NeMo Text Inverse Normalization Tutorial](https://github.com/NVIDIA/NeMo/blob/stable/tutorials/text_processing/Inverse_Text_Normalization.ipynb) for details.
# + [markdown] id="lcvT3P2lQ_GS"
# # References and Further Reading:
#
#
# - [<NAME>, Bakhturina, Evelina, Gorman, Kyle and <NAME>. "NeMo Inverse Text Normalization: From Development To Production." (2021)](https://arxiv.org/abs/2104.05055)
# - [Ebden, Peter, and <NAME>. "The Kestrel TTS text normalization system." Natural Language Engineering 21.3 (2015): 333.](https://www.cambridge.org/core/journals/natural-language-engineering/article/abs/kestrel-tts-text-normalization-system/F0C18A3F596B75D83B75C479E23795DA)
# - [<NAME>. "Pynini: A Python library for weighted finite-state grammar compilation." Proceedings of the SIGFSM Workshop on Statistical NLP and Weighted Automata. 2016.](https://www.aclweb.org/anthology/W16-2409.pdf)
# - [<NAME>, <NAME>, and <NAME>. "Weighted finite-state transducers in speech recognition." Computer Speech & Language 16.1 (2002): 69-88.](https://cs.nyu.edu/~mohri/postscript/csl01.pdf)
| tutorials/text_processing/Text_Normalization.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# language: python
# name: python3
# ---
import numpy as np
np.random.seed(42)  # fix NumPy's RNG for reproducibility
import tensorflow as tf
tf.set_random_seed(42)  # TF 1.x API (tf.random.set_seed in TF 2.x)
import matplotlib.pyplot as plt
import pandas as pd
# #### Create a small dataset
# Roughly linear toy data for fitting y = m*x + b
xs = [0., 1., 2., 3., 4., 5., 6., 7.]
ys = [-.82, -.94, -.12, .26, .39, .64, 1.02, 1]
plt.scatter(xs,ys)
# #### Define Variable
m = tf.Variable(-0.5)  # slope, initial guess
b= tf.Variable(1.0)    # intercept, initial guess
# #### Define cost function
# +
#total_error = 0.0
#for x,y in zip(xs,ys):
#    y_model = m*x + b
#    total_error += (y-y_model)**2

# Vectorized sum-of-squares loss, equivalent to the commented loop above
# (TF converts the Python lists xs/ys to tensors and broadcasts)
ys_model = m*xs + b
total_error = tf.reduce_sum((ys-ys_model)**2)
# -
# #### Define optimizer
# TF 1.x graph op: each session.run() applies one gradient-descent update
optimizer_op = tf.train.GradientDescentOptimizer(learning_rate=0.001).minimize(total_error)
# #### Define initializer
initialize_op = tf.global_variables_initializer()  # TF 1.x variable initialization op
# #### Execute session
# Run gradient descent inside a TF 1.x session
with tf.Session() as session:
    session.run(initialize_op)
    n_epochs = 1000
    for i in range(n_epochs):
        session.run(optimizer_op)  # one gradient-descent step per iteration
    slope, intercept = session.run([m,b])  # pull fitted parameter values out of the graph
slope
intercept
# Predictions of the fitted line at the training inputs
y_hat = intercept + slope*np.array(xs)
pd.DataFrame(list(zip(ys,y_hat)), columns=['y', 'y_hat'])
# +
fig, ax = plt.subplots()
ax.scatter(xs, ys)
x_min, x_max = ax.get_xlim()
# NOTE(review): these endpoints assume the line's value at x_min equals the
# intercept (i.e. x_min == 0); strictly it should be intercept + slope*x_min
# and intercept + slope*x_max — confirm intended.
y_min, y_max = intercept, intercept + slope*(x_max-x_min)
ax.plot([x_min,x_max], [y_min,y_max])
_ = ax.set_xlim([x_min,x_max])
# -
| notebooks/NN - TendorFlow - Basic - 3.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# language: python
# name: python3
# ---
# +
import scipy.constants as const

# Pressure source - controls contributions to pressure
# Options: 'all', 'no_degeneracy', 'no_ideal_gas', 'no_radiation'
P_SOURCE = 'all'
# NOTE(review): assert is stripped under `python -O`; raising ValueError would be more robust
assert P_SOURCE in ('all', 'no_degeneracy', 'no_ideal_gas', 'no_radiation'), 'Invalid pressure source chosen.'

# Parameter indices - position of each quantity in the ODE state vector u
IDX_DICT = {'rho': 0,'T': 1,'M': 2,'L': 3,'tau': 4}

# Physical constants (SI)
pi = const.pi
G = const.G        # gravitational constant
c = const.c        # speed of light
k = const.k        # Boltzmann constant
sigma = 5.670373e-8  # Stefan-Boltzmann constant [W m^-2 K^-4]
hbar = const.hbar
m_p = const.m_p    # proton mass
m_e = const.m_e    # electron mass
a = 4.0*sigma/c    # radiation constant, used in P_rad = a*T^4/3

# Solar reference values [kg, m, W]
M_sun = 1.98840987e+30
R_sun = 6.957e8
L_sun = 3.828e26

# Composition mass fractions (hydrogen / helium / metals)
X = 0.73
Y = 0.25
Z = 0.02
gamma = 5.0/3.0  # adiabatic index used in the convective gradient
Lambda = 0.0 #! enters only via (1 + Lambda/r) factors in dTdr_conv/drhodr; 0 recovers the standard equations
X_CNO = 0.03*X   # mass fraction available to the CNO cycle
mu = (2.0*X + 0.75*Y + 0.5*Z)**(-1)  # mean molecular weight
kappa_es = 0.02*(1 + X)  # electron-scattering opacity [m^2/kg]

# Numerical integration constants (step-size bounds [m] and error tolerances)
H_MIN = 1e3
H_MAX_CORE_LOW_M = 5e6
H_MAX_CORE_HIGH_M = 5e5
H_MAX_OUTER = 5e4
H_MAX_SURF = 5e3
TOL_CORE = 1e-3
TOL_OUTER = 1e-5
TOL_SURF = 1e-7
# Solver bound constants (integration start radius and hard stop limits)
R_0 = 1.0
M_MAX = 1e3 * M_sun
R_MAX = 1e10
DELTA_TAU_MIN = 1e-3
# Bisection constants (central-density bracket and convergence thresholds)
RHO_C_MIN = 2.5e2
RHO_C_MAX = 5.5e5
L_ERR_RANGE_MIN = 1e-3
L_ERR_MID_MIN = 1e-2
BISECTION_MAX = 50
# Plotting constants
DPI = 200
# star equations
# +
from numpy import zeros, log10
# --- Pressure contributions; each term can be switched off via P_SOURCE ---

def P_degen(rho):
    """Non-relativistic electron degeneracy pressure."""
    if P_SOURCE == 'no_degeneracy':
        return 0.0
    return ((3 * pi ** 2) ** (2 / 3) * hbar ** 2 * rho ** (5 / 3)) / (5 * m_e * m_p ** (5 / 3))


def P_ideal(rho, T):
    """Ideal-gas pressure."""
    if P_SOURCE == 'no_ideal_gas':
        return 0.0
    return rho * k * T / (mu * m_p)


def P_rad(T):
    """Radiation pressure."""
    if P_SOURCE == 'no_radiation':
        return 0.0
    return a * T ** 4 / 3


def P(rho, T):
    """Total pressure: degeneracy + ideal gas + radiation."""
    return P_degen(rho) + P_ideal(rho, T) + P_rad(T)
# --- Partial derivatives of the pressure terms (used by drho/dr) ---

def dPdrho_degen(rho):
    """d(P_degen)/d(rho)."""
    if P_SOURCE == 'no_degeneracy':
        return 0.0
    return ((3 * pi ** 2) ** (2 / 3) * hbar ** 2 * rho ** (2 / 3)) / (3 * m_e * m_p ** (5 / 3))


def dPdrho_ideal(T):
    """d(P_ideal)/d(rho)."""
    if P_SOURCE == 'no_ideal_gas':
        return 0.0
    return k * T / (mu * m_p)


def dPdrho(rho, T):
    """d(P_total)/d(rho); radiation pressure does not depend on rho."""
    return dPdrho_degen(rho) + dPdrho_ideal(T)


def dPdT_ideal(rho):
    """d(P_ideal)/d(T)."""
    if P_SOURCE == 'no_ideal_gas':
        return 0.0
    return rho * k / (mu * m_p)


def dPdT_rad(T):
    """d(P_rad)/d(T)."""
    if P_SOURCE == 'no_radiation':
        return 0.0
    return 4 * a * T ** 3 / 3


def dPdT(rho, T):
    """d(P_total)/d(T); degeneracy pressure does not depend on T."""
    return dPdT_ideal(rho) + dPdT_rad(T)
# ----------------------------------------------------------------------------------------------------------------------
### OPACITY EQUATIONS ###
# --- Opacity fits ---

def kappa_ff(rho, T):
    """Free-free opacity fit."""
    return 1.0e24 * (1 + X) * (Z + 0.0001) * (rho / 1e3) ** 0.7 * T ** (-3.5)


def kappa_H(rho, T):
    """H-minus opacity fit."""
    return 2.5e-32 * (Z / 0.02) * (rho / 1e3) ** 0.5 * T ** 9


def kappa(rho, T):
    """Total opacity: harmonic mean of H-minus with max(electron-scattering, free-free)."""
    return (1.0 / kappa_H(rho, T) + 1.0 / max(kappa_es, kappa_ff(rho, T))) ** (-1.0)
# ----------------------------------------------------------------------------------------------------------------------
### ENERGY GENERATION EQUATIONS ###
# --- Nuclear energy generation rates ---

def epsilon_PP(rho, T):
    """PP-chain energy generation rate."""
    return 1.07e-7 * (rho / 1e5) * X ** 2 * (T / 1e6) ** 4


def epsilon_CNO(rho, T):
    """CNO-cycle energy generation rate."""
    return 8.24e-26 * (rho / 1e5) * X_CNO * X * (T / 1e6) ** 19.9


def epsilon(rho, T):
    """Total nuclear energy generation rate (PP + CNO)."""
    return epsilon_PP(rho, T) + epsilon_CNO(rho, T)
# ----------------------------------------------------------------------------------------------------------------------
### STELLAR STRUCTURE ODES ###
# --- Stellar-structure ODE right-hand sides ---

def dtaudr(rho, T):
    """d(tau)/dr: optical depth gradient."""
    return kappa(rho, T) * rho


def dLdr_PP(r, rho, T):
    """dL/dr contributed by the PP chain only."""
    return 4 * pi * r ** 2 * rho * epsilon_PP(rho, T)


def dLdr_CNO(r, rho, T):
    """dL/dr contributed by the CNO cycle only."""
    return 4 * pi * r ** 2 * rho * epsilon_CNO(rho, T)


def dLdr(r, rho, T):
    """dL/dr: total luminosity gradient."""
    return 4 * pi * r ** 2 * rho * epsilon(rho, T)


def dMdr(r, rho):
    """dM/dr: mass continuity."""
    return 4 * pi * r ** 2 * rho


def dTdr_rad(r, rho, T, M, L):
    """Magnitude of the radiative temperature gradient (sign applied in dTdr)."""
    return (3 * kappa(rho, T) * rho * L) / (64 * pi * sigma * T ** 3 * r ** 2)


def dTdr_conv(r, rho, T, M, L):
    """Magnitude of the convective (adiabatic) temperature gradient."""
    return (1 - 1 / gamma) * (1 + Lambda / r) * ((T * G * M * rho) / (P(rho, T) * r ** 2))


def dTdr(r, rho, T, M, L):
    """dT/dr: the shallower (in magnitude) of the two gradients, taken negative."""
    return -min(abs(dTdr_rad(r, rho, T, M, L)), abs(dTdr_conv(r, rho, T, M, L)))


def drhodr(r, rho, T, M, L):
    """d(rho)/dr from hydrostatic equilibrium combined with the equation of state."""
    gravity = (G * M * rho) / (r ** 2.0) * (1.0 + Lambda / r)
    thermal = dPdT(rho, T) * dTdr(r, rho, T, M, L)
    return -(gravity + thermal) / dPdrho(rho, T)
# ----------------------------------------------------------------------------------------------------------------------
### SYSTEM OF STELLAR STRUCTURE EQUATIONS ###
def stellar_structure_equations(r, u):
    """Right-hand side of the coupled stellar-structure ODE system.

    u holds (rho, T, M, L, tau) in the order given by IDX_DICT; returns du/dr
    in the same order.
    """
    rho = u[IDX_DICT['rho']]
    T = u[IDX_DICT['T']]
    M = u[IDX_DICT['M']]
    L = u[IDX_DICT['L']]
    dudr = zeros(len(IDX_DICT))
    dudr[IDX_DICT['rho']] = drhodr(r, rho, T, M, L)
    dudr[IDX_DICT['T']] = dTdr(r, rho, T, M, L)
    dudr[IDX_DICT['M']] = dMdr(r, rho)
    dudr[IDX_DICT['L']] = dLdr(r, rho, T)
    dudr[IDX_DICT['tau']] = dtaudr(rho, T)
    return dudr
# ----------------------------------------------------------------------------------------------------------------------
### INITIAL CONDITIONS ###
# --- Central boundary conditions at the small starting radius r0 ---

def M_initial(r0, rho_c):
    """Mass enclosed by a tiny central sphere of radius r0 and density rho_c."""
    return (4.0/3.0) * pi * r0 ** 3 * rho_c


def L_initial(r0, rho_c, T_c):
    """Luminosity generated inside the central sphere."""
    return (4.0/3.0) * pi * r0 ** 3 * rho_c * epsilon(rho_c, T_c)


def tau_initial(r0, rho_c, T_c):
    """Optical depth accumulated across the central sphere."""
    return kappa(rho_c, T_c) * rho_c * r0
# ----------------------------------------------------------------------------------------------------------------------
# Finite-difference dlogP/dlogT along a profile (P and T are same-length sequences)
def dlogPdlogT(P, T):
    """Return the discrete logarithmic slope between consecutive profile points."""
    logP = log10(P)
    logT = log10(T)
    return [(logP[i + 1] - logP[i]) / (logT[i + 1] - logT[i]) for i in range(len(P) - 1)]
# Finding the index range over which the profile is convective
def get_conv_idx(dlogPdlogT_vals):
    """Return (start, end) indices of the region where dlogP/dlogT ~ 2.5.

    end is -1 when the convective band runs to the end of the profile (or was
    never clearly delimited, matching the original sentinel behaviour).
    """
    conv_start = 0
    for i, slope in enumerate(dlogPdlogT_vals):
        if abs(slope - 2.5) < 0.1:
            conv_start = i
            break
    conv_end = 0
    for i in range(conv_start, len(dlogPdlogT_vals)):
        if abs(dlogPdlogT_vals[i] - 2.5) > 0.1:
            conv_end = i - 1
            break
    return conv_start, (-1 if conv_end == 0 else conv_end)
# ----------------------------------------------------------------------------------------------------------------------
# -
# adapted version of RK45
# +
import numpy as np
def myRK4(system, r0, u0, h0, T_c):
    """One adaptive Runge-Kutta step of `system` from radius r0 with step h0.

    Despite the name this is an embedded 4(5) scheme: the coefficients below are
    a Dormand-Prince-style pair, the 5th-order solution u1_5 is always accepted
    and the 4th-order one only feeds the error estimate. T_c (central
    temperature) selects the step-size/tolerance regime. Returns (h1, u1_5):
    the step size to try NEXT, and the advanced state.

    NOTE(review): there is no step-rejection loop - even a step whose error
    exceeds tol is accepted; err only rescales the next h.
    """
    # Calculating slope coefficients
    k0 = h0 * system(r0, u0)
    k1 = h0 * system(r0 + 1/5*h0, u0 + 1/5*k0)
    k2 = h0 * system(r0 + 3/10*h0, u0 + 3/40*k0 + 9/40*k1)
    k3 = h0 * system(r0 + 4/5*h0, u0 + 44/45*k0 - 56/15*k1 + 32/9*k2)
    k4 = h0 * system(r0 + 8/9*h0, u0 + 19372/6561*k0 - 25360/2187*k1 + 64448/6561*k2 - 212/729*k3)
    k5 = h0 * system(r0 + h0, u0 + 9017/3168*k0 - 355/33*k1 + 46732/5247*k2 + 49/176*k3 - 5103/18656*k4)
    u_final = u0 + 35/384*k0 + 500/1113*k2 + 125/192*k3 - 2187/6784*k4 + 11/84*k5
    k6 = h0 * system(r0 + h0, u_final)
    # Determining fourth and fifth-order solutions
    u1_4 = u0 + 5179/57600*k0 + 7571/16695*k2 + 393/640*k3 - 92097/339200*k4 + 187/2100*k5 + 1/40*k6
    u1_5 = u_final
    # Relative error on solutions (component-wise |u5 - u4|)
    err = np.fabs(u1_5 - u1_4)
    # Stepsize and tolerance control: tighter tolerance and smaller max step
    # near the surface, detected by how far T has dropped below T_c
    h_min = H_MIN
    tol = TOL_CORE
    if T_c < 1.25e7:
        h_max = H_MAX_CORE_LOW_M
        if u1_5[IDX_DICT['T']]/T_c < 0.01:
            h_max = H_MAX_SURF
            tol = TOL_SURF
        elif u1_5[IDX_DICT['T']]/T_c < 0.05:
            h_max = H_MAX_OUTER
            tol = TOL_OUTER
    elif T_c >= 1.25e7:
        h_max = H_MAX_CORE_HIGH_M
        if u1_5[IDX_DICT['T']]/T_c < 0.005:
            h_max = H_MAX_SURF
            tol = TOL_SURF
        elif u1_5[IDX_DICT['T']]/T_c < 0.025:
            h_max = H_MAX_OUTER
            tol = TOL_OUTER
    # Stepsize update: no_zero_div adds 1 wherever err == 0 to avoid dividing by zero
    no_zero_div = err==0
    s = ((np.fabs(u1_5)*tol)/(2*(err + no_zero_div)))**(1/5)
    h1 = h0 * np.min(s)
    h1 = min(max(h1, h_min), h_max)  # clamp to [h_min, h_max]
    return h1, u1_5
# -
# solving star equations
# +
import numpy as np
# Gets solution values at a given solution iteration
def get_u0(star_params, idx):
    """Pack the ODE state vector (rho, T, M, L, tau) for solution step *idx*."""
    values = [0.0] * len(IDX_DICT)
    for name, pos in IDX_DICT.items():
        values[pos] = star_params[name][idx]
    return np.array(values, float)
# Gets all stellar parameter values at a given solution iteration
def get_step_params(star_params, idx):
    """Snapshot of every stored stellar parameter at solution step *idx*."""
    snapshot = {}
    for param, history in star_params.items():
        snapshot[param] = history[idx]
    return snapshot
# Updates the values of all stellar parameters
def update_params(star_params, u1):
    """Append the newly integrated state u1 plus every derived quantity.

    Assumes the caller has already appended the new radius to star_params['r'].
    Fix: dropped the unused local bindings of M and L - no derived quantity
    below depends on them.
    """
    # Raw ODE state (rho, T, M, L, tau), in IDX_DICT order
    for param in IDX_DICT:
        star_params[param].append(u1[IDX_DICT[param]])
    r = star_params['r'][-1]
    rho = u1[IDX_DICT['rho']]
    T = u1[IDX_DICT['T']]
    # Derived quantities evaluated at the new (r, rho, T)
    star_params['P_degen'].append(P_degen(rho))
    star_params['P_ideal'].append(P_ideal(rho, T))
    star_params['P_rad'].append(P_rad(T))
    star_params['P'].append(P(rho, T))
    star_params['epsilon_PP'].append(epsilon_PP(rho, T))
    star_params['epsilon_CNO'].append(epsilon_CNO(rho, T))
    star_params['epsilon'].append(epsilon(rho, T))
    star_params['dL_PP/dr'].append(dLdr_PP(r, rho, T))
    star_params['dL_CNO/dr'].append(dLdr_CNO(r, rho, T))
    star_params['dL/dr'].append(dLdr(r, rho, T))
    star_params['kappa_ff'].append(kappa_ff(rho, T))
    star_params['kappa_H'].append(kappa_H(rho, T))
    star_params['kappa'].append(kappa(rho, T))
# ----------------------------------------------------------------------------------------------------------------------
### FINDING STAR SURFACE ###
# Determines if tau = tau(inf)
def at_tau_inf(step_params):
    """True once the optical depth remaining above this radius is negligible.

    Uses delta_tau = kappa * rho^2 / |drho/dr| as the estimate of the optical
    depth left outside the current radius; a NaN density gradient is also
    treated as having reached the surface.
    Fix: return the boolean expression directly instead of the
    `if cond: return True / else: return False` anti-pattern.
    """
    r = step_params['r']
    rho = step_params['rho']
    T = step_params['T']
    M = step_params['M']
    L = step_params['L']
    drho = drhodr(r, rho, T, M, L)
    delta_tau = (kappa(rho, T)*rho**2)/np.fabs(drho)
    # bool() keeps the return type a plain Python bool (np.isnan yields np.bool_)
    return bool(np.isnan(drho) or ((drho != 0) and (delta_tau < DELTA_TAU_MIN)))
# Gets index of star surface
def get_surf_idx(tau_vals):
    """Index where tau(inf) - tau(r) is closest to 2/3 (the photosphere condition)."""
    last = len(tau_vals) - 1
    if np.isnan(tau_vals[last]):
        last = len(tau_vals) - 2  # discard a trailing NaN sample
    tau_inf = tau_vals[last]
    residual = tau_inf - np.array(tau_vals[0:last]) - (2.0/3.0)
    surf_idx = np.argmin(np.abs(residual))
    # index 0 is treated as "no interior surface found": fall back to the last sample
    # (matches the original sentinel behaviour)
    return last if surf_idx == 0 else surf_idx
# Gets parameters at star surface
def get_surf_params(star_params):
    """Locate the surface and return (parameter snapshot at the surface, its index)."""
    idx = get_surf_idx(star_params['tau'])
    return get_step_params(star_params, idx), idx
# ----------------------------------------------------------------------------------------------------------------------
### SOLVING STELLAR STRUCTURE EQUATIONS ###
def solve_stellar_structure(rho_c, T_c):
    """Integrate the structure ODEs outward from the centre.

    rho_c, T_c: trial central density and temperature. Returns a dict mapping
    each tracked quantity to a numpy array over the radius grid, truncated at
    the surface found from the optical-depth condition.
    """
    # Seed every tracked quantity with its central value at r = R_0
    star_params = {
        'r': [R_0],
        'rho': [rho_c],
        'T': [T_c],
        'M': [M_initial(R_0, rho_c)],
        'L': [L_initial(R_0, rho_c, T_c)],
        'tau': [tau_initial(R_0, rho_c, T_c)],
        'P_degen': [P_degen(rho_c)],
        'P_ideal': [P_ideal(rho_c, T_c)],
        'P_rad': [P_rad(T_c)],
        'P': [P(rho_c, T_c)],
        'epsilon_PP': [epsilon_PP(rho_c, T_c)],
        'epsilon_CNO': [epsilon_CNO(rho_c, T_c)],
        'epsilon': [epsilon(rho_c, T_c)],
        'dL_PP/dr': [dLdr_PP(R_0, rho_c, T_c)],
        'dL_CNO/dr': [dLdr_CNO(R_0, rho_c, T_c)],
        'dL/dr': [dLdr(R_0, rho_c, T_c)],
        'kappa_ff': [kappa_ff(rho_c, T_c)],
        'kappa_H': [kappa_H(rho_c, T_c)],
        'kappa': [kappa(rho_c, T_c)]}
    h = 1e4  # initial trial step [m]; myRK4 adapts it from here
    step_count = 1
    step_params = get_step_params(star_params, step_count - 1)
    # March outward until the surface (tau -> tau_inf) or a safety bound is hit
    while (step_params['r'] < R_MAX) and (step_params['M'] < M_MAX) and (not at_tau_inf(step_params)):
        # Radius advances by the h that myRK4 is about to integrate with;
        # myRK4 returns the step size to try on the NEXT iteration
        star_params['r'].append(step_params['r'] + h)
        u0 = get_u0(star_params, step_count - 1)
        h, u1 = myRK4(stellar_structure_equations, step_params['r'], u0, h, T_c)
        update_params(star_params, u1)
        step_count += 1
        step_params = get_step_params(star_params, step_count - 1)
    # Truncate everything at the surface and convert histories to arrays.
    # NOTE(review): this keeps [:surf_idx] (surface sample excluded) while the
    # bisection code later keeps [:surf_idx + 1] - confirm which is intended.
    surf_params, surf_idx = get_surf_params(star_params)
    for param in star_params:
        star_params[param] = np.array(star_params[param][:surf_idx])
    return star_params
# ----------------------------------------------------------------------------------------------------------------------
# -
# Bisection method
# +
### BISECTION ###
# Determining the error in luminosity
def L_err(star_params):
    """Fractional luminosity boundary-condition error at the stellar surface.

    Compares the integrated surface luminosity with the value expected from
    the Stefan-Boltzmann law; the difference is normalised by the geometric
    mean of the two luminosities.
    """
    surf, _ = get_surf_params(star_params)
    L_observed = surf['L']
    # Blackbody luminosity implied by the surface radius and temperature.
    L_expected = 4.0 * pi * sigma * (surf['r']**2.0) * (surf['T']**4.0)
    return (L_observed - L_expected)/np.sqrt(L_observed * L_expected)
# Implementing bisection
def bisection(T_c):
    """Find the central density whose surface luminosity is self-consistent.

    Bisects rho_c between RHO_C_MIN and RHO_C_MAX on the sign of the
    luminosity error L_err for a star of central temperature T_c, then
    returns (surf_params, star_params) for the best solution found, with
    the profiles truncated at the surface.
    """
    min_params = solve_stellar_structure(RHO_C_MIN, T_c)
    max_params = solve_stellar_structure(RHO_C_MAX, T_c)
    rho_c_mid = (RHO_C_MIN + RHO_C_MAX)/2
    mid_params = solve_stellar_structure(rho_c_mid, T_c)
    bisection_count = 0
    # Width of the current rho_c bracket (via the stored central densities).
    range_err = min_params['rho'][0] - max_params['rho'][0]
    mid_err = L_err(mid_params)
    #print('-'*80)
    #print(f'Determining stellar structure for T_c = {T_c} K...')
    #print('-'*80)
    while (abs(range_err) > L_ERR_RANGE_MIN) and (abs(mid_err) > L_ERR_MID_MIN) and (bisection_count < BISECTION_MAX):
        extra_zeros = (len(str(BISECTION_MAX)) - len(str(bisection_count + 1)))*'0'
        #print(f'[Bisection {extra_zeros + str(bisection_count + 1)} | T_c = {T_c} K]')
        #print(f'- Luminosity error = {mid_err}')
        #print(f'- rho_c = {mid_params["rho"][0]} kg/m^3')
        # A NaN error is treated like a positive one: shrink from above.
        if np.isnan(mid_err) or mid_err > 0:
            max_params = mid_params
        elif mid_err < 0:
            min_params = mid_params
        rho_c_mid = (min_params['rho'][0] + max_params['rho'][0])/2
        mid_params = solve_stellar_structure(rho_c_mid, T_c)
        bisection_count += 1
        range_err = min_params['rho'][0] - max_params['rho'][0]
        mid_err = L_err(mid_params)
    # If a bracket endpoint beats the midpoint, keep the endpoint solution.
    min_err = L_err(min_params)
    max_err = L_err(max_params)
    if (abs(mid_err) > L_ERR_MID_MIN) and (abs(max_err) < abs(mid_err)) and (abs(max_err) < abs(min_err)):
        mid_params = max_params
    elif (abs(mid_err) > L_ERR_MID_MIN) and (abs(min_err) < abs(mid_err)) and (abs(min_err) < abs(max_err)):
        mid_params = min_params
    final_err = L_err(mid_params)
    rho_c = mid_params['rho'][0]
    surf_params, surf_idx = get_surf_params(mid_params)
    star_params = mid_params
    # Truncate profiles at the surface (surface point included here).
    for param in star_params:
        star_params[param] = np.array(star_params[param][:surf_idx + 1])
    r_surf = surf_params["r"]
    M_surf = surf_params["M"]
    L_surf = surf_params["L"]
    T_comp = surf_params["T"]
    # Effective temperature implied by the Stefan-Boltzmann law at the surface.
    T_corr = (L_surf/(4.0 * pi * (r_surf**2) * sigma))**(1.0/4.0)
    #print('-'*80)
    print(f'Bisection results for T_c = {T_c} K:')
    print(f'- Steps taken: {bisection_count}')
    print(f'- Final luminosity error: {final_err}')
    print(f'- R = {r_surf/R_sun} R_sun ({r_surf} m)')
    print(f'- M = {M_surf/M_sun} M_sun ({M_surf} kg)')
    print(f'- L = {L_surf/L_sun} L_sun ({L_surf} W)')
    print(f'- Computed T = {T_comp} K')
    print(f'- Corrected T = {T_corr} K')
    print(f'- rho_c = {rho_c} kg/m^3')
    print('-'*80)
    return surf_params, star_params
# -
# Plotting...
import matplotlib.pyplot as plt
# +
def plot_main_structure(r_vals, rho_vals, T_vals, M_vals, L_vals, conv_idx, T_c, res_path):
    """Plot the scaled density, temperature, mass and luminosity profiles."""
    series = [(rho_vals, r'$\rho$'), (T_vals, r'$T$'),
              (M_vals, r'$M$'), (L_vals, r'$L$')]
    for y_vals, lbl in series:
        ax = sns.lineplot(x=r_vals, y=y_vals, label=lbl)
    plt.xlim([0,1])
    # Shade the convective region.
    ax.axvspan(r_vals[conv_idx[0]], r_vals[conv_idx[1]], alpha=0.5)
    ax.set_xlabel(r'$r/R_{\mathrm{star}}$')
    ax.set_ylabel(r'$\rho/\rho_c, \;\; T/T_c, \;\; M/M_{\mathrm{star}}, \;\; L/L_{\mathrm{star}}$')
    ax.legend(loc='best')
    plt.savefig(res_path + '/structure.png', dpi=DPI)
    plt.show()
# Plotting optical depth
def plot_tau(r_vals, tau_vals, conv_idx, T_c, res_path):
    """Plot the optical depth profile against scaled radius."""
    axis = sns.lineplot(x=r_vals, y=tau_vals)
    # Shade the convective region.
    axis.axvspan(r_vals[conv_idx[0]], r_vals[conv_idx[1]], alpha=0.5)
    axis.set_xlabel(r'$r/R_{\mathrm{star}}$')
    axis.set_ylabel(r'$\tau$')
    plt.xlim([0,1])
    plt.savefig(res_path + '/tau.png', dpi=DPI)
    plt.show()
# Plotting pressure
def plot_P(r_vals, P_degen_vals, P_ideal_vals, P_rad_vals, P_vals, conv_idx, T_c, res_path):
    """Plot the total pressure and its degeneracy/ideal/radiation components."""
    curves = [(P_vals, None, r'$P_{\mathrm{total}}$'),
              (P_degen_vals, '-.', r'$P_{\mathrm{deg}}$'),
              (P_ideal_vals, '--', r'$P_{\mathrm{ideal}}$'),
              (P_rad_vals, ':', r'$P_{\mathrm{rad}}$')]
    for y_vals, style, lbl in curves:
        # The total pressure keeps the default (solid) line style.
        kwargs = {} if style is None else {'ls': style}
        ax = sns.lineplot(x=r_vals, y=y_vals, label=lbl, **kwargs)
    # Shade the convective region.
    ax.axvspan(r_vals[conv_idx[0]], r_vals[conv_idx[1]], alpha=0.5)
    ax.set_xlabel(r'$r/R_{\mathrm{star}}$')
    ax.set_ylabel(r'$P/P_c$')
    plt.xlim([0,1])
    ax.legend(loc='best')
    plt.savefig(res_path + '/pressure.png', dpi=DPI)
    plt.show()
# Plotting dlogP/dlogT
def plot_dlogPdlogT(r_vals, dlogPdlogT_vals, conv_idx, T_c, res_path):
    """Plot the logarithmic pressure-temperature gradient dlogP/dlogT."""
    # The gradient array is plotted against r_vals[:-1] (one fewer point).
    axis = sns.lineplot(x=r_vals[:-1], y=dlogPdlogT_vals)
    axis.axvspan(r_vals[conv_idx[0]], r_vals[conv_idx[1]], alpha=0.5)
    axis.set_xlabel(r'$r/R_{\mathrm{star}}$')
    axis.set_ylabel(r'$\mathrm{d}\log{P}/\mathrm{d}\log{T}$')
    plt.xlim([0,1])
    plt.savefig(res_path + '/dlogPdlogT.png', dpi=DPI)
    plt.show()
# Plotting energy generation
def plot_epsilon(r_vals, epsilon_PP_vals, epsilon_CNO_vals, epsilon_vals, conv_idx, T_c, res_path):
    """Plot total energy generation alongside the PP and CNO contributions."""
    curves = [(epsilon_vals, None, r'$\epsilon$'),
              (epsilon_PP_vals, '--', r'$\epsilon_{\mathrm{PP}}$'),
              (epsilon_CNO_vals, '-.', r'$\epsilon_{\mathrm{CNO}}$')]
    for y_vals, style, lbl in curves:
        # The total keeps the default (solid) line style.
        kwargs = {} if style is None else {'ls': style}
        ax = sns.lineplot(x=r_vals, y=y_vals, label=lbl, **kwargs)
    # Shade the convective region.
    ax.axvspan(r_vals[conv_idx[0]], r_vals[conv_idx[1]], alpha=0.5)
    ax.set_xlabel(r'$r/R_{\mathrm{star}}$')
    ax.set_ylabel(r'$\epsilon \;\; (\mathrm{W/kg})$')
    ax.legend(loc='best')
    plt.xlim([0,1])
    plt.savefig(res_path + '/epsilon.png', dpi=DPI)
    plt.show()
# Plotting change in luminosity with radius
def plot_dLdr(r_vals, dLdr_PP_vals, dLdr_CNO_vals, dLdr_vals, conv_idx, T_c, res_path):
    """Plot dL/dr with its separate PP-chain and CNO-cycle contributions."""
    curves = [(dLdr_vals, None, r'$\mathrm{d}L/\mathrm{d}r$'),
              (dLdr_PP_vals, '--', r'$\mathrm{d}L_{\mathrm{PP}}/\mathrm{d}r$'),
              (dLdr_CNO_vals, '-.', r'$\mathrm{d}L_{\mathrm{CNO}}/\mathrm{d}r$')]
    for y_vals, style, lbl in curves:
        # The total keeps the default (solid) line style.
        kwargs = {} if style is None else {'ls': style}
        ax = sns.lineplot(x=r_vals, y=y_vals, label=lbl, **kwargs)
    # Shade the convective region.
    ax.axvspan(r_vals[conv_idx[0]], r_vals[conv_idx[1]], alpha=0.5)
    ax.set_xlabel(r'$r/R_{\mathrm{star}}$')
    ax.set_ylabel(r'$\mathrm{d}L/\mathrm{d}r \;\; (\mathrm{W/m})$')
    ax.legend(loc='best')
    plt.xlim([0,1])
    plt.savefig(res_path + '/dLdr.png', dpi=DPI)
    plt.show()
# Plotting opacity
def plot_kappa(r_vals, kappa_es_vals, kappa_ff_vals, kappa_H_vals, kappa_vals, conv_idx, T_c, res_path):
    """Plot log10(kappa): first the total opacity alone, then all sources."""
    # Figure 1: total opacity only.
    ax_main = sns.lineplot(x=r_vals, y=kappa_vals)
    ax_main.axvspan(r_vals[conv_idx[0]], r_vals[conv_idx[1]], alpha=0.5)
    ax_main.set_xlabel(r'$r/R_{\mathrm{star}}$')
    ax_main.set_ylabel(r'$\log_{10}(\kappa) \;\; (\mathrm{m}^2\mathrm{/kg})$')
    plt.xlim([0,1])
    plt.savefig(res_path + '/kappa_main.png', dpi=DPI)
    plt.show()
    # Figure 2: total plus the individual opacity sources.
    curves = [(kappa_vals, None, r'$\kappa$'),
              (kappa_es_vals, ':', r'$\kappa_{\mathrm{es}}$'),
              (kappa_ff_vals, '--', r'$\kappa_{\mathrm{ff}}$'),
              (kappa_H_vals, '-.', r'$\kappa_{\mathrm{H}^-}$')]
    for y_vals, style, lbl in curves:
        kwargs = {} if style is None else {'ls': style}
        ax_all = sns.lineplot(x=r_vals, y=y_vals, label=lbl, **kwargs)
    ax_all.axvspan(r_vals[conv_idx[0]], r_vals[conv_idx[1]], alpha=0.5)
    ax_all.set_xlabel(r'$r/R_{\mathrm{star}}$')
    ax_all.set_ylabel(r'$\log_{10}(\kappa) \;\; (\mathrm{m}^2\mathrm{/kg})$')
    ax_all.legend(loc='best')
    plt.xlim([0,1])
    plt.savefig(res_path + '/kappa_all.png', dpi=DPI)
    plt.show()
# ----------------------------------------------------------------------------------------------------------------------
### PLOTTING STAR PARAMETER VALUES ###
def plot_star_params(T_c, res_path):
    """Solve the star for central temperature T_c and plot every profile.

    Runs bisection() to find the matching central density, then produces
    the structure, optical-depth, pressure, dlogP/dlogT, energy-generation,
    dL/dr and opacity plots, all saved under res_path.
    """
    # Getting structure values
    surf_params, star_params = bisection(T_c)
    r_surf = surf_params['r']
    rho_c = star_params['rho'][0]
    T_surf = surf_params['T']
    M_surf = surf_params['M']
    L_surf = surf_params['L']
    P_c = star_params['P'][0]
    r_vals = star_params['r']
    rho_vals = star_params['rho']
    T_vals = star_params['T']
    M_vals = star_params['M']
    L_vals = star_params['L']
    tau_vals = star_params['tau']
    P_degen_vals = star_params['P_degen']
    P_ideal_vals = star_params['P_ideal']
    P_rad_vals = star_params['P_rad']
    P_vals = star_params['P']
    epsilon_PP_vals = star_params['epsilon_PP']
    epsilon_CNO_vals = star_params['epsilon_CNO']
    epsilon_vals = star_params['epsilon']
    dLdr_PP_vals = star_params['dL_PP/dr']
    dLdr_CNO_vals = star_params['dL_CNO/dr']
    dLdr_vals = star_params['dL/dr']
    kappa_ff_vals = star_params['kappa_ff']
    kappa_H_vals = star_params['kappa_H']
    kappa_vals = star_params['kappa']
    # Convective region boundaries from the dlogP/dlogT profile.
    dlogPdlogT_vals = np.array(dlogPdlogT(P_vals, T_vals))
    conv_idx = get_conv_idx(dlogPdlogT_vals)
    # Calculating plotting values (profiles scaled by central/surface quantities)
    scaled_r = r_vals/r_surf
    scaled_rho = rho_vals/rho_c
    scaled_T = T_vals/T_c
    scaled_M = M_vals/M_surf
    scaled_L = L_vals/L_surf
    scaled_P_degen = P_degen_vals/P_c
    scaled_P_ideal = P_ideal_vals/P_c
    scaled_P_rad = P_rad_vals/P_c
    scaled_P = P_vals/P_c
    # Opacities plotted in log10; kappa_es is a constant, broadcast to match.
    log_kappa = np.log10(kappa_vals)
    log_kappa_es = np.log10(kappa_es * np.ones_like(kappa_vals))
    log_kappa_ff = np.log10(kappa_ff_vals)
    log_kappa_H = np.log10(kappa_H_vals)
    # Plotting results
    print('Plotting stellar parameters...')
    print('-'*80)
    plot_main_structure(scaled_r, scaled_rho, scaled_T, scaled_M, scaled_L, conv_idx, T_c, res_path)
    print('Plotted rho, T, M, L vs. r')
    plot_tau(scaled_r, tau_vals, conv_idx, T_c, res_path)
    print('Plotted tau vs. r')
    plot_P(scaled_r, scaled_P_degen, scaled_P_ideal, scaled_P_rad, scaled_P, conv_idx, T_c, res_path)
    print('Plotted P vs. r')
    plot_dlogPdlogT(scaled_r, dlogPdlogT_vals, conv_idx, T_c, res_path)
    print('Plotted dlogP/dlogT vs. r')
    plot_epsilon(scaled_r, epsilon_PP_vals, epsilon_CNO_vals, epsilon_vals, conv_idx, T_c, res_path)
    print('Plotted epsilon vs. r')
    plot_dLdr(scaled_r, dLdr_PP_vals, dLdr_CNO_vals, dLdr_vals, conv_idx, T_c, res_path)
    print('Plotted dL/dr vs. r')
    plot_kappa(scaled_r, log_kappa_es, log_kappa_ff, log_kappa_H, log_kappa, conv_idx, T_c, res_path)
    print('Plotted kappa vs. r')
    print('-'*80)
    print('Plotting complete.')
    # The block below is disabled (kept as a string literal, not executed).
    """
    # Saving star parameters
    print('Saving results...')
    with open(res_path + '/star_params.txt', 'w') as f:
        f.write(f'T_c = {T_c} K\n')
        f.write(f'rho_c = {rho_c} kg/m^3\n')
        f.write(f'R = {r_surf/R_sun} R_sun\n')
        f.write(f'M = {M_surf/M_sun} M_sun\n')
        f.write(f'L = {L_surf/L_sun} L_sun\n')
        f.write(f'T_calc = {T_surf} K\n')
        f.write(f'T_corr = {(L_surf/(4.0 * pi * (r_surf**2) * sigma))**(1.0/4.0)} K')
        f.close()
    print('Results saved.')
    print('-'*80)
    """
# ----------------------------------------------------------------------------------------------------------------------
# -
import seaborn as sns
from joblib import Parallel, delayed
import numpy as np
# +
# MAIN SEQUENCE PLOTS
# Plotting luminosity vs. temperature
def plot_L_vs_T(T_vals, L_vals, res_path, from_data):
    """Plot the Hertzsprung-Russell diagram (L vs. T on log-log axes)."""
    plt.scatter(T_vals, L_vals, marker='o')
    plt.xscale('log')
    plt.yscale('log')
    plt.xlabel(r'$T \;\; (\mathrm{K})$')
    plt.ylabel(r'$L/L_{\odot}$')
    # HR diagrams are conventionally drawn with temperature decreasing rightward.
    plt.gca().invert_xaxis()
    filename = '/HR_diagram_from_data.png' if from_data else '/HR_diagram.png'
    plt.savefig(res_path + filename, dpi=DPI)
    plt.show()
# Plotting luminosity vs. mass
def plot_L_vs_M(M_vals, L_vals, res_path, from_data):
    """Plot the mass-luminosity relation on log-log axes."""
    plt.scatter(M_vals, L_vals, marker='o')
    plt.xscale('log')
    plt.yscale('log')
    plt.xlabel(r'$M/M_{\odot}$')
    plt.ylabel(r'$L/L_{\odot}$')
    suffix = '_from_data' if from_data else ''
    plt.savefig(res_path + '/M_L_relation' + suffix + '.png', dpi=DPI)
    plt.show()
# Plotting radius vs. mass
def plot_R_vs_M(M_vals, R_vals, res_path, from_data):
    """Plot the mass-radius relation on log-log axes."""
    plt.scatter(M_vals, R_vals, marker='o')
    plt.xscale('log')
    plt.yscale('log')
    plt.xlabel(r'$M/M_{\odot}$',)
    plt.ylabel(r'$R/R_{\odot}$')
    suffix = '_from_data' if from_data else ''
    plt.savefig(res_path + '/M_R_relation' + suffix + '.png', dpi=DPI)
    plt.show()
# ----------------------------------------------------------------------------------------------------------------------
### PLOTTING MAIN SEQUENCE ###
# Plotting the main sequence using the stellar structure solver
def main_sequence_vals(T_c):
    """Solve one star at central temperature T_c.

    Returns the tuple (rho_c, R, T, M, L): central density, surface radius,
    Stefan-Boltzmann effective temperature, total mass and luminosity.
    """
    surf, star = bisection(T_c)
    R_star = surf['r']
    M_star = surf['M']
    L_star = surf['L']
    # Effective temperature from the Stefan-Boltzmann law.
    T_eff = (L_star/(4.0 * pi * (R_star**2) * sigma))**(1.0/4.0)
    return star['rho'][0], R_star, T_eff, M_star, L_star
def plot_main_sequence(T_c_min, T_c_max, N, res_path, n_jobs=4):
    """Compute and plot the main sequence over a range of central temperatures.

    Parameters
    ----------
    T_c_min, T_c_max : bounds of the central-temperature grid (K).
    N : number of stars to solve along the grid.
    res_path : directory where the plots and the results table are written.
    n_jobs : number of parallel workers used to solve the stars (default 4,
        matching the previous hard-coded value).
    """
    # Solve N stars in parallel; each row is (rho_c, R, T, M, L).
    T_c_vals = np.linspace(T_c_min, T_c_max, N)
    main_sequence = np.array(Parallel(n_jobs=n_jobs)(delayed(main_sequence_vals)(T_c) for T_c in T_c_vals))
    rho_c_vals = main_sequence[:,0]
    # Scale radius, mass and luminosity to solar units for plotting.
    R_vals = main_sequence[:,1]/R_sun
    T_vals = main_sequence[:,2]
    M_vals = main_sequence[:,3]/M_sun
    L_vals = main_sequence[:,4]/L_sun
    # Plotting results
    print('Plotting main sequence...')
    print('-'*80)
    plot_L_vs_T(T_vals, L_vals, res_path, False)
    print('Plotted Hertzsprung-Russell diagram')
    plot_L_vs_M(M_vals, L_vals, res_path, False)
    print('Plotted mass-luminosity relation')
    plot_R_vs_M(M_vals, R_vals, res_path, False)
    print('Plotted mass-radius relation')
    print('-'*80)
    print('Plotting complete.')
    # Saving results as a whitespace-separated table.
    print('Saving results...')
    with open(res_path + '/main_sequence_values.txt', 'w') as f:
        f.write('T_c rho_c R T M L\n')
        for i in range(len(T_c_vals)):
            f.write(f'{T_c_vals[i]} {rho_c_vals[i]} {R_vals[i]} {T_vals[i]} {M_vals[i]} {L_vals[i]}\n')
        # The with-statement closes the file; the explicit f.close() that was
        # here was redundant and has been removed.
    print('Results saved.')
    print('-'*80)
# ----------------------------------------------------------------------------------------------------------------------
# -
# Run the main-sequence sweep for several values of the global Lambda.
# NOTE(review): rebinding Lambda here only affects the physics if the
# structure-equation functions read it as a module-level global at call
# time -- confirm against their definitions (not visible in this chunk).
Lambda = 0
plot_main_sequence(30000, 1e7, 30, 'MS')
Lambda = 1e8
plot_main_sequence(30000, 1e7, 30, 'MS_pos')
Lambda = -1e4
plot_main_sequence(30000, 1e7, 30, 'MS_neg_e4')
Lambda = -1e5
plot_main_sequence(30000, 1e7, 30, 'MS_neg_e5')
Lambda = 1e7
plot_main_sequence(30000, 1e7, 30, 'MS_pos_e7')
Lambda = 5e8
plot_main_sequence(30000, 1e7, 30, 'MS_pos_5e8')
Lambda = -1e7
plot_main_sequence(30000, 1e7, 30, 'MS_neg_e7')
| En/star.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# language: python
# name: python3
# ---
import matplotlib.pyplot as plt
import numpy as np
# 1-D, 2-D and 3-D arrays of ones with int8 dtype.
nums_1d = np.ones((3), dtype = np.int8)
nums_1d
nums_2d = np.ones((3, 4), dtype = np.int8)
nums_2d
nums_3d = np.ones((2, 3, 4), dtype = np.int8)
nums_3d
# Elementwise arithmetic on arrays.
array_nums = np.array([3,4])
array_nums * 2
array_nums_more = np.array([10,20])
array_nums * array_nums_more
array_nums + array_nums_more
array_nums - array_nums_more
array_nums / array_nums_more
np.ndarray((2,4), np.int8) # garbage data: uninitialised memory, not zeros
from numpy import random
rands = np.random.randn(2,4) * 100 # random
# astype returns a converted copy; rands itself is unchanged.
rands.astype('int8')
# +
#np.save("rands.npy", rands)
# -
# Iterating a 2-D array yields its rows.
for i in rands:
    print(i)
| app/application_patterns/pandas/learn_numpy.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# language: python
# name: python3
# ---
# ## Import Packages
# %load_ext cuda_device
# %CUDA_VISIBLE_DEVICES 2
# +
import argparse
from pathlib import Path
from PIL import Image
import cv2
import numpy as np
import pandas as pd
import torch
import torch.optim as optim
import matplotlib.pyplot as plt
from utils import Options, overlap_ratio
from models.mdnet import MDNet, BCELoss
from models.extractor import SampleGenerator, RegionExtractor
from models.regressor import BBRegressor
# -
# ## Extract samples
def forward_samples(model, image, samples, opts, out_layer='conv3'):
    """Extract model features for the given sample boxes.

    Crops regions from `image` with RegionExtractor, feeds each batch through
    the model up to `out_layer` (no gradients), and returns the features of
    all batches concatenated along dim 0.

    NOTE(review): if the extractor yields no batches, `feats` is never
    assigned and the return raises NameError -- confirm `samples` is
    always non-empty at the call sites.
    """
    model.eval()
    extractor = RegionExtractor(image, samples, opts.img_size, opts.padding, opts.batch_test)
    for i, regions in enumerate(extractor):
        if opts.use_gpu:
            regions = regions.cuda()
        with torch.no_grad():
            feat = model(regions, out_layer=out_layer)
        # First batch (i == 0) initialises feats; later batches concatenate on.
        feats = torch.cat((feats, feat.detach().clone()), 0) if i else feat.detach().clone()
    return feats
# ## MDNet Train
def train(model, criterion, optimizer,
          pos_feats, neg_feats, maxiter, opts,
          in_layer='fc4'):
    """Run `maxiter` optimisation steps of MDNet online training.

    Draws positive/negative feature batches from shuffled index pools
    (performing hard negative mining when batch_neg_cand > batch_neg) and
    optimises `criterion` on the pos/neg scores.  Mutates `model` and
    `optimizer` in place; returns None.
    """
    model.train()
    batch_pos = opts.batch_pos
    batch_neg = opts.batch_neg
    batch_test = opts.batch_test
    batch_neg_cand = max(opts.batch_neg_cand, batch_neg)
    # Shuffled index pools, extended until they cover every iteration.
    pos_idx = np.random.permutation(pos_feats.size(0))
    neg_idx = np.random.permutation(neg_feats.size(0))
    while len(pos_idx) < batch_pos * maxiter:
        pos_idx = np.concatenate([pos_idx, np.random.permutation(pos_feats.size(0))])
    while len(neg_idx) < batch_neg_cand * maxiter:
        neg_idx = np.concatenate([neg_idx, np.random.permutation(neg_feats.size(0))])
    pos_pointer = 0
    neg_pointer = 0
    for _ in range(maxiter):
        # select pos idx
        pos_next = pos_pointer + batch_pos
        pos_cur_idx = pos_idx[pos_pointer:pos_next]
        pos_cur_idx = pos_feats.new(pos_cur_idx).long()
        pos_pointer = pos_next
        # select neg idx
        neg_next = neg_pointer + batch_neg_cand
        neg_cur_idx = neg_idx[neg_pointer:neg_next]
        neg_cur_idx = neg_feats.new(neg_cur_idx).long()
        neg_pointer = neg_next
        # create batch
        batch_pos_feats = pos_feats[pos_cur_idx]
        batch_neg_feats = neg_feats[neg_cur_idx]
        # hard negative mining: keep the batch_neg highest-scoring candidates
        if batch_neg_cand > batch_neg:
            model.eval()
            for start in range(0, batch_neg_cand, batch_test):
                end = min(start + batch_test, batch_neg_cand)
                with torch.no_grad():
                    score = model(batch_neg_feats[start:end], in_layer=in_layer)
                if start == 0:
                    neg_cand_score = score.detach()[:, 1].clone()
                else:
                    neg_cand_score = torch.cat((neg_cand_score, score.detach()[:, 1].clone()), 0)
            _, top_idx = neg_cand_score.topk(batch_neg)
            batch_neg_feats = batch_neg_feats[top_idx]
            model.train()
        # forward
        pos_score = model(batch_pos_feats, in_layer=in_layer)
        neg_score = model(batch_neg_feats, in_layer=in_layer)
        # optimize (with gradient clipping before the step)
        loss = criterion(pos_score, neg_score)
        model.zero_grad()
        loss.backward()
        torch.nn.utils.clip_grad_norm_(model.parameters(), opts.grad_clip)
        optimizer.step()
# ## Main function for MDNet
def main(images, init_bbox, ground_truths, opts):
    """Track the target through `images`, starting from `init_bbox`.

    Generator: trains MDNet and a bbox regressor online on the first frame,
    then for each subsequent frame yields
    (raw bbox estimate, regressed bbox, IoU with ground truth, target score).
    The model is updated online (short/long term) as tracking proceeds.
    """
    device = ('cuda' if opts.use_gpu else 'cpu')
    model = MDNet(opts.model_path).to(device)
    criterion = BCELoss()
    # Set learnable parameters: only layers whose name matches opts.ft_layers.
    for k, p in model.params.items():
        p.requires_grad = any([k.startswith(l) for l in opts.ft_layers])
    # Set optimizer states
    def set_optimizer(lr_base, lr_mult, momentum=0.9, w_decay=0.0005):
        # Per-layer learning rates: lr_base scaled by lr_mult for matching prefixes.
        param_list = []
        for k, p in filter(lambda kp: kp[1].requires_grad, model.params.items()):
            lr = lr_base
            for l, m in lr_mult.items():
                if k.startswith(l):
                    lr = lr_base * m
            param_list.append({'params': [p], 'lr': lr})
        return optim.SGD(param_list, lr=lr, momentum=momentum, weight_decay=w_decay)
    init_optimizer = set_optimizer(opts.lr_init, opts.lr_mult)
    update_optimizer = set_optimizer(opts.lr_update, opts.lr_mult)
    # Load first image
    image = Image.open(images[0]).convert('RGB')
    # Draw pos/neg samples around the initial box
    pos_examples = SampleGenerator('gaussian', image.size, opts.trans_pos, opts.scale_pos)(
        init_bbox, opts.n_pos_init, opts.overlap_pos_init)
    neg_examples = np.concatenate([
        SampleGenerator('uniform', image.size, opts.trans_neg_init, opts.scale_neg_init)(
            init_bbox, int(opts.n_neg_init * 0.5), opts.overlap_neg_init),
        SampleGenerator('whole', image.size)(
            init_bbox, int(opts.n_neg_init * 0.5), opts.overlap_neg_init)])
    neg_examples = np.random.permutation(neg_examples)
    # Extract pos/neg features
    pos_feats = forward_samples(model, image, pos_examples, opts)
    neg_feats = forward_samples(model, image, neg_examples, opts)
    # Initial training
    train(model, criterion, init_optimizer, pos_feats, neg_feats, opts.maxiter_init, opts)
    del init_optimizer, neg_feats
    torch.cuda.empty_cache()
    # Train bbox regressor
    bbreg_examples = SampleGenerator('uniform', image.size, opts.trans_bbreg, opts.scale_bbreg, opts.aspect_bbreg)\
        (init_bbox, opts.n_bbreg, opts.overlap_bbreg)
    bbreg_feats = forward_samples(model, image, bbreg_examples, opts)
    bbreg = BBRegressor(image.size)
    bbreg.train(bbreg_feats, bbreg_examples, init_bbox)
    del bbreg_feats
    torch.cuda.empty_cache()
    # Init sample generators for update
    sample_generator = SampleGenerator('gaussian', image.size, opts.trans, opts.scale)
    pos_generator = SampleGenerator('gaussian', image.size, opts.trans_pos, opts.scale_pos)
    neg_generator = SampleGenerator('uniform', image.size, opts.trans_neg, opts.scale_neg)
    # Init pos/neg features for update
    neg_examples = neg_generator(init_bbox, opts.n_neg_update, opts.overlap_neg_init)
    neg_feats = forward_samples(model, image, neg_examples, opts)
    pos_feats_all = [pos_feats]
    neg_feats_all = [neg_feats]
    # Main loop
    for i, image in enumerate(images[1:], 1):
        image = Image.open(image).convert('RGB')
        # Estimate target bbox: mean of the top-5 scoring candidate samples
        samples = sample_generator(init_bbox, opts.n_samples)
        sample_scores = forward_samples(model, image, samples, opts, out_layer='fc6')
        top_scores, top_idx = sample_scores[:, 1].topk(5)
        top_idx = top_idx.cpu()
        target_score = top_scores.mean()
        init_bbox = samples[top_idx]
        if top_idx.shape[0] > 1:
            init_bbox = init_bbox.mean(axis=0)
        success = target_score > 0
        # Expand search area at failure
        sample_generator.trans = opts.trans if success else min(sample_generator.trans * 1.1, opts.trans_limit)
        # Bbox regression (only refined on successful frames)
        if success:
            bbreg_samples = samples[top_idx]
            if top_idx.shape[0] == 1:
                bbreg_samples = bbreg_samples[None, :]
            bbreg_feats = forward_samples(model, image, bbreg_samples, opts)
            bbreg_samples = bbreg.predict(bbreg_feats, bbreg_samples)
            bbreg_bbox = bbreg_samples.mean(axis=0)
        else:
            bbreg_bbox = init_bbox
        yield init_bbox, bbreg_bbox, overlap_ratio(ground_truths[i], bbreg_bbox)[0], target_score
        # Data collect: bounded FIFO pools of pos/neg features
        if success:
            pos_examples = pos_generator(init_bbox, opts.n_pos_update, opts.overlap_pos_update)
            pos_feats = forward_samples(model, image, pos_examples, opts)
            pos_feats_all.append(pos_feats)
            if len(pos_feats_all) > opts.n_frames_long:
                del pos_feats_all[0]
            neg_examples = neg_generator(init_bbox, opts.n_neg_update, opts.overlap_neg_update)
            neg_feats = forward_samples(model, image, neg_examples, opts)
            neg_feats_all.append(neg_feats)
            if len(neg_feats_all) > opts.n_frames_short:
                del neg_feats_all[0]
        # Short term update (on failure, using the most recent frames only)
        if not success:
            nframes = min(opts.n_frames_short, len(pos_feats_all))
            pos_data = torch.cat(pos_feats_all[-nframes:], 0)
            neg_data = torch.cat(neg_feats_all, 0)
            train(model, criterion, update_optimizer, pos_data, neg_data, opts.maxiter_update, opts)
        # Long term update (periodically, using the full positive pool)
        elif i % opts.long_interval == 0:
            pos_data = torch.cat(pos_feats_all, 0)
            neg_data = torch.cat(neg_feats_all, 0)
            train(model, criterion, update_optimizer, pos_data, neg_data, opts.maxiter_update, opts)
        torch.cuda.empty_cache()
# ### (Optional) Refresh image output in IPython
#
from IPython.display import clear_output
# %matplotlib inline
# ## Run!
# Fix seeds for reproducible sampling.
np.random.seed(0)
torch.manual_seed(0)
# +
options = Options()
dataset = Path('./datasets/DragonBaby')
images = list(sorted(dataset.joinpath('img').glob('*.jpg')))
ground_truths = pd.read_csv(str(dataset.joinpath('groundtruth_rect.txt')), header=None).values
iou, success = 0, 0
# Run tracker and visualise each frame as it is processed.
for i, (result, (x, y, w, h), overlap, score) in \
        enumerate(main(images, ground_truths[0], ground_truths, options), 1):
    clear_output(wait=True)
    image = np.asarray(Image.open(images[i]).convert('RGB'))
    gx, gy, gw, gh = ground_truths[i]
    # Ground truth drawn in green, prediction in red.
    cv2.rectangle(image, (int(gx), int(gy)), (int(gx+gw), int(gy+gh)), (0, 255, 0), 2)
    cv2.rectangle(image, (int(x), int(y)), (int(x+w), int(y+h)), (255, 0, 0), 2)
    iou += overlap
    success += overlap > .5
    plt.imshow(image)
    plt.pause(.1)
    plt.title(f'#{i}/{len(images)-1}, Overlap {overlap:.3f}, Score {score:.3f}')
    plt.draw()
# Summary metrics over all tracked frames (first frame excluded).
iou /= len(images) - 1
print(f'Mean IOU: {iou:.3f}, Success: {success} / {len(images)-1}')
# -
| legacy/MDNet/tracking.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# language: python
# name: python3
# ---
import pandas as pd
# #### Series
# +
import pandas as pd
# A Series with a custom string index.
data = pd.Series(["Январь", "Февраль", "Март", "Апрель"],
                 index = ['Первый', "Второй", "Третий", "Четвёртый"])
display(data)
# -
data.loc["Первый"]
data.loc[["Первый", "Третий"]]
# Series over 10..1000 with the default integer index.
data = pd.Series(list(range(10, 1001)))
data
# .loc is label-based, .iloc is position-based.
print(data.loc[10] + data.loc[23] - data.loc[245] + data.iloc[122])
# #### DataFrame
df = pd.DataFrame([ [1,2,3], [2,3,4] ],
                  columns = ['foo', 'bar', 'baz'],
                  index = ['foobar', 'foobaz'])
df
pd.DataFrame({'Тестовая колонка':[1,5]})
# +
import pandas as pd
football = pd.read_csv('data/data_sf.csv', index_col=[0])
# display(football.head())
football.head()
# +
import pandas as pd
football = pd.read_csv('data/data_sf.csv')
display(football.info())
# -
# NOTE(review): info(null_counts=...) was deprecated in newer pandas in
# favour of show_counts -- confirm the pandas version this notebook targets.
football.info(null_counts=True)
football.describe()
football.describe(include=['object'])
football['Age'].mean()
football[['Composure']].info()
football['ShortPassing'].describe()
football['Wage'].sum()
football['Value'].min()
football['Wage'].mean()
# Filter rows with a boolean mask, then aggregate a single column.
football[football['Wage'] > football['Wage'].mean()]['SprintSpeed'].mean()
football[football['Wage'] < football['Wage'].mean()]['SprintSpeed'].mean()
football[football['Wage'] == football['Wage'].max()]['Position']
football[football['Nationality'] == 'Brazil']['Penalties'].sum()
football[football['HeadingAccuracy'] > 50 ]['Age'].mean()
# NOTE(review): this compares each value to 90% of *itself* (true for any
# positive value) -- the corrected query using .max() follows below.
football[(football['Composure'] > football['Composure'] * 0.9) &
         (football['Reactions'] > football['Reactions'] * 0.9)]['Age'].min()
football[(football['Composure'] > football['Composure'].max() * 0.9) &
         (football['Reactions'] > football['Reactions'].max() * 0.9)]['Age'].min()
football[football['Age'] == football['Age'].max()]['Reactions'].mean() -\
    football[football['Age'] == football['Age'].min()]['Reactions'].mean()
football[football['Value'] > football['Value'].mean()]['Nationality'].value_counts()
football[(football['Position'] == 'GK') & (football['GKReflexes'] == football['GKReflexes'].max())]['Wage'].max() /\
    football[(football['Position'] == 'GK') & (football['GKHandling'] == football['GKHandling'].max())]['Wage'].max()
football[football['Aggression'] == football['Aggression'].max()]['ShotPower'].mean() /\
    football[football['Aggression'] == football['Aggression'].min()]['ShotPower'].mean()
| python-pandas/Python5-Pandas.ipynb |
# -*- coding: utf-8 -*-
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .jl
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Julia 1.5.2
# language: julia
# name: julia-1.5
# ---
# + [markdown] slideshow={"slide_type": "slide"}
# # Discriminative Classification
# + [markdown] slideshow={"slide_type": "slide"}
# ### Preliminaries
#
# - Goal
# - Introduction to discriminative classification models
# - Materials
# - Mandatory
# - These lecture notes
# - Optional
# - Bishop pp. 213 - 217 (Laplace approximation)
# - Bishop pp. 217 - 220 (Bayesian logistic regression)
# - [T. Minka (2005), Discriminative models, not discriminative training](https://github.com/bertdv/BMLIP/blob/master/lessons/notebooks/files/Minka-2005-Discriminative-models-not-discriminative-training.pdf)
# + [markdown] slideshow={"slide_type": "slide"}
# ### Challenge: difficult class-conditional data distributions
#
# Our task will be the same as in the preceding class on (generative) classification. But this time, the class-conditional data distributions look very non-Gaussian, yet the linear discriminative boundary looks easy enough:
# -
using Pkg;Pkg.activate("probprog/workspace");Pkg.instantiate()
using Random; Random.seed!(1234);
IJulia.clear_output();
# + slideshow={"slide_type": "slide"}
# Generate dataset {(x1,y1),...,(xN,yN)}
# x is a 2-d feature vector [x_1;x_2]
# y ∈ {false,true} is a binary class label
# p(x|y) is multi-modal (mixture of uniform and Gaussian distributions)
using PyPlot
include("./scripts/lesson8_helpers.jl")
N = 200
X, y = genDataset(N) # Generate data set, collect in matrix X and vector y
X_c1 = X[:,findall(.!y)]'; X_c2 = X[:,findall(y)]' # Split X based on class label
X_test = [3.75; 1.0] # Features of 'new' data point
# Scatter-plot both classes and the unlabelled test point.
function plotDataSet()
    plot(X_c1[:,1], X_c1[:,2], "bx", markersize=8)           # class y=0 as blue crosses
    plot(X_c2[:,1], X_c2[:,2], "r+", markersize=8, fillstyle="none")  # class y=1 as red pluses
    plot(X_test[1], X_test[2], "ko")                          # unlabelled test point
    xlabel(L"x_1"); ylabel(L"x_2");
    legend([L"y=0", L"y=1",L"y=?"], loc=2)
    xlim([-2;10]); ylim([-4, 8])
end
plotDataSet();
# + [markdown] slideshow={"slide_type": "slide"}
# ### Main Idea of Discriminative Classification
#
# - Again, a data set is given by $D = \{(x_1,y_1),\dotsc,(x_N,y_N)\}$ with $x_n \in \mathbb{R}^M$ and $y_n \in \mathcal{C}_k$, with $k=1,\ldots,K$.
# + [markdown] slideshow={"slide_type": "fragment"}
# - Sometimes, the precise assumptions of the (multinomial-Gaussian) generative model
# $$p(x_n,y_n\in\mathcal{C}_k|\theta) = \pi_k \cdot \mathcal{N}(x_n|\mu_k,\Sigma)$$
# clearly do not match the data distribution.
# + [markdown] slideshow={"slide_type": "fragment"}
# - Here's an **IDEA**! Let's model the posterior $$p(y_n\in\mathcal{C}_k|x_n)$$ *directly*, without any assumptions on the class densities.
# + [markdown] slideshow={"slide_type": "fragment"}
# - Of course, this implies also that we build direct models for the **discrimination boundaries**, which are given by
# $$\frac{p(y_n\in\mathcal{C}_k|x_n)}{p(y_n\in\mathcal{C}_j|x_n)} \overset{!}{=} 1$$
# + [markdown] slideshow={"slide_type": "slide"}
# ### Bayesian Logistic Regression
#
# - We will work this idea out for a 2-class problem. Assume a data set is given by $D = \{(x_1,y_1),\dotsc,(x_N,y_N)\}$ with $x_n \in \mathbb{R}^M$ and $y_n \in \{0,1\}$.
# + [markdown] slideshow={"slide_type": "slide"}
# ##### Model Specification
#
# - What model should we use for the posterior distribution $p(y_n \in \mathcal{C}_k|x_n)$?
# + [markdown] slideshow={"slide_type": "fragment"}
# - In Logistic Regression, we take inspiration from the generative approach, where the **softmax** function "emerged" as the posterior. Here, we **choose** the 2-class softmax function (which is called the [**logistic** function](https://en.wikipedia.org/wiki/Logistic_function)) with linear discrimination boundaries for the posterior class probability:
# $$
# p(y_n =1 \,|\, x_n, w) = \sigma(w^T x_n) \,.
# $$
# where $$\sigma(a) = \frac{1}{1+e^{-a}}$$ is the _logistic_ function.
# + [markdown] slideshow={"slide_type": "slide"}
#
# <p style="text-align:center;"><img src="./figures/Figure4.9.png" width="500px"></p>
#
# - (Bishop fig.4.9). The logistic function $\sigma(a) = 1/(1+e^{-a})$ (red), together with the <a id="scaled-probit">scaled probit function</a> $\Phi(\lambda a)$, for $\lambda^2=\pi/8$ (in blue). We will use this approximation later in the [Laplace approximation](#gaussian-cdf).
#
# + [markdown] slideshow={"slide_type": "fragment"}
# - Indeed, for this choice of posterior class probabilities, the discrimination boundary is a straight line, see [Exercises](https://nbviewer.jupyter.org/github/bertdv/BMLIP/blob/master/lessons/exercises/Exercises-with-Solutions.ipynb).
# + [markdown] slideshow={"slide_type": "subslide"}
# - Adding the other class ($y_n=0$) leads to the following posterior class distribution:
# $$\begin{align*}
# p(y_n \,|\, x_n, w) &= \mathrm{Bernoulli}\left(y_n \,|\, \sigma(w^T x_n) \right) \\
# &= \sigma(w^T x_n)^{y_n} \left(1 - \sigma(w^T x_n)\right)^{(1-y_n)} \tag{B-4.89} \\
# &= \sigma\left( (2y_n-1) w^T x_n\right)
# \end{align*}$$
# - Note that for the 3rd equality, we have made use of the fact that $\sigma(-a) = 1-\sigma(a)$.
# - Each of these three models in B-4.89 are **equivalent**. We mention all three notational options since they all appear in the literature.
# + [markdown] slideshow={"slide_type": "subslide"}
# - This choice for the class posterior is called **logistic regression**, in analogy to **linear regression** (where $p(y_n|x_n,w) = \mathcal{N}(y_n|w^T x_n,\sigma^2)$).
#
# + [markdown] slideshow={"slide_type": "fragment"}
#
# - Note that in this model specification, we do not impose a Gaussian structure on the class features. In the discriminative approach, the parameters $w$ are **not** structured into $\{\mu,\Sigma,\pi \}$. This provides discriminative approach with more flexibility than the generative approach.
#
# + [markdown] slideshow={"slide_type": "fragment"}
# - In *Bayesian* logistic regression, we add a **Gaussian prior on the weights**:
# $$\begin{align*}
# p(w) = \mathcal{N}(w \,|\, m_0, S_0) \tag{B-4.140}
# \end{align*}$$
# + [markdown] slideshow={"slide_type": "slide"}
# ##### <a id="#logistic-regression-posterior">Inference</a>
#
# - The posterior for the weights follows by Bayes rule
# $$\begin{align*}
# \underbrace{p(w \,|\, D)}_{\text{posterior}} &\propto p(w) p(D|w) \\ &= \underbrace{\mathcal{N}(w \,|\, m_0, S_0)}_{\text{prior}} \cdot \underbrace{\prod_{n=1}^N \sigma\left( (2y_n-1) w^T x_n\right)}_{\text{likelihood}} \tag{B-4.142}
# \end{align*}$$
#
# - In principle, Bayesian inference is done now. Unfortunately, the posterior is not Gaussian and the evidence $p(D)$ is also not analytically computable. (We will deal with this later).
# + [markdown] slideshow={"slide_type": "slide"}
# ##### Predictive distribution
#
# - For a new data point $x_\bullet$, the predictive distribution for $y_\bullet$ is given by
# $$\begin{align*}
# p(y_\bullet = 1 \mid x_\bullet, D) &= \int p(y_\bullet = 1 \,|\, x_\bullet, w) \, p(w\,|\, D) \,\mathrm{d}w \\
# &= \int \sigma(w^T x_\bullet) \, p(w\,|\, D) \,\mathrm{d}w \tag{B-4.145}
# \end{align*}$$
# + [markdown] slideshow={"slide_type": "fragment"}
# - After substitution of $p(w | D)$ from B-4.142, we have an integral that is not solvable in closed-form.
# + [markdown] slideshow={"slide_type": "fragment"}
# - Many methods have been developed to approximate the integrals for the predictive distribution and evidence. Here, we present the **Laplace approximation**, which is one of the simplest methods with broad applicability to Bayesian calculations.
# + [markdown] slideshow={"slide_type": "slide"}
# ### The Laplace Approximation
#
# - The central idea of the Laplace approximation is to approximate a (possibly unnormalized) distribution $f(z)$ by a Gaussian distribution $q(z)$.
# + [markdown] slideshow={"slide_type": "fragment"}
# - Note that $\log q(z)$ is a second-order polynomial in $z$, so we will find the Gaussian by fitting a parabola to $\log f(z)$.
#
#
# -
# ##### Example
#
# <p style="text-align:center;"><img src="./figures/Figure4.14a.png" width="500px"></p>
#
# - (Bishop fig.4.14a). Laplace approximation (in red) to the distribution $p(z)\propto \exp(-z^2/2)\sigma(20z+4)$, where $\sigma(a)=1/(1+e^{-a})$. The Laplace approximation is centered on the mode of $p(z)$.
# + [markdown] slideshow={"slide_type": "slide"}
# ### Working out the Laplace Approximation
#
# ##### estimation of mean
#
# - The mean ($z_0$) of $q(z)$ is placed on the mode of $\log f(z)$, i.e.,
#
# $$z_0 = \arg\max_z \log f(z) \tag{B-4.126}$$
# + [markdown] slideshow={"slide_type": "slide"}
# ##### estimation of precision matrix
#
# - Since the gradient $\nabla \left. f(z) \right|_{z=z_0}$ vanishes at the mode, we can (Taylor) expand $\log f(z)$ around $z=z_0$ as
# $$\begin{align*}
# \log f(z) &\approx \log f(z_0) + \overbrace{\left(\nabla \log f(z_0)\right)^T (z-z_0)}^{=0 \text{ at }z=z_0} + \ldots \\
# &\qquad + \frac{1}{2} (z-z_0)^T \left(\nabla \nabla \log f(z_0)\right) (z-z_0) \\
# &= \log f(z_0) - \frac{1}{2} (z-z_0)^T A (z-z_0) \tag{B-4.131}
# \end{align*}$$
# where the [Hessian matrix](https://en.wikipedia.org/wiki/Hessian_matrix) $A$ is defined by
# $$
# A = - \nabla \nabla \left. \log f(z) \right|_{z=z_0} \tag{B-4.132}
# $$
# + [markdown] slideshow={"slide_type": "slide"}
# ##### Laplace approximation construction
#
# - After taking exponentials in eq. B-4.131, we obtain
#
# $$
# f(z) \approx f(z_0) \exp\left( - \frac{1}{2} (z-z_0)^T A (z-z_0)\right)
# $$
# + [markdown] slideshow={"slide_type": "fragment"}
# - We can now identify $q(z)$ as
# $$
# q(z) = \mathcal{N}\left( z\,|\,z_0, A^{-1}\right) \tag{B-4.134}
# $$
# with $z_0$ and $A$ defined by eqs. B-4.126 and B-4.132.
# + [markdown] slideshow={"slide_type": "slide"}
# ### Bayesian Logistic Regression with the Laplace Approximation
#
# - Let's get back to the challenge of computing the predictive class distribution (B-4.145) for Bayesian logistic regression. We first work out the Gaussian Laplace approximation $q(w)$ to the [posterior weight distribution](#logistic-regression-posterior)
# $$\begin{align*}
# \underbrace{p(w | D)}_{\text{posterior}} \propto \underbrace{\mathcal{N}(w \,|\, m_0, S_0)}_{\text{prior}} \cdot \underbrace{\prod_{n=1}^N \sigma\left( (2y_n-1) w^T x_n\right)}_{\text{likelihood}} \tag{B-4.142}
# \end{align*}$$
# + [markdown] slideshow={"slide_type": "fragment"}
# ##### A Gaussian Laplace approximation to the weights posterior
#
# - Since we have a differentiable expression for $\log p(w | D)$, it is straightforward to compute the gradient and Hessian (for [proof, see optional slide](#gradient-hessian)):
# $$\begin{align*}
# \nabla_w \log p(w | D) &= S_0^{-1}\cdot \left(m_0-w\right) + \sum_n (2y_n-1) (1-\sigma_n) x_n \\
# \nabla\nabla_w \log p(w | D) &= -S_0^{-1} - \sum_n \sigma_n (1-\sigma_n) x_n x_n^T \tag{B-4.143}
# \end{align*}$$
# where we used shorthand $\sigma_n$ for $\sigma\left( (2y_n-1) w^T x_n\right)$.
# + [markdown] slideshow={"slide_type": "fragment"}
# - We can now use the gradient $\nabla_w \log p(w | D)$ to find the **mode** $w_{N}$ of $\log p(w|D)$ (eg by some gradient-based optimization procedure) and then use the Hessian $\nabla\nabla_w \log p(w | D)$ to get the variance of $q(w)$, leading to a <a id="Laplace-posterior-logistic-regression">**Gaussian approximate weights posterior**</a>:
# $$
# q(w) = \mathcal{N}\left(w\,|\, w_{N}, S_N\right) \tag{B-4.144}
# $$
# with
# $$
# S_N^{-1} = S_0^{-1} + \sum_n \sigma_n (1-\sigma_n) x_n x_n^T \tag{B-4.143}
# $$
# + [markdown] slideshow={"slide_type": "subslide"}
# ### Using the Laplace-approximated parameter posterior to evaluate the predictive distribution
#
# - In the analytically unsolvable expressions for evidence and the predictive distribution (estimating the class of a new observation), we proceed with using the Laplace approximation to the weights posterior. For a new observation $x_\bullet$, the class probability is now
# $$\begin{align*}
# p(y_\bullet = 1 \mid x_\bullet, D) &= \int p(y_\bullet = 1 \,|\, x_\bullet, w) \cdot p(w\,|\, D) \,\mathrm{d}w \\
# &\approx \int p(y_\bullet = 1 \,|\, x_\bullet, w) \cdot \underbrace{q(w)}_{\text{Gaussian}} \,\mathrm{d}w \\
# &= \int \sigma(w^T x_\bullet) \cdot \mathcal{N}\left(w \,|\, w_N, S_N\right) \,\mathrm{d}w \tag{B-4.145}
# \end{align*}$$
# + [markdown] slideshow={"slide_type": "fragment"}
# - This looks better but we need two more clever tricks to evaluate this expression.
# 1. First, note that $w$ appears in $\sigma(w^T x_\bullet)$ as an inner product, so through substitution of $a:=w^T x_\bullet$, the expression simplifies to an integral over the scalar $a$ (see Bishop for derivation):
# $$\begin{align*}
# p(y_\bullet = 1 \mid x_\bullet, D) &\approx \int \sigma(a) \, \mathcal{N}\left(a\,|\, \mu_a, \sigma_a^2\right) \,\mathrm{d}a \tag{B-4.151}\\
# \mu_a &= w^T_{N} x_\bullet \tag{B-4.149}\\
# \sigma_a^2 &= x^T_\bullet S_N x_\bullet \tag{B-4.150}
# \end{align*}$$
# 1. Secondly, while the integral of the product of a logistic function with a Gaussian is not analytically solvable, the integral of the product of a Gaussian cumulative distribution function (CDF, also known as the [probit function](#scaled-probit)) with a Gaussian _does_ have a closed-form solution. Fortunately,
# $$\Phi(\lambda a) \approx \sigma(a)$$
# with the <a id="gaussian-cdf">Gaussian</a> CDF $\Phi(x)= \frac{1}{\sqrt{2\pi}}\int_{-\infty}^{x}e^{-t^2/2}\mathrm{d}t$, $ \lambda^2= \pi / 8 $ and $\sigma(a) = 1/(1+e^{-a})$.
# Thus, substituting $\Phi(\lambda a)$ with $ \lambda^2= \pi / 8 $ for $\sigma(a)$ leads to
#
# $$\begin{align*}
# p(y_\bullet = 1 \mid x_\bullet, D) &= \int \sigma(w^T x_\bullet) \cdot p(w|D) \,\mathrm{d}w \\
# &\approx \int \underbrace{\Phi(\lambda a)}_{\text{probit function}} \cdot \underbrace{\mathcal{N}\left(a\,|\, \mu_a, \sigma_a^2\right)}_{\text{Gaussian}} \,\mathrm{d}a \\
#   &= \Phi\left( \frac{\mu_a}{\sqrt{\lambda^{-2} +\sigma_a^2}}\right) \tag{B-4.152}
# \end{align*}$$
# + [markdown] slideshow={"slide_type": "fragment"}
# - We now have an approximate but **closed-form expression for the predictive class distribution for a new observation** with a Bayesian logistic regression model.
# + [markdown] slideshow={"slide_type": "fragment"}
# - Note that, by [Eq.B-4.143](#Laplace-posterior-logistic-regression), the variance $S_N$ (and consequently $\sigma_a^2$) for the weight vector depends on the distribution of the training set. Large uncertainty about the weights (in areas with little training data and uninformative prior variance $S_0$) takes the posterior class probability eq. B-4.152 closer to $0.5$. Does that make sense?
# + [markdown] slideshow={"slide_type": "fragment"}
# - Apparently, the Laplace approximation leads to a closed-form solution for Bayesian logistic regression (although admittedly, the derivation is no walk in the park).
# + [markdown] slideshow={"slide_type": "slide"}
# ### ML Estimation for Discriminative Classification
#
# - Rather than the computationally involved Bayesian method, in practice, discriminative classification is often executed through maximum likelihood estimation.
# + [markdown] slideshow={"slide_type": "fragment"}
# - With the usual 1-of-K encoding scheme for classes ($y_{nk}=1$ if $x_n \in \mathcal{C}_k$, otherwise $y_{nk}=0$), the log-likelihood for a $K$-dimensional discriminative classifier is
#
# $$\begin{align*}
# \mathrm{L}(\theta) &= \log \prod_n \prod_k {p(\mathcal{C}_k|x_n,\theta)}^{y_{nk}} \\
# &= \log \prod_n \prod_k \Bigg(\underbrace{\frac{e^{\theta_k^T x_n}}{ \sum_j e^{\theta_j^T x_n}}}_{\text{softmax function}}\Bigg)^{y_{nk}} \\
#   &= \sum_n \sum_k y_{nk} \log \big( \frac{e^{\theta_k^T x_n}}{ \sum_j e^{\theta_j^T x_n}} \big)
# \end{align*}$$
#
#
# + [markdown] slideshow={"slide_type": "fragment"}
#
# - Computing the gradient $\nabla_{\theta_k} \mathrm{L}(\theta)$ leads to (for [proof, see optional slide below](#ML-for-LG))
#
# $$
# \nabla_{\theta_k} \mathrm{L}(\theta) = \sum_n \underbrace{\big( \underbrace{y_{nk}}_{\text{target}} - \underbrace{\frac{e^{\theta_k^T x_n}}{ \sum_j e^{\theta_j^T x_n}}}_{\text{prediction}} \big)}_{\text{prediction error}}\cdot x_n
# $$
# + [markdown] slideshow={"slide_type": "fragment"}
#
# - Compare this to the [gradient for _linear_ regression](https://nbviewer.jupyter.org/github/bertdv/BMLIP/blob/master/lessons/notebooks/Regression.ipynb#regression-gradient):
#
# $$
# \nabla_\theta \mathrm{L}(\theta) = \sum_n \left(y_n - \theta^T x_n \right) x_n
# $$
# + [markdown] slideshow={"slide_type": "fragment"}
# - In both cases
#
# $$
# \nabla_\theta \mathrm{L} = \sum_n \left( \text{target}_n - \text{prediction}_n \right) \cdot \text{input}_n
# $$
# + [markdown] slideshow={"slide_type": "fragment"}
# - The parameter vector $\theta$ for logistic regression can be estimated through iterative gradient-based adaptation. E.g. (with iteration index $i$),
# $$
# \hat{\theta}^{(i+1)} = \hat{\theta}^{(i)} + \eta \cdot \left. \nabla_\theta \mathrm{L}(\theta) \right|_{\theta = \hat{\theta}^{(i)}}
# $$
# - Note that, while in the Bayesian approach we get to update $\theta$ with [**precision-weighted** prediction errors](https://nbviewer.jupyter.org/github/bertdv/BMLIP/blob/master/lessons/notebooks/The-Gaussian-Distribution.ipynb#precision-weighted-update) (which is optimal), in the maximum likelihood approach, we weigh the prediction errors with **input** values (which is less precise).
# + [markdown] slideshow={"slide_type": "slide"}
# #### CODE EXAMPLE
#
# Let us perform ML estimation of $w$ on the data set from the introduction. To allow an offset in the discrimination boundary, we add a constant 1 to the feature vector $x$. We only have to specify the (negative) log-likelihood and the gradient w.r.t. $w$. Then, we use an off-the-shelf optimisation library to minimize the negative log-likelihood.
#
# We plot the resulting maximum likelihood discrimination boundary. For comparison we also plot the ML discrimination boundary obtained from the [code example in the generative Gaussian classifier lesson](https://nbviewer.jupyter.org/github/bertdv/BMLIP/blob/master/lessons/notebooks/Generative-Classification.ipynb#code-generative-classification-example).
# + slideshow={"slide_type": "slide"}
using Optim # Optimization library
# Encode the boolean labels as a 0/1 vector for the Bernoulli likelihood below.
y_1 = zeros(length(y))# class 1 indicator vector
y_1[findall(y)] .= 1
X_ext = vcat(X, ones(1, length(y))) # Extend X with a row of ones to allow an offset in the discrimination boundary
# Implement negative log-likelihood function
function negative_log_likelihood(θ::Vector)
    # Return negative log-likelihood: -L(θ)
    # Logistic posterior σ(θᵀxₙ) for every data point; columns of X_ext are the
    # (offset-extended) feature vectors, so X_ext' * θ is a vector of activations.
    p_1 = 1.0 ./ (1.0 .+ exp.(-X_ext' * θ)) # P(C1|X,θ)
    return -sum(log.( (y_1 .* p_1) + ((1 .- y_1).*(1 .- p_1))) ) # negative log-likelihood
end
# Use Optim.jl optimiser to minimize the negative log-likelihood function w.r.t. θ
results = optimize(negative_log_likelihood, zeros(3), LBFGS())
θ = results.minimizer
# Plot the data set and ML discrimination boundary
plotDataSet()
# Fitted class-1 posterior for a single 2-d input x (constant 1 appended for the offset).
p_1(x) = 1.0 ./ (1.0 .+ exp(-([x;1.]' * θ)))
# Decision boundary: solve θ₁x₁ + θ₂x₂ + θ₃ = 0 for x₂.
boundary(x1) = -1 ./ θ[2] * (θ[1]*x1 .+ θ[3])
plot([-2.;10.], boundary([-2.; 10.]), "k-");
# # Also fit the generative Gaussian model from lesson 7 and plot the resulting discrimination boundary for comparison
generative_boundary = buildGenerativeDiscriminationBoundary(X, y)
plot([-2.;10.], generative_boundary([-2;10]), "k:");
legend([L"y=0";L"y=1";L"y=?";"Discr. boundary";"Gen. boundary"], loc=3);
# Given $\hat{\theta}$, we can classify a new input $x_\bullet = [3.75, 1.0]^T$:
x_test = [3.75;1.0]
println("P(C1|x•,θ) = $(p_1(x_test))")
# + [markdown] slideshow={"slide_type": "slide"}
# - The generative model gives a bad result because the feature distribution of one class is clearly non-Gaussian: the model does not fit the data well.
#
# - The discriminative approach does not suffer from this problem because it makes no assumptions about the feature distribution $p(x|y)$, it just estimates the conditional class distribution $p(y|x)$ directly.
# + [markdown] slideshow={"slide_type": "slide"}
# ### Recap Classification
#
# <table>
# <tr> <td></td><td style="text-align:center"><b>Generative</b></td> <td style="text-align:center"><b>Discriminative (ML)</b></td> </tr>
#
# <tr> <td>1</td><td>Like <b>density estimation</b>, model joint prob.
# $$p(\mathcal{C}_k) p(x|\mathcal{C}_k) = \pi_k \mathcal{N}(\mu_k,\Sigma)$$</td> <td>Like (linear) <b>regression</b>, model conditional
# $$p(\mathcal{C}_k|x,\theta)$$</td> </tr>
#
# <tr> <td>2</td><td>Leads to <b>softmax</b> posterior class probability
# $$ p(\mathcal{C}_k|x,\theta ) = e^{\theta_k^T x}/Z$$
# with <b>structured</b> $\theta$</td> <td> <b>Choose</b> also softmax posterior class probability
# $$ p(\mathcal{C}_k|x,\theta ) = e^{\theta_k^T x}/Z$$
# but now with 'free' $\theta$</td> </tr>
#
# <tr> <td>3</td><td>For Gaussian $p(x|\mathcal{C}_k)$ and multinomial priors,
# $$\hat \theta_k = \left[ {\begin{array}{c}
# { - \frac{1}{2} \mu_k^T \sigma^{-1} \mu_k + \log \pi_k} \\
# {\sigma^{-1} \mu_k } \\
# \end{array}} \right]$$
# <b>in one shot</b>.</td> <td>Find $\hat\theta_k$ through gradient-based adaptation
# $$\nabla_{\theta_k}\mathrm{L}(\theta) = \sum_n \Big( y_{nk} - \frac{e^{\theta_k^T x_n}}{\sum_{k^\prime} e^{\theta_{k^\prime}^T x_n}} \Big)\, x_n$$ </td> </tr>
# </table>
# + [markdown] slideshow={"slide_type": "slide"}
# ## <center> OPTIONAL SLIDES </center>
# + [markdown] slideshow={"slide_type": "slide"}
#
# ### <a id="ML-for-LG">Proof of Derivative of Log-likelihood for Logistic Regression</a>
#
#
# - The Log-likelihood is $
# \mathrm{L}(\theta) = \log \prod_n \prod_k {\underbrace{p(\mathcal{C}_k|x_n,\theta)}_{p_{nk}}}^{y_{nk}} = \sum_{n,k} y_{nk} \log p_{nk}$
#
#
# - Use the fact that the softmax $\phi_k \equiv e^{a_k} / {\sum_j e^{a_j}}$ has analytical derivative:
#
# $$ \begin{align*}
# \frac{\partial \phi_k}{\partial a_j} &= \frac{(\sum_j e^{a_j})e^{a_k}\delta_{kj}-e^{a_j}e^{a_k}}{(\sum_j e^{a_j})^2} = \frac{e^{a_k}}{\sum_j e^{a_j}}\delta_{kj} - \frac{e^{a_j}}{\sum_j e^{a_j}} \frac{e^{a_k}}{\sum_j e^{a_j}}\\
# &= \phi_k \cdot(\delta_{kj}-\phi_j)
# \end{align*}$$
#
# <!---
# % - Again we try to minimize the cross-entropy ($\sum_{nk} y_{nk} \log \frac{y_{nk}}{p_{nk}}$) between the data `targets' $t_{nk}$ and the model outputs $p_{nk}$.
# --->
#
# - Take the derivative of $\mathrm{L}(\theta)$ (or: how to spend an hour ...)
# $$\begin{align*}
# \nabla_{\theta_j} \mathrm{L}(\theta) &= \sum_{n,k} \frac{\partial \mathrm{L}_{nk}}{\partial p_{nk}} \cdot\frac{\partial p_{nk}}{\partial a_{nj}}\cdot\frac{\partial a_{nj}}{\partial \theta_j} \\
# &= \sum_{n,k} \frac{y_{nk}}{p_{nk}} \cdot p_{nk} (\delta_{kj}-p_{nj}) \cdot x_n \\
# &= \sum_n \Big( y_{nj} (1-p_{nj}) -\sum_{k\neq j} y_{nk} p_{nj} \Big) \cdot x_n \\
# &= \sum_n \left( y_{nj} - p_{nj} \right)\cdot x_n \\
# &= \sum_n \Big( \underbrace{y_{nj}}_{\text{target}} - \underbrace{\frac{e^{\theta_j^T x_n}}{\sum_{j^\prime} e^{\theta_{j^\prime}^T x_n}}}_{\text{prediction}} \Big)\cdot x_n
# \end{align*}$$
# -
# ### <a id="gradient-hessian">Proof of gradient and Hessian for Laplace Approximation of Posterior</a>
#
# - We will start with the posterior
# $$\begin{align*}
# \underbrace{p(w | D)}_{\text{posterior}} \propto \underbrace{\mathcal{N}(w \,|\, m_0, S_0)}_{\text{prior}} \cdot \underbrace{\prod_{n=1}^N \sigma\big( \underbrace{(2y_n-1) w^T x_n}_{a_n}\big)}_{\text{likelihood}} \tag{B-4.142}
# \end{align*}$$
# from which it follows that
# $$\begin{align*}
# \log p(w | D) \propto -\frac{1}{2}\log |S_0| -\frac{1}{2} (w-m_0)^T S_0^{-1} (w-m_0) +\sum_n \log \sigma\left( a_n\right)
# \end{align*}$$
# and the gradient
# $$\begin{align*}
# \nabla_{w}\log p(w | D) &\propto \underbrace{S_0^{-1} (m_0-w)}_{\text{SRM-5b}} +\sum_n \underbrace{\frac{1}{\sigma(a_n)}}_{\frac{\partial \log \sigma(a_n)}{\partial \sigma(a_n)}} \cdot \underbrace{\sigma(a_n) \cdot (1-\sigma(a_n))}_{\frac{\partial \sigma(a_n)}{\partial a_n}} \cdot \underbrace{(2y_n-1)x_n}_{\frac{\partial a_n}{\partial w} \text{ (see SRM-5a)}} \\
# &= S_0^{-1} (m_0-w) + \sum_n (2y_n-1) (1-\sigma(a_n)) x_n \quad \text{(gradient)}
# \end{align*}$$
# where we used $\sigma^\prime(a) = \sigma(a)\cdot (1-\sigma(a))$.
#
# - For the Hessian, we continue to differentiate the transpose of the gradient, leading to
# $$\begin{align*}
# \nabla\nabla_{w}\log p(w | D) &= \nabla_{w} \left(S_0^{-1} (m_0-w)\right)^T - \sum_n (2y_n-1) x_n \nabla_{w}\sigma(a_n)^T \\ &= -S_0^{-1} - \sum_n (2y_n-1) x_n \cdot \underbrace{\sigma(a_n)\cdot (1-\sigma(a_n))}_{\frac{\partial \sigma(a_n)^T}{\partial a_n^T}}\cdot \underbrace{(2y_n-1) x_n^T}_{\frac{\partial a_n^T}{\partial w}} \\
# &= -S_0^{-1} - \sum_n \sigma(a_n)\cdot (1-\sigma(a_n))\cdot x_n x_n^T \quad \text{(Hessian)}
# \end{align*}$$
# since $(2y_n-1)^2=1$ for $y_n \in \{0,1\}$.
#
# + slideshow={"slide_type": "skip"}
# Load the course's custom HTML/CSS styling (presentation only; no effect on results).
open("../../styles/aipstyle.html") do f
    display("text/html", read(f,String))
end
# -
| lessons/notebooks/Discriminative-Classification.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: 'Python 3.8.5 64-bit (''base'': conda)'
# name: python385jvsc74a57bd0e85a6016b40488e530ae281b142fbe7409b6c75bd8ab1bedd69f524c7df14d09
# ---
# ### In this script the final XGBoost model is trained and tested on the JSI dataset. This script was also used for the parametric sweep, with parameters edited by hand using a greedy optimization method and results are recorded in a table (very tedious and archaic...I should have set up a loop!)
import numpy as np
import pandas as pd
import os
from sklearn.metrics import mean_squared_error as MSE
from sklearn.metrics import mean_absolute_error as MAE
from sklearn.model_selection import train_test_split
import xgboost as xgb
import time
import matplotlib.pyplot as plt
# +
# All subject data included in one .csv file 'pwrtbl_all.csv'
# This data has had outliers removed from sensors and has had smoothing applied
# The person division occur at the following points
# Person A [0:796219] or [:796219]
# Person B [A:1276358]
# Person C [B:1804959]
# Person D [C:2311275]
# Person E [D:2847245]
# Person F [E:3245064]
# Person G [F:3763122]
# Person H [G:4160941]
# Person I [H:4712016]
# Person J [I:5147172] or [I:]
# -
# Load the data
# Loading this 5M line .csv file in with pandas and then converting to numpy is faster than directly loading into numpy with np.genfromtxt()
# Resulting 2-d array: one row per sample; per the comments below, the last
# column is the (normalized) MET target and the rest are sensor features.
dataraw = pd.read_csv('pwrtbl_all.csv')
dataraw = dataraw.to_numpy()
# Slice the concatenated recording into one array per subject, then build
# subject-disjoint train/test splits (subjects 0-7 train, 8-9 test).
divisions = [0, 796219, 1276358, 1804959, 2311275, 2847245, 3245064, 3763122, 4160941, 4712016, 5147172]
data = [dataraw[lo:hi, :] for lo, hi in zip(divisions[:-1], divisions[1:])]
# Subject indices in the full cohort and the training cohort; the test cohort
# is the set difference.
fullset = set({0,1,2,3,4,5,6,7,8,9})
trainset = set({0,1,2,3,4,5,6,7})
# Concatenate each cohort into one contiguous array (ascending subject order).
tr = np.concatenate([data[i] for i in sorted(trainset)], axis=0)
ts = np.concatenate([data[i] for i in sorted(fullset - trainset)], axis=0)
# The final column is the MET target; everything before it is the feature vector.
Xtr, ytr = tr[:, :-1], tr[:, -1]
Xts, yts = ts[:, :-1], ts[:, -1]
# Drop the intermediate arrays to keep memory usage down.
del dataraw, data, tr, ts
# Now we will train an XGBoost model
# There are a lot of parameters here, and it is important to understand what each of them does when building our model
# Learning_rate - boosting learning rate, how quickly and strongly the learners are added to the ensemble
# Colsample_bytree - percentage of columns randomly sampled for each tree or estimator
# Max_depth - maximum depth per tree. Used as a way to tune the "weakness" of the learners. In general this value is very low, between 1 and 5
# N_estimators - number of estimators or decision trees that comprise the overall ensemble
# Reg_alpha - L1 regularization weight
# Reg_lambda - L2 regularization weight
# Gamma - min split loss, essentially the gain a potential split must provide to be considered. This effectively prunes the trees and prevents them from overfitting with meaningless splits
# Train the XGBoost regressor on subjects 0-7 and evaluate on the held-out
# subjects. Parameter meanings are documented in the comments above; the
# values come from the greedy manual sweep described at the top of the file.
start = time.time()
mdl = xgb.XGBRegressor(
        learning_rate = 0.05,    # boosting step size
        colsample_bytree = 0.5,  # fraction of columns sampled per tree
        max_depth = 5,           # per-tree depth ("weakness" of the learners)
        reg_alpha = 0,           # L1 regularization weight
        reg_lambda = 1,          # L2 regularization weight
        gamma = 50,              # min split loss: prunes uninformative splits
        n_estimators = 200,      # number of boosted trees
        verbosity = 1,
        ).fit(Xtr, ytr)
pred = mdl.predict(Xts)
end = time.time()
print('RMSE:', np.sqrt(MSE(yts, pred)), '\tMAE:', MAE(yts, pred), '\tTime: ', (end - start))
plt.figure(figsize = (10, 7))
# FIX: the legend labels were swapped in the original — `pred` is the model
# prediction and `yts` is the ground-truth MET.
plt.plot(pred, label = 'Predicted MET')
plt.plot(yts, label = 'Actual MET')
plt.xlabel('Instance'); plt.ylabel('Normalized MET'); plt.legend()
plt.show()
# # ROC Curve
from sklearn.metrics import roc_curve, auc, accuracy_score, precision_score, recall_score
# To construct a ROC curve we need to convert this regression problem into a classification problem. Since we normalized our data, we know it to be centered around 0, thus I will classify MET values above 0 as true, or 1, and MET values below 0 as false, or 0.
# Binarize ground truth and predictions at the 0 threshold (data are normalized).
yts_class = yts > 0
pred_class = pred > 0
# (Alternative multi-bin discretization, kept for reference but unused.)
# yts_class = np.zeros(len(yts))
# pred_class = np.zeros(len(pred))
# divs = [-1,-0.75,-0.5,-0.25,0,0.25,0.5,0.75,1]
# for i in range(len(divs)-1):
#     yts_class += i * ((divs[i] < yts) * (yts < divs[i+1]))
#     pred_class += i * ((divs[i] < pred) * (pred < divs[i+1]))
# Classification quality of the thresholded predictions.
print('Accuracy:', accuracy_score(yts_class, pred_class))
print('Precision:', precision_score(yts_class, pred_class))
print('Recall:', recall_score(yts_class, pred_class))
# Compute the ROC curve and its area for the 0-thresholded classification task.
import matplotlib
font = {'family' : 'sans-serif',
        'weight' : 'normal',
        'size'   : 12}
matplotlib.rc('font', **font)
# FIX: roc_curve expects continuous scores, not hard 0/1 predictions.
# Passing the binarized pred_class collapses the curve to a single operating
# point; using the raw regression output `pred` as the score sweeps the
# decision threshold properly. (The labels stay binarized.)
fpr, tpr, _ = roc_curve(yts_class, pred)
roc_auc = auc(fpr, tpr)
plt.figure(figsize = (10,7))
lw = 2
plt.plot(fpr, tpr, color='darkorange',
        lw=lw, label='ROC curve (Area = %0.2f)' % roc_auc)
plt.plot([0, 1], [0, 1], color='navy', lw=lw, linestyle='--')  # chance diagonal
plt.xlim([-0.02, 1.0])
plt.ylim([0.0, 1.05])
plt.xlabel('False Positive Rate')
plt.ylabel('True Positive Rate')
plt.title('ROC curve')
plt.legend(loc="lower right")
plt.show()
# # Confusion Matrix
from sklearn.metrics import ConfusionMatrixDisplay, confusion_matrix
import matplotlib.cm as cm
# Plotting confusion matrix
# Raw (unnormalized) counts for the thresholded classes computed above.
C = confusion_matrix(yts_class, pred_class, labels = [0,1], normalize = None)
disp = ConfusionMatrixDisplay(confusion_matrix = C, display_labels = ['0','1'])
disp.plot(values_format = 'd', cmap = cm.Oranges)  # 'd' = integer count formatting
plt.show()
| model_code/xgboost_final.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# name: python3
# ---
# + id="62dboqxD3Xd0"
#PREDICT FUEL EFFICIENCY
# Use seaborn for pairplot
# !pip install -q seaborn
# Use some functions from tensorflow_docs
# !pip install -q git+https://github.com/tensorflow/docs
# + id="7J5rwWHz4Jyz"
#IMPORT LIBRARIES
import pathlib
import matplotlib.pyplot as plt
import numpy as np
import pandas as pd
import seaborn as sns
from numpy import sqrt
# Make NumPy printouts easier to read.
np.set_printoptions(precision=3, suppress=True)
# + id="52jfY48d4KQl" colab={"base_uri": "https://localhost:8080/"} outputId="a25a4591-f527-46b1-9a3a-b7555f0109c2"
#IMPORT TENSORFLOW AND KERAS
import tensorflow as tf
from tensorflow import keras
from tensorflow.keras import layers
print(tf.__version__)
import tensorflow_docs as tfdocs
import tensorflow_docs.plots
import tensorflow_docs.modeling
# + id="JkVBxdIV4Kt5" colab={"base_uri": "https://localhost:8080/", "height": 35} outputId="20a87d59-3518-43e3-fe80-c24dad983a41"
#GET THE DATA
# Download the UCI Auto MPG dataset; keras caches it locally and returns the path.
dataset_path = keras.utils.get_file("auto-mpg.data", "http://archive.ics.uci.edu/ml/machine-learning-databases/auto-mpg/auto-mpg.data")
dataset_path
# + id="t6p5ynqb3YEI" colab={"base_uri": "https://localhost:8080/", "height": 203} outputId="1ae5fbfa-e25a-4cd2-9326-abbc68cf5922"
#IMPORT THE DATASET USING PANDAS
column_names = ['MPG','Cylinders','Displacement','Horsepower','Weight',
                'Acceleration', 'Model Year', 'Origin']
# '?' marks missing values in the raw file; '\t' introduces the trailing
# car-name field, which is skipped as a comment.
raw_dataset = pd.read_csv(dataset_path, names=column_names,
                      na_values = "?", comment='\t',
                      sep=" ", skipinitialspace=True)
dataset = raw_dataset.copy()
dataset.tail()
# + id="DbCwi0yJ3Yk7" colab={"base_uri": "https://localhost:8080/"} outputId="04fc0720-1453-4073-ae93-e3fdf7bd4269"
#CLEAN THE DATA
# Count missing values per column (the raw file has some '?' Horsepower entries).
dataset.isna().sum()
# + id="1S8_sth73ZFK"
#To keep this initial tutorial simple drop those rows.
dataset = dataset.dropna()
# + id="I--pBC5N5uBF" colab={"base_uri": "https://localhost:8080/"} outputId="a2682c9f-faad-4091-91c6-185bb8064232"
#The "Origin" column is really categorical, not numeric. So convert that to a one-hot:
dataset['Origin'] = dataset['Origin'].map({1: 'USA', 2: 'Europe', 3: 'Japan'})
# + id="voYRhLyF5uYY" colab={"base_uri": "https://localhost:8080/", "height": 203} outputId="6172c932-0760-44f6-f30f-34bc077b5323"
# Expand 'Origin' into one column per country (empty prefix keeps plain names).
dataset = pd.get_dummies(dataset, prefix='', prefix_sep='')
dataset.tail()
# + id="B7ZnXJlF5vEQ"
#SPLIT DATASET INTO TRAINING AND TESTING
# 80/20 random split, seeded for reproducibility.
train_dataset = dataset.sample(frac=0.8,random_state=0)
test_dataset = dataset.drop(train_dataset.index)
# + id="E-ew4ORq5vvb" colab={"base_uri": "https://localhost:8080/", "height": 744} outputId="8958205e-157a-4db7-87f2-b7e29f528705"
#INSPECT THE DATA
sns.pairplot(train_dataset[["MPG", "Cylinders", "Displacement", "Weight"]], diag_kind="kde")
# + id="C_R93YnS5wFT" colab={"base_uri": "https://localhost:8080/", "height": 326} outputId="3e218ac5-0d79-418c-8f26-2107a47b63f6"
#OVERALL STATISTICS
# Per-feature summary statistics of the training split, excluding the target;
# after the transpose, rows are features and columns are the statistics.
train_stats = train_dataset.describe()
train_stats.pop("MPG")
train_stats = train_stats.transpose()
train_stats
# + id="_aFtywBp5wxa"
#Separate the target value—the "label"—from the features. This label is the value that you will train the model to predict
train_features = train_dataset.copy()
test_features = test_dataset.copy()
train_labels = train_dataset.pop('MPG')
test_labels = test_dataset.pop('MPG')
# + colab={"base_uri": "https://localhost:8080/"} id="Qa7qGbc_FAWZ" outputId="88cc6a03-928f-4a71-b027-211032518c0b"
#Normalization layer
normalizer = tf.keras.layers.Normalization(axis=-1)
normalizer.adapt(np.array(train_features))
print(normalizer.mean.numpy())
# + colab={"base_uri": "https://localhost:8080/"} id="1yBMB2XwFPrQ" outputId="0e43bcc0-1ef2-486c-dca6-d8e39344bef0"
#When the layer is called, it returns the input data, with each feature independently normalized
first = np.array(train_features[:1])
with np.printoptions(precision=2, suppress=True):
print('First example:', first)
print()
print('Normalized:', normalizer(first).numpy())
# + id="dtePvjQX6rqt"
#NORMALIZE THE DATA
def norm(x):
return (x - train_stats['mean']) / train_stats['std']
normed_train_data = norm(train_dataset)
normed_test_data = norm(test_dataset)
# + [markdown] id="0mdCcYZ33p4x"
# LINEAR REGRESSION WITH ONE INPUT
#
#
#
# + id="s2W3ls5N2AAy"
#create a NumPy array made of the 'Horsepower' features
horsepower = np.array(train_features['Horsepower'])
horsepower_normalizer = layers.Normalization(input_shape=[1,], axis=None)
horsepower_normalizer.adapt(horsepower)
# + colab={"base_uri": "https://localhost:8080/"} id="yT2C9Y4S2HoZ" outputId="c8ee4726-610b-477e-ee63-09f73093ebae"
#Keras sequential model for linear regression
horsepower_model = tf.keras.Sequential([
horsepower_normalizer,
layers.Dense(units=1)
])
horsepower_model.summary()
# + colab={"base_uri": "https://localhost:8080/"} id="8stmrSFr2RTo" outputId="df575f3d-46c1-47ce-aecc-a480f33ae98d"
#Take a batch of 10 examples from the training data and call model.predict on it
horsepower_model.predict(horsepower[:10])
# + id="kquCokfg2uvI"
#Compile loss and optimizer
horsepower_model.compile(
optimizer=tf.optimizers.Adam(learning_rate=0.1),
loss='mean_absolute_error')
# + colab={"base_uri": "https://localhost:8080/"} id="bwwkIWw12yjA" outputId="044b253a-137d-48b0-bf4a-5dfd5e64aaea"
#Keras Model.fit to execute the training for 100 epochs
# %%time
history = horsepower_model.fit(
train_features['Horsepower'],
train_labels,
epochs=100,
# Suppress logging.
verbose=0,
# Calculate validation results on 20% of the training data.
validation_split = 0.2)
# + colab={"base_uri": "https://localhost:8080/", "height": 203} id="qJqUygya25lI" outputId="b499b53b-2eec-4101-da89-c660416b29ed"
#Visualize the model's training progress using the stats stored in the history object
hist = pd.DataFrame(history.history)
hist['epoch'] = history.epoch
hist.tail()
# + id="1BqJd9Yu253g"
def plot_loss(history):
plt.plot(history.history['loss'], label='loss')
plt.plot(history.history['val_loss'], label='val_loss')
plt.ylim([0, 10])
plt.xlabel('Epoch')
plt.ylabel('Error [MPG]')
plt.legend()
plt.grid(True)
# + id="8Tvr0flQ3DSp"
#Collect the results on the test set for later
test_results = {}
test_results['horsepower_model'] = horsepower_model.evaluate(
test_features['Horsepower'],
test_labels, verbose=0)
# + id="m_ibxlOU3DiR"
#model's predictions
x = tf.linspace(0.0, 250, 251)
y = horsepower_model.predict(x)
# + id="lZZGxdsk3J6h"
def plot_horsepower(x, y):
plt.scatter(train_features['Horsepower'], train_labels, label='Data')
plt.plot(x, y, color='k', label='Predictions')
plt.xlabel('Horsepower')
plt.ylabel('MPG')
plt.legend()
# + [markdown] id="pUmA4CWI5PHp"
# LINEAR REGRESSION WITH MULTIPLE INPUTS
# + id="RLh4N32j5lyB"
#two-step Keras Sequential model
linear_model = tf.keras.Sequential([
normalizer,
layers.Dense(units=1)
])
# + colab={"base_uri": "https://localhost:8080/"} id="UlQ9_uN_5mBo" outputId="a6183d9a-5b13-4021-9b55-8caed63d2b26"
#Take a batch of 10 examples from the training data and call model.predict on it
linear_model.predict(train_features[:10])
# + colab={"base_uri": "https://localhost:8080/"} id="sE9j-ktQ5mPw" outputId="c0438d41-94e8-4139-8398-12b77cfd2333"
linear_model.layers[1].kernel
# + id="yroeGDGJ5mdQ"
#Configure the model with Keras Model.compile and train with Model.fit for 100 epochs
linear_model.compile(
optimizer=tf.optimizers.Adam(learning_rate=0.1),
loss='mean_absolute_error')
# + colab={"base_uri": "https://localhost:8080/"} id="mifW4amC5mqY" outputId="7807206c-4949-416d-9d35-35fbf5ad64c9"
# %%time
history = linear_model.fit(
train_features,
train_labels,
epochs=100,
# Suppress logging.
verbose=0,
# Calculate validation results on 20% of the training data.
validation_split = 0.2)
# + id="OHceSpLN6W3h"
#Collect the results on the test set for later
test_results['linear_model'] = linear_model.evaluate(
test_features, test_labels, verbose=0)
# + [markdown] id="Eaq7JwIT6vEp"
# REGRESSION WITH DEEP NEURAL NETWORK
# + id="0x2EAKlh6sJc"
#BUILD THE MODEL
def build_and_compile_model(norm):
model = keras.Sequential([
norm,
layers.Dense(64, activation='relu'),
layers.Dense(64, activation='relu'),
layers.Dense(1)
])
model.compile(loss='mean_absolute_error',
optimizer=tf.keras.optimizers.Adam(0.001))
return model
# + [markdown] id="AQpvkHoI7QmR"
# REGRESSION USING DNN AND MULTIPLE INPUTS
# + id="iw4ELwML6shO" colab={"base_uri": "https://localhost:8080/"} outputId="83777a2f-c5bd-494e-a269-972518d59c3f"
dnn_model = build_and_compile_model(normalizer)
dnn_model.summary()
# + colab={"base_uri": "https://localhost:8080/"} id="3D8B5PaZ7jCh" outputId="e5e381f5-1556-4740-83fc-8d41a9b30f91"
# %%time
history = dnn_model.fit(
train_features,
train_labels,
validation_split=0.2,
verbose=0, epochs=100)
# + id="g3q3jBgi7p6A"
#Collect the results on the test set
test_results['dnn_model'] = dnn_model.evaluate(test_features, test_labels, verbose=0)
# + colab={"base_uri": "https://localhost:8080/", "height": 142} id="oZdR6OF_8MIC" outputId="b799565a-8581-447c-b59b-39406827bb89"
#TEST SET PERFORMANCE
pd.DataFrame(test_results, index=['Mean absolute error [MPG]']).T
# + colab={"base_uri": "https://localhost:8080/", "height": 283} id="o-hRSWn28Ml4" outputId="2a3343e9-63d9-44cd-d5c4-bce39c53dc3f"
#Make predictions using Model.predict
test_predictions = dnn_model.predict(test_features).flatten()
a = plt.axes(aspect='equal')
plt.scatter(test_labels, test_predictions)
plt.xlabel('True Values [MPG]')
plt.ylabel('Predictions [MPG]')
lims = [0, 50]
plt.xlim(lims)
plt.ylim(lims)
_ = plt.plot(lims, lims)
# + colab={"base_uri": "https://localhost:8080/", "height": 279} id="1i0zjiBd9GLB" outputId="fbcd8001-0b94-4aa7-e126-63b5a962e517"
#Error Distributions
error = test_predictions - test_labels
plt.hist(error, bins=25)
plt.xlabel('Prediction Error [MPG]')
_ = plt.ylabel('Count')
# + colab={"base_uri": "https://localhost:8080/"} id="ugsbxpdc9QN5" outputId="a071b9be-589e-428f-a901-c4cd367aff4a"
#Saving model for future use
dnn_model.save('dnn_model')
# + id="ZKzAfNxw-DaA"
#same output after model reloading
reloaded = tf.keras.models.load_model('dnn_model')
test_results['reloaded'] = reloaded.evaluate(
test_features, test_labels, verbose=0)
# + colab={"base_uri": "https://localhost:8080/", "height": 172} id="Gvlj6PtH-WCB" outputId="ee7a432c-cbf2-450f-b72a-1eeeaf321ce3"
pd.DataFrame(test_results, index=['Mean absolute error [MPG]']).T
| Datascience_With_Python/Deep Learning/Projects/Predicting Fuel Efficiency/predicting_fuel_efficiency.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# name: python3
# ---
# + [markdown] id="view-in-github" colab_type="text"
# <a href="https://colab.research.google.com/github/xhang24/xiaotong/blob/master/src/h303.ipynb" target="_parent"><img src="https://colab.research.google.com/assets/colab-badge.svg" alt="Open In Colab"/></a>
# + [markdown] id="R3uJxJbwtZjp" colab_type="text"
# Consider a European option with
# - call type
# - strike = 110
# - maturity = T
# underlying a Gbm stock with
# - initial: 100
# - interest rate: 4.75%
# - vol ratio: $\sigma$
#
# We denote this bsm price by $f(\sigma, T)$.
#
# - Let $\sigma = 20\%$ fixed. plot $T \mapsto f(0.2, T)$ when $T$ is ranging over $(0.5, 2)$.
#
# - Let $T = 1$ fixed. plot $\sigma \mapsto f(\sigma, 1)$ when $\sigma$ is ranging over $(.05, 0.5)$
#
# - Describe your observations. Do you think the same behavior is also true for put?
#
# - Could you prove your observations?
# + id="8Z78OFycRorh" colab_type="code" colab={}
class VanillaOption:
def __init__(
self,
otype = 1, # 1: 'call'
# -1: 'put'
strike = 110.,
maturity = 1.,
market_price = 10.):
self.otype = otype
self.strike = strike
self.maturity = maturity
self.market_price = market_price #this will be used for calibration
def payoff(self, s): #s: excercise price
otype = self.otype
k = self.strike
maturity = self.maturity
return max([0, (s - k)*otype])
# + id="-cYwGeYSkm98" colab_type="code" colab={"base_uri": "https://localhost:8080/", "height": 134} outputId="64592ba8-e5a7-4b93-9530-57fd2457e149"
class Gbm:
def __init__(self,
init_state = 100.,
drift_ratio = .0475,
vol_ratio = .2
):
self.init_state = init_state
self.drift_ratio = drift_ratio
self.vol_ratio = vol_ratio
def bsm_price(self, vanilla_option):
s0 = self.init_state
sigma = self.vol_ratio
r = self.drift_ratio
otype = vanilla_option.otype
k = vanilla_option.strike
maturity = vanilla_option.maturity
d1 = (np.log(s0 / k) + (r + 0.5 * sigma ** 2)
* maturity) / (sigma * np.sqrt(maturity))
d2 = d1 - sigma * np.sqrt(maturity)
return (otype * s0 * ss.norm.cdf(otype * d1) #line break needs parenthesis
- otype * np.exp(-r * maturity) * k * ss.norm.cdf(otype * d2))
# + id="zviwEMQFQylN" colab_type="code" outputId="b12295f3-c9b7-4850-9337-816edf3197ec" colab={"base_uri": "https://localhost:8080/", "height": 541}
import numpy as np
import scipy.stats as ss
import matplotlib.pyplot as plt
t=np.arange(0.5, 2.1)
voption=VanillaOption(otype=1, strike=110, maturity=t, market_price=10)
pr=Gbm()
value=pr.bsm_price(voption)
plt.plot(t, value)
plt.xlabel('time')
plt.ylabel('value')
plt.show()
sigma = np.linspace(0.05,0.5)
pr1 = Gbm(100., .0475, sigma)
voption2 = VanillaOption(otype=1, strike=110, maturity=1, market_price=10)
value2 = pr1.bsm_price(voption2)
plt.plot(sigma, value2)
plt.xlabel('sigma')
plt.ylabel('value')
plt.show()
# + id="nvN1g9e9Tb5w" colab_type="code" colab={}
| src/h303.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: 'Python 3.7.9 64-bit (''.venv'': venv)'
# name: python3
# ---
# + slideshow={"slide_type": "slide"}
import course;course.header()
# + [markdown] slideshow={"slide_type": "slide"}
# # Pandas level 1
# Data wrangling 101
# + [markdown] slideshow={"slide_type": "-"}
# I'd like to say Pandas is numpy on steroids but it is actually much more.
#
# Pandas is the data science solution for Python and it build on top of the powerful numpy module.
# However, Pandas offers elements that are much more intuitive or go beyond what numpy has ever provided.
# Nevertheless, numpy is more performant in some cases (by a lot, yet remember when to optimize!)
#
# The perfect is the enemy of the good.
# -- <NAME>
#
# Pandas was created by [Wes McKinney](https://wesmckinney.com/pages/about.html) in early 2008 at AQR Capital Management. I can recommend "Python for Data Analysis" by Wes, published via O'Reilly, and "Pandas for Everyone" by <NAME>. The following Pandas chapters are inspired by these books.
# + [markdown] slideshow={"slide_type": "slide"}
# Pandas offers the two basic data structures
# * Series
# * Dataframes
#
# -
import pandas as pd
c = pd.Series(
[12, 13, 14, 121],
index=['r1', 'r2', 'r3', 'r4']
)
c
# Selecting from Series works like a dict :)
c['r2']
mask = c >= 13
mask
c[mask]
# Masks can be additive!
mask2 = c < 20
c[mask & mask2]
c * 10
# works also with vectorized math operations
import numpy as np
np.exp(c)
# Remember to use numpy functions as much as possible so data remains on the "C side". More below!
# Operations conserve index!
#
# Series are like ordered Dicts!
'r1' in c
# np.nan is the missing value indicator
d = pd.Series(
{
'r1': np.nan,
'r2': 0.2,
'r3': 0.2,
'r4': 0.4
}
)
d
# ### Which values are nan?
d.isna() # returns a mask!
# inverting with ~!
~d.isna()
d.notnull()
# ## indices are aligned automatically!
c
d = pd.Series(
[10,20,30,40],
index=['r2', 'r3', 'r4', 'r5']
)
d
c + d
# ## Renaming index
d.index = ['r1', 'r2', 'r3', 'r4'] # now the indices are the same in c and d!
c + d
# Naming things will help you to get your data organised better. Explicit is better than implicit! And remember to choose your variable names wisely - you will read code more often than you write it.
d.index.name = "variable"
d.name = "counts"
d
d.reset_index()
# Resetting the index turns the index into a series, so now we have a DataFrame with two series!
type(d.reset_index())
# + [markdown] slideshow={"slide_type": "slide"}
# # Data frames
# Data frames are the pandas 2d data containers (if there is only one index dimension).
# In principle data frames are a list of Series, whereas each row is a series.
# -
df = pd.DataFrame(
[
c,
d, # this one we named :)
pd.Series([100,102,103,104], index=['r2', 'r3', 'r4', 'r5'])
]
)
df
# accessing a value
df.loc['counts', 'r2']
# Note: How pandas aligns your data automatically.
#
# If you want each series to be treated as column, just transpose
# DataFrames can be constructed in many different ways, see docu for more details
# https://pandas.pydata.org/pandas-docs/stable/reference/api/pandas.DataFrame.html?highlight=dataframe#pandas.DataFrame
df = df.T
df
# Renaming columns in a data frame
df.columns = ['count1', 'count2', 'count3']
df
# Dataframes can equally be named, for your sanity, name them :)
df.columns.name = "Counts"
df.index.name = "variable"
df
# Now that you feel happy in the pandas world, some modules/functions require numpy arrays, how do you convert them ?
np_df = df.values
np_df
type(np_df)
# + [markdown] slideshow={"slide_type": "slide"}
# If you need to work "longer" on the numpy side, I suggest to transform the pandas dataframe to a numpy recarray, as names are preserved;
# -
# np_df = df.values #
np_df = df.to_records()
np_df
np_df['variable']
np_df[0]
np_df[0][2]
# ## C-side and Python side
# **Note**:
# Regular Python floats live in the Python world - Numpy and Pandas live in the "C world", hence their fast vectorized operations. If you can avoid it, don't cast between the worlds!
long_series = pd.Series(
np.random.randn(1000000),
)
# %%timeit -n 1
a = long_series.to_list() # to python list!
print(f"a is a {type(a)} now!")
pd.Series(a)
# %%timeit -n 1
a = long_series.to_numpy()
print(f"a is a {type(a)} now!")
pd.Series(a)
# # Operations between DataFrame and Series
df_small = pd.DataFrame([c, d])
df_small
c
df_small - c
# Next time you want to normalize each row of a data frame, one can define the correction factors as a series and just e.g. subtract it.
df
df.rename(
columns={'count1':'count_reference'},
inplace=True
)
df
# subselecting a set of columns!
df[["count2", 'count3']]
# **Note:**
# This only creates a view of the data!
# # Pandas IO
# Pandas comes with a wide array of input output modules see
# https://pandas.pydata.org/pandas-docs/stable/user_guide/io.html
#
# **NOTE:** reading xlsx is _much_ slower than csv
# Your request: Scraping websites!
#
# Today with Pandas scraping wikipedia. In particular the oldest universities!
#
# Alternatively beautiful soup https://www.crummy.com/software/BeautifulSoup/bs4/doc/ or Scrapy https://scrapy.org/
url = "https://en.wikipedia.org/wiki/List_of_oldest_universities_in_continuous_operation"
# Lets bail out of the SSL context for the sake of this class :)
#
import ssl
ssl._create_default_https_context = ssl._create_unverified_context
dfs = pd.read_html(url) # do you get SSL: CERTIFICATE_VERIFY_FAILED ?
len(dfs)
dfs[0].head()
udf = dfs[0]
udf.columns
# Multi index makes pandas very powerful but it takes time to get used to them, see more below.
#
# For now let's get rid of them...
udf.columns = [ e[0] for e in udf.columns ]
udf.head()
udf.columns = ['Year', 'University', 'H-Location', 'G-Location', 'Notes' ]
udf.head()
# Most of the time, this data needs cleanup, e.g. year should most optimally be a date or at least a year.
# ## Gather some basic information around the dataframe
udf.describe()
udf.info()
# Cleaning up data takes a lot time and needs to be done diligently!
#
# Let's clean-up the Year column
#
# Accessing the str properties!
udf['Year'].str.match(r'^(?P<year>[0-9]{4})')
udf.loc[15]
udf['year'] = udf.Year.str.extract(r'(?P<year>[0-9]{4})') # regex
udf.head()
udf.loc[15]
udf.shape
# (rows, columns)
# One cannot visualize all columns straight away in jupyter :( However redefining some options helps!
# Use the fully qualified option name: abbreviated keys like "max_columns"
# rely on pandas' pattern matching of option names, which is fragile and
# deprecated in favour of the full "display.max_columns" path.
pd.set_option("display.max_columns", 2000)
# # Sorting
udf.head()
udf.sort_values(['year'])
# Sort_values has kwargs like ascending = True|False and values are defined by a list, ie sort first by, then by ...
udf.sort_values(['H-Location','year'])
# Let split the G-location into city and country!
tmp_df = udf['G-Location'].str.split(",")
display(tmp_df.head()) # not quite what we want .. we want two columns!
# How to get two columns?
tmp_df = udf['G-Location'].str.split(",", expand=True)
tmp_df.columns = ['G-City', 'G-Country']
tmp_df
udf = udf.join(tmp_df)
# there are many options to join frames
udf.head()
# # Deleting things
udf.head()
udf.drop(1)
udf.head(3)
udf.drop(columns=['G-Location', 'Year'])
# DataFrames or Series are not automatically "adjusted" in place unless you use `inplace=True`
udf
udf.drop(columns=['G-Location', 'Year'], inplace=True)
udf
# + [markdown] slideshow={"slide_type": "slide"}
# # slicing and dicing
# -
udf[:3] # df[:'r3'] works as well
# selecting one column!
udf['G-Country']
udf.describe()
# selecting one row
udf.loc[1]
udf.info()
# mask also work on df!
mask = udf['year'] < 1400
mask.head(10)
# casting columns into data types
udf.year = udf.year.astype(int)
_udf = udf.convert_dtypes()
_udf.info()
# mask also work on df!
mask = udf.year < 1400
mask.head(10)
udf[mask]
udf[udf['year'] < 1300] # reduces the data frame, again note! that is just a view, not a copy!
udf[udf['year'] < 1300].loc[1]
udf[udf['year'] > 1300].loc[1]
udf[udf['year'] > 1300].head(3)
# How would I know which index is the first one in my masked selection ?
# Answer: you don't need to if you use iloc! :)
udf[udf['year'] > 1300].iloc[0]
# ## more natural query - or isn't it?
udf.query("year > 1300").head(5)
udf.query("1349 > year > 1320")
# Using local variables in queries
upper_limit = 1400
udf.query("@upper_limit > year > 1320")
# ## Find the maximum for a given series or dataframe
udf['year'].idxmax()
# ## Unique values and their count
udf['G-Country'].unique()
udf['G-Country'].nunique()
udf['G-Country'].value_counts()
_udf = udf.set_index('University')
# Grab some ramdom rows
_udf.sample(5)
_udf.loc['Ruprecht Karl University of Heidelberg', ['Notes', 'year']]
_udf.loc['Ruprecht Karl University of Heidelberg', :]
# ## Done with Basics!
# Take a look at the cheat sheet for a summary
# https://pandas.pydata.org/Pandas_Cheat_Sheet.pdf
# # Hierarchical indexing
s = pd.Series(
np.random.randn(5),
index = [
['p1','p1','p2','p2','p3'],
['a','b','a','d','a']
]
)
s
s.index
s.index.names = ['probability', 'type']
s
s['p1']
s[:, 'a'] # lower level
s2 = s.unstack()
print(type(s2))
s2
s3 = s2.stack()
print(type(s3))
s3
# ## Multindex with Dataframes
df = pd.DataFrame(
[
c,
c * 20,
d,
np.exp(d),
pd.Series(np.random.randn(4), index=['r2', 'r3', 'r4', 'r5'])
],
index = [
['p1','p1','p2','p2','p3'],
['a','b','a','d','a']
]
)
df.index.names = ['probability', 'type']
df
df = df.fillna(0)
df
# **Note**:
# You can create multi indeces from a regular dataframe!
df2 = df.reset_index()
df2
df2.set_index(['probability', 'type'])
df2 = df.swaplevel('probability', 'type')
df2
df2.sort_index(axis=0, level=0, inplace=True)
df2
# ## Natural slicing using `pandas.IndexSlice` objects
idx = pd.IndexSlice
df2.loc[
idx[:, ["p1", "p2"]],
:
]
# ## long and wide formats
# Long formats - easy to read and to handle for computers - each variable has its own column
#
# Wide formats - easy to read for humans - each observation has its own row
df3 = df2.reset_index()
df3.sort_values(["probability", "type"], inplace=True)
df3
df4 = df3.melt(
id_vars=['type','probability'],
var_name='r_stage',
value_name='score'
)
print(df4.shape)
df4.sort_values(["type", "probability"], inplace=True)
df4.head(7)
# Think of selecting data, for example for plotting that should have the following criteria
# * probability == p1
# * r_stage in [r2, r3]
#
#
# much easier in long format
# +
# going back to the more human friendlier version ! :)
df5 = df4.pivot_table(index=['type', 'probability'], columns='r_stage', values="score")
df5
# -
| notebooks/06.a.introduction_to_pandas.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# name: python3
# ---
# + [markdown] id="view-in-github" colab_type="text"
# <a href="https://colab.research.google.com/github/xhang24/20s_ma573/blob/master/src/hw8.ipynb" target="_parent"><img src="https://colab.research.google.com/assets/colab-badge.svg" alt="Open In Colab"/></a>
# + id="oJ_--dWY7rua" colab_type="code" colab={}
import numpy as np
from scipy.linalg import solve
def solve_function(e, interval_num):
    """Solve a two-point boundary-value problem by central finite differences.

    Builds the (n+1) x (n+1) linear system for n = ``interval_num`` uniform
    subintervals of [0, 1]: the first and last rows enforce the Dirichlet
    boundary values u(0) = 0 and u(1) = 0, and each interior row is the
    tridiagonal stencil (-e/h^2, 2e/h^2 + 1, -e/h^2) with h = 1/n.
    The right-hand side at interior nodes is x_i itself.

    Returns the array of nodal values u_0 ... u_n.
    """
    n = interval_num
    coeff = e * n ** 2          # e / h^2, the off-diagonal magnitude
    diag = 2 * coeff + 1        # interior main-diagonal entry
    rhs = np.linspace(0, 1, num=n + 1, endpoint=True)
    rhs[-1] = 0                 # boundary condition u(1) = 0
    system = np.zeros((n + 1, n + 1))
    system[0, 0] = 1            # Dirichlet row for u(0)
    system[-1, -1] = 1          # Dirichlet row for u(1)
    for row in range(1, n):
        system[row, row - 1] = -coeff
        system[row, row] = diag
        system[row, row + 1] = -coeff
    return solve(system, rhs)
# + id="W3maU8sO78LN" colab_type="code" colab={}
# + id="zznr7Uay73q3" colab_type="code" colab={"base_uri": "https://localhost:8080/", "height": 34} outputId="d20857ff-2b43-45ac-de6a-7db9da867894"
solve_function(10**(-10),5)
# + id="5O4qLhFO8G-d" colab_type="code" colab={"base_uri": "https://localhost:8080/", "height": 51} outputId="0f2136fb-af31-414e-d9ec-093a40e5e446"
CFD=solve_function(10**(-10),9)
CFD
# + id="nLzM2R8e8KZE" colab_type="code" colab={}
error_array=np.zeros(10)
Rhf=np.linspace(0, 1, num=10, endpoint=True)
for i in range(10):
x=Rhf[i]
e=10**(-10)
u=x-(np.exp((x-1)/np.sqrt(e))-np.exp(-(x+1)/np.sqrt(e)))/(1-np.exp(-2/np.sqrt(e)))
error=np.abs(u-CFD[i])
error_array[i]=error
# + id="lrjnhXAp8MM2" colab_type="code" colab={"base_uri": "https://localhost:8080/", "height": 68} outputId="f7cf8981-90aa-4ecc-8653-0217f8b070ce"
error_array
# + id="6JYNTmEXtO8T" colab_type="code" colab={}
# + [markdown] id="JGvNnG0wtSmK" colab_type="text"
# As the proof suggested, the CFD solution performs better than the FEM one.
# Compare this error array with the FEM error shown in the report.
| src/hw8.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# name: python3
# ---
# + [markdown] id="PviKho2fD7y_" colab_type="text"
# Copyright © 2020, Weta Digital, Ltd.
#
# SPDX-License-Identifier: Apache-2.0
#
# # PhysLight Imaging
#
# Here's a very simple idealized "renderer" that calculates the sRGB (linear) pixel values given a $2.5lx$ uniform environment light illuminating a 100% diffuse reflector.
#
# We'll set default camera parameters according to the exposure equation and verify that our output pixel values are exactly 1.
# + id="H6MexaUTEvIw" colab_type="code" colab={}
# !pip install -q colour-science
# !pip install -q matplotlib
import colour
from colour import SpectralShape, CMFS
import numpy as np
import math
# + id="gfvMasXqE0bv" colab_type="code" colab={}
# Convert the given SpectralDistribution to XYZ using CIE 1931 2-degree,
# optionally normalizing such that Y=1.
# Note that we define this here rather than using colour's in-built functions
# for the sake of clarity
def spectral_to_XYZ(sd, normalize=False):
    """Integrate ``sd`` against the CIE 1931 2-degree CMFs, returning [X, Y, Z].

    sd: a colour-science SpectralDistribution.
    normalize: when True, scale the result so that Y == 1.
    """
    cmf = CMFS['CIE 1931 2 Degree Standard Observer'].copy()
    # Align once to a 1nm grid on [360, 780] and slice the three
    # colour-matching functions from the resulting table, instead of
    # re-aligning the CMF for every channel as before (same values,
    # two fewer align passes).
    cmf_values = cmf.align(SpectralShape(360, 780, 1)).values
    x_bar = cmf_values[:, 0]
    y_bar = cmf_values[:, 1]
    z_bar = cmf_values[:, 2]
    s = sd.copy().align(SpectralShape(360, 780, 1))
    nm = s.wavelengths
    x = np.trapz(x_bar * s.values, nm)
    y = np.trapz(y_bar * s.values, nm)
    z = np.trapz(z_bar * s.values, nm)
    if normalize:
        return [x, y, z] / y  # numpy promotes the list to an array here
    else:
        return [x, y, z]
# + [markdown] id="xaJkpeENFpKw" colab_type="text"
# We want to check our working against photometric quantities, to do this we'll want to normalize our light source such that its spectral distribution represents a luminance of $1 nit$. We do this by dividing by:
#
# $$K_m \int_{360nm}^{780nm} S(\lambda) \bar{y}(\lambda) d\lambda$$
#
#
# where $K_m = 683lm/W$.
#
# + id="BtwnIj-jF9Tc" colab_type="code" colab={}
def to_photometric(sd):
return spectral_to_XYZ(sd)[1] * 683
d65 = colour.ILLUMINANTS_SDS['D65'].copy()
spd_light = d65 / to_photometric(d65)
# + [markdown] id="xLmTR0GGGDih" colab_type="text"
# Our setup is $2.5lx$ incident on a 100% diffuse reflector. So exitant luminance from the surface, $L_v$ will be $\frac{2.5}{\pi} nit$
# + id="dpF4F1ELGa2C" colab_type="code" colab={}
L_v = 2.5 / math.pi
# L is radiance scaled to $L_v nit$
L = spd_light * L_v
# + [markdown] id="diRNCuocGqsI" colab_type="text"
# EV settings from Wikipedia $2.5lx$ $EV0$ example.
#
#
# https://en.wikipedia.org/wiki/Exposure_value#Relationship_of_EV_to_lighting_conditions
#
# When $EV=0$ (i.e. $2.5lx$ assuming $C=250$), then we should get a "correct" exposure
# with these camera settings
# + id="JzGWNOgGGoa-" colab_type="code" colab={}
t = 1.0
N = 1.0
S = 100.0
C = 250.0
K_m = 683.0
# + [markdown] id="XXDQf1pRHFfb" colab_type="text"
# Convert radiance entering the camera system to exposure in $W m^{-2} nm^{-1} s$ (ish - we're actually representing some sort of output signal from the sensor here rather than exposure at the sensor, but it's easier to think of it this way)
#
# + id="XkVFhoj3G7fC" colab_type="code" colab={}
imaging_ratio = (math.pi * t * S * K_m) / (C * N * N)
H = L * imaging_ratio
# + [markdown] id="zejGP096HDAO" colab_type="text"
# Convert to XYZ then to linear sRGB. We get back to exactly 1 in RGB by dividing by the normalized RGB whitepoint.
# + id="R17kphqCHhSd" colab_type="code" colab={"base_uri": "https://localhost:8080/", "height": 35} outputId="b3927b7f-bda7-4ef3-c80a-f0d4d2084e6b"
H_xyz = spectral_to_XYZ(H)
sRGB = colour.models.sRGB_COLOURSPACE
# We normalize when calculating the white point since we just want to affect
# colour, not brightness
white_xyz = spectral_to_XYZ(spd_light, normalize=True)
white_rgb = np.dot(sRGB.XYZ_to_RGB_matrix, white_xyz)
H_rgb = np.dot(sRGB.XYZ_to_RGB_matrix, H_xyz) / white_rgb
print('H_rgb', H_rgb)
assert np.array_equal(np.round(H_rgb, 15), [1.0, 1.0, 1.0])
| physlight_imaging.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# language: python
# name: python3
# ---
from bertviz.pytorch_transformers_attn import XLNetTokenizer, XLNetModel
from bertviz.model_view_xlnet import show
# + language="javascript"
# IPython.OutputArea.auto_scroll_threshold = 9999;
# require.config({
# paths: {
# d3: '//cdnjs.cloudflare.com/ajax/libs/d3/5.7.0/d3.min',
# jquery: '//ajax.googleapis.com/ajax/libs/jquery/2.0.0/jquery.min',
# }
# });
# -
# Load the pretrained XLNet-large model and matching tokenizer
# (downloads the weights on first use).
model = XLNetModel.from_pretrained('xlnet-large-cased')
tokenizer = XLNetTokenizer.from_pretrained('xlnet-large-cased')
# Example sentence whose attention patterns we want to inspect.
text = "At the store she bought apples, oranges"
# Render bertviz's model-view attention visualization for the sentence.
show(model, tokenizer, text)
| model_view_xlnet.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 2 (SageMath)
# language: python
# name: python2
# ---
import xy_ODE as p1
import unstable_ODE as p2
# # Homework 6
#
# ### <NAME>
# ### 3/19/2016
# ## xy_ODE
#
# This section solves a differential equation by the Euler's Forward method over the interval 0 to 4 and then plots the solutions for 5, 17, and 401 fixed points (blue, red, and green respectively) as well as the analytic solution (black). As is readily apparent, 5, and even 17 fixed points are nowhere near sufficient in order to produce the required accuracy. 401 points is a good approximation, but more points would still be required for exact correspondence. That's why this method isn't great, especially in the days before computers. Can you picture Euler going through the process of calculating 401 points? I can. Mathematicians are odd like that.
p1.graph()
# We can also use sympy to solve for the differential equation numerically, as demonstrated below. Note that the value C1 would depend on the value of epsilon given for the equation.
p1.symbolic_solver()
# ## unstable_ODE
#
# The crux of this implementation is that for the differential equation y' = Cy, the separation yields a recursive relationship of multiplication by a constant, which always renders the value of y_k equal to (1 + C*dx)^k * y_0; the implementation simply performs this repeated multiplication by a constant, as the problem calls for.
#
# If the value of Cdx is such that it is less than -1 but greater than -2, the multiplied factor will be between 0 and -1, and will thus oscillate and decrease. This is simply a geometric series. Here, we have set C to -1, and thus Delta t values between 1 and 2 exclusive will produce such behavior. Delta t values greater than 2 will oscillate in sign, but as the absolute value of the argument they create is greater than 1, they will grow with each time steps, without bound.
p2.tabler()
# Notice how as the other values go decreasing in magnitude, the Delta t = 2.75 case is obviously in it to win it big.
#
# Similarly to the last example on xy_ODE, we can also solve this symbolically using sympy.
p2.symbolic_solver()
| Homework_6.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python [conda env:py37]
# language: python
# name: conda-env-py37-py
# ---
# + hide_input=false
from preamble import *
# %matplotlib inline
# + [markdown] hide_input=false
# ## Introduction
# ### Why Machine Learning?
# #### Problems Machine Learning Can Solve
# -
# #### Knowing Your Task and Knowing Your Data
# ### Why Python?
# ### scikit-learn
# #### Installing scikit-learn
# ### Essential Libraries and Tools
# #### Jupyter Notebook
# #### NumPy
# + uuid="e2b8e959-75f0-4fa9-a878-5ab024f89223"
import numpy as np
x = np.array([[1, 2, 3], [4, 5, 6]])
print("x:\n{}".format(x))
# -
# #### SciPy
# +
from scipy import sparse
# Create a 2D NumPy array with a diagonal of ones, and zeros everywhere else
eye = np.eye(4)
print("NumPy array:\n", eye)
# -
# Convert the NumPy array to a SciPy sparse matrix in CSR format
# Only the nonzero entries are stored
sparse_matrix = sparse.csr_matrix(eye)
print("\nSciPy sparse CSR matrix:\n", sparse_matrix)
data = np.ones(4)
row_indices = np.arange(4)
col_indices = np.arange(4)
eye_coo = sparse.coo_matrix((data, (row_indices, col_indices)))
print("COO representation:\n", eye_coo)
# #### matplotlib
# + uuid="30faf136-0ef7-4762-bd82-3795eea323d0"
# %matplotlib inline
import matplotlib.pyplot as plt
# Generate a sequence of numbers from -10 to 10 with 100 steps in between
x = np.linspace(-10, 10, 100)
# Create a second array using sine
y = np.sin(x)
# The plot function makes a line chart of one array against another
plt.plot(x, y, marker="x")
# -
# #### pandas
# + uuid="ad1b06f7-e03a-4938-9d59-5bb40e848553"
import pandas as pd
# create a simple dataset of people
data = {'Name': ["John", "Anna", "Peter", "Linda"],
'Location' : ["New York", "Paris", "Berlin", "London"],
'Age' : [24, 13, 53, 33]
}
data_pandas = pd.DataFrame(data)
# IPython.display allows "pretty printing" of dataframes
# in the Jupyter notebook
display(data_pandas)
# -
# Select all rows that have an age column greater than 30
display(data_pandas[data_pandas.Age > 30])
# #### mglearn
# ### Python 2 versus Python 3
# ### Versions Used in this Book
# +
import sys
print("Python version:", sys.version)
import pandas as pd
print("pandas version:", pd.__version__)
import matplotlib
print("matplotlib version:", matplotlib.__version__)
import numpy as np
print("NumPy version:", np.__version__)
import scipy as sp
print("SciPy version:", sp.__version__)
import IPython
print("IPython version:", IPython.__version__)
import sklearn
print("scikit-learn version:", sklearn.__version__)
# -
# ### A First Application: Classifying Iris Species
# 
# #### Meet the Data
from sklearn.datasets import load_iris
iris_dataset = load_iris()
print("Keys of iris_dataset:\n", iris_dataset.keys())
print(iris_dataset['DESCR'][:193] + "\n...")
print("Target names:", iris_dataset['target_names'])
print("Feature names:\n", iris_dataset['feature_names'])
print("Type of data:", type(iris_dataset['data']))
print("Shape of data:", iris_dataset['data'].shape)
print("First five rows of data:\n", iris_dataset['data'][:5])
print("Type of target:", type(iris_dataset['target']))
print("Shape of target:", iris_dataset['target'].shape)
print("Target:\n", iris_dataset['target'])
# #### Measuring Success: Training and Testing Data
from sklearn.model_selection import train_test_split
X_train, X_test, y_train, y_test = train_test_split(
iris_dataset['data'], iris_dataset['target'], random_state=0)
print("X_train shape:", X_train.shape)
print("y_train shape:", y_train.shape)
print("X_test shape:", X_test.shape)
print("y_test shape:", y_test.shape)
# #### First Things First: Look at Your Data
# create dataframe from data in X_train
# label the columns using the strings in iris_dataset.feature_names
iris_dataframe = pd.DataFrame(X_train, columns=iris_dataset.feature_names)
# create a scatter matrix from the dataframe, color by y_train
pd.plotting.scatter_matrix(iris_dataframe, c=y_train, figsize=(15, 15),
marker='o', hist_kwds={'bins': 20}, s=60,
alpha=.8, cmap=mglearn.cm3)
# #### Building Your First Model: k-Nearest Neighbors
from sklearn.neighbors import KNeighborsClassifier
knn = KNeighborsClassifier(n_neighbors=1)
knn.fit(X_train, y_train)
# #### Making Predictions
X_new = np.array([[5, 2.9, 1, 0.2]])
print("X_new.shape:", X_new.shape)
prediction = knn.predict(X_new)
print("Prediction:", prediction)
print("Predicted target name:",
iris_dataset['target_names'][prediction])
# #### Evaluating the Model
y_pred = knn.predict(X_test)
print("Test set predictions:\n", y_pred)
print("Test set score: {:.2f}".format(np.mean(y_pred == y_test)))
print("Test set score: {:.2f}".format(knn.score(X_test, y_test)))
# ### Summary and Outlook
# +
X_train, X_test, y_train, y_test = train_test_split(
iris_dataset['data'], iris_dataset['target'], random_state=0)
knn = KNeighborsClassifier(n_neighbors=1)
knn.fit(X_train, y_train)
print("Test set score: {:.2f}".format(knn.score(X_test, y_test)))
| BookCode/introduction_to_ml_with_python/01-introduction.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# language: python
# name: python3
# ---
# ## Encoding a categorical variable using Pandas
import numpy as np
import matplotlib.pyplot as plt
import pandas as pd
dataset = pd.read_excel('/Users/swaruptripathy/Desktop/Data Science/datasets/stark_data.xlsx')
dataset.head()
dataset.shape
X_gender = dataset.iloc[:,2].values
X_gender
#Converting categorical features
X_dummy1 = pd.get_dummies(X_gender)
X_dummy1
X_dummy2 = pd.get_dummies(X_gender, drop_first=True)
X_dummy2
| data_preprocessing - Categorical Encoding Pandas.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python [conda env:pyremo-tests]
# language: python
# name: conda-env-pyremo-tests-py
# ---
# # Cmorization
# The `pyremo` package includes a cmorization module that should be useful to rewrite remo output for publication. The module is using the official climate model output rewriter ([CMOR3 API](https://github.com/PCMDI/cmor)) provided by [PCMDI](https://pcmdi.llnl.gov/). The goal of the cmorization process is to provide climate model data output based on the Climate and Forecast (CF) standards to make it easily comparable to other model output.
# The model output data is not necessarily altered (except perhaps for unit conversions) but is rather
# organized in a certain manner. For example, the hourly output of remo might be resampled to 3-hourly and daily instantaneous and mean values. Also the variables names and attributes are harmonized to provide standard names and units. Additionally, the different output variables are stored in a certain directory tree that is required for uploading it to an ESGF node.
# + [markdown] tags=[]
# ## Examples
# We will show some simple examples here of how the `pyremo` cmorization module can be used. Let's load an example remo dataset that contains monthly means for the year 2000
# +
import xarray as xr
import pyremo as pr
ds = pr.tutorial.open_dataset("remo_EUR-11_TEMP2_mon")
ds
# -
# The dataset is slightly larger than the official CORDEX domain according to archive specifications. Also the variable name is not CF standard and global attributes are missing. To cmorize this dataset, we use the pyremo cmor module. We also use CMIP6 cmor tables that are provided by `py-cordex`.
# +
from cordex.tables import cmip6_cmor_table, cordex_cmor_table
from pyremo.cmor import cmorize_variable
# -
filename = cmorize_variable(
ds,
"tas",
cmor_table=cmip6_cmor_table("CMIP6_Amon"),
dataset_table=cordex_cmor_table("CORDEX_remo_example"),
CORDEX_domain="EUR-11",
)
# The warnings generated here mostly originate from the fact, that the original CMIP6 cmor tables have not yet been updated for use in regional climate model data. We will ignore them for now and have a look at the output
filename
output = xr.open_dataset(filename)
output
# You can see that the dataset now has the right dimension sizes and names, added time bounds (since the dataset contains mean values) and also lots of global attributes to self describe the dataset.
output.tas.plot(col="time", col_wrap=4)
| notebooks/cmorization.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# name: python3
# ---
# + [markdown] id="view-in-github" colab_type="text"
# <a href="https://colab.research.google.com/github/allokkk/Odd-Word-Out/blob/master/odd_one_odd_challange.ipynb" target="_parent"><img src="https://colab.research.google.com/assets/colab-badge.svg" alt="Open In Colab"/></a>
# + id="TpK4ggGEjZTR" colab_type="code" colab={}
import gensim
from gensim.models import word2vec
from gensim.models import KeyedVectors
from sklearn.metrics.pairwise import cosine_similarity
import pandas as pd
# + id="yXPhENRfkzuk" colab_type="code" outputId="c6ec6d7d-bd28-49bc-a764-c87d54ddccc3" colab={"base_uri": "https://localhost:8080/", "height": 54}
from google.colab import drive
drive.mount('/content/gdrive')
# + id="7LfVGenjkmc8" colab_type="code" outputId="30408999-95cc-47b8-af4a-b0b217daa6c0" colab={"base_uri": "https://localhost:8080/", "height": 71}
import numpy as np
word_vectors = KeyedVectors.load_word2vec_format('/content/gdrive/My Drive/GoogleNews-vectors-negative300.bin.gz',binary=True)
# + id="9VgARhL4ktUE" colab_type="code" outputId="8ab2d9a3-3732-45bb-879d-0c631809e125" colab={"base_uri": "https://localhost:8080/", "height": 377}
# ! wget https://www.dropbox.com/s/59zqs3blmx3sgcq/Test%20-%20Odd%20one%20out%20Challange.csv?dl=0
# + id="0Y0kP4A4lmst" colab_type="code" colab={}
test_data=pd.read_csv("/content/Test - Odd one out Challange.csv?dl=0")
# + [markdown] id="6_oX7n1Tl7FM" colab_type="text"
# FIND ODD FUNCTION
# + id="FuP6z-JMlyVr" colab_type="code" colab={}
def odd_one_out2(words):
    """Return the word least similar to the average embedding of *words*.

    Each word is looked up in the module-level ``word_vectors`` model; the
    word whose cosine similarity to the mean vector is lowest is returned.

    Parameters:
        words: iterable of words, all present in ``word_vectors``.

    Returns:
        The odd word, or None if *words* is empty.
    """
    # Generate all word embeddings for the given list
    all_word_vectors = [word_vectors[w] for w in words]
    avg_vector = np.mean(all_word_vectors, axis=0)
    print(avg_vector.shape)
    # Iterate over every word and keep the one least similar to the average.
    odd_one_out = None
    # float('inf') (not 1.0) so the first word is always a valid candidate:
    # cosine similarity can be exactly 1.0 (e.g. a single-word list), in which
    # case a 1.0 sentinel would make the function return None.
    min_similarity = float('inf')
    for w in words:
        sim = cosine_similarity([word_vectors[w]], [avg_vector])
        if sim < min_similarity:
            min_similarity = sim
            odd_one_out = w
        # print("Similairy btw %s and avg vector is %.2f"%(w,sim))
    return odd_one_out
# + id="kiZ7lY8hl-7L" colab_type="code" outputId="f3768779-0ea7-481d-ef2f-71b0a00f916d" colab={"base_uri": "https://localhost:8080/", "height": 34}
test_data.shape
# + id="kQ-fPJtNmgpN" colab_type="code" outputId="d3a370f4-2be4-4c90-fc79-311b57b3e490" colab={"base_uri": "https://localhost:8080/", "height": 421}
test_data.head(n=12)
# + colab_type="code" outputId="1a3698e6-f657-436f-99f5-288462cacc84" id="us5qKqaSmtvr" colab={"base_uri": "https://localhost:8080/", "height": 119}
import numpy as np
test_data.values
row2=test_data.iloc[2,:]
print(row2)
# + id="D7UXmgj9hAIp" colab_type="code" outputId="bab2c3e5-aad0-4d5a-9d5f-383ff8455677" colab={"base_uri": "https://localhost:8080/", "height": 153}
print(row2)
odd_one_out2(row2)
# + id="NySVJ8cIxKi1" colab_type="code" outputId="f69a7a76-59b4-45cb-ca28-9cb3c5fc473f" colab={"base_uri": "https://localhost:8080/", "height": 357}
l=[]
for i in range(20):
rows=test_data.iloc[i,:]
r=odd_one_out2(rows)
l.append(r)
# + id="5pbO1dfu04k8" colab_type="code" outputId="8e960181-5bcf-4d08-d766-d3e2e421e4ea" colab={"base_uri": "https://localhost:8080/", "height": 54}
print(l)
# + id="AxXGONTA07mX" colab_type="code" colab={}
import csv
with open('OddOneOut.csv', 'w', newline='') as file:
writer = csv.writer(file)
row_list=["OddOut"]
writer.writerow(row_list)
for i in l:
row_item=[i]
writer.writerow(row_item)
# + id="rGecTRB_2vEo" colab_type="code" outputId="9bbaf824-c422-4d58-ce6c-a3de4d6a51e6" colab={"base_uri": "https://localhost:8080/", "height": 357}
for j in l:
print(j)
# + id="d_CmPM762z3s" colab_type="code" colab={}
| odd_one_odd_challange.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# language: python
# name: python3
# ---
# # Quick start to KinFragLib
#
# Introduction on how to load and use the KinFragLib fragment library.
# ## Table of contents
#
# 1. Access fragment library
# 2. View all fragments of a specific inhibitor
# 3. Identify most common fragments in a subpocket
# +
from pathlib import Path
import pandas as pd
from rdkit import Chem
from rdkit.Chem import Draw
from rdkit.Chem import PandasTools
from kinfraglib import utils
# -
# Needed to display ROMol images in DataFrames
PandasTools.RenderImagesInAllDataFrames(images=True)
# ## 1. Access fragment library
# Path to library folder
HERE = Path(_dh[-1])
PATH_TO_LIB = HERE / '..' / 'data' / 'fragment_library'
# Use utils function to read fragment library
fragment_library = utils.read_fragment_library(PATH_TO_LIB)
fragment_library.keys()
# NBVAL_CHECK_OUTPUT
# `fragment_library` is a dictionary containing a `pandas` DataFrame for each of the six subpockets (AP, FP, SE, GA, B1, B2) and the pool X.
#
# The following data is linked to each fragment:
#
# - `ROMol`, `ROMol_dummy`, and `ROMol_original`: RDKit molecules for the 2D representation of the fragment without and with dummy atoms as well as for the 3D representation of the fragment as described in the SDF file from which the data is loaded.
# - `kinase`, `family`, and `group`: *Kinase* name, *family* and *group* of the kinase that the ligand (from which the fragment originates) was co-crystallized with
# - `complex_pdb`, `ligand_pdb`, `alt`, and `chain`: *PDB complex* and *ligand ID*, *alternate model* and *chain* for the KLIFS structure that the ligand (from which the fragment originates) was co-crystallized with
# - `atom_subpockets`: Subpocket assignment for each of the fragment's atoms
# - `atom_environments`: BRICS environment IDs for each of the fragment's atoms
# - `smiles` and `smiles_dummy`: SMILES for the fragment without and with dummy atoms
# - `subpocket`: Subpocket assignment for the fragment
# ### Access fragments in specific subpocket (here AP)
ap_fragments = fragment_library['AP']
print('Shape of AP fragment DataFrame: ', ap_fragments.shape)
ap_fragments.head()
# ### Draw sample set of fragments (without dummy atoms)
# Draw fragments
Draw.MolsToGridImage(ap_fragments.ROMol[:10], legends=ap_fragments.complex_pdb.to_list(), molsPerRow=5)
# ### Draw sample set of fragments (with dummy atoms)
# Draw fragments
Draw.MolsToGridImage(ap_fragments.ROMol_dummy[:10], legends=ap_fragments.complex_pdb.to_list(), molsPerRow=5)
# ### Show SMILES for sample set of fragments with/without dummy atoms
ap_fragments[['smiles', 'smiles_dummy']].head()
# NBVAL_CHECK_OUTPUT
# ## 2. View all fragments of a specific inhibitor (here Gefitinib; PDB identifier: IRE)
#
# This analysis can be used to investigate how different orientations/binding modes of the same molecule, co-crystallized in different structures, affect the subpocket assignment.
# ### Find IRE fragments in each subpocket
# +
ire_fragments = utils.get_fragments_by_ligand('IRE', fragment_library)
print(f'Number of fragments from IRE ligands: {ire_fragments.shape[0]}')
ire_kinases = list(ire_fragments.kinase.unique())
ire_pdbs = list(ire_fragments.complex_pdb.unique())
print(f'Kinases the ligand bind to: {ire_kinases}')
print(f'Ligand co-crystallized in structures with PDB IDs: {ire_pdbs}')
# NBVAL_CHECK_OUTPUT
# -
# Draw full ligand
utils.draw_ligands_from_pdb_ids(ire_pdbs[4], 'IRE')
# ### Draw sample ligand fragmentation
# +
complex_pdb = ire_fragments.iloc[0].complex_pdb
ligand_pdb = ire_fragments.iloc[0].ligand_pdb
print(f'Draw fragments for PDB structure {complex_pdb} and ligand {ligand_pdb}')
utils.draw_fragmented_ligand(fragment_library, complex_pdb, ligand_pdb, mols_per_row=5)
# -
print(f'Draw fragments for PDB structure {complex_pdb} and ligand {ligand_pdb} including dummy atoms')
utils.draw_fragmented_ligand(fragment_library, complex_pdb, ligand_pdb, mols_per_row=5, include_dummy=True)
# ### Draw fragmentation for all ligands (sorted by subpocket)
Draw.MolsToGridImage(
[
fragment.ROMol_dummy
for index, fragment
in ire_fragments.iterrows()
],
legends=[
f"{fragment.subpocket} | {fragment.complex_pdb} | {fragment.chain} | {fragment.kinase}"
for index, fragment
in ire_fragments.iterrows()
],
molsPerRow=6
)
# **Discussion**
#
# The above figure shows:
# * The hinge binding fragments clearly lie in AP in all structures, only difference
# - Neighboring oxygen is in most structures part of AP
# - In some structures it was assigned to FP (3ug2, 5y7z)
# * The halogenetated benzene ring is clearly assigned to GA in all structures
# * In contrast the long solvent exposed tail is very flexible, and can thus, be assigned to
# - FP or SE
# - and even be further split, with the terminal ring being annotated to pool X
# ## 3. Identify most common fragments in a subpocket (here AP)
# Use utils function to count fragment occurrences
most_common_fragments_df = utils.get_most_common_fragments(ap_fragments, top_x=25)
most_common_fragments_df.head(3)
Draw.MolsToGridImage(
most_common_fragments_df.ROMol,
legends=[f'{i+1}: {x}' for i, x in enumerate(most_common_fragments_df.fragment_count)],
molsPerRow=5
)
# ### Analyze the occurrences of a specific fragment
#
# Select a fragment from the AP most common fragments.
query_smiles = Chem.MolToSmiles(most_common_fragments_df.ROMol[0])
print(query_smiles)
# NBVAL_CHECK_OUTPUT
ap_fragments_subset = ap_fragments[ap_fragments.smiles==query_smiles]
# Collect complexes containing this fragment.
print(f'List of pdb codes, from which the fragment originated:\n{ap_fragments_subset.complex_pdb.to_list()}')
# NBVAL_CHECK_OUTPUT
# Investigate individual ligands from this set.
# +
ap_fragments_subset_example = ap_fragments_subset.iloc[0]
pdb = ap_fragments_subset_example.complex_pdb
ligand = ap_fragments_subset_example.ligand_pdb
print('Draw fragments for PDB structure', pdb, ' and ligand', ligand)
utils.draw_fragmented_ligand(fragment_library, pdb, ligand, mols_per_row=5)
# -
# Draw full ligand
utils.draw_ligands_from_pdb_ids('4dce', '0JF')
| notebooks/1_1_quick_start.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: 'Python 3.7.6 64-bit (''base'': conda)'
# name: python3
# ---
# # Defect Detection: Semantic Segmentation - Pipeline Execution
#
# In this notebook, we will use the pipeline configured in the included python package under `pipelines` together with the defined code for preprocessing and training to automate the model training. It is easy to use: you can simply drop in whatever input data you want and have it train a model automatically.
# +
import boto3
import sagemaker
import time
import uuid
import json
iot_client = boto3.client('iot')
sts_client = boto3.client('sts')
sm_client = boto3.client('sagemaker')
# Get the account id
account_id = sts_client.get_caller_identity()["Account"]
# Project Name as defined in your CloudFormation template
PROJECT_NAME = '<YOUR PROJECT NAME>'
region = boto3.Session().region_name
role = sagemaker.get_execution_role()
bucket_name = 'sm-edge-workshop-%s-%s' % (PROJECT_NAME, account_id)
# Change these to reflect your project/business name or if you want to separate ModelPackageGroup/Pipeline from the rest of your team
model_package_group_name = 'defect-detection-semantic-segmentation-%s' % PROJECT_NAME
job_prefix = 'defect-detection-semantic-segmentation'
pipeline_name = 'defect-detection-semantic-segmentation-pipeline-%s' % PROJECT_NAME
# -
# ### Getting the pipeline definition
#
# We use the `get_pipeline` method to create a pipeline DAG definition with our provided input. The input provided here is fixed for each pipeline you create or update, you cannot change these parameters with each execution (see usage of parameters in the cell below).
# +
from pipelines.semantic_segmentation.pipeline import get_pipeline
pipeline = get_pipeline(
region=region,
role=role,
default_bucket=bucket_name,
pipeline_name=pipeline_name,
base_job_prefix=job_prefix
)
# -
# ### Creating the pipeline
#
# We create the pipeline (or update it in case it exists) with the previously defined DAG definition.
pipeline.upsert(role_arn=role)
# ### Starting the pipeline execution
#
# We now start the execution of the pipeline with a given set of parameters which we can alter for every execution.
# +
input_data_path = 's3://%s/<YOUR-DATA-LOCATION-PREFIX>' % bucket_name
execution = pipeline.start(
parameters=dict(
InputData=input_data_path,
TrainingInstanceType="ml.p3.2xlarge",
ModelApprovalStatus="Approved",
ModelPackageGroupName=model_package_group_name
)
)
# -
# ### Check progress
#
# After execution started, you can always check the progress of your pipeline execution either by looking at the processing and training jobs in the SageMaker Console, using the built-in SageMaker Studio Pipeline visualization tools or using SDK methods like below.
execution.describe()
# ## Preparing trained model for edge
#
# Please proceed here only, if the execution of the training pipeline as successful. In this part of the workshop, we will prepare the model which you just trained in the pipeline for the deployment onto the edge device.
# +
compilation_output_sub_folder = 'models/' + job_prefix + '/compilation-output'
edgepackaging_output_sub_folder = 'models/' + job_prefix + '/edge-packaging-output'
# S3 Location to save the model artifact after compilation
s3_compilation_output_location = 's3://{}/{}'.format(bucket_name, compilation_output_sub_folder)
# S3 Location to save the model artifact after edge packaging
s3_edgepackaging_output_location = 's3://{}/{}'.format(bucket_name, edgepackaging_output_sub_folder)
# +
# Define some helper functions
def get_latest_approved_s3_model_location(client, model_package_group):
    """Return the S3 artifact location of the latest approved model version in a group.

    Parameters:
        client: boto3 SageMaker client used for all API calls.
        model_package_group: name of the SageMaker model package group to query.

    Returns:
        The ``ModelDataUrl`` (S3 URI) of the newest approved model package.
    """
    # Fix: use the function's own parameters instead of the module globals
    # `model_package_group_name` and `sm_client`, which silently shadowed them.
    response = client.list_model_packages(
        ModelPackageGroupName=model_package_group,
        ModelApprovalStatus='Approved'
    )
    # NOTE(review): list_model_packages paginates (default 100 results); assumes
    # the newest approved version is in the first page — confirm for large groups.
    latest_version = max(response['ModelPackageSummaryList'], key=lambda x: x['ModelPackageVersion'])
    model_artifact_location = client.describe_model_package(
        ModelPackageName=latest_version['ModelPackageArn']
    )['InferenceSpecification']['Containers'][0]['ModelDataUrl']
    return model_artifact_location
def get_latest_approved_model_version(client, model_package_group):
    """Return the version number of the latest approved model version in a group.

    Parameters:
        client: boto3 SageMaker client used for the API call.
        model_package_group: name of the SageMaker model package group to query.

    Returns:
        The integer ``ModelPackageVersion`` of the newest approved package.
    """
    # Fix: query the group passed by the caller instead of the module-level
    # `model_package_group_name`, which the original silently used.
    response = client.list_model_packages(
        ModelPackageGroupName=model_package_group,
        ModelApprovalStatus='Approved'
    )
    latest_version = max(response['ModelPackageSummaryList'], key=lambda x: x['ModelPackageVersion'])
    return latest_version['ModelPackageVersion']
# -
# ### Run SageMaker Neo compilation job
# +
# Retrieve some information on the model we just trained and registered in SageMaker Model Registry
s3_model_artifact_location = get_latest_approved_s3_model_location(sm_client, model_package_group_name)
print(s3_model_artifact_location)
model_name = 'unet'
compilation_job_name = '%s-%d' % (model_name, int(time.time()*1000))
# Lets start a compilation job for the target architecture
sm_client.create_compilation_job(
CompilationJobName=compilation_job_name,
RoleArn=role,
InputConfig={
'S3Uri': s3_model_artifact_location,
'DataInputConfig': '{"input_image":[1,%d,%d,%d]}' % (3,224, 224),
'Framework': 'KERAS'
},
OutputConfig={
'S3OutputLocation': s3_compilation_output_location,
'TargetPlatform': { 'Os': 'LINUX', 'Arch': 'X86_64' }
#'TargetPlatform': { 'Os': 'LINUX', 'Arch': 'ARM64', 'Accelerator': 'NVIDIA' },
#'CompilerOptions': '{"trt-ver": "7.1.3", "cuda-ver": "10.2", "gpu-code": "sm_53"}'
#'TargetPlatform': { 'Os': 'LINUX', 'Arch': 'ARM64'},
#'TargetDevice': 'ml_c5'
},
StoppingCondition={ 'MaxRuntimeInSeconds': 900 }
)
# Poll the status of the job
print('Started compilation job .', end='')
while True:
resp = sm_client.describe_compilation_job(CompilationJobName=compilation_job_name)
if resp['CompilationJobStatus'] in ['STARTING', 'INPROGRESS']:
print('.', end='')
else:
print(resp['CompilationJobStatus'], compilation_job_name)
break
time.sleep(5)
if resp['CompilationJobStatus'] == 'COMPLETED':
s3_compiled_model_artifact_location_fullpath = resp['ModelArtifacts']['S3ModelArtifacts']
print(f'Compiled artifact location in S3: {s3_compiled_model_artifact_location_fullpath}')
# -
# ### Running the SageMaker Edge Packaging job
# +
# Run the edge packaging job
edge_packaging_job_name='%s-%d' % (model_name, int(time.time()*1000))
model_version=str(get_latest_approved_model_version(sm_client, model_package_group_name))
# Start the edge packaging job
resp = sm_client.create_edge_packaging_job(
EdgePackagingJobName=edge_packaging_job_name,
CompilationJobName=compilation_job_name,
ModelName=model_name,
ModelVersion=model_version,
RoleArn=role,
OutputConfig={
'S3OutputLocation': s3_edgepackaging_output_location
}
)
# Poll the status of the job
print('Started edge packaging job .', end='')
while True:
resp = sm_client.describe_edge_packaging_job(EdgePackagingJobName=edge_packaging_job_name)
if resp['EdgePackagingJobStatus'] in ['STARTING', 'INPROGRESS']:
print('.', end='')
else:
print(resp['EdgePackagingJobStatus'], compilation_job_name)
break
time.sleep(5)
if resp['EdgePackagingJobStatus'] == 'COMPLETED':
s3_packaged_model_artifact_location_fullpath = resp['ModelArtifact']
print(f'Packaged artifact location in S3: {s3_packaged_model_artifact_location_fullpath}')
# -
# ### Running an IoT job for deployment onto the edge
# +
def split_s3_path(s3_path):
    """Split an S3 URI into its bucket name and object key.

    Parameters:
        s3_path: URI of the form ``s3://bucket/key/parts``.

    Returns:
        Tuple ``(bucket, key)``; key is ``""`` when the URI has no key part.
    """
    stripped = s3_path.replace("s3://", "")
    bucket, _, key = stripped.partition("/")
    return bucket, key
model_bucket, model_key = split_s3_path(s3_packaged_model_artifact_location_fullpath)
# -
resp = iot_client.create_job(
jobId=str(uuid.uuid4()),
targets=[
'arn:aws:iot:%s:%s:thinggroup/defect-detection-%s-group' % (region, account_id, PROJECT_NAME),
],
document=json.dumps({
'type': 'new_model',
'model_version': model_version,
'model_name': model_name,
'model_package_bucket': model_bucket,
'model_package_key': model_key
}),
targetSelection='SNAPSHOT'
)
| src/cloud/semantic_segmentation_pipeline.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .jl
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Julia 1.4.2
# language: julia
# name: julia-1.4
# ---
In this example, we will conduct a second-order Global Sensitivity Analysis (GSA) of total cell cost with
respect to the costs of the cell components. A second-order GSA assesses the sensitivity to each individual
factor and also considers the coupling (interaction) effects between the various input parameters.
# +
include("../src/PBCM.jl")
cell_general = cell()
cell_design_op = cylindrical_cell_designer(cell_general)
cost = cost_default()
cell_general, cost = convert_all(cell_general, cost, mult)
using DiffEqSensitivity
using PyPlot
per = 30
no_cells_yr = 10
no_cells_yr_intrvl = [no_cells_yr*(1 - (per/100)) , no_cells_yr*(1 + (per/100))]
cost_pos_AM = 18.0
cost_pos_AM_intrvl = [cost_pos_AM*(1 - (per/100)) , cost_pos_AM*(1 + (per/100))]
cost_neg_AM = 12.5
cost_neg_AM_intrvl = [cost_neg_AM*(1 - (per/100)) , cost_neg_AM*(1 + (per/100))]
cost_sep = 1.1
cost_sep_intrvl = [cost_sep * (1 - (per/100)) , cost_sep * (1 + (per/100))]
# Objective for the Sobol GSA: maps the four sampled inputs onto the resulting
# cell cost in dollars per kWh.
# NOTE(review): mutates the global `cost` struct on every call — assumes the
# GSA sampler evaluates points sequentially; confirm before parallelizing.
function gsa_cost(arr)
# arr[1]: number of cells manufactured per year (converted to model units)
cost.general_costs.no_units_mfg = converter(arr[1], mult.units_mfg)
# arr[2]: positive active-material cost
cost.cell_costs.cathode.AM[1] = arr[2]
# arr[3]: negative active-material cost
cost.cell_costs.anode.AM[1] = arr[3]
# arr[4]: separator cost
cost.cell_costs.seperator_cost[1] = arr[4]
# cost_calc returns a collection; the first entry is the $/kWh figure used as the GSA output
dollars_kWh = cost_calc(cell_general, cost, system="Cell", cost_verbosity=0)[1]
return dollars_kWh
end
# gsa_cost([0.25, 150.0, 130.0, 25.0, 19.0])
gsa_result = gsa(gsa_cost, Sobol(order = [2]), [no_cells_yr_intrvl, cost_pos_AM_intrvl, cost_neg_AM_intrvl, cost_sep_intrvl], N = 10000)
x = [0, 1, 2, 3, 1, 2, 3, 2, 3, 3]
y = [0, 0, 0, 0, 1, 1, 1, 2, 2, 3]
print("\n")
S1 = gsa_result.S1
S2 = gsa_result.S2
print(S1)
print(S2)
z = [S1[1],S2[1],S2[2],S2[3],S1[2],S2[4],S2[5],S1[3],S2[6],S1[4]]
clf()
r = bar3D(x, y, z*0, 0.4, 0.4, z)
xticks([0,1,2,3], ["No Cells","Cost Pos AM", "Cost Neg AM", "Cost Sep"])
yticks([1,2,3,4], ["No Cells","Cost Pos AM", "Cost Neg AM", "Cost Sep"])
title(string("GSA: 2nd Order: ", per, "%"))
zlabel("Sobol Indices")
figure(2)
# -
# The battery supply chain is quite complex and component prices are volatile. The plot above shows the sensitivity of cell cost to the prices of cell components, which helps battery manufacturers better identify the critical components and manage their supply chain.
| Examples/Example_GSA_2nd_order_Cost_only.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: 'Python 3.8.5 64-bit (''base'': conda)'
# name: python3
# ---
# +
import os
import pandas as pd
import numpy as np
import matplotlib.pyplot as plt
import seaborn as sns
import datetime
from statsmodels.graphics.tsaplots import plot_acf, plot_pacf
from statsmodels.tsa.stattools import adfuller, kpss
from statsmodels.tsa.seasonal import seasonal_decompose
from statsmodels.graphics.tsaplots import plot_acf, plot_pacf
from sklearn.preprocessing import MinMaxScaler
plt.style.use('ggplot')
# -
# ### Import dataset
# +
df = pd.read_csv('aggregated_df_final01oct2021.csv')
#df['time']=df['time'].to_datetime(dayfirst=True).astype(int)
df[['sentiment_Extreme Fear', 'sentiment_Extreme Greed', 'sentiment_Fear','sentiment_Greed', 'sentiment_Neutral']]=df[['sentiment_Extreme Fear', 'sentiment_Extreme Greed', 'sentiment_Fear','sentiment_Greed', 'sentiment_Neutral']].fillna(0)
df['time'] = pd.to_datetime(df['time'], dayfirst=True)
df.set_index('time', drop=True, inplace=True)
df.sort_index(inplace=True)
df=df.interpolate(method='linear')
df = df.fillna(method='bfill')
df.head()
# -
df = df['close_x']
df.head()
# +
def take_last(array_like):
    """Return the final element of *array_like* (used as a resample aggregator)."""
    last_value = array_like[-1]
    return last_value
df = df.resample('M').agg({'close_x': take_last})
df = df.droplevel(0)
df[0:5]
# -
# ### EDA
# - Closing price boxplot
# - ACF and PACF
# - Stationarity check (mean and variance)
# - ADF test
# - KSS test
mean = [np.mean(df.values[:x]) for x in range(df.shape[0])]
plt.figure(figsize=(18,4))
sns.lineplot(x=df.index ,y=df, label='BTC-USD', ci='sd')
sns.lineplot(x=df.index, y=mean, label='Mean', ci=None)
plt.title('BTC close price - Full dataset')
plt.ylabel('Closing price [USD]')
plt.xlabel('Date')
plt.show()
# From this we can see that the data is not stationary, as both the mean and the variance are increasing over time.
plt.figure(figsize=(12,6))
sns.boxplot(df)
plt.title('BTC close price boxplot - Full dataset')
plt.show()
# ### __Preprocessing__
# - Train/test split
# - Stationarity check (mean and variance)
# - Seasonal decompose of train
# - ADF test
# - KSS test
# - Stationarity transformation
# - LSTM data transformation
# #### Train/Test split
# +
# Train/Test split
# Number of periods to incluse in validation split
val_periods = 10
df_train = df[:df.shape[0]-val_periods]
df_val = df[df.shape[0]-val_periods:]
print('Train shape', df_train.shape)
print('Validation shape', df_val.shape)
# -
plt.figure(figsize=(18,4))
sns.lineplot(data=df_train, color='black', label = 'Train set', ci='sd')
sns.lineplot(data=df_val, color='red', label='Validation set', ci='sd')
plt.title('BTC Close price validation and training sets')
plt.show()
# #### Min Max Scaling
scaler = MinMaxScaler()
df_train_scaled = scaler.fit_transform(df_train.values.reshape(-1,1))
df_train_scaled.shape
# Head and tail of scaled dataset
df_train_scaled
# #### Reshaping
# +
# Transforms the original time series into the input format required by the LSTM model
nb_timesteps = 2
def makeXy(ts, nb_timesteps, features='itself'):
    """
    Build lagged regressor/target arrays from a scaled time series.

    Input:
        ts: scaled 2-D time series; the target is the last column
        nb_timesteps: number of lagged time steps per sample
        features: 'itself' == use only previous values of the target column
                  anything else == use previous values of all available columns
    Output:
        X: array of lagged regressors
        y: array of targets
    """
    samples = []
    targets = []
    use_target_only = features == 'itself'
    for end in range(nb_timesteps, ts.shape[0]):
        window = ts[end - nb_timesteps:end]
        if use_target_only:
            samples.append(window[:, -1])
        else:
            samples.append(window[:, 0:])
        targets.append(ts[end, -1])
    return np.array(samples), np.array(targets)
# +
# Reshape training data into (samples, timestamp, features)
X_train, y_train = makeXy(df_train_scaled, nb_timesteps)
print('Train dataset shape:', X_train.shape, y_train.shape)
X_train = np.reshape(X_train, (X_train.shape[0], X_train.shape[1], 1))
X_train.shape
# +
# Reshape validation data
data = pd.concat([df_train, df_val], axis=0)
data.reset_index(inplace=True, drop=True)
val_inputs = data[df_train_scaled.shape[0]-nb_timesteps:].values
#val_inputs.reshape(-1,1)
# Scale validation inputs
val_inputs = scaler.transform(val_inputs.reshape(-1,1))
X_val, y_val = makeXy(val_inputs, nb_timesteps)
print('Train dataset shape:', X_val.shape, y_val.shape)
X_val = np.reshape(X_val, (X_val.shape[0], X_val.shape[1], 1))
X_val.shape
# -
# ### LSTM Model
from keras.models import Sequential
from keras.layers import LSTM, Dense, Dropout, InputLayer
from keras.callbacks import Callback, EarlyStopping, ModelCheckpoint
# +
# Define LSTM Neural Network
# Stacked LSTM: the first three recurrent layers return full sequences so the
# next LSTM receives a sequence; the last LSTM returns only the final hidden
# state, which feeds a single-unit regression head.
regressor = Sequential()
regressor.add(InputLayer(input_shape=(X_train.shape[1], X_train.shape[2])))
regressor.add(LSTM(units=100, return_sequences=True, ))
regressor.add(Dropout(rate=0.3))  # dropout between layers to curb overfitting
regressor.add(LSTM(units=100, return_sequences=True))
regressor.add(Dropout(rate=0.3))
regressor.add(LSTM(units=75, return_sequences=True))
regressor.add(Dropout(rate = 0.2))
regressor.add(LSTM(units=50))
regressor.add(Dropout(rate = 0.2))
regressor.add(Dense(units=1))  # single scaled close-price output
regressor.summary()
# -
# -
# Create directory to save model
# NOTE(review): the directory created here ("keras_models_lstm1D_monthly")
# differs from model_folder below ("keras_models_lstm1D") — confirm the
# checkpoint directory actually exists before training.
# !mkdir "keras_models_lstm1D_monthly"
# +
# Fit and save best parameters of model
epoch = 50
# Compiler and loss function
regressor.compile(loss='mean_squared_error', optimizer='adam')
# Model Checkpoint
model_folder ='keras_models_lstm1D'
model_file = 'BTC_close_price_lstm_weights.hdf5'
save_weights_at = os.path.join(model_folder, model_file)
# Keep only the checkpoint with the lowest validation loss seen so far.
save_best = ModelCheckpoint(save_weights_at, monitor='val_loss', verbose=0,
                            save_best_only=True, save_weights_only=False, mode='min',
                            save_freq='epoch')
# Fit model
regressor.fit(x=X_train, y=y_train, batch_size=32, epochs=epoch,
              verbose=1, callbacks=[save_best], validation_data=(X_val, y_val),
              shuffle=True)
# +
# Reload the checkpoint with the best validation loss saved during training.
# BUG FIX: the original joined the path with a hard-coded '\\' separator,
# which only works on Windows; build it portably with os.path.join instead
# (same folder/file names as used by the ModelCheckpoint above).
regressor.load_weights(os.path.join(model_folder, model_file))
# Validation predict: invert the min-max scaling back to USD and flatten.
predicted_price = regressor.predict(X_val)
predicted_price = scaler.inverse_transform(predicted_price)
predicted_price = predicted_price.reshape(X_val.shape[0])
# Train predict: same post-processing for the training split.
predicted_price_train = regressor.predict(X_train)
predicted_price_train = scaler.inverse_transform(predicted_price_train)
predicted_price_train = predicted_price_train.reshape(X_train.shape[0])
# +
from sklearn.metrics import mean_squared_error, mean_absolute_percentage_error
print('Train RMSE:')
# NOTE(review): predicted_price_train corresponds to targets
# df_train[nb_timesteps:], but the comparisons below use the FIRST
# len - nb_timesteps rows, shifting actual vs. predicted by nb_timesteps —
# verify the intended alignment.
train_mape = mean_absolute_percentage_error(df_train[:df_train.shape[0]-nb_timesteps],predicted_price_train)
print(train_mape)
train_rmse = np.sqrt(np.mean(np.square(df_train[:df_train.shape[0]-nb_timesteps]-predicted_price_train)))
print(train_rmse)
print('Validation RMSE:')
test_mape = mean_absolute_percentage_error(df_val,predicted_price)
print(test_mape)
test_rmse = np.sqrt(np.mean(np.square(df_val-predicted_price)))
test_rmse
# -
import plotly.express as px
# Interactive plots of actual vs. predicted prices with the error metrics
# embedded in the figure titles.
train_results = pd.DataFrame(df_train[:df_train.shape[0]-nb_timesteps])
train_results['train_pred'] = predicted_price_train
px.line(train_results,
       title='<span style="color:#012888;font-weight:bold">BTC-USD Close price prediction - Training data \
<br><span style="font-size: 13px;color:#444444;">Train RMSE: {:.1f} <br>Train MAPE: {:.1%}</span>'.format(train_rmse, train_mape ))
test_results = pd.DataFrame(df_val)
test_results['test_pred'] = predicted_price
px.line(test_results,
       title='<span style="color:#012888;font-weight:bold">BTC-USD Close price prediction - Validation data \
<br><span style="font-size: 13px;color:#444444;">Validation RMSE: {:.1f} <br>Train MAPE: {:.1%}</span>'.format(test_rmse, test_mape ))
# Earlier static-matplotlib version, kept for reference:
# plt.figure(figsize=(19,6))
# plt.plot(df_val.index, df_val)
# plt.plot(df_val.index, predicted_price)
# plt.title('BTC-USD Close price prediction - Validation')
# plt.xlabel('Date')
# plt.ylabel('USD')
# +
# Combined train + validation view with all four metrics in the title.
total_results = pd.concat([train_results,test_results])
px.line(total_results,
       title='<span style="color:#012888;font-weight:bold">BTC-USD Close price prediction - Total data \
<br><span style="font-size: 13px;color:#444444;">Train RMSE: {:.1f} ; Train MAPE: {:.1%}<br>Validation RMSE: {:.1f} ; Test MAPE: {:.1%} </span>'.format(train_rmse, train_mape, test_rmse, test_mape))
# -
# Ad-hoc inspection cells (left over from development).
blabla = pd.DataFrame(scaler.inverse_transform(val_inputs))
blabla
X_val
val_inputs
| [Codes]LSTM:RNN/LSTM 1D - Monthly.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# language: python
# name: python3
# ---
# + [markdown] nbsphinx="hidden"
# This notebook is part of the `nbsphinx` documentation: http://nbsphinx.readthedocs.io/.
# -
# # Hidden Cells
#
# You can remove cells from the HTML/LaTeX output by adding this to the cell metadata:
#
# ```json
# "nbsphinx": "hidden"
# ```
#
# Hidden cells are still executed but removed afterwards.
#
# For example, the following hidden cell defines the variable `answer`.
# + nbsphinx="hidden"
# Hidden demo cell: executed by nbsphinx but removed from the rendered docs.
answer = 6 * 7
# -
# This is the cell after the hidden cell.
# Although the previous cell is not visible, its result is still available:
answer
# Don't overuse this, because it may make it harder to follow what's going on in your notebook.
#
# Also Markdown cells can be hidden.
# The following cell is hidden.
# + [markdown] nbsphinx="hidden"
# I am a *hidden* Markdown cell!
# -
# This is the cell after the hidden cell.
| doc/hidden-cells.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# language: python
# name: python3
# ---
# import all packages and set plots to be embedded inline
import numpy as np
import pandas as pd
import statsmodels.api as sm
import seaborn as sb
import matplotlib.pyplot as plt # Standard plotting library
from IPython.display import display # A notebook function to display more complex data (like tables)
import scipy.stats as stats # Scipy again
from math import *
#read an csv file
# NOTE: absolute, machine-specific Windows path — adjust before re-running.
df = pd.read_csv(r'C:\Users\Smegn\Documents\GitHub\AdSmart\AdSmartABdata.csv')
df.head(10)
exposed = df.loc[df.experiment == 'exposed'] #exposed set
exposed
# NOTE(review): assigning into a slice of df triggers a pandas
# SettingWithCopyWarning; this exploratory column is recreated properly
# inside transform_data below.
exposed['dateHour'] = pd.to_datetime(exposed.date)
#Selecting Feature : Here, you need to divide the given columns into two types of variables dependent(or target variable)
#and independent variable(or feature variables).
'''
segment data into exposed and control groups
consider that SmartAd runs the experment hourly, group data into hours.
Hint: create new column to hold date+hour and use df.column.map(lambda x: pd.Timestamp(x,tz=None).strftime('%Y-%m-%d:%H'))
create two dataframes with bernouli series 1 for posetive(yes) and 0 for negative(no)
Hint: Given engagement(sum of yes and no until current observation as an array) and success (yes countas an array), the method generates random binomial distribution
#Example
engagement = np.array([5, 3, 3])
yes = np.array([2, 0, 3])
Output is "[1] 1 0 1 0 0 0 0 0 1 1 1", showing a binary array of 5+3+3 values
of which 2 of the first 5 are ones, 0 of the next 3 are ones, and all 3 of
the last 3 are ones where position the ones is randomly distributed within each group.
'''
def transform_data(df):
    """
    Split the raw AdSmart dataframe into exposed and control groups,
    aggregate engagements per date+hour, and draw one Bernoulli observation
    per engagement (1 = success/"yes", 0 = failure).

    Parameters
    ----------
    df : pd.DataFrame
        Must contain the columns 'experiment', 'date', 'hour', 'auction_id',
        'device_make', 'platform_os', 'browser', 'yes' and 'no'.

    Returns
    -------
    (expo, cont) : tuple of np.ndarray
        Binary arrays, one entry per engagement, for the exposed and control
        groups respectively.

    Side effects
    ------------
    Sets the module-level globals prob_succ_exp / prob_succ_con (empirical
    success probabilities); the odds-ratio cell below reads them.
    """
    # Split into groups; .copy() avoids pandas SettingWithCopyWarning when
    # the date_hour column is added below.
    exposed = df.loc[df.experiment == 'exposed'].copy()
    control = df.loc[df.experiment == 'control'].copy()

    def _add_date_hour(group):
        # Combine the date and hour columns into a 'YYYY-mm-dd:HH' key.
        group['date_hour'] = pd.to_datetime(group.date)
        group.date_hour += pd.to_timedelta(group.hour, unit='h')
        group.date_hour = group.date_hour.map(
            lambda x: pd.Timestamp(x, tz=None).strftime('%Y-%m-%d:%H'))
        return group

    exposed = _add_date_hour(exposed)
    control = _add_date_hour(control)

    # Aggregate counts/sums per hourly bucket.
    agg = {'auction_id': 'count', 'device_make': 'count',
           'platform_os': 'count', 'browser': 'count',
           'yes': 'sum', 'no': 'sum'}
    df_exposed = exposed.groupby('date_hour').agg(agg)
    df_control = control.groupby('date_hour').agg(agg)

    # engagement = number of users that answered (yes + no);
    # success = number of "yes" answers.
    df_exposed['engagement'] = df_exposed['yes'] + df_exposed['no']
    df_control['engagement'] = df_control['yes'] + df_control['no']
    df_exposed['success'] = df_exposed['yes']
    df_control['success'] = df_control['yes']

    # Empirical success probabilities, exported as globals for downstream cells.
    global prob_succ_exp, prob_succ_con
    prob_succ_exp = sum(df_exposed['success']) / sum(df_exposed['engagement'])
    prob_succ_con = sum(df_control['success']) / sum(df_control['engagement'])

    engag_exp = df_exposed['engagement'].to_numpy()
    engag_con = df_control['engagement'].to_numpy()

    # One Bernoulli draw per engagement. BUG FIX: np.random.choice maps
    # p[0] -> value 0 and p[1] -> value 1, so the success value 1 must get
    # probability prob_succ_*; the original passed the vector reversed,
    # producing 1s with probability (1 - p_success).
    expo = np.random.choice([0, 1], size=(int(np.sum(engag_exp)),),
                            p=[1 - prob_succ_exp, prob_succ_exp])
    cont = np.random.choice([0, 1], size=(int(np.sum(engag_con)),),
                            p=[1 - prob_succ_con, prob_succ_con])
    return expo, cont
transform_data(df)  # smoke-run; also sets the global success probabilities
# # Test function
# +
alpha = 0.05  # type-I error rate
beta = 0.1   # type-II error rate
##data processing here
exposed,control=transform_data(df)
# odd ratio
# Empirical odds ratio between exposed and control success probabilities,
# used as the alternative-hypothesis parameter t1 of the SPRT below.
odd_ratio=(prob_succ_exp/(1-prob_succ_exp))/(prob_succ_con/(1-prob_succ_con))
# -
def ConditionalSPRT(x,y,t1,alpha=0.05,beta=0.10,stop=None):
    """
    Conditional Sequential Probability Ratio Test for two Bernoulli series.

    Parameters
    ----------
    x, y : binary arrays for the exposed and control groups.
    t1 : odds ratio under the alternative hypothesis (should exceed 1).
    alpha, beta : type-I / type-II error rates.
    stop : optional truncation point (maximum number of observations).

    Returns
    -------
    (outcome, n, k, l, u, truncated, truncate_decision, x1, r, stats, limits)
    where `outcome` is a human-readable verdict, `k` the first index at which
    a decision boundary was crossed (or NaN), `l`/`u` the lower/upper
    log-likelihood boundaries, and `stats`/`limits` the test trajectory.

    NOTE(review): this function uses `math.log` etc., but only
    `from math import *` is visible in this file — confirm `import math`
    exists elsewhere, otherwise these calls raise NameError.
    """
    if t1<=1:
        # NOTE(review): printLog is not defined anywhere in this file.
        printLog('warning',"Odd ratio should exceed 1.")
    if (alpha >0.5) | (beta >0.5):
        printLog('warning',"Unrealistic values of alpha or beta were passed."
                 +" You should have good reason to use large alpha & beta values")
    if stop!=None:
        # NOTE(review): n0 is undefined here — this branch raises NameError
        # if a stop value is ever passed; likely `stop` was intended.
        stop=math.floor(n0)

    def comb(n, k):
        # Exact binomial coefficient C(n, k) via integer factorials.
        return factorial(n) // factorial(k) // factorial(n - k)

    def lchoose(b, j):
        # log of C(b, j), elementwise when j is a list/array; 0 outside [0, b].
        a=[]
        if (type(j) is list) | (isinstance(j,np.ndarray)==True):
            if len(j)<2:
                j=j[0]
        if (type(j) is list) | (isinstance(j,np.ndarray)==True):
            for k in j:
                n=b
                if (0 <= k) & (k<= n):
                    a.append(math.log(comb(n,k)))
                else:
                    a.append(0)
        else:
            n=b
            k=j
            if (0 <= k) & (k<= n):
                a.append(math.log(comb(n,k)))
            else:
                a.append(0)
        return np.array(a)

    def g(x,r,n,t1,t0=1):
        # Log-likelihood-ratio statistic between odds ratios t1 and t0.
        return -math.log(h(x,r,n,t1))+math.log(h(x,r,n,t0))

    def h(x,r,n,t=1):
        # Conditional likelihood term, normalised via the ftermlog offset.
        return f(r,n,t,offset=ftermlog(x,r,n,t))

    def f(r,n,t,offset=0):
        # Sum of likelihood terms over the feasible success counts.
        upper=max(0,r-n)
        lower=min(n,r)
        rng=list(range(upper,lower+1))
        return np.sum(fterm(rng,r,n,t,offset))

    def fterm(j,r,n,t,offset=0):
        # Exponentiated log-terms (offset keeps the exponents in range).
        ftlog=ftermlog(j,r,n,t,offset)
        return np.array([math.exp(ex) for ex in ftlog])

    def ftermlog(j,r,n,t,offset=0):
        # Log of a single hypergeometric-style likelihood term.
        xx=r-j
        lch=lchoose(n,j)
        lchdiff=lchoose(n,xx)
        lg=np.array(j)*math.log(t)
        lgsum=lch+lchdiff
        lgsum2=lgsum+lg
        lgdiff=lgsum2-offset
        return lgdiff

    def logf(r,n,t,offset=0):
        # Safe log of f(); NaN when the sum is non-positive.
        z=f(r,n,t,offset)
        if z>0:
            return math.log(z)
        else:
            return np.nan

    def clowerUpper(r,n,t1c,t0=1,alpha=0.05,beta=0.10):
        # Lower/upper decision limits on the success count at (r, n).
        offset=ftermlog(math.ceil(r/2),r,n,t1c)
        z=logf(r,n,t1c,logf(r,n,t0,offset)+offset)
        a=-math.log(alpha/(1-beta))
        b=math.log(beta/(1-alpha))
        lower=b
        upper=1+a
        return (np.array([lower,upper])+z)/math.log(t1c/t0)

    # Wald-style acceptance/rejection boundaries in log space.
    l=math.log(beta/(1-alpha))
    u=-math.log(alpha/(1-beta))
    sample_size=min(len(x),len(y))
    n=np.array(range(1,sample_size+1))
    if stop!=None:
        n=np.array([z for z in n if z<=stop])
    # Cumulative successes: x1 for the exposed series, r for both combined.
    x1=np.cumsum(x[n-1])
    r=x1+np.cumsum(y[n-1])
    # Test-statistic trajectory: g applied elementwise over the cumulative series.
    stats=np.array(list(map(g,x1, r, n, [t1]*len(x1))))
    # Per-step decision limits (floored to integers below).
    clu=list(map(clowerUpper,r,n,[t1]*len(r),[1]*len(r),[alpha]*len(r), [beta]*len(r)))
    limits=[]
    for v in clu:
        inArray=[]
        for vin in v:
            inArray.append(math.floor(vin))
        limits.append(np.array(inArray))
    limits=np.array(limits)
    # First index where the trajectory crosses either boundary.
    k=np.where((stats>=u) | (stats<=l))
    cvalues=stats[k]
    if cvalues.shape[0]<1:
        k= np.nan
        outcome='Unable to conclude.Needs more sample.'
    else:
        k=np.min(k)
        if stats[k]>=u:
            outcome=f'Exposed group produced a statistically significant increase.'
        else:
            outcome='There is no statistically significant difference between two test groups'
    # NOTE(review): `k==np.nan` is always False (NaN != NaN); this truncation
    # branch is unreachable as written — `np.isnan(k)` was probably intended.
    # Also note clowerUpper is called positionally here, so t1 lands in the
    # t1c slot but alpha lands in the t0 slot — confirm intended arguments.
    if (stop!=None) & (k==np.nan):
        c1=clowerUpper(r,stop,t1,alpha,beta)
        c1=math.floor(np.mean(c1)-0.5)
        if x1[n0]<=c1:
            truncate_decision='h0'
            outcome='Maximum Limit Decision. The aproximate decision point shows their is no statistically significant difference between two test groups'
        else:
            truncate_decision='h1'
            outcome=f'Maximum Limit Decision. The aproximate decision point shows exposed group produced a statistically significant increase.'
        truncated=stop
    else:
        truncate_decision='Non'
        truncated=np.nan
    return (outcome,n, k,l,u,truncated,truncate_decision,x1,r,stats,limits)
# Run the conditional SPRT; element 0 of the returned tuple is the verdict.
# NOTE(review): beta is passed as `beta=alpha` — confirm `beta=beta` was not intended.
test = ConditionalSPRT(x = exposed,y = control,t1 = odd_ratio, alpha=alpha,beta=alpha)
test[0]
#plt.summary(exposed,control)
sb.countplot(df['experiment'])  # sample counts per experiment arm
# Cumulative successes for the exposed group over observation index.
df_exp=pd.DataFrame(exposed)
a = df_exp.cumsum()
a.columns = ['value']
sb.lineplot(x = a.index, y = a.value)
# Cumulative successes for the control group over observation index.
df_con=pd.DataFrame(control)
# BUG FIX: the original computed `a = df_exp.cumsum()` here (copy-paste from
# the previous cell), so the "control" chart re-plotted the exposed series.
a = df_con.cumsum()
a.columns = ['value']
sb.lineplot(x = a.index, y = a.value)
exposed,control=transform_data(df)  # fresh Bernoulli draw for both groups
fig, ax = plt.subplots(figsize=(8,6))
kwargs = {'cumulative': True}  # cumulative histogram + KDE
sb.distplot(exposed, hist_kws=kwargs, kde_kws=kwargs, color = 'black')
sb.distplot(control, hist_kws=kwargs, kde_kws=kwargs, color = 'green')
# NOTE(review): the title says black: control / green: exposed, but the calls
# above draw exposed in black and control in green — confirm the legend.
plt.title('A histogram indicating cummulative distributions of success in the 2 groups black: control, green:exposed')
plt.ylabel('frequency')
plt.xlabel('cummulative success')
| notebooks/sequential_ABtest.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# language: python
# name: python3
# ---
# # Object detection demo
# This notebook shows an example of object detection of an image.
# The network that is used for inference is a variant of Tiny-Yolo, whose topology is illustrated in the following picture.
# The pynq colored layers have been quantized with 1 bit for weights and 3 bit for activations, and will be executed in the HW accelerator, while the other layers are executed in python.
#
# The image processing is performed within darknet by using python bindings.
#
#
# 
import sys
import os, platform
import json
import numpy as np
import cv2
import ctypes
from matplotlib import pyplot as plt
from PIL import Image
from datetime import datetime
from qnn import TinierYolo
from qnn import utils
sys.path.append("/opt/darknet/python/")
from darknet import *
# %matplotlib inline
import IPython
# ## 1. Instantiate a Classifier
# Creating a classifier will automatically download the bitstream onto the device. All other initializations are currently performed in the Darknet framework.
# +
classifier = TinierYolo()
classifier.init_accelerator()  # downloads the bitstream onto the device
net = classifier.load_network(json_layer="/opt/python3.6/lib/python3.6/site-packages/qnn/params/tinier-yolo-layers.json")
# First (conv0) and last (conv8) convolution layers run in software: load
# their weights and reorder the stored axes for the software conv
# implementation (assumed layout — confirm against qnn.utils.conv_layer).
conv0_weights = np.load('/opt/python3.6/lib/python3.6/site-packages/qnn/params/tinier-yolo-conv0-W.npy', encoding="latin1")
conv0_weights_correct = np.transpose(conv0_weights, axes=(3, 2, 1, 0))
conv8_weights = np.load('/opt/python3.6/lib/python3.6/site-packages/qnn/params/tinier-yolo-conv8-W.npy', encoding="latin1")
conv8_weights_correct = np.transpose(conv8_weights, axes=(3, 2, 1, 0))
conv0_bias = np.load('/opt/python3.6/lib/python3.6/site-packages/qnn/params/tinier-yolo-conv0-bias.npy', encoding="latin1")
# Broadcast per-channel biases across all spatial positions of each layer.
conv0_bias_broadcast = np.broadcast_to(conv0_bias[:,np.newaxis], (net['conv1']['input'][0],net['conv1']['input'][1]*net['conv1']['input'][1]))
conv8_bias = np.load('/opt/python3.6/lib/python3.6/site-packages/qnn/params/tinier-yolo-conv8-bias.npy', encoding="latin1")
conv8_bias_broadcast = np.broadcast_to(conv8_bias[:,np.newaxis], (125,13*13))
file_name_cfg = c_char_p("/opt/python3.6/lib/python3.6/site-packages/qnn/params/tinier-yolo-bwn-3bit-relu-nomaxpool.cfg".encode())
net_darknet = lib.parse_network_cfg(file_name_cfg)  # darknet network used for post-processing
# -
# -
# ## 2. Launch demo
#
# The loop will automatically pick a random image from the yoloimages folder and perform the whole classification. Use the "interrupt kernel" button on top to stop the demo
# +
out_dim = net['conv7']['output'][1]
out_ch = net['conv7']['output'][0]
img_folder = './yoloimages/'
# File/parameter arguments passed into the darknet C library as C types.
file_name_out = c_char_p("/home/xilinx/jupyter_notebooks/qnn/detection".encode())
file_name_probs = c_char_p("/home/xilinx/jupyter_notebooks/qnn/probabilities.txt".encode())
file_names_voc = c_char_p("/opt/darknet/data/voc.names".encode())
tresh = c_float(0.3)       # detection threshold
tresh_hier = c_float(0.5)  # hierarchical threshold
darknet_path = c_char_p("/opt/darknet/".encode())
conv_output = classifier.get_accel_buffer(out_ch, out_dim)
# Endless demo loop over the sample images; stop with "interrupt kernel".
while(1):
    for image_name in os.listdir(img_folder):
        img_file = os.path.join(img_folder, image_name)
        file_name = c_char_p(img_file.encode())
        # Load and letterbox the image to the 416x416 network input size.
        img = load_image(file_name,0,0)
        img_letterbox = letterbox_image(img,416,416)
        img_copy = np.copy(np.ctypeslib.as_array(img_letterbox.data, (3,416,416)))
        img_copy = np.swapaxes(img_copy, 0,2)
        free_image(img)           # release darknet-owned image buffers
        free_image(img_letterbox)
        #First convolution layer in sw
        if len(img_copy.shape)<4:
            img_copy = img_copy[np.newaxis, :, :, :]  # add batch dimension
        conv0_ouput = utils.conv_layer(img_copy,conv0_weights_correct,b=conv0_bias_broadcast,stride=2,padding=1)
        # Clip to [0, 4] and quantize activations to 3 bits for the accelerator.
        conv0_output_quant = conv0_ouput.clip(0.0,4.0)
        conv0_output_quant = utils.quantize(conv0_output_quant/4,3)
        #Offload to hardware
        conv_input = classifier.prepare_buffer(conv0_output_quant*7);
        classifier.inference(conv_input, conv_output)
        conv7_out = classifier.postprocess_buffer(conv_output)
        #Last convolution layer in sw
        conv7_out = conv7_out.reshape(out_dim,out_dim,out_ch)
        conv7_out = np.swapaxes(conv7_out, 0, 1) # exp 1
        if len(conv7_out.shape)<4:
            conv7_out = conv7_out[np.newaxis, :, :, :]
        conv8_output = utils.conv_layer(conv7_out,conv8_weights_correct,b=conv8_bias_broadcast,stride=1)
        conv8_out = conv8_output.ctypes.data_as(ctypes.POINTER(ctypes.c_float))
        #Draw detection boxes via darknet's region layer
        lib.forward_region_layer_pointer_nolayer(net_darknet,conv8_out)
        lib.draw_detection_python(net_darknet, file_name, tresh, tresh_hier,file_names_voc, darknet_path, file_name_out, file_name_probs);
        #Display result: parse the probabilities file and show the annotated image
        IPython.display.clear_output(1)
        file_content = open(file_name_probs.value,"r").read().splitlines()
        detections = []
        for line in file_content[0:]:
            name, probability = line.split(": ")
            detections.append((probability, name))
        for det in sorted(detections, key=lambda tup: tup[0], reverse=True):
            print("class: {}\tprobability: {}".format(det[1], det[0]))
        res = Image.open(file_name_out.value.decode() + ".png")
        display(res)
# -
# ## Reset the device
classifier.deinit_accelerator()  # release accelerator resources
# +
from pynq import Xlnk
xlnk = Xlnk();
xlnk.xlnk_reset()  # free contiguous-memory buffers used by the accelerator
| notebooks/tiny-yolo-image-loop.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# language: python
# name: python3
# ---
import networkx as nx
import matplotlib.pyplot as plt
import qutip
import itertools
# All 16 length-8 strings formed by products of the two-qubit blocks 'XZ' and 'YI'.
terms = [''.join(_) for _ in itertools.product(['XZ', 'YI'], repeat=4)]
# Inspection: terms with an odd number of identity factors.
[term for term in terms if term.count('I') % 2 == 1]
# Hand-ordered list of the same 16 terms written as two-line labels
# (one 4-character row per factor) for the graph drawing below.
terms = ['YIXZ\nXZXZ', 'YIXZ\nYIYI', 'YIYI\nXZYI', 'YIYI\nYIXZ',
         'XZXZ\nXZXZ', 'XZXZ\nYIYI', 'XZYI\nXZYI', 'XZYI\nYIXZ',
         'YIXZ\nXZYI', 'YIXZ\nYIXZ', 'YIYI\nXZXZ', 'YIYI\nYIYI',
         'XZXZ\nXZYI', 'XZXZ\nYIXZ', 'XZYI\nXZXZ', 'XZYI\nYIYI']
# +
# Single-qubit Pauli operators, keyed by their letter.
pauli_to_matrix = {'I': qutip.identity(2),
                   'X': qutip.sigmax(),
                   'Y': qutip.sigmay(),
                   'Z': qutip.sigmaz()}

def commutes(term1, term2):
    """Return True iff two multi-line Pauli strings commute.

    Newlines inside a term are ignored; every remaining character is looked
    up in pauli_to_matrix and the full operators are built as tensor
    products, then compared via the norm of their commutator.
    """
    def to_operator(term):
        matrices = [pauli_to_matrix[ch] for ch in term if ch != '\n']
        return qutip.tensor(*matrices)

    op1 = to_operator(term1)
    op2 = to_operator(term2)
    return qutip.commutator(op1, op2).norm() == 0
# +
G = nx.Graph()
node_color = []
for term in terms:
if term in ['XZXZ\nXZXZ', 'XZXZ\nYIYI', 'XZYI\nXZYI', 'XZYI\nYIXZ', 'YIXZ\nXZYI', 'YIXZ\nYIXZ', 'YIYI\nXZXZ', 'YIYI\nYIYI']:
node_color.append('green')
else:
node_color.append('orange')
G.add_node(term)
colors = []
for i in range(len(terms)):
for j in range(i + 1, len(terms)):
if commutes(terms[i], terms[j]):
G.add_edge(terms[i], terms[j], color='b')
colors.append('#d95b59')
nx.draw(G, pos=nx.circular_layout(G), with_labels=True, font_family="Times", font_size=13.5,
node_color=node_color, node_size=1930, edge_color=colors)
ax = plt.gca() # to get the current axis
ax.collections[0].set_edgecolor("#000000")
plt.savefig("BK_pqrs_term.svg")
# -
| Figures/Parity_pqrs_term_MIN_Clique_Cover.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# language: python
# name: python3
# ---
# # CLR_Transformation
# ## Metabolite and WGS data from timepoints 2 and 5 are CLR transformed for downstream analysis.
# ### skbio package is used to perform CLR transform. Original data includes header row with rows as sample and columns as features. Sample ID is first column and treatment condition is second column.
import pandas as pd
from skbio.stats.composition import clr
from skbio.stats.composition import multiplicative_replacement
# # T2 CLR WGS
# Load T2 WGS counts; Sample/Treat are metadata, the rest are features.
T2=pd.read_csv("T2_WGS.csv")
sampleTreat=T2[['Sample', 'Treat']].copy()
T2=T2.drop(['Sample', 'Treat'], axis=1)
T2features=[]
for col in T2.columns:
    T2features.append(col)
# multiplicative_replacement fills zeros so the log in CLR is defined.
newT2=clr(multiplicative_replacement(T2))
newT2DF = pd.DataFrame(data=newT2, columns=T2features)
# Re-attach metadata as the first two columns, then write out.
newT2DF.insert(loc=0, column='Treat', value=sampleTreat['Treat'])
newT2DF.insert(loc=0, column='Sample', value=sampleTreat['Sample'])
newT2DF.to_csv('T2_WGS_clr.csv', index=False)
# # T5_CLR WGS
# Same CLR pipeline for the T5 WGS data.
T5=pd.read_csv("T5_WGS.csv")
sampleTreat=T5[['Sample', 'Treat']].copy()
T5=T5.drop(['Sample', 'Treat'], axis=1)
T5features=[]
for col in T5.columns:
    T5features.append(col)
newT5=clr(multiplicative_replacement(T5))
newT5DF = pd.DataFrame(data=newT5, columns=T5features)
newT5DF.insert(loc=0, column='Treat', value=sampleTreat['Treat'])
newT5DF.insert(loc=0, column='Sample', value=sampleTreat['Sample'])
# NOTE(review): output name says 'WSG' rather than 'WGS' — confirm whether
# downstream code reads 'T5_WSG_clr.csv' before renaming.
newT5DF.to_csv('T5_WSG_clr.csv', index=False)
# # T2_metabolite
# Same CLR pipeline for the T2 metabolite data (variables reuse the T2 names).
T2=pd.read_csv("T2_meta.csv")
sampleTreat=T2[['Sample', 'Treat']].copy()
T2=T2.drop(['Sample', 'Treat'], axis=1)
T2features=[]
for col in T2.columns:
    T2features.append(col)
newT2=clr(multiplicative_replacement(T2))
newT2DF = pd.DataFrame(data=newT2, columns=T2features)
newT2DF.insert(loc=0, column='Treat', value=sampleTreat['Treat'])
newT2DF.insert(loc=0, column='Sample', value=sampleTreat['Sample'])
newT2DF.to_csv('T2_meta_clr.csv', index=False)
# # T5_metabolite
# CLR pipeline for the T5 metabolite data.
# NOTE(review): the T2-named variables are reused for T5 here (copy-paste
# pattern); the output file is the T5 one, so results are correct, but the
# names are misleading.
T2=pd.read_csv("T5_meta.csv")
sampleTreat=T2[['Sample', 'Treat']].copy()
T2=T2.drop(['Sample', 'Treat'], axis=1)
T2features=[]
for col in T2.columns:
    T2features.append(col)
newT2=clr(multiplicative_replacement(T2))
newT2DF = pd.DataFrame(data=newT2, columns=T2features)
newT2DF.insert(loc=0, column='Treat', value=sampleTreat['Treat'])
newT2DF.insert(loc=0, column='Sample', value=sampleTreat['Sample'])
newT2DF.to_csv('T5_meta_clr.csv', index=False)
| Revision/CLR_transform/CLR_transform_metabolite_WGS.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3 (Base Python)
# language: python
# name: python3__SAGEMAKER_INTERNAL__arn:aws:sagemaker:us-west-2:236514542706:image/python-3.6
# ---
# # Detecting Motor Anomalies
# Preventing motor anomalies is a bit more complicated than battery issues. Usually, motors operate in a certain range of power, but sometimes they may present anomalous behavior. Their power consumption can go too high, due to environmental issues, or too low, due to aging issues.
# As usual, let's start by recovering and looking at data:
# Recover the shared dataframe and bucket name stored by the setup notebook.
# %store -r data
data.head()
# %store -r bucket
bucket
# # Exploratory Data Analysis
# Keep only strictly positive peak-current readings for training.
train_data = data[["motor_peak_mA"]]
train_data = train_data[train_data["motor_peak_mA"] > 0]
train_data.head()
train_data.describe()
# train_data.info()
import matplotlib.pyplot as plt
train_data.plot(rot=30)
# ## Synthetic Ground Truth
anomalies = data[["motor_peak_mA"]]
anomalies = anomalies[anomalies["motor_peak_mA"] > 0]
anomalies.info()
# +
from sklearn.model_selection import train_test_split
# 80/20 split; the held-out part receives synthetic anomaly labels.
train_data, test_dataframe = train_test_split(anomalies, test_size=0.2)
# -
test_data = test_dataframe.copy()
# Synthetic labels: anomalous when the peak current exceeds 4000 mA or falls
# in the 50-200 mA band. ("&" binds tighter than "|", so the grouping below
# is high | (low-band), which matches that intent.)
test_data["anomaly"] = test_data["motor_peak_mA"] > 4000
test_data["anomaly"] = test_data["anomaly"] | (test_data["motor_peak_mA"] > 50) & (test_data["motor_peak_mA"] < 200)
test_data["anomaly"] = test_data["anomaly"].astype(int)
test_data.groupby("anomaly").count().head()
test_data.describe()
train_data.describe()
# # Random Cut Forest Training
# Convert the frames to plain numpy arrays for RecordIO serialization.
train_array = train_data.values
train_array
test_array = test_data[["motor_peak_mA"]].values
test_array
labels_array = test_data["anomaly"].values  # synthetic ground-truth labels
labels_array
# +
import io
import numpy as np
import sagemaker
import sagemaker.amazon.common as smac
import boto3
s3bucket = boto3.resource('s3').Bucket(bucket)

def upload_records(array, key, labels=None):
    """Serialize `array` (with optional `labels`) to RecordIO-protobuf and
    upload the stream to `key` in the notebook's S3 bucket.

    Fix: dropped the unused `result = {}` local left over from development.
    """
    buf = io.BytesIO()
    if (labels is not None):
        smac.write_numpy_to_dense_tensor(buf, array, labels)
    else:
        smac.write_numpy_to_dense_tensor(buf, array)
    buf.seek(0)  # rewind so upload_fileobj reads from the start
    s3bucket.Object(key).upload_fileobj(buf)
# +
import os
s3 = boto3.client("s3")
prefix = "mt-motor-anomaly"  # S3 key prefix for this experiment
cwd = os.getcwd()
train_key = "{}/input/{}".format(prefix,"train.rio")
test_key = "{}/input/{}".format(prefix, "test.rio")
upload_records(train_array,train_key)
upload_records(test_array,test_key,labels_array)  # test channel carries labels
# Train channel is sharded across instances; test channel fully replicated.
train_input = sagemaker.inputs.TrainingInput(
    s3_data="s3://{}/{}".format(bucket,train_key),
    content_type='application/x-recordio-protobuf',
    distribution='ShardedByS3Key')
test_input = sagemaker.inputs.TrainingInput(
    s3_data="s3://{}/{}".format(bucket,test_key),
    content_type='application/x-recordio-protobuf',
    distribution='FullyReplicated')
rcf_input = {
    'train': train_input,
    'test': test_input
}
rcf_input
# -
# -
# # RCF Training
# +
region = boto3.Session().region_name
from sagemaker.amazon.amazon_estimator import get_image_uri
region = boto3.Session().region_name
rcf_container = sagemaker.image_uris.retrieve('randomcutforest', region)
rcf_container
# -
rcf_hparams = {
"num_samples_per_tree":512,
"num_trees":50,
"feature_dim":1,
"eval_metrics": "accuracy"
}
rcf_estimator = sagemaker.estimator.Estimator(
rcf_container,
role=sagemaker.get_execution_role(),
instance_count=1,
instance_type='ml.m5.large',
base_job_name="mt-motor-anomaly",
output_path='s3://{}/{}/output'.format(bucket, prefix),
hyperparameters = rcf_hparams )
rcf_estimator.fit(rcf_input)
print('Training job name: {}'.format(rcf_estimator.latest_training_job.job_name))
rcf_inference = rcf_estimator.deploy(
initial_instance_count=1,
instance_type='ml.m5.large',
)
rcf_inference_endpoint = rcf_inference.endpoint_name
# %store rcf_inference_endpoint
rcf_inference_endpoint
# +
from sagemaker.predictor import csv_serializer, json_deserializer
rcf_inference.serializer = sagemaker.serializers.CSVSerializer()
rcf_inference.deserializer = sagemaker.deserializers.JSONDeserializer()
# -
sample_data = train_data[:5].values
sample_data
results = rcf_inference.predict(sample_data)
results
# +
import pandas as pd
sigmas = 1
scores = results["scores"]
scores = [score["score"] for score in scores]
series = pd.Series(scores)
score_mean = series.mean()
score_max = series.max()
score_std = series.std()
score_cutoff = score_mean + sigmas*score_std
(score_mean,score_max,score_std,score_cutoff)
# -
anomalies = series[series > score_cutoff ]
anomalies
"{} anomalies detected".format(len(anomalies))
# ## Motor Maintenance
# Now that we can detect anomalies in past data, let's combine that with forecasting for predictive [motor maintenance](mt-motor-maintenance.ipynb).
| mt-motor-anomaly.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# language: python
# name: python3
# ---
import pandas as pd
import os
import matplotlib.pyplot as plt
import numpy as np
base_path="/net/mraid08/export/jafar/UKBioBank/Data/"
data_path=os.path.join(base_path,"ukb29741.csv")
# Train/validation and held-out test splits of the a1c-below-6.5 cohort.
train_val_path=os.path.join(os.path.join(base_path,"ukb29741_a1c_below_65_updates_scoreboard_train_val.csv"))
test_path=os.path.join(os.path.join(base_path,"ukb29741_a1c_below_65_updates_scoreboard_test.csv"))
# ls -lht /net/mraid08/export/jafar/UKBioBank/Data/
# Read only the header row to discover available column names.
data_cols=pd.read_csv(data_path,nrows=0).columns
date_cols=[x for x in data_cols if x.startswith("53-")]
socio_cols=[x for x in data_cols if x.startswith("189-")]  # Townsend deprivation index (see note below)
socio_cols
diab_date_cols=[x for x in data_cols if x.startswith("2976-")]  # diabetes-related date fields (per variable name)
diab_stat_cols=[x for x in data_cols if x.startswith("2443-")]  # diabetes status fields (per variable name)
tot_cols=socio_cols+date_cols+diab_date_cols+diab_stat_cols+["eid"]
tot_cols
# Load only the selected columns, indexed by participant id.
df=pd.read_csv(data_path,usecols=tot_cols,index_col="eid")
test_data=pd.read_csv(test_path,usecols=["eid","2443-3.0"],index_col="eid")
train_val_data=pd.read_csv(train_val_path,usecols=["eid","2443-3.0"],index_col="eid")
tot_data=pd.concat([test_data,train_val_data])
tot_data.shape
# Parse date-like columns to datetimes (unparseable values become NaT).
df.loc[:,['53-0.0','53-1.0','53-2.0','2976-0.0','2976-1.0','2976-2.0',]]=df.loc[
    :,['53-0.0','53-1.0','53-2.0','2976-0.0','2976-1.0','2976-2.0',]].apply(pd.to_datetime,errors='coerce')
df.head()
# BUG FIX: a stray "189-0.0" fragment was pasted in front of this assignment
# in the original, making the line a SyntaxError.
tot_data=tot_data.join(df)
tot_data.notna().sum()
# +
#189-0.0, Townsend deprivation index at recruitment, 738-0.0 Average total household income before tax (Pilot)
# -
# # Socio columns:
from scipy.stats import mannwhitneyu as mwu
tot_data["2443-3.0"]
tot_data.columns
# Mann-Whitney U test on a 1000-row sample of the deprivation index vs. status.
r,p=mwu(x=tot_data["189-0.0"].dropna().iloc[:1000],y=tot_data["2443-3.0"].dropna().iloc[:1000])
p
tot_data_sick=tot_data.loc[tot_data["2443-3.0"]==1,"189-0.0"]
tot_data_healthy=tot_data.loc[tot_data["2443-3.0"]==0,"189-0.0"]
# BUG FIX: plt.subplot does not accept figsize and returns a single Axes;
# plt.subplots is the figure+axes factory intended here (and is what the
# final plotting cell below uses).
fig,ax=plt.subplots(1,1,figsize=(12,12))
tot_data_healthy.describe()
bins = np.linspace(-10, 10, 100)  # deprivation-index histogram bins
plt.style.use('default')
fig,ax=plt.subplots(1,1,figsize=(9,6))
# Density-normalised histograms of the deprivation index for the T2D sick
# vs. healthy sub-populations, with dashed lines at each group's median.
ax.hist(tot_data_sick,bins,alpha=0.5,label="sick",density=True)
ax.hist(tot_data_healthy,bins,alpha=0.5,label="healthy",density=True)
ax.set_ylabel("Pupulation density")
ax.set_xlabel("Deprivation index")
ax.vlines(ymin=0,ymax=0.26,x=tot_data_sick.median(),color="blue",linestyles='dashed')
ax.vlines(ymin=0,ymax=0.26,x=tot_data_healthy.median(),color="orange",linestyles='dashed')
ax.set_title("Deprivation index histograms for T2D healthy and sick populations")
plt.legend(loc='upper right')
plt.savefig("/net/mraid08/export/jafar/Yochai/UKBB_Runs/For_article/Revision_runs/final_figures/S2A_Deprivation_index_compare.png")
plt.show()
| UKBB_Socio_Impact.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# language: python
# name: python3
# ---
# # PUC Minas Pós Graduação em Ciência de Dados e Big Data (2019-2020) Trabalho de Conclusão de Curso
# Aluno: <NAME>
#
# Notebook relativo ao Trabalho de Conclusão de Curso em Ciência de Dados e Big Data.
# O título é Análise de series temporais em vendas de veículos no Brasil, período utilizado de 2011 a 2020.
# Foram utilizados dados aberto do Site www.autoo.com.br
# +
#Importando Bibliotecas utilizadas
import pandas as pd
import warnings
import requests
from bs4 import BeautifulSoup
import warnings
import numpy as np
import seaborn as sns
import statsmodels.api as sm
import matplotlib.pyplot as plt
from matplotlib.pylab import rcParams
from statsmodels.tsa.stattools import adfuller
from statsmodels.tsa.seasonal import seasonal_decompose
from statsmodels.tsa.arima_model import ARIMA
# +
# Function for scraping data tables from the web
def buscar_Tabela():
    """Scrape the vehicle-licensing (emplacamentos) table from autoo.com.br
    and dump it to 'Emplacamentos.csv' in the working directory."""
    url = 'https://www.autoo.com.br/emplacamentos/'
    print(url)
    req = requests.get(url)
    if req.status_code == 200:
        print('Requisição bem sucedida!')
        # NOTE(review): indentation reconstructed — with this nesting nothing
        # is scraped on a non-200 response; confirm the original cell
        # behaved the same.
        content = req.content
        soup = BeautifulSoup(content, 'html.parser')
        table = soup.find(name='table')
        table_str = str(table)
        tabela = 'Emplacamentos'
        # pandas parses the HTML <table> markup directly into a DataFrame.
        dfDados = pd.read_html(table_str)[0]
        dfDados = dfDados.to_csv(tabela + '.csv', encoding = 'utf-8')
        print('Foi criado o arquivo de Dados ' + tabela+ '.csv')
def teste_adf(serie):
    '''
    Print the results of the Augmented Dickey-Fuller stationarity test.
    '''
    result = adfuller(serie)
    statistic, p_value = result[0], result[1]
    print('ADF Statistic: %f' % statistic)
    print('p-value: %f' % p_value)
    print('Critical Values:')
    # result[4] maps confidence levels to their critical thresholds.
    for level, threshold in result[4].items():
        print('\t%s: %.3f' % (level, threshold))
# -
#https://www.autoo.com.br/emplacamentos/
#Emplacamentos
buscar_Tabela()  # scrape the table and write Emplacamentos.csv
# +
# Load the data from the csv file created from the web table.
# Conversion of the 'Ano' (year) column to a date format was left disabled.
dfEmplacamentos = pd.read_csv('Emplacamentos.csv', encoding='utf-8')
#dfEmplacamentos.set_index('Ano',inplace=True)
#dfEplacamentos[['Jan','Fev','Mar','Abr','Mai','Jun','Jul','Ago','Set','Out','Nov','Dez']] = dfEmplacamentos[['Jan','Fev','Mar','Abr','Mai','Jun','Jul','Ago','Set','Out','Nov','Dez']].astype(float)
#dfEmplacamentos['Ano']= dfEmplacamentos['Ano'].dt.year
#dfEmplacamentos['Ano'] = pd.DatetimeIndex(dfEmplacamentos['Ano']).year
dfEmplacamentos.head()
# -
# -
# # Tratamento dos dados
# Check column dtypes before cleaning
dfEmplacamentos.dtypes
# Drop the scraped 'Total' column and rebuild it from the row-wise sum.
# NOTE(review): at this point the frame still contains 'Unnamed: 0' (the CSV
# index) and 'Ano'; if those are numeric they are included in this sum —
# TODO confirm against the scraped table's dtypes.
dfEmplacamentos.drop('Total', axis=1, inplace=True)
dfEmplacamentos['Total de Vendas'] = dfEmplacamentos.sum(axis=1)
# Keep only the first 15 rows of the scraped table
dfEmplacamentos = dfEmplacamentos.head(15)
dfEmplacamentos.dtypes
# Count missing values per column
dfEmplacamentos.isnull().sum()
# Replace missing values with zero
dfEmplacamentos.fillna(0, inplace=True)
# +
# (disabled) keep only the last ten years
#dfEmplacamentos = dfEmplacamentos.head(10)
# -
# Drop the index column pandas created when the CSV was written
dfEmplacamentos.drop('Unnamed: 0', axis=1, inplace=True)
dfEmplacamentos.drop(0,inplace=True)
# Sort rows by the year column
dfEmplacamentos.sort_values('Ano', ascending=True, inplace=True)
# ## Análise exploratória dos dados
#
# Para orientar a análise dos dados realizei algumas perguntas
#
# 1. Qual foi o melhor ano para as vendas de veículos no Brasil
#
# 2. Qual foi o pior ano para as vendas de veículos no Brasil
# 3. Como foi o comportamento das vendas de veículos por Trimestre
# 4. parametros estatisticos para as vendas anuais e trimestrais
#
# Descriptive statistics for the monthly columns (year and total excluded)
dfEstatisticas = dfEmplacamentos.drop(columns=['Ano','Total de Vendas'],axis=1).describe()
dfEstatisticas
# Row of per-month means from the describe() output
Media = dfEstatisticas.loc['mean']
# NOTE(review): dfMedia is rebound several times below; only the final
# assignment survives, so the intermediate ones are dead code. Left
# untouched in this doc-only pass.
dfMedia = pd.DataFrame(data=Media,columns=['Mes','Media de Vendas'])
dfMedia = dfEstatisticas.loc['mean']
# Reshape the 12 monthly means into a single-column frame
dfMedia = pd.DataFrame(np.array(Media).reshape(12, 1), columns = list("A"))
#dataset = pd.DataFrame({'Column1': Media[:, 0], 'Column2': data[:, 1]})
#ataset
# Duplicate of the assignment above — the result is identical
dfMedia = pd.DataFrame(np.array(Media).reshape(12, 1), columns = list("A"))
dfMedia
#Melhor Ano para as Vendas de Veículos no Brasil
plt.style.use("ggplot")
dfEmplacamentos[['Ano','Total de Vendas']].plot(kind='bar', x='Ano', figsize=(15,8),title='Vendas Anuais', grid='True',alpha = .6, color = "royalblue")
#dfMedia[['Ano','Total de Vendas']].plot(kind='line', figsize=(15,8),title='Vendas Anuais', grid='True')
plt.style.use("ggplot")
plt.show()
# ### Podemos verificar através do gráfico de barras que o pior ano para as vendas de veículos foi o de 2016 e que os melhores anos foram os de 2012 e 2013.
#
# +
#plt.style.use("ggplot")
#plt.figure(figsize = (15, 8))
#dfEmplacamentos[['Ano','Total de Vendas']].hist(bins = 40, ec = "k", alpha = .6, color = "royalblue")
#plt.title('Ano')
#plt.xlabel("Período")
#plt.ylabel("Vendas em Milhares de Veículos")
# -
#Dados Estatisticos sobre as variáveis
dfEmplacamentos.drop(columns=['Ano','Total de Vendas',],axis=1).describe()
#Gráfico de Caixas para análises estatísticas das vendas pelos meses e os outliers(mínimos,máximos,médias, quartis e as discrepâncias) das vendas mensais de 2011 a 2020.
plt.style.use("ggplot")
dfEmplacamentos.drop(columns=['Ano','Total de Vendas'], axis=1).boxplot(color='#4169e1',figsize='15,5')
#dfEmplacamentos
# Através do gráfico de caixas, podemos verificar um outlier no mês de maio, esta discrepância é devido a falta de dados para o mês no ano corrente.
# No mês de abril, verificamos a mínima histórica para as vendas, provavelmente puxada pelo mês no ano de 2020.
# Já a máxima Histórica de vendas é verificada no mês de agosto, provavelmente influenciado pelos anos de 2012 e 2013.
# Podemos visualizar também, que as medianas de vendas se movem 200 e 250 mil vendas mensais.
# Podemos observar que o mês de dezembro possuí a maior dispersão de dados.
#Sabendo que o ano de 2012 foi o melhor ano para as vendas, e que o completo de 2016 foi o pior, qual o trimestre que teve maior influencia nesse resultado
dfEmplacamentos['1ºTrimestre'] = dfEmplacamentos['Jan']+dfEmplacamentos['Fev']+dfEmplacamentos['Mar']
dfEmplacamentos['2ºTrimestre'] = dfEmplacamentos['Abr']+dfEmplacamentos['Mai']+dfEmplacamentos['Jun']
dfEmplacamentos['3ºTrimestre'] = dfEmplacamentos['Jul']+dfEmplacamentos['Ago']+dfEmplacamentos['Set']
dfEmplacamentos['4ºTrimestre'] = dfEmplacamentos['Out']+dfEmplacamentos['Nov']+dfEmplacamentos['Dez']
dfEmplacamentos
#Separando as vendas por trimestres
dfvendas_trimestre = dfEmplacamentos[['Ano','1ºTrimestre','2ºTrimestre','3ºTrimestre','4ºTrimestre']]
#dfvendas_trimestre =dfvendas_trimestre.sort_values(by='Ano' , ascending=False)
dfvendas_trimestre
#Vemos que os dois ultimos trimestres de 2012 foram os responsaveis pelo recorde de vendas de 2012
#Vemos também que a diminuição das vendas de 2016 foi bem dividida ao longo do ano
#dfvendas_trimestre.loc[[4,8]].plot(kind='bar', x='Ano', figsize=(15,8),title='Vendas Anuais', grid='True')
dfvendas_trimestre.plot(kind='line', x='Ano', figsize=(18,5),title='Vendas Anuais', grid='True')
plt.show()
# A partir do segundo semestre de 2015 verifica-se o ínicio da tendência que é interrompida no início de 2020 pelo começo da crise gerada pela Pandemia.
# +
#dfvendas_trimestre["1ºTrimestre"].plot()
# -
##Dados Estatisticos sobre as variáveis
dfvendas_trimestre.describe()
#Gráfico de Caixas para análises estatísticas e outliers(mínimos,máximos,médias, quartis e as discrepâncias) das vendas trimestrais de 2008 a 2020.
dfvendas_trimestre.drop('Ano', axis=1).boxplot(color='#4169e1',figsize='15,5')
sns.heatmap(dfvendas_trimestre.drop('Ano', axis='columns').corr()
)
plt.show()
# Através do mapa de correlação, percebemos forte sazonalidade ao final de cada semestre.
# # Análise da Serie Temporal
# Qual o comportamento da série temporal do total de vendas mês/ano no periodo de 2008 a 2020?
#
# +
# Split into train and test sets: the first 3 rows become the test set,
# the remaining rows the training set.
ts_treino = dfEmplacamentos[['Ano','Total de Vendas']].iloc[3:,0:2].copy()
# NOTE(review): the column slice 0:3 exceeds the 2 selected columns;
# pandas clips it, so this is effectively the same as 0:2.
ts_teste = dfEmplacamentos[['Ano','Total de Vendas']].iloc[:3,0:3].copy()
ts_treino
#ts_teste
# -
# Test set: parse the year into a datetime and use it as the index
ts_teste['Ano'] = pd.to_datetime(ts_teste['Ano'], format="%Y")
ts_teste.set_index('Ano', inplace=True)
ts_teste
# Training set: same datetime-index treatment
ts_treino['Ano'] = pd.to_datetime(ts_treino['Ano'], format="%Y")
ts_treino.set_index('Ano',inplace=True)
ts_treino
# Decomposição dos dados da base de treinamento
decomposicao = seasonal_decompose(ts_treino, period=2)
imagem = decomposicao.plot()
# Aplicando a diferenciação na série e removendo dados nulos
ts_treino_diff_1 = ts_treino.diff()
ts_treino_diff_1 = ts_treino_diff_1.dropna()
# Decomposição dos dados da base de treinamento
decomposicao = seasonal_decompose(ts_treino_diff_1, period=3)
imagem = decomposicao.plot()
# Gráfico de autocorrelação
sm.graphics.tsa.plot_acf(ts_treino.values.squeeze(), lags=5)
plt.show()
# Gráfico da autocorrelação parcial
sm.graphics.tsa.plot_pacf(ts_treino.values.squeeze(), lags=4)
plt.show()
# Criando o modelo
modelo = ARIMA(ts_treino, order=(0,1,1), freq=ts_treino.index.inferred_freq)
warnings.filterwarnings("ignore", category=DeprecationWarning)
# Ajustando o modelo de acordo com os parâmetros
modelo_ajustado = modelo.fit(disp=0)
# Exibe dados do modelo, como valores AIC e BIC obtidos.
modelo_ajustado.summary()
# Teste Dickey-Fuller
#Com o p-value 0 podemos considerar a serie estacionaria
teste_adf(ts_treino['Total de Vendas'])
# Gráfico com os erros residuais do modelo ajustado
redisuais = pd.DataFrame(modelo_ajustado.resid)
fig, ax = plt.subplots(1,2)
redisuais.plot(title="Residuais", ax=ax[0])
redisuais.plot(kind='kde', title='Densidade', ax=ax[1])
plt.show()
# Dados atuais x Modelo
modelo_ajustado.plot_predict(dynamic=False)
plt.show()
redisuais.describe()
# + active=""
# # Conclusão
#
# Através da técnica de Scrapping foi obtida a série temporal para o emplacamento mensal dos últimos anos.O emplacamento mensal reflete as vendas mensais de veículos novos no Brasil.
#
# Após as análises realizadas conclui-se que o melhor ano para as vendas de veículos novos no Brasil foi o de 2012, enquanto que o ano de 2016 apresentou o menor índice de vendas.
# Observando o gráfico Trimestral, pode-se perceber que os melhores Trimestres para a venda de veículos são os 4º trimestres. sendo os 1º Trimestres os que apresentam menor índice de vendas. Foi observado também que em 2015 houve uma inversão de vendas ao longo dos trimestres.
# Ao iniciar a análise da série Temporal, constata-se que, para uma análise mais precisa, é necessário a utilização de dataset mais extenso tanto para o treinamento do modelo, decomposição sazonal quanto para a predição dos dados.
# A decomposição sazonal demonstrou que a partir de 2015 as vendas de veículos novos estão em uma tendência de alta e que, no período análisado, foi observado alta sazonalidade entre os anos.
# O mercado de vendas de veículos novos no Brasil já sofria com a queda no período de 2014 a 2016. A partir de 2017 o mercado se preparava para um período de alta nas vendas, porém foi interrompido com a crise gerada pela pandemia da covid-19, marcando assim um novo período de baixa a partir de 2020.
# Para a predição foram testados os modelos de Regressão linear, Random Forest e o ARMA (ARIMA): os dois primeiros apresentaram altos níveis de erros, provavelmente pela baixa quantidade de dados do nosso DataSet; o modelo ARIMA foi escolhido por possibilitar a manipulação de configurações de forma mais prática para o usuário.
#
#
#
#
#
#
# -
| .ipynb_checkpoints/TccFinal-checkpoint.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# language: python
# name: python3
# ---
# Testing YOLO with CV2
# Created with example code from [here](https://opencv-tutorial.readthedocs.io/en/latest/yolo/yolo.html)
# YOLO object detection
import cv2 as cv
import numpy as np
import time
# Load names of classes and get random colors
classes = open('Data/RBNR/obj.names').read().strip().split('\n')
np.random.seed(42)
colors = np.random.randint(0, 255, size=(len(classes), 3), dtype='uint8')
# +
# Give the configuration and weight files for the model and load the network.
configPath = 'Data/YOLO/bib_detector/RBNR2_custom-yolov4-tiny-detector.cfg'
weightsPath = 'Data/YOLO/bib_detector/RBNR2_custom-yolov4-tiny-detector_best.weights'
net = cv.dnn.readNetFromDarknet(configPath, weightsPath)
net.setPreferableBackend(cv.dnn.DNN_BACKEND_OPENCV)
# net.setPreferableTarget(cv.dnn.DNN_TARGET_CPU)
# Determine the names of the unconnected output layers.
ln = net.getLayerNames()
# getUnconnectedOutLayers() returns (N, 1) index arrays on OpenCV < 4.5.4
# and a flat (N,) array on newer versions; flattening handles both forms,
# whereas the original `i[0]` indexing crashes on the newer flat return.
ln = [ln[i - 1] for i in np.array(net.getUnconnectedOutLayers()).flatten()]
# -
cap = cv.VideoCapture(0)
ret, frame = cap.read()
frame.shape
# +
while(True):
    # Capture frame-by-frame from the webcam opened above
    ret, frame = cap.read()
    # Process frame: scale pixel values to [0, 1] and resize to the
    # 416x416 network input, converting BGR -> RGB (swapRB).
    blob = cv.dnn.blobFromImage(frame, 1/255.0, (416, 416), swapRB=True, crop=False)
    # Run a forward pass and collect detections from each output layer
    net.setInput(blob)
    outputs = net.forward(ln)
    boxes = []
    confidences = []
    classIDs = []
    h, w = frame.shape[:2]
    for output in outputs:
        for detection in output:
            # detection layout: [cx, cy, w, h, objectness, class scores...]
            scores = detection[5:]
            classID = np.argmax(scores)
            confidence = scores[classID]
            if confidence > 0.5:
                # Box coords are normalized; scale back to frame pixels
                box = detection[:4] * np.array([w, h, w, h])
                (centerX, centerY, width, height) = box.astype("int")
                # Convert center-based box to top-left corner form
                x = int(centerX - (width / 2))
                y = int(centerY - (height / 2))
                box = [x, y, int(width), int(height)]
                boxes.append(box)
                confidences.append(float(confidence))
                classIDs.append(classID)
    # Non-maximum suppression: score threshold 0.5, NMS threshold 0.4
    indices = cv.dnn.NMSBoxes(boxes, confidences, 0.5, 0.4)
    if len(indices) > 0:
        for i in indices.flatten():
            (x, y) = (boxes[i][0], boxes[i][1])
            # Shadows the frame dims above, but those are recomputed each
            # iteration, so this is harmless.
            (w, h) = (boxes[i][2], boxes[i][3])
            color = [int(c) for c in colors[classIDs[i]]]
            cv.rectangle(frame, (x, y), (x + w, y + h), color, 2)
            text = "{}: {:.4f}".format(classes[classIDs[i]], confidences[i])
            cv.putText(frame, text, (x, y - 5), cv.FONT_HERSHEY_SIMPLEX, 0.5, color, 1)
    # Display the resulting frame; 'q' quits the loop
    cv.imshow('frame',frame)
    if cv.waitKey(1) & 0xFF == ord('q'):
        break
# When everything done, release the capture
cap.release()
cv.destroyAllWindows()
# -
| scratch/yolo-demo-bib-detector.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# language: python
# name: python3
# ---
# # Basic Python Objects, Variables, and Operators
# ## Variables
# Everything in python can be considered to be either a variable, an object, or an operator. An object can be everything
# from a number, to a function, to something more complex like a class. For the moment let's not worry too much about
# objects, in fact most of this course is about how to create and use the plethora of objects that exist in the
# python language. Briefly an operator is something that does something to a variable or an object (e.g. =, +, -).
# We'll talk about operators in a moment. Instead for now let's focus on the variable in python.
# A python variable is basically the name we give to an object in a script. Assignment of a variable is simple using
# the '=' operator:
#
x = 42
# A variable can be named anything except one of the built in keywords of python.
# To get a list of these keywords you can use the help function:
help('keywords')
# While you can name a variable anything, some options are better than others. As previously mentioned style is important.
# The PEP 8 standards suggest that variables should be lowercase, with words separated by underscores as necessary to
# improve readability. While PEP 8 isn't necessary, it is a really good habit to get into. It is also very important not
# to give a variable the name of any of the builtin functions and variables, otherwise you cannot use the
# builtin function again in your script.
#
# That said don't panic about knowing every builtin variable, most integrated development editors will raise some sort of warning when you overwrite a builtin name. Also if you try to use a builtin function again it will simply raise an exception, for example:
# now we'll be naughty and overwrite the help function, really don't do this...
help = 42
# if we try to use the help function it will raise an exception
help('keywords')
# if you make this mistake, fix it in your script and reload you interpreter.
#
# Why did this happen? It has to do with how python assigns variables. When we assigned the value of 42 to x above the
# number 42 was created in the computer's memory and the variable *x* was pointed to that memory via a unique object ID.
# python has a built in function id(), which allows us to see this ID. This is helpful as we can see how python
# handles memory. Take a look at the example below:
x = 42
id(x)
y = x
id(y)
x = 15
y
id(x)
id(y)
# Note that when we set *y* = *x* all it did was point *y* to the same bit of computer memory that *x* is pointing to. When
# we re-assigned *x* (to 15) it points at a different part of memory leaving *y* unchanged with a value of 42. So when we
# overwrote the help function above, all we did was point the variable *help* to a new object (42)
#
# When an object is no longer in use (e.g. no variables are pointing to it) a part of python called the garbage collector
# will remove the object from memory so your computer doesn't have too much on it's mind.
# ## Numbers: Integers and Floats
# There are three main ways to portray numeric values in python - integers, floats, and complex.
#
# An Integers is as you would expect, a number without a decimal point (e.g. 1, 2, 3, or -5000).
# Floats on the other hand are numbers with a decimal point. We won't really talk about complex numbers here, but it is
# useful to know that python can handle complex numbers.
type(1)
type(3.14159)
type(3.)
# the type function in python tells you what type a given object or variable is.
#
# There are a number of operations that you can do with numeric values:
x = 2
y = 3.5
z = -5
x + y # the sum of x and y
x - y # the difference of x and y
x * z # the product of x and z
z / x # the quotient of z and x (2)
z // x # the floored quotient of z and x (3)
z % x # the remainder of z / x
abs(z) # the absolute value of z
int(y) # the integer of y rounded down
float(x) # x converted to a float
z ** x # z to the power of x
x = 42
x += 60 # add 60 to x and assign the new value back to x
x
x = 10
x *= 10 # multiply x by 10 and assign the new value back to x
x
# ## Boolean
# A boolean value in python is either True or False (case sensitive). As with numeric data there are several basic
# operations that can be performed on boolean data.
True or False
True or True
True and True
True and False
not True
not False
all([True, True, False]) # this uses a list, which will be described in the next section
any ([True, False, False])
# order of operations also applies to boolean operations, so:
True and (True or False)
False or (True and False)
# Boolean values can be converted to integers and floats where True = 1 and False = 0
True == 1
False == 0
# ## Strings
# Strings are made up of different characters (e.g. a, b, c, %, &, ?, etc.). Every sentence ever written can be
# considered as a string. You can make strings in a number of ways by wrapping characters ' and " so for example:
x = 'my string'
y = "also my string"
z = "my string can contain quotes of the other type 'like this one'"
x
y
z
x = """
triple " or ' can define a string that splits
a number of lines
like this
"""
x # \n is the symbol for new line. \' is the symbol for '
x = '5'
x
int(x) # and number stings can be converted to floats and ints
float(x)
x = 'five'
int(x) # though python isn't smart enough to convert everything to a numeric value and will raise an exception
# ## The print function
# Up to now in order to see the contents of a variable we have simply been calling the variable. This works fine in an
# interactive python environment, but when running a python script from start to finish you need the print function.
# The print function is easy to use and will simply print the variable, so for instance:
x = 'some string'
print(x)
print(1,1,2,2,3)
| jupyter/basic-python/1_basic_python_objects.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# language: python
# name: python3
# ---
# # 購物籃分析 market basket analysis
# ## 購物籃分析簡介
# **購物籃分析(market basket analysis)** 又稱 **關連分析(association analysis)** ,其目的是從大量的交易資料中,探勘出隱藏在資料間具有相關性的關連規則(association rules)。這些關連規則表示消費者通常買什麼,哪些商品經常會被一起購買。購物籃分析最經典的就是啤酒與尿布的例子。
# ### 購物籃分析的概念
# 購物籃分析的演算概念主要為兩個機率統計量的計算,分別為 **支持度(support)** 和 **信賴度(confidence)** 。以下用一個例子來說明支持度和信賴度的意義與計算方式。如下圖所示,假定所有的發票共有2000筆(以下以$C$代表),包含A產品的發票有1250(750+500)筆(以$C_{A}$代表),包含B產品的發票則有1000(500+500)筆(以$C_{B}$代表),同時包含A產品與B產品的發票有500筆。
# 
# 我們想計算「如果購買A產品時也一起購買B產品」(購物籃分析將這條關連規則表示成 ${A}\Rightarrow{B}$ )時的支持度與信賴度:
# - 支持度(Support) :
# 在所有的發票中,同時購買A、B產品的次數比例 $Pr(A, B)=\frac{C_{A, B}}{C}=\frac{500}{2000}=0.25$
# 如果支持度大,表示顧客很有可能同時購買A、B產品。
# - 信賴度(Confidence) :
# 在購買A產品的發票中,同時也購買B產品的次數比例 $Pr(B|A)=\frac{Pr(A,B)}{Pr(A)}=\frac{\frac{C_{A, B}}{C}}{\frac{C_{A}}{C}}=\frac{\frac{500}{2000}}{\frac{1250}{2000}}=0.4$
# 如果信賴度大,表示顧客在購買A產品時也很有可能同時購買B產品,但反之,購買B產品時並不一定同時購買A產品。
# 在進行購物籃分析時,需要先設定最小支持度與最小信賴度。如果所設定的最小支持度與最小信賴度太低,則會產生太多關連規則,造成決策上的干擾。反之,最小支持度與最小信賴度的設定太高則可能會面臨關連規則找出太少而造成難以應用的窘境。
# 一個強關聯規則,通常支持度和信賴度的值都很高。但支持度和信賴度值高的規則,卻不一定代表這條規則所指的事件彼此間就一定存在著高相關性。同時還需檢查**增益率(lift)**的值是否大於1。
#
# - 當增益度的值>1, 則A與B間有正向關係
# - 當增益度的值=1, 則A與B間沒有關係
# - 當增益度的值<1, 則A與B間為負向關係
# 增益率的計算方式:$\frac{Pr(B|A)}{Pr(B)}=\frac{Pr(A,B)}{Pr(A){\times}Pr(B)}$
# ## 購物籃分析的應用
# 以下我們將利用python的mlxtend套件分析Online Retail.xlsx資料集。如同前面幾次課程,這個資料集包括發票編號(InvoiceNo)、貨品編號(StockCode)、描述(Description)、數量(Quantity)、發票日期(InvoiceDate)、單價(UnitPrice)、顧客識別號(CustomerID)、國別(Country)等變數欄位。我們以同一個發票編號的發票做為一次交易,分析哪些貨品比較可能會一起購買。
# ### 載入套件
# +
# 載入所需套件
import pandas as pd
from matplotlib import pyplot as plt
import seaborn as sns
import numpy as np
# +
'''
圖形中有中文字型的問題
參考
https://codertw.com/%E7%A8%8B%E5%BC%8F%E8%AA%9E%E8%A8%80/359974/
'''
from matplotlib.font_manager import FontProperties
han_font = FontProperties(fname=r"c:/windows/fonts/msjh.ttc", size=14) # 中文字形
# -
'''
設計圖形呈現的外觀風格
'''
sns.set(style="whitegrid")
# 關聯分析
from mlxtend.frequent_patterns import apriori
from mlxtend.frequent_patterns import association_rules
# ### 讀入資料
# 讀入資料檔
df = pd.read_excel('Online Retail.xlsx')
# ### 資料清理
# 去除CustomerID沒有資料的紀錄
df = df.dropna(subset=['CustomerID'])
# +
from datetime import date
df = df.assign(PurchaseDate=df.InvoiceDate.apply(lambda x: x.date()))
# 取出2010-12-09到2011-12-09一年之間的資料
df = df[df.PurchaseDate>=date(2010, 12, 9)]
# -
#取出購買紀錄(不包含取消紀錄)
df = df[df.Quantity>0]
# 清除品項描述欄位中前後多餘的空白
df['Description'] = df['Description'].str.strip()
# 去除品項描述欄位中資料為郵資(POSTAGE)的紀錄
df = df.loc[df.Description!="POSTAGE"]
# ### 查看資料
df.head()
# ## 分析不同國家的情形
df.groupby("Country").InvoiceNo.nunique().reset_index().sort_values("InvoiceNo", ascending=False)
# 德國
print("德國共有{}筆記錄,{}筆發票,{}項商品".
format(len(df[df.Country=="Germany"]),
len(set(df[df.Country=="Germany"].InvoiceNo)),
len(set(df[df.Country=="Germany"].Description))))
# 法國
print("法國共有{}筆記錄,{}筆發票,{}項商品".
format(len(df[df.Country=="France"]),
len(set(df[df.Country=="France"].InvoiceNo)),
len(set(df[df.Country=="France"].Description))))
national_purchase = df.groupby(['Country', 'InvoiceNo', 'Description']).Quantity.sum().reset_index()
national_purchase.head()
national_purchase.Quantity = 1
# +
### 德國的購物籃分析
# -
Germany_inv_items = national_purchase[national_purchase.Country=="Germany"]
Germany_inv_items.head()
Germany_inv_items = Germany_inv_items.pivot(index='InvoiceNo',
columns='Description',
values='Quantity')
Germany_inv_items.head()
Germany_inv_items = Germany_inv_items.fillna(0)
Germany_inv_items.head()
# +
frequent_itemsets = apriori(Germany_inv_items, min_support=0.05, use_colnames=True)
frequent_itemsets.head()
# -
rules = association_rules(frequent_itemsets, metric="lift", min_threshold=1)
rules.head()
rules[(rules.lift>=1) & (rules.confidence>0.6)]
# +
## Total
# -
inv_items = national_purchase.pivot(index='InvoiceNo',
columns='Description',
values='Quantity')
inv_items.head()
inv_items = inv_items.fillna(0)
inv_items.head()
# +
frequent_itemsets = apriori(inv_items, min_support=0.03, use_colnames=True)
frequent_itemsets
# -
rules = association_rules(frequent_itemsets, metric="lift", min_threshold=1)
rules.head()
# NOTE(review): `qualifying_orders`, `order_item`, `gr1` and
# `order_products_prior` are never defined anywhere in this notebook —
# these cells appear to be copied from an order-item analysis of a
# different dataset and will raise NameError if executed. Left untouched
# in this doc-only pass; TODO confirm and either define them or drop the cells.
print("Remaining orders with 2+ items: {:11d}".format(len(qualifying_orders)))
print("Remaining order_item: {:21d}".format(len(order_item)))
# Per-product order counts, then cumulative percentage of items sold
gr2 = gr1.groupby("product_id")\
    .order_id.nunique().reset_index()\
    .sort_values("product_id", ascending=True)
gr2 = gr2.assign(totals=gr2.product_id*gr2.order_id)
total_sum = gr2.totals.sum()
gr2 = gr2.assign(percent=gr2.totals/total_sum)
gr2 = gr2.assign(cum_percent=np.around(gr2.percent.cumsum()*100, decimals=2))
# +
'''
選用線圖呈現訂單上商品數量的分布情形。
'''
plt.figure(figsize=[10, 5])  # figure size
ax = sns.lineplot(x="product_id", y="cum_percent", data=gr2)
ax.set_xlabel("訂單上商品數量", fontproperties=han_font)  # x-axis label, rendered with the Chinese font
ax.set_ylabel("銷售商品累積百分比", fontproperties=han_font)
ax.set_title('訂單上商品數量的分布情形', fontproperties=han_font, fontsize=18)
# +
# Best-selling products
gr3 = order_products_prior.groupby("product_name")\
    .agg({'reordered': 'count'}).reset_index().sort_values("reordered", ascending=False)
gr3
# -
| 14-market-basket-analysis.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: learn-env
# language: python
# name: learn-env
# ---
# # Plan of Attack
#
# Attributes of content to consider - runtime, genre, actors cast, type of title. We always want to correlate to box office gross first. Second indicator? Budget? Profitability?
#
# professional_names_df - is our basic crew information. Actor information (living or dead) as well as known for titles in IMDB naming convention.
#
# title_key_df is movie title_ID (use as key to translate across dataframes?) We also know 'region' but no indication of what that is, and language.
#
# imdb_genres_df is movies by runtime and genre, also year it was released. This could be very useful. genre_list has a full list of all categories, lots of null values
#
# box_gross_df is movies by gross from 2010-2018
#
# crew_df is movie set job I think?
#
# directors_df is directors and writers, but by IMDB ID - need to convert
#
# imdb_ratings_df is audience reviews and number of reviews by movie - added primary titles. No num values.
#
# rt_details_df is Rotten Tomatoes basic info about movies - includes genres as well
#
# rt_reviews_df is Rotten Tomatoes reviews by movie - includes fresh/rotten - 9 duplicates
#
# tmdb_df is TMDB ratings on 25000 movies - looks super helpful
#
# budget_df is a list of movie budgets and grosses. Combine with df_4 to create the best list of Y values for future visualizations
#
# ## Current Data Points
# - Domestic Box Office Gross
# - Foreign Box Office Gross
# - Actors by movie
# - Directors by movie
# - Producers by movie
# - Writers by movie
# - Movie Runtime
# - Studio
# - IMDB ratings
# - RT freshness
# - TMDB Popularity
# - TMDB vote count, vote average
# - Production budget vs. gross
#
# ## Initial Questions
# - What genres produce the most popularity AND the most positive audience votes?
# - What directors/writers/producers have the best ROI?
# - What actors have the most popularity and/or box office gross?
# - Is there an ideal movie budget to maximize ROI by genre?
#
#
# ## To Do By Question
#
# #### General Data Cleaning
# - Create DataFrame with following columns:
# - Movie Title
# - IMDB Code
# - Directors
# - Writers
# - Producers
# - IMDB vote count
# - IMDB vote score
# - TMDB vote count
# - TMDB vote score
# - RT Freshness
# - Popularity
# - Domestic Gross
# - Budget
# - Foreign Gross
# - Genres
# - Studio?
# - (Top 3 Actors?)
#
# #### What genres produce the most popularity AND the most positive audience votes?
#
# - df_11 vs. df_10 - popularity and budget
# - df_11 w/ df_2 w/ df_7 - IMDB votes
# - Parse out Genres - map function?
# - Parse out directors/writers/producers
# - Actors???
# - turn all budget info into float64 in terms of millions
#
# +
#Isolate and clean Popularity
#Link popularity, RT freshness, IMDB vount data
# -
# Import required libraries
import pandas as pd
import numpy as np
import seaborn as sns
import matplotlib.pyplot as plt
from difflib import SequenceMatcher
# %matplotlib inline
# +
# Import all data files. Key to what's in files included in README at top of notebook.
professional_names_df = pd.read_csv('zippedData/imdb.name.basics.csv.gz')
title_key_df = pd.read_csv('zippedData/imdb.title.akas.csv.gz')
imdb_genres_df = pd.read_csv('zippedData/imdb.title.basics.csv.gz')
box_gross_df = pd.read_csv('zippedData/bom.movie_gross.csv.gz')
crew_df = pd.read_csv('zippedData/imdb.title.principals.csv.gz')
directors_df = pd.read_csv('zippedData/imdb.title.crew.csv.gz')
imdb_ratings_df = pd.read_csv('zippedData/imdb.title.ratings.csv.gz')
rt_details_df = pd.read_csv('zippedData/rt.movie_info.tsv.gz',delimiter='\t')
rt_reviews_df = pd.read_csv('zippedData/rt.reviews.tsv.gz', compression='gzip', delimiter='\t', encoding='iso-8859-1')
tmdb_df = pd.read_csv('zippedData/tmdb.movies.csv.gz')
budget_df = pd.read_csv('zippedData/tn.movie_budgets.csv.gz')
# -
def clean_title(title):
    '''
    Normalize a title string for matching across datasets.

    Lower-cases the title, discards everything from the first parenthesised
    word onward (e.g. a "(2010)" year suffix — the year is re-attached
    during data cleaning), strips ambiguous punctuation, expands "&" and
    "ep.", and returns the remaining words in TitleCase with no spaces.
    '''
    # Applied in order; '&' and 'ep.' must run before the '.'/':' strips.
    _SUBSTITUTIONS = (
        ('?', ''), ('/', ''), ('!', ''), ('–', ''), ('-', ''),
        ('&', 'and'), ('ep.', 'episode'), (':', ''), (',', ''),
        ('.', ''), ("'", ''), ('`', ''),
    )
    normalized = ''
    for word in title.lower().split():
        if word.startswith('('):
            # Parenthesised token: drop it and everything after it.
            break
        for old, new in _SUBSTITUTIONS:
            word = word.replace(old, new)
        normalized += word.title()
    return normalized
def separate_values(string):
    '''
    Split a comma-separated string into a list of values.

    Non-string inputs (e.g. NaN from pandas) are returned unchanged so the
    function can be mapped safely over columns containing missing data.
    '''
    # isinstance is the idiomatic type check (the original used type() ==).
    if isinstance(string, str):
        return string.split(',')
    return string
def clean_money(string):
    '''
    Convert a currency string such as '$1,234.56' into a float.

    Non-string inputs (already-numeric values or NaN) are returned
    unchanged so the function can be mapped over mixed columns.
    '''
    # isinstance is the idiomatic type check (the original used type() ==).
    if isinstance(string, str):
        return float(string.replace('$', '').replace(',', ''))
    return string
# +
# Creates the IMDB table to add to other tables. Includes title_id for merging as well as genres
imdb_genres_df.rename(columns={'tconst':'title_id'},inplace=True)
imdb_df = title_key_df.merge(imdb_genres_df,how='left',on='title_id')
imdb_df = imdb_df.loc[:,['title_id','title','primary_title','start_year','runtime_minutes','genres']]
imdb_df['clean_title'] = imdb_df.title.map(lambda x: clean_title(x))
#Renames columns to clean. Splits genres into a list that can be parsed later.
imdb_df.rename(columns={'start_year':'year','runtime_minutes':'runtime'},inplace=True) #Change year into string?
imdb_df['clean_title'] = imdb_df['clean_title'] + " " + imdb_df['year'].astype(str)
# +
# #Join box office gross to IMDB t_const - #Start with 3387 - goes to 2523
# box_gross_df['clean_title'] = box_gross_df.title.map(lambda x: clean_title(x))
# box_gross_df['clean_title'] = box_gross_df['clean_title'] box_gross_df['year'].astype(str),sep=' ')
# box_gross_tconst_df = box_gross_df.merge(imdb_df, how='left',on='clean_title')
# box_gross_tconst_df.drop_duplicates(inplace=True)
# box_gross_tconst_df.dropna(subset=['title_id'], inplace=True)
# # box_gross_tconst_df.drop_duplicates(subset=['title_id'], inplace=True)
# # box_gross_tconst_df.drop_duplicates(subset=['title_x'], inplace=True)
# box_gross_tconst_df.drop(columns=['title_y','year_y'], inplace = True)
# box_gross_tconst_df.rename(columns={'title_x':'title','year_x':'year'},inplace=True)
# #Fixes foreign gross from string to float
# box_gross_tconst_df.foreign_gross = box_gross_tconst_df.foreign_gross.map(lambda x: clean_money(x) if type(x) == str else x)
# box_gross_tconst_df.foreign_gross.astype('float64')
# box_gross_final = box_gross_tconst_df
# +
# def similar(a, b):
# ratio = SequenceMatcher(None, a, b).ratio()
# return ratio >= .9
# +
#Joins budget table with IMDB t-const - Goes from 5782 records to 4069
budget_df.rename(columns={'movie':'title'},inplace=True)
budget_df['year'] = budget_df.release_date.map(lambda x: x.split()[-1])
budget_df['clean_title'] = budget_df.title.map(lambda x: clean_title(x))
budget_df['clean_title'] = budget_df['clean_title'] + ' ' + budget_df['year'].astype(str)
budget_tconst_df = budget_df.merge(imdb_df, how='left',on='clean_title')
budget_tconst_df.drop_duplicates(inplace=True)
budget_tconst_df.drop(columns=['title_y','year_y'],inplace = True)
budget_tconst_df.rename(columns={'title_x':'title','year_x':'year'},inplace=True)
# budget_tconst_df.drop_duplicates(subset='title_id',inplace = True)
#Turns money columns into floats from objects
budget_tconst_df['production_budget'] = budget_tconst_df.production_budget.map(lambda x: clean_money(x))
budget_tconst_df['domestic_gross'] = budget_tconst_df.domestic_gross.map(lambda x: clean_money(x))
budget_tconst_df['worldwide_gross'] = budget_tconst_df.worldwide_gross.map(lambda x: clean_money(x))
final_budget_df = budget_tconst_df
# +
# Create an ROI column and a boolean profitability flag, then plot the
# top-20 films by absolute ROI.
# NOTE(review): in The Numbers budget data, `worldwide_gross` typically
# already includes `domestic_gross`, so adding both likely double-counts
# domestic revenue — TODO confirm against the source dataset.
final_budget_df['ROI'] = final_budget_df['worldwide_gross'] + final_budget_df['domestic_gross'] - final_budget_df['production_budget']
final_budget_df['ROI%'] = final_budget_df['ROI']/final_budget_df['production_budget']
budget_ROI_df = final_budget_df.sort_values(by='ROI',ascending=False)
budget_ROI_df['Profitable'] = budget_ROI_df.ROI.map(lambda x: x>0)
# Top 20 titles and their ROI values for the horizontal bar chart
x_values = list(budget_ROI_df.title[:20])
y_values = list(budget_ROI_df.ROI[:20])
plt.figure(figsize=(10,7))
sns.barplot(y=x_values, x=y_values, data = final_budget_df, orient='h')
plt.xlabel('ROI in Dollars')
plt.title('Movie by ROI');
# +
final_budget_df['ROI%'] = final_budget_df['ROI']/final_budget_df['production_budget']
budget_ROI_df = budget_ROI_df.sort_values(by='ROI%',ascending=False)
x_values = list(budget_ROI_df.title[:20])
y_values = list(budget_ROI_df['ROI%'][:20])
plt.figure(figsize=(10,7))
sns.barplot(y=x_values, x=y_values, data = final_budget_df, orient='h')
plt.xlabel('%ROI Compared to Production Budget')
plt.title('Movie by ROI')
# +
#Joins the TMDB table, which includes information regarding popularity by film.
# Build the same "<clean title> <year>" join key used for the other tables.
tmdb_df['clean_title'] = tmdb_df.title.map(lambda x: clean_title(x))
tmdb_df['year'] = tmdb_df.release_date.map(lambda x: x.split('-')[0])
tmdb_df['clean_title'] = tmdb_df['clean_title'] + ' ' + tmdb_df['year'].astype(str)
# Inner join: keep only movies present in both TMDB and IMDB.
tmdb_tconst_df = tmdb_df.merge(imdb_df,how='inner',on='clean_title')
tmdb_tconst_df.drop_duplicates(subset='title_id',inplace=True)
# Keep only the TMDB metrics plus the join keys.
tmdb_clean_df = tmdb_tconst_df.loc[:, ['popularity','vote_average','vote_count','clean_title','title_id']]
# -
#Creates Super_df that contains ROI, popularity, IMDB rating and vote counts.
imdb_ratings_df.rename(columns={'tconst':'title_id'},inplace=True)
super_df = budget_ROI_df.merge(tmdb_clean_df,how='left',on='title_id')
super_df = super_df.merge(imdb_ratings_df,how='left',on='title_id')
super_df.drop(columns=['clean_title_y'],inplace=True)
# +
#Creates Linear Regression showing relationship between production budget and popularity
plt.figure(figsize=(7,7))
sns.regplot(x=super_df.production_budget, y=super_df.popularity, data=super_df)
plt.title('Production Budget vs. Popularity');
plt.savefig('budget_linear.png')
# +
# Flatten the comma-separated genre strings from super_df into one flat list,
# skipping NaN entries (non-strings), then dedupe into a set of unique genres.
genre_list = list(super_df.genres)
genre_list2 = [
    genre
    for item in genre_list
    if isinstance(item, str)
    for genre in item.split(',')
]
genre_set = set(genre_list2)
# -
# drops 36 films that do not have genres
super_df.dropna(subset=['genres'],inplace=True)
# One boolean column per genre.  Split on ',' before testing membership:
# a plain `genre in x` is a *substring* test and would mislabel, e.g.,
# every 'Musical' film as 'Music'.
for genre in list(genre_set):
    super_df[genre] = super_df.genres.map(lambda x, g=genre: g in x.split(','))
# splits the genres into a list, counts the list and adds a column with the number of genres in list
super_df['genres'] = super_df.genres.map(separate_values)
super_df['genre_count'] = super_df.genres.map(len)
# Groups by number of genres and gives the mean popularity, rating and vote average
super_df[['ROI','popularity','averagerating','vote_average','genre_count']].groupby(by=['genre_count']).mean().sort_values(by='ROI')
# Same grouping, but the median (more robust to blockbuster outliers)
super_df[['ROI','popularity','averagerating','vote_average','genre_count']].groupby(by=['genre_count']).median().sort_values(by='ROI')
#Creates separate dataframes for each genre count category for cleaning and manipulation
# NOTE(review): these are .loc slices of super_df; the inplace edits below may
# raise SettingWithCopyWarning — add .copy() if pandas complains.
single_genre = super_df[['title','ROI','popularity','averagerating','vote_average','genre_count']].loc[super_df.genre_count == 1].sort_values(by=['ROI'],ascending=False)
double_genre = super_df[['title','ROI','popularity','averagerating','vote_average','genre_count']].loc[super_df.genre_count == 2].sort_values(by=['ROI'],ascending=False)
triple_genre = super_df[['title','ROI','popularity','averagerating','vote_average','genre_count']].loc[super_df.genre_count == 3].sort_values(by=['ROI'],ascending=False)
# +
#Takes the top 50 entries per category
single_genre.drop_duplicates(subset=['title'],inplace=True)
single_short = single_genre.iloc[:50]
double_genre.drop_duplicates(subset=['title'],inplace=True)
double_short = double_genre.iloc[:50]
triple_genre.drop_duplicates(subset=['title'],inplace=True)
triple_short = triple_genre.iloc[:50]
# -
#Joins the dataframes into a single dataframe with 50 entries per genre grouping
genre_df = pd.concat([single_short,double_short,triple_short])
# +
# Scatter of IMDB rating vs ROI, colored by number of genres.
plt.figure(figsize=(7,7))
sns.scatterplot(x=genre_df.averagerating,y=genre_df.ROI,data=genre_df,hue='genre_count')
plt.title('IMDB Rating vs. ROI by Genre Count');
# +
# Scatter of TMDB popularity vs ROI, colored by number of genres.
plt.figure(figsize=(7,7))
sns.scatterplot(x=genre_df.popularity,y=genre_df.ROI,data=genre_df,hue='genre_count')
plt.title('Popularity vs. ROI by # of Genres');
# +
# NOTE(review): vote_average comes from the TMDB table (tmdb_clean_df), so
# the "IMDB" wording in this title/label looks wrong — confirm.
plt.figure(figsize=(7,7))
sns.scatterplot(x=genre_df.vote_average,y=genre_df.ROI,data=genre_df,hue='genre_count')
plt.title('IMDB Vote Average vs ROI Split by Cross-Genre')
plt.xlabel('IMDB Rating')
plt.ylabel('Return on Investment in $');
# -
# Used this to figure out 25% quartile
super_df.vote_count.describe()
# Keep only movies with enough votes for the ratings to be meaningful.
# vote_count comes from TMDB (via tmdb_clean_df); numvotes comes from IMDB.
super_vote_df = super_df.loc[(super_df.vote_count > 500) & (super_df.numvotes > 20000)]
# Relabel the quality metrics by their true source: 'vote_average' is TMDB's
# rating and 'averagerating' is IMDB's (the original mapping had them swapped).
super_vote_df.rename(columns={'vote_average':'TMDB Quality','averagerating':'IMDB Quality','clean_title_x':'clean_title'},inplace=True)
def genre_counts(dataframe, column):
    '''
    Sort *dataframe* by *column* (descending), keep the top 100 movies,
    count how many of them carry each genre flag, and draw a horizontal
    bar chart of the genres ranked by that count.
    '''
    top = dataframe.sort_values(by=[column], ascending=False).iloc[:100]
    # Genre columns are booleans, so a column-wise sum is a movie count.
    counts = top[list(genre_set)].sum().to_frame('count')
    counts = counts.sort_values(by='count', ascending=False)
    plt.figure(figsize=(7, 7))
    plt.title(f'Genres ordered by {column}')
    plt.xlabel('Number of Movies')
    plt.ylabel('Genres')
    return sns.barplot(y=list(counts.index), x=list(counts['count']), data=counts, orient='h');
# Genre bar charts for each metric; savefig captures the current figure.
genre_counts(super_vote_df,'ROI');
genre_counts(super_vote_df,'IMDB Quality');
genre_counts(super_vote_df,'TMDB Quality');
plt.savefig('TMDB_quality.png')
genre_counts(super_vote_df,'popularity');
plt.savefig('popularity.png')
genre_counts(super_vote_df,'ROI%');
plt.savefig('cheap.png')
imdb_df.head()
crew_df.head()
# Restrict crew to on-screen talent + producer/writer/director and attach names.
crew_df.rename(columns={'tconst':'title_id'},inplace=True)
crew_df = crew_df.loc[(crew_df.category == 'actor') | (crew_df.category == 'actress') | (crew_df.category == 'producer') | (crew_df.category == 'writer') | (crew_df.category == 'director')]
crew_df = crew_df.merge(professional_names_df, how='left', on='nconst')
crew_df.drop(columns=['characters','job','nconst','ordering','birth_year','death_year','primary_profession','known_for_titles'],inplace=True)
super_crew_df = super_vote_df.merge(crew_df,how='left',on='title_id')
#Builds new Dataframe using the IMDB data
clean_imdb_df = imdb_df.loc[imdb_df.primary_title == imdb_df.title]
clean_imdb_df = clean_imdb_df.merge(imdb_ratings_df, how='left', on='title_id')
clean_imdb_df.drop_duplicates(inplace=True)
directors_df.rename(columns={'tconst':'title_id'},inplace=True)
clean_imdb_df = clean_imdb_df.merge(directors_df, how='left', on='title_id')
# +
#Creates a dataframe from the Crew Dataframe that only includes actors, actresses, producers, writers, directors
# NOTE(review): crew_df was already renamed/filtered above, so this rename is a
# no-op and the filter re-applies the same mask — scratch-notebook duplication.
crew_df.rename(columns={'tconst':'title_id'},inplace=True)
non_directors_df = crew_df.loc[(crew_df.category == 'actor') | (crew_df.category == 'actress') | (crew_df.category == 'producer') | (crew_df.category == 'writer') | (crew_df.category == 'director')]
#Combines IMDB Dataframe with the slimmed Crew dataframe and only focuses on movies with more than 500 votes on IMDB
clean_imdb_df = non_directors_df.merge(clean_imdb_df, how='left', on='title_id')
clean_imdb_df = clean_imdb_df.loc[clean_imdb_df.numvotes > 500]
#Combines Dataframes to add names to crew members
# NOTE(review): 'nconst' was dropped from crew_df above, so this merge key may
# no longer exist depending on cell execution order — verify.
clean_imdb_df = clean_imdb_df.merge(professional_names_df, how='left', on='nconst')
#Drops useless columns
clean_imdb_df = clean_imdb_df.drop(columns=['job','characters','ordering','title','birth_year','death_year','primary_profession','known_for_titles'])
#Combines Dataframe with the TMDB dataframe that has already been cleaned
clean_imdb_df = clean_imdb_df.merge(tmdb_clean_df, how='left', on='title_id')
# -
# Mean metrics per crew member, best-rated first.  NOTE(review): .mean() on the
# whole frame averages non-numeric columns too and raises on pandas >= 2.0.
clean_imdb_df.groupby(by=clean_imdb_df['primary_name']).mean().sort_values(by='averagerating',ascending=False)
clean_imdb_df.head()  # was a dangling `clean_imdb_df.` (SyntaxError); completed as a harmless preview
#Creates different dataframes for each of the major crew types for further analysis. Treats actors and actresses equally
# Each is a .loc slice of super_crew_df filtered on the IMDB 'category' field.
actors = super_crew_df.loc[(super_crew_df.category == 'actor') | (super_crew_df.category == 'actress')]
directors = super_crew_df.loc[super_crew_df.category == 'director']
writers = super_crew_df.loc[super_crew_df.category == 'writer']
producers = super_crew_df.loc[super_crew_df.category == 'producer']
# +
#Add Formatting
def top_crew(dataframe, column):
    '''
    Takes a crew dataframe dictionary ({'df': DataFrame, 'name': label}),
    averages the *column* metric per crew member, and bar-plots the top 30.
    Returns the seaborn Axes.
    '''
    # Fix: `name` was commented out but still used in the title below (NameError).
    name = dataframe['name']
    # Aggregate only the metric column: .mean() on the whole frame would try
    # to average non-numeric columns (names, titles) as well.
    df = dataframe['df'].groupby('primary_name')[column].mean().sort_values(ascending=False).to_frame(column)
    df = df.iloc[:30]
    plt.figure(figsize=(5,10))
    x_values = list(df[column])
    y_values = list(df.index)
    plt.title(f'{name} by {column}')
    return sns.barplot(x = x_values, y = y_values, data = df)
# -
# top_crew expects a {'df': ..., 'name': ...} dictionary (see its docstring);
# the original passed bare DataFrames and the undefined name `writers_df`.
top_crew({'df': directors, 'name': 'Directors'},'ROI%');
plt.savefig('cheap_crew.png')
top_crew({'df': writers, 'name': 'Writers'},'ROI%');
# +
# ADD ROI TO clean_imdb_df
# Add columns of True/False for top pop etc.
# Rename TMDB rating + IMDB Rating
# Make sure to include movie count minimum in the recommendations
# -
def create_top_df(list_of_dfs,column_name):
    '''
    Takes in a list of dataframe dictionaries of the format
    {'df': dataframe, 'name': 'name of dataframe'} as well as a metric column.
    Returns a dataframe whose columns list the top-30 crew names per category,
    ranked by the mean of that metric.
    '''
    final_list = []
    for dataframe in list_of_dfs:
        name = dataframe['name']
        # Average only the metric column per person: .mean() over the whole
        # frame would also average non-numeric columns, which raises a
        # TypeError on pandas >= 2.0.
        means = dataframe['df'].groupby('primary_name')[column_name].mean()
        top = means.sort_values(ascending=False).iloc[:30]
        df = top.reset_index().loc[:, ['primary_name']]
        df.rename(columns={'primary_name': name}, inplace=True)
        final_list.append(df)
    # Side-by-side join on the 0..29 rank index; label the axis by the metric.
    final_df = final_list[0].join(final_list[1:])
    final_df.rename_axis(f"By {column_name.title()}", inplace=True)
    return final_df
# Per-category input for create_top_df; the 'axes' entries look intended for a
# future subplot grid and are unused by create_top_df itself.
df_list = [
{'df':actors,'name':'Actors','axes':[0,0]},
{'df': directors, 'name': 'Directors','axes':[0,1]},
{'df': writers,'name':'Writers','axes':[1,0]},
{'df': producers,'name':'Producers','axes':[1,1]}
]
# Manual walk-through of the create_top_df steps, one crew category at a time.
# (The original referenced the undefined names actors_mf_df / directors2_df /
# writers_df; use the actors / directors / writers frames built above, and
# aggregate only the 'popularity' column to avoid averaging non-numeric data.)
df = actors.groupby('primary_name')['popularity'].mean().sort_values(ascending=False).iloc[:30].to_frame().reset_index()
df = df.loc[:, ['primary_name']]
df.rename(columns={'primary_name':'actors'.title()},inplace=True)
actors.head()
df_2 = directors.groupby('primary_name')['popularity'].mean().sort_values(ascending=False).iloc[:30].to_frame().reset_index()
df_2 = df_2.loc[:, ['primary_name']]
df_2.rename(columns={'primary_name':'directors'.title()},inplace=True)
df_3 = writers.groupby('primary_name')['popularity'].mean().sort_values(ascending=False).iloc[:30].to_frame().reset_index()
df_3 = df_3.loc[:, ['primary_name']]
df_3.rename(columns={'primary_name':'writers'.title()},inplace=True)
final_df = df.join([df_2,df_3])
# rename_axis without inplace returns the renamed copy for notebook display.
final_df.rename_axis("By Popularity")
# Top-30 tables per metric ('TMDB Quality' exists after the super_vote_df
# rename cell has run).
create_top_df(df_list,'popularity')
create_top_df(df_list,'ROI%')
create_top_df(df_list, 'TMDB Quality')
| Scratch Notebook.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# name: python3
# ---
# + [markdown] id="Zwbww_v00Txx"
# #**Visualising Rocket's Simulation Results in Python**
#
# ---
# 2nd June 2021
#
# Author : [<NAME>](https://www.linkedin.com/in/naimish-balaji-a6182b180/)
#
#
# [Repo](https://github.com/Naimish240/rocket-lander)
#
# ---
# Libraries Used:
# - mat73
# - Pandas
# - Numpy
# - MatPlotLib
#
# ---
#
# This is a companion notebook to briefly summarise the results of the Simulink Simulation. The variables logged are as follows:
#
# - h (height at which the rocket is flying at)
# - x (downrange position of the rocket)
# - v (velocity of the rocket)
# - tht (pitch angle of the rocket)
# - t (time)
#
#
# **NOTE**:
#
# The flight was broken up into two parts, one being powered liftoff and other being powered descent. The PID controller was only active during the powered descent phase.
# + [markdown] id="5oizqSeN1axx"
# # Setup
# + [markdown] id="uGQUw-yN0UaI"
# First, we need to install mat73 in order to read the data from Simulink.
# + colab={"base_uri": "https://localhost:8080/"} id="kKcPPc3dvk8I" outputId="4e48fdd0-77f6-400b-9c0f-7a8a4fe7d072"
# !pip install mat73
# + [markdown] id="jTqkrHvw0U4x"
# As I had already uploaded the simulation results to a Google Drive folder, we are downloading the same here.
# + colab={"base_uri": "https://localhost:8080/"} id="V7j8T6msyErq" outputId="38e0224b-347f-43a3-a156-5ee946a311fb"
# !gdown --id 1PIhZC3UP20AB7CAROLkG0wDm9btuGUAk
# !unzip data.zip
# !ls
# + [markdown] id="d0N-F9HU0WP-"
# Now that we've downloaded and extracted the required files, we can continue ahead.
# + [markdown] id="C-22qrre1eJ8"
# # Loading the Files
#
# Before we can start the visualisations, we need to load the files into memory. We shall do that here.
# + id="l1CSzOMgvtKQ"
# Import statements
import mat73
import pandas as pd
import matplotlib.pyplot as plt
# + id="AvtpVrz7yyRN"
# Load four variables
# Each .mat file (MATLAB v7.3 / HDF5 format, hence mat73) holds one logged signal.
h = mat73.loadmat('./h.mat')
t = mat73.loadmat('./timer.mat')
x = mat73.loadmat('./x.mat')
v = mat73.loadmat('./v.mat')
tht = mat73.loadmat('./theta.mat')
# + colab={"base_uri": "https://localhost:8080/"} id="tK2JJFBfzU5e" outputId="6a22e8f7-573f-45cd-e5aa-880ed08b68f3"
# Verify dict keys
print(h.keys())
print(t.keys())
print(x.keys())
print(v.keys())
print(tht.keys())
# + id="is0rluRozYKw"
# Index '0' of all arrays is the timestamp
# Index '1' stores the values we want
h_vals = h['h'][1]
t_vals = t['t'][1]
x_vals = x['x'][1]
v_vals = v['v'][1]
tht_vals = tht['tht'][1]
# + id="bwfZDu4uzZiR"
# Create DataFrame
df = pd.DataFrame()
# Insert into dataframe (one column per logged signal, aligned by sample index)
df['t'] = t_vals
df['x'] = x_vals
df['h'] = h_vals
df['v'] = v_vals
df['tht'] = tht_vals
# + [markdown] id="eK6ScH6a44cN"
# ## Check if values are loaded properly
# + colab={"base_uri": "https://localhost:8080/", "height": 425} id="SdzO0vZl44Ok" outputId="13578583-d694-4326-942c-45cef685d218"
# Sanity check dataframe
print(df.info())
df.head()
# + [markdown] id="nrCt_DZU2clL"
# From this, we can see that the data has been loaded into memory successfully.
# + [markdown] id="AYyaXO5X1ic6"
# # Understanding the Data
# We briefly look at the logged data to make preliminary inferences.
# + [markdown] id="mkuNvBmt3keA"
# ## Initial Flight Conditions
# + colab={"base_uri": "https://localhost:8080/"} id="EQ39n_5I3MQX" outputId="20c40b4e-9614-498f-bb2c-f10d7d3797a6"
# Initial Conditions
df.iloc[0, :]
# + [markdown] id="zfxyuC5m3ivo"
# From this, we can see that the rocket started with an initial pitch angle of 1.57 rad, which implies the rocket started up standing upright.
# + [markdown] id="KkAh2IqO3m5O"
# ## Final Flight Conditions
# + colab={"base_uri": "https://localhost:8080/"} id="I3wEFk2N3YPv" outputId="6b6d2023-c534-482a-bbde-9cdbe80af6d8"
# Final Conditions
df.iloc[-1, :]
# + [markdown] id="EoDeDhz94E4a"
# From this, we can see that:
#
# - The rocket reached the ground successfully.
# - The rocket landed with a pitch angle of about 98 degrees.
# - The rocket landed with a velocity of 162.69 m/s.
#
# From this, we can make the following inferences:
#
# 1. The PID controller works, as indicated by the pitch change.
# 2. The rocket is falling at a high velocity, indicating we should increase the fuel reserved for landing.
# + [markdown] id="v6x7dejR3po6"
# ## Other Inferences
# + colab={"base_uri": "https://localhost:8080/", "height": 297} id="V86LgwH4z_8q" outputId="800e1b08-51fa-4b22-a06c-cd7c70339568"
df.describe()
# + [markdown] id="tsROYnNP2iHl"
# From the above table, we can make the following inferences:
#
# 1. The maximum altitude attained by the rocket was 32 km.
# 2. The maximum velocity of the rocket was 1216.17 m/s.
# 3. The maximum pitch angle of the rocket was pi radians (i.e., the rocket briefly was travelling horizontally)
# 4. The rocket travelled 119.19 km downrange from the launchpad.
# 5. The rocket was in motion for 275.71 seconds.
# + [markdown] id="<KEY>"
# # Visualising the Data
# We plot the graphs to make more comprehensive inferences.
# + [markdown] id="qSd_3Mgn82I9"
# ## Altitude vs Time
# From this graph, we can infer that the rocket ascended and descended smoothly across the time of flight.
# + colab={"base_uri": "https://localhost:8080/", "height": 295} id="X50lRjKHzdvJ" outputId="c6efd125-2bca-4bad-9a7b-48b670968f9e"
# Visualise h vs t
df.plot(x='t', y='h', style='.')
plt.xlabel("time")
plt.ylabel("altitude")
plt.title("altitude vs time")
plt.show()
# + [markdown] id="2KlGq6Xw864j"
# ## Velocity vs Time
# From this graph, we can see that the velocity of the rocket was decreasing towards the end of the simulation. So, by adjusting the landing fuel reserves, we might be able to bring the touchdown velocity down even further.
# + colab={"base_uri": "https://localhost:8080/", "height": 295} id="cRPEccfHzgVP" outputId="6facd4fd-9e1d-4603-b0b4-b1e13945994d"
# Visualise v vs t
df.plot(x='t', y='v', style='.')
plt.xlabel("time")
plt.ylabel("velocity")
plt.title("velocity vs time")
plt.show()
# + [markdown] id="rzZmBqX_9AKS"
# ## Velocity vs Altitude
#
# From this graph, we can see that the velocity of the rocket was decreasing as it started approaching the ground, implying the PID controller was working nominally.
# + colab={"base_uri": "https://localhost:8080/", "height": 295} id="x3sVhNIp6cc4" outputId="79c6e3de-5269-4457-bd87-5927ea7dc0fc"
# Visualise v vs h
df.plot(x='v', y='h', style='.')
plt.xlabel("velocity")
plt.ylabel("altitude")
plt.title("altitude vs velocity")
plt.show()
# + [markdown] id="nsL9lngH9FLO"
# ## Downrange Position vs Time
# From this graph, we can see that the rocket was going downrange across the entire duration of the flight, and briefly (towards the middle of the flight) the rocket had a strong x-component of velocity.
# + colab={"base_uri": "https://localhost:8080/", "height": 295} id="aug9ug8CzjOd" outputId="bd4babd1-40ae-4a9d-f34a-c77f5946be62"
# Visualise x vs t
df.plot(x='t', y='x', style='.')
plt.xlabel("time")
plt.ylabel("downrange position")
plt.title("downrange position vs time")
plt.show()
# + [markdown] id="NPk3mJvD9KQv"
# ## Pitch Angle Vs Time
# From this graph, we can see that the pitch-over maneuver worked successfully, as is indicated by the pitch angle jumps from 0 to pi radians. We can also see that the PID controller was working nominally, correcting the pitch angle frequently to ensure the rocket stays upright during the second half of flight.
# + colab={"base_uri": "https://localhost:8080/", "height": 295} id="p3YQS4ELz3ZP" outputId="ea41dce9-6c94-45aa-c9d3-94abba64421b"
# Visualise tht vs t
df.plot(x='t', y='tht', style='.')
plt.xlabel("time")
plt.ylabel("pitch angle")
plt.title("pitch angle vs time")
plt.show()
# + [markdown] id="wkLn7Mt07wK7"
# ## Velocity vs Pitch Angle
# The left half of the plot is from the powered ascent phase, and the right half from the powered descent phase.
#
# From this plot, we can clearly see the pitch over maneuver the rocket underwent, and also see how the PID controller was working to get the rocket as stable vertical as possible.
#
#
# + colab={"base_uri": "https://localhost:8080/", "height": 295} id="V2HCnKdy7PAP" outputId="f0c4264c-574d-4e2a-e187-61bbdbab5b06"
# Visualise v vs tht
df.plot(x='tht', y='v', style='.')
plt.xlabel("pitch angle")
plt.ylabel("velocity")
plt.title("velocity vs pitch angle")
plt.show()
# + [markdown] id="5c0rRPx-5u3-"
# # Conclusions
#
# - The PID controller works, but can be improved significantly.
# - The split up of fuel between the two stages can be modified to improve performance.
# + [markdown] id="sXYeIcOn9ZOV"
# # Future Work
#
# - Using Reinforcement Learning to identify better values for the PID controller.
# - Using a more robust atmospheric model to calculate Drag.
# - I assumed constant mass flow rate and 100% engine efficiency for the rocket for this simulation. In the future, we could look into more realistic models for the same.
| notebook/VisualiseRocketGraphs.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# language: python
# name: python3
# ---
# # Supply Chain Network Design
# ## Case Study: SunOil Facility Allocation
from IPython.display import Image
from pulp import * # Import PuLP modeler functions
import pandas as pd
# SunOil, a manufacturer of petrochemical products with worldwide sales. Shican, as a supply chain manager, is considering establishing facilities to meet demand. One possibility is to set up a facility in a few of the regions. Shican made an investigation of the following five regions- North American, South American, Europe, Africa, and Asia. The data is collected in the Table 1. Shican has to decide the location of the facility and allocate the demand to minimize the cost.
# 1. What is the annual demand of each region?
# 2. Which part shows us the production/transportation cost?
# 3. Which part shows us the fixed cost associated with the capacities?
# 4. What is the difference between the variable cost and the fixed cost?
Image(filename='picture1.png',width=800,height=800)
# ### Objective function (minimize total fixed plus variable cost)
# $\operatorname{Min}$ Cost $=\sum_{i=1}^{n} f_{i} \cdot y_{i}+\sum_{i=1}^{n} \sum_{j=1}^{m} c_{i, j} \cdot x_{i, j}$
# ### boundary conditions
# $\sum_{i=1}^{n} x_{i, j}=D_{j} \quad j=1, \ldots m$
#
# $\begin{array}{ll}\sum_{j=1}^{m} x_{i, j} \leq K_{i} \cdot y_{i} & i=1, \ldots n \end{array}$
#
# $y_{i} \in\{0,1\}$
# ## Case 1:
#
#
# Consolidate plants in a few regions
# - Advantages: improve economies of scale
# - Disadvatages: increases transportation cost and duties
#
# +
# Creates a list of all the supply nodes (candidate plant regions)
Warehouses = ["N.America", "S.America", "Europe", "Asia", "Africa"]

# Annual capacity (units) of a low- vs high-capacity plant per region
supply_low = {"N.America": 10,
              "S.America": 10,
              "Europe": 10,
              "Asia": 10,
              "Africa": 10}

supply_high = {"N.America": 20,
               "S.America": 20,
               "Europe": 20,
               "Asia": 20,
               "Africa": 20}

# Fixed cost of building a low-capacity plant in each region
supply_cost_low = {"N.America": 6000,
                   "S.America": 4500,
                   "Europe": 6500,
                   "Asia": 4100,
                   "Africa": 4000}

# Fixed cost of a high-capacity plant.  NOTE: the name keeps the original
# 'hign' spelling because later cells reference it.
supply_cost_hign = {"N.America": 9000,
                    "S.America": 6750,
                    "Europe": 9750,
                    "Asia": 6150,
                    "Africa": 6000}

# Creates a list of all demand nodes (demand regions)
Bars = ["N.America", "S.America", "Europe", "Asia", "Africa"]

# Creates a dictionary for the number of units of demand for each demand node
demand = {"N.America":12,
          "S.America":8,
          "Europe":14,
          "Asia":16,
          "Africa":7 }

# Production + transportation cost per unit from each supply region (row) to
# each demand region (column).  The original also read 'cost.csv' here and
# then immediately overwrote the result with this literal table; that dead
# read is removed so the notebook no longer depends on the file existing.
costs = [ #Bars
         [81,92,101,130,115],
         [117,77,108,98,100],
         [102,105,95,119,111],#A Warehouses
         [115,125,90,59,74], #B
         [142,100,103,105,71]
         ]

# The cost data is made into a nested dictionary keyed as costs[warehouse][bar]
costs = makeDict([Warehouses,Bars],costs,0)
# +
# Creates the 'prob' variable to contain the problem data
# (the "Beer Distribution Problem" label is leftover from the PuLP example
# this model was adapted from; it is really the SunOil location model)
prob = LpProblem("Beer Distribution Problem",LpMinimize)

# Creates a list of tuples containing all the possible routes for transport
Routes = [(w,b) for w in Warehouses for b in Bars]

# A dictionary called 'Vars' is created to contain the referenced variables(the routes)
vars = LpVariable.dicts("Route",(Warehouses,Bars),0,None)
# Binary decisions per region: open a low-capacity plant / a high-capacity plant
vars2 = LpVariable.dicts("Built",Warehouses,cat='Binary')
vars3 = LpVariable.dicts("Built2",Warehouses,cat='Binary')

# The objective function is added to 'prob' first
# (variable shipping cost + fixed cost of whichever plants are opened)
prob += lpSum([vars[w][b]*costs[w][b] for (w,b) in Routes]) + lpSum([vars2[w]*supply_cost_low[w] for w in Warehouses]) + lpSum([vars3[w]*supply_cost_hign[w] for w in Warehouses]), "Sum_of_Transporting_Costs"

# The supply maximum constraints are added to prob for each supply node (warehouse)
# (shipments out of a region cannot exceed the capacity of its opened plants)
for w in Warehouses:
    prob += lpSum([vars[w][b] for b in Bars])<=(lpDot(supply_low[w],vars2[w]) + lpDot(supply_high[w],vars3[w])), "Sum_of_Products_out_of_Warehouse_%s"%w
#     prob += LpConstraint(e=(vars2[w] + vars3[w]), sense=LpConstraintGE, rhs=1)

# The demand minimum constraints are added to prob for each demand node (bar)
for b in Bars:
    prob += lpSum([vars[w][b] for w in Warehouses])==demand[b], "Sum_of_Products_into_Bar%s"%b

# The problem data is written to an .lp file
prob.writeLP("BeerDistributionProblem.lp")

# The problem is solved using PuLP's choice of Solver
prob.solve()

# The status of the solution is printed to the screen
print("Status:", LpStatus[prob.status])

# Each of the variables is printed with it's resolved optimum value
for v in prob.variables():
    print(v.name, "=", v.varValue)

# The optimised objective function value is printed to the screen
print("Total Cost of Transportation = ", value(prob.objective))
# -
# Thus, we obtain the following results:
Image(filename='picture3.png',width=800,height=800)
# 1. The lowest-cost network will have facilities located in South America, Asia, and Africa.
# 2. A high-capacity plant should be planned in each region.
# 3. The plant in South America meets the North America demand, whereas the European demand is met from
# plants in Asia and Africa.
# # Case 2:
# Set up facility in each region
#
# - Advantages: lowers transportation cost
# - Disadvantages: but plants are sized to meet local demand regions.
# Not fully exploit economies of scale.
# In this case, we must rewrite constrants:
#
# $\begin{array}{ll}\sum_{j=1}^{m} x_{i, j} \leq K^{low}_{i} \cdot y^{low}_{i}+ K^{hign}_{i} \cdot y^{hign}_{i} & i=1, \ldots n \end{array}$
#
# $y^{high}_{i},\; y^{low}_{i} \in\{0,1\}$
#
# $y^{high}_{i} + y^{low}_{i} = 1$
# +
# Creates the 'prob' variable to contain the problem data
# (same model as Case 1, rebuilt from scratch for the new constraint set)
prob = LpProblem("Beer Distribution Problem",LpMinimize)

# Creates a list of tuples containing all the possible routes for transport
Routes = [(w,b) for w in Warehouses for b in Bars]

# A dictionary called 'Vars' is created to contain the referenced variables(the routes)
vars = LpVariable.dicts("Route",(Warehouses,Bars),0,None)
vars2 = LpVariable.dicts("Built",Warehouses,cat='Binary')
vars3 = LpVariable.dicts("Built2",Warehouses,cat='Binary')

# The objective function is added to 'prob' first
prob += lpSum([vars[w][b]*costs[w][b] for (w,b) in Routes]) + lpSum([vars2[w]*supply_cost_low[w] for w in Warehouses]) + lpSum([vars3[w]*supply_cost_hign[w] for w in Warehouses]), "Sum_of_Transporting_Costs"

# The supply maximum constraints are added to prob for each supply node (warehouse)
for w in Warehouses:
    prob += lpSum([vars[w][b] for b in Bars])<=(lpDot(supply_low[w],vars2[w]) + lpDot(supply_high[w],vars3[w])), "Sum_of_Products_out_of_Warehouse_%s"%w
    # Case 2 difference: at least one plant (low or high) must be opened in
    # EVERY region, i.e. y_low + y_high >= 1 for each warehouse.
    prob += LpConstraint(e=(vars2[w] + vars3[w]), sense=LpConstraintGE, rhs=1)

# The demand minimum constraints are added to prob for each demand node (bar)
for b in Bars:
    prob += lpSum([vars[w][b] for w in Warehouses])==demand[b], "Sum_of_Products_into_Bar%s"%b

# The problem data is written to an .lp file
prob.writeLP("BeerDistributionProblem.lp")

# The problem is solved using PuLP's choice of Solver
prob.solve()

# The status of the solution is printed to the screen
print("Status:", LpStatus[prob.status])

# Each of the variables is printed with it's resolved optimum value
for v in prob.variables():
    print(v.name, "=", v.varValue)

# The optimised objective function value is printed to the screen
print("Total Cost of Transportation = ", value(prob.objective))
# -
# Consequently, we obtain
Image(filename='picture2.png',width=800,height=800)
# The total cost of transportation in Case 2 is 31547 > 23751 in case 1.
# Thus, the company would like to consolidate facilities in South America, Asia, and Africa, where the labour cost is relatively low. Moreover, consolidation could help
# realize economies of scale.
# Some alternative scenario to try
#
# 1 What if a plant must be built in Europe?
# In this case, add I6>=0 in constraint
#
# 2 What if a plant must be built in every market?
# In this case, add I14:I18>=0 in constraint
#
| Supply Chain Network Design.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python [default]
# language: python
# name: python3
# ---
# + [markdown] slideshow={"slide_type": "slide"}
# # MSDS688 -- Artificial Intelligence
#
# ## Week n - topic
#
# 
#
# description here
#
# Cite: <NAME>. (n.d.). Frolicsome Engines: The Long Prehistory of Artificial Intelligence. Retrieved April 10, 2018, from [https://publicdomainreview.org/2016/05/04/frolicsome-engines-the-long-prehistory-of-artificial-intelligence/](https://publicdomainreview.org/2016/05/04/frolicsome-engines-the-long-prehistory-of-artificial-intelligence/)
# + [markdown] slideshow={"slide_type": "slide"}
# # Review - Concepts and techniques
# + [markdown] slideshow={"slide_type": "slide"}
# # Quiz / Exercise
# + [markdown] slideshow={"slide_type": "slide"}
# # Lecture
# + [markdown] slideshow={"slide_type": "notes"}
# _Note: Start with a promise_
# + [markdown] slideshow={"slide_type": "slide"}
# ## Learning Objectives
#
# 1.
# + [markdown] slideshow={"slide_type": "slide"}
# # Break
#
# ![]()
# + [markdown] slideshow={"slide_type": "slide"}
# # Demonstration
# + [markdown] slideshow={"slide_type": "slide"}
# # Exercise
# + [markdown] slideshow={"slide_type": "notes"}
# _Note: End with humor_
# -
| slides/week-n-template.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: 'Python 3.8.5 64-bit (''pyData'': conda)'
# name: python385jvsc74a57bd0cbdf93e4b08e4d7f581a343ef3794943ed85d3bca0f82a88e5f832e9cb7f1c99
# ---
# # Exercise
import numpy as np
np.random.seed(0)
def array_info(array: np.ndarray) -> None:
    """Print a short summary of *array*: ndim, shape, size, dtype and values."""
    for label in ("ndim", "shape", "size", "dtype"):
        print(f"{label}: {getattr(array, label)}")
    print(f"values:\n{array}\n")
# # Exercise 1
#
# - Create an array (M) with the size 5x5 by sampling from an uniform int distribution $M_{i,j} \in [-10, 10]$
# # Exercise 2
#
# - Create an array (N) with the size 5x5 by sampling from an uniform float distribution $M_{i,j} \in [-10, 10)$
# # Exercise 3
#
# - Count the number of elements of the matrix M and N where the elements are less than 0
# # Exercise 4
#
# - Replace all values of the matrix M and N that are less than 0 by 0
# # Exercise 5
#
# - Stack the matrix M and N on the axis=0 to a new matrix O
# # Exercise 6
#
# - Multiply the matrix O by np.pi
# - Compute the cos for each value of the matrix O
| 2_Numpy/09_Exercise1.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# name: python3
# ---
# + [markdown] id="view-in-github" colab_type="text"
# <a href="https://colab.research.google.com/github/clizarraga-UAD7/Notebooks/blob/main/ML_Classification.ipynb" target="_parent"><img src="https://colab.research.google.com/assets/colab-badge.svg" alt="Open In Colab"/></a>
# + [markdown] id="v8NDofFZqbX0"
# ## Supervised Machine Learning
# + id="zYmRiuIFz5c1"
# Import collection of available datasets
from sklearn import datasets
# + colab={"base_uri": "https://localhost:8080/"} id="MoEPiFPADI63" outputId="2c918e3f-6f1e-4ab2-893a-6668914de4e9"
# Load the wine dataset and ask for its description (DESCR)
wine = datasets.load_wine()
print(wine.DESCR)
# + colab={"base_uri": "https://localhost:8080/"} id="5J8RGnsdDOIk" outputId="e4392b07-4b53-48e3-b798-f5f839922e05"
print(wine.target_names)
print(wine.feature_names)
# + [markdown] id="C7bxwmKRojZz"
# ### Analyzing the Iris dataset
# + id="bGrnLZXVDSJE"
# Import needed libraries
import pandas as pd
import numpy as np
import matplotlib.pyplot as plt
import seaborn as sns
# + id="Hi84PG0AbHhi"
# Load Iris dataset from Scikit-Learn
from sklearn.datasets import load_iris
iris_dataset = load_iris()
# + colab={"base_uri": "https://localhost:8080/"} id="ik1fkYIQhDtN" outputId="d56180f9-d8aa-4171-8f93-09398afcab4a"
print("Keys of iris_dataset:\n", iris_dataset.keys())
# + colab={"base_uri": "https://localhost:8080/"} id="EcuWocTrhR88" outputId="ecf1b870-716c-482b-acab-df96c26fecf8"
print(iris_dataset['DESCR'][:600] + "\n...")
# + colab={"base_uri": "https://localhost:8080/"} id="kOJTd5ORhdY7" outputId="7fde58ee-2163-4302-a58e-c74bbf8881ea"
print("Target names:", iris_dataset['target_names'])
# + colab={"base_uri": "https://localhost:8080/"} id="92JnP47Dhjwg" outputId="019a1485-b65f-4784-ca64-4bacf4827b1c"
print("Feature names:\n", iris_dataset['feature_names'])
# + colab={"base_uri": "https://localhost:8080/"} id="yuNysq0mhpCw" outputId="0db8a2ee-d9f3-4263-bb0b-abf69a147d04"
print("Type of data:", type(iris_dataset['data']))
# + colab={"base_uri": "https://localhost:8080/"} id="OEgHA28yhvRg" outputId="29df056f-eaf0-4692-a6cd-1882daf159a9"
print("Shape of data:", iris_dataset['data'].shape)
# + colab={"base_uri": "https://localhost:8080/"} id="-4XO6zNhhzWo" outputId="431537d6-daad-4219-ad3b-469ad3839e62"
print("First five rows of data:\n", iris_dataset['data'][:5])
# + colab={"base_uri": "https://localhost:8080/"} id="LPt2enNEh72P" outputId="bd1cb4a6-9c44-40b3-8559-9122d5ecbdd7"
print("Type of target:", type(iris_dataset['target']))
# + colab={"base_uri": "https://localhost:8080/"} id="lIou9u-DiAD9" outputId="a7b80ff4-8f91-49c9-b547-f3561876cbe3"
print("Shape of target:", iris_dataset['target'].shape)
# + colab={"base_uri": "https://localhost:8080/"} id="LNaYfk9HiEUy" outputId="7ef77e7b-fda2-4ede-c429-5e459a28b04b"
print("Target:\n", iris_dataset['target'])
# + [markdown] id="6s1Rqdrco3Dt"
# **Creating Training and Test Data**
#
# + id="fV7_JrdeiKBd"
# Import the train_test_split function from sklearn.model_selection
# Use: train_test_split(X, y, test_size=0.3, random_state=41), use 30% of data as test.
from sklearn.model_selection import train_test_split
X_train, X_test, y_train, y_test = train_test_split(
iris_dataset['data'], iris_dataset['target'], test_size=0.3, random_state=41)
# + colab={"base_uri": "https://localhost:8080/"} id="Eo43a2yyiZ20" outputId="bd7ee035-097c-40c9-e29d-d6e9aae19820"
# Splitting data result
# Train data size
print("X_train shape:", X_train.shape)
print("y_train shape:", y_train.shape)
# + colab={"base_uri": "https://localhost:8080/"} id="ChQanainimzb" outputId="f526fab6-aae1-4ec1-bff9-48ad01c1cf8d"
# Test data size
print("X_test shape:", X_test.shape)
print("y_test shape:", y_test.shape)
# + colab={"base_uri": "https://localhost:8080/"} id="UYjcnkYvjOXL" outputId="843de1fa-4a47-44c2-e9f9-66fd5dc775a6"
# Will use mglearn library for illustration purposes.
# Install the mglearn library for Python on Google Colab
# See: https://pypi.org/project/mglearn/
# !pip install mglearn
# + [markdown] id="mmNkbutipk7v"
# **Visualize data**
#
# + colab={"base_uri": "https://localhost:8080/", "height": 900} id="ZTkbpXvnizvj" outputId="59c2bd67-eb00-49c3-973f-8c9b58e271e7"
# Will use mglearn to use a color map
import mglearn
# create DataFrame from data in X_train and
# label the columns using the strings in iris_dataset.feature_names
iris_dataframe = pd.DataFrame(X_train, columns=iris_dataset.feature_names)
# create a scatter matrix from the dataframe, color by y_train
pd.plotting.scatter_matrix(iris_dataframe, c=y_train, figsize=(15, 15),
marker='o', hist_kwds={'bins': 20}, s=60,
alpha=.8, cmap=mglearn.cm3);
# + [markdown] id="83UsNPNtoRxc"
# **Building a First Model: k-Nearest Neighbors**
#
# + [markdown] id="-ZTM7Qz5KBF3"
# See: [k-Nearest Neighbors Algorithm](https://en.wikipedia.org/wiki/K-nearest_neighbors_algorithm)
# + id="Grtc4a0vi8lq"
# We import the K-Nearest Neighbors Classifier
from sklearn.neighbors import KNeighborsClassifier
# Instantiate the Algorithm
knn = KNeighborsClassifier(n_neighbors=1)
# + colab={"base_uri": "https://localhost:8080/"} id="JGGkEsPVkNek" outputId="a158d0df-a88a-43ba-c4d1-561de2aa12d5"
# Fit the model to train data
knn.fit(X_train, y_train)
# + [markdown] id="oDaylEvLoEeF"
# **Making predictions**
#
# + colab={"base_uri": "https://localhost:8080/"} id="txlebasSkSH0" outputId="a19d82d2-8b3e-4bf5-92ac-75d93af4dce1"
# Define a new data value unseen by the model
X_new = np.array([[5, 2.9, 1, 0.2]])
print("X_new.shape:", X_new.shape)
# + [markdown] id="2-g3e1jUnt06"
# **Evaluating the model**
#
# + colab={"base_uri": "https://localhost:8080/"} id="CL3KaAIikbtg" outputId="14f9253c-77d5-487c-804b-ae1aa6918718"
# Predict to what class will the new value belong
prediction = knn.predict(X_new)
print("Prediction:", prediction)
print("Predicted target name:",
iris_dataset['target_names'][prediction])
# + colab={"base_uri": "https://localhost:8080/"} id="njT-wCpLkkJd" outputId="246707ec-3080-4de3-b0b4-09de6a88ce47"
# Predict y values using test values X_test
y_pred = knn.predict(X_test)
print("Test set predictions:\n", y_pred)
# + colab={"base_uri": "https://localhost:8080/"} id="ONngNdOKko1h" outputId="4f2f7030-9b1b-417f-8b59-7f8bffdad3c7"
# Now see the performance of the model predicting y values compared to known test values
print("Test set score: {:.2f}".format(np.mean(y_pred == y_test)))
# + colab={"base_uri": "https://localhost:8080/"} id="3_-0eVxhkuuN" outputId="adc8afd1-c540-47fc-d182-4ac313187e2e"
# Print model performance using the score function
print("Test set score: {:.2f}".format(knn.score(X_test, y_test)))
# + colab={"base_uri": "https://localhost:8080/", "height": 289} id="0eyxKevH4abl" outputId="2cb659be-e547-4810-8f98-e8ad2a011b3e"
# Import Confusion Matrix
from sklearn.metrics import confusion_matrix
import seaborn as sns
# Evaluate the model with a confusion matrix.
# sklearn's signature is confusion_matrix(y_true, y_pred): rows are true
# classes, columns are predictions. The previous call swapped the arguments,
# which transposes the matrix and mislabels the heatmap axes.
cm = confusion_matrix(y_test, y_pred)
sns.heatmap(cm, annot=True, cmap='Blues');
# + colab={"base_uri": "https://localhost:8080/", "height": 286} id="iRrLNo0t6J7s" outputId="1422f939-94b4-43dc-87b4-c339577971f5"
# Annotated & Normalized Heat Map
sns.heatmap(cm/np.sum(cm), annot=True, fmt='.2%', cmap='Blues');
# + colab={"base_uri": "https://localhost:8080/"} id="Pwjzx7no7R6w" outputId="b23bd9ba-cced-4ee3-99bd-9b3538a86fe6"
# Print performance metrics
from sklearn.metrics import classification_report
print(classification_report(y_test, y_pred))
# + [markdown] id="MZkVxAK6k92F"
# **In summary: KNeighbors Classifier**
# + colab={"base_uri": "https://localhost:8080/", "height": 479} id="D-J67tNhk21l" outputId="70848161-4937-41b4-f68c-51ecb3093587"
# Self-contained summary: load data, split, fit a 1-NN classifier, evaluate.
# Import frequent needed libraries
import pandas as pd
import numpy as np
import matplotlib.pyplot as plt
import seaborn as sns
# Import libraries from Scikit-Learn for Classification problems
from sklearn.neighbors import KNeighborsClassifier
from sklearn.model_selection import train_test_split
from sklearn.metrics import confusion_matrix
from sklearn.metrics import classification_report
# Load Iris dataset from Scikit-Learn
from sklearn.datasets import load_iris
iris_dataset = load_iris()
# Simple steps for building a machine learning model
# Step 1: Split data into (70%) train and (30%) test sets.
X_train, X_test, y_train, y_test = train_test_split(
    iris_dataset['data'], iris_dataset['target'], test_size=0.3, random_state=41)
# Instantiate the model and train it by fitting the train data.
knn = KNeighborsClassifier(n_neighbors=1)
knn.fit(X_train, y_train)
# See the performance of the model
print("Test set score: {:.2f}".format(knn.score(X_test, y_test)))
# Predict on the held-out test set. (Previously this cell reused a stale
# y_pred computed by an earlier cell instead of this model's predictions.)
y_pred = knn.predict(X_test)
# Evaluate Model: confusion_matrix expects (y_true, y_pred)
cm = confusion_matrix(y_test, y_pred)
# Annotated & Normalized Heat Map
#sns.heatmap(cm/np.sum(cm), annot=True, fmt='.2%', cmap='Blues');
sns.heatmap(cm, annot=True, cmap='Blues');
# Print performance metrics
print(classification_report(y_test, y_pred))
# + [markdown] id="A-29-SqdwW3g"
# ### Trying with other classifiers.
# Introducing also the following concepts and methods:
# * [Cross Validation](https://scikit-learn.org/stable/modules/cross_validation.html),
# * [Hyperparameter tuning](https://scikit-learn.org/stable/modules/grid_search.html),
# * [Scaling](https://scikit-learn.org/stable/modules/generated/sklearn.preprocessing.StandardScaler.html#sklearn.preprocessing.StandardScaler) and
# * [Pipelines](https://scikit-learn.org/stable/modules/compose.html#combining-estimators) as means of combining various steps.
# + [markdown] id="saA4hnXJKgrj"
# See [Decision Trees Algorithm](https://en.wikipedia.org/wiki/Decision_tree_learning)
#
# [Cross Validation](https://en.wikipedia.org/wiki/Cross-validation_(statistics))
# + colab={"base_uri": "https://localhost:8080/", "height": 866} id="DntaQJPswF2D" outputId="ec23e7a4-535f-49d7-c2ee-5b9f7b9923b6"
# DecisionTreeClassifier tuned with randomized hyperparameter search.
# Import frequent needed libraries
import pandas as pd
import numpy as np
import matplotlib.pyplot as plt
import seaborn as sns
from scipy.stats import randint
# Import libraries from Scikit-Learn for Classification problems
from sklearn.tree import DecisionTreeClassifier
from sklearn.model_selection import RandomizedSearchCV
from sklearn.model_selection import train_test_split
# confusion_matrix was used below but never imported in this cell; the cell
# only ran if an earlier cell had imported it. Import it explicitly.
from sklearn.metrics import confusion_matrix
from sklearn.metrics import classification_report
# Load Iris dataset from Scikit-Learn
from sklearn.datasets import load_iris
iris_dataset = load_iris()
# Step 1: Split data into (70%) train and (30%) test sets.
X = iris_dataset['data']
y = iris_dataset['target']
X_train, X_test, y_train, y_test = train_test_split(
    iris_dataset['data'], iris_dataset['target'], test_size=0.3, random_state=41)
# Simple steps for building a machine learning model
# Define a grid in hyperparameter space; randint gives RandomizedSearchCV a
# distribution to sample from instead of a fixed list.
param_dist = {'max_depth': [3, None],
              'max_features': randint(1, 9),
              'min_samples_leaf': randint(1, 9),
              'criterion': ['gini', 'entropy']}
# Instantiate the model
tree = DecisionTreeClassifier()
# Instantiate the RandomSearchCV (5-fold cross-validation)
tree_cv = RandomizedSearchCV(tree, param_dist, cv=5)
# Fit model to data
tree_cv.fit(X_train, y_train)
# See the performance of the model
print("Tuned DecisionTree params: {}".format(tree_cv.best_params_))
print("Best score is: {}".format(tree_cv.best_score_))
y_pred = tree_cv.predict(X_test)
# Evaluate Model: confusion_matrix expects (y_true, y_pred)
cm = confusion_matrix(y_test, y_pred)
# Annotated & Normalized Heat Map
#sns.heatmap(cm/np.sum(cm), annot=True, fmt='.2%', cmap='Blues');
sns.heatmap(cm, annot=True, cmap='Blues');
# Print performance metrics
print(classification_report(y_test, y_pred))
# + [markdown] id="6M5JvQMTLNVF"
# See: [Logistic Regression Algorithm](https://en.wikipedia.org/wiki/Logistic_regression)
# + colab={"base_uri": "https://localhost:8080/", "height": 1000} id="VeJSITTwwUfm" outputId="aca608b9-fed8-4422-b213-93b193beb2b3"
# Logistic Regression Classifier tuned over C and penalty with GridSearchCV.
# Import frequent needed libraries
import pandas as pd
import numpy as np
import matplotlib.pyplot as plt
import seaborn as sns
from scipy.stats import randint
# Import libraries from Scikit-Learn for Classification problems
from sklearn.linear_model import LogisticRegression
from sklearn.model_selection import GridSearchCV
from sklearn.model_selection import train_test_split
# confusion_matrix was used below but never imported in this cell.
from sklearn.metrics import confusion_matrix
from sklearn.metrics import classification_report
# Load Iris dataset from Scikit-Learn
from sklearn.datasets import load_iris
iris_dataset = load_iris()
# Step 1: Split data into (70%) train and (30%) test sets.
X = iris_dataset['data']
y = iris_dataset['target']
X_train, X_test, y_train, y_test = train_test_split(X, y,
                                                    test_size=0.3, random_state=0)
# Simple steps for building a machine learning model
# Define a grid in hyperparameter space
c_space = np.logspace(-5, 4, 15)
param_grid = {'C': c_space, 'penalty': ['l1', 'l2']}
# Instantiate the model. The default 'lbfgs' solver does not support the
# 'l1' penalty in the grid (those fits fail); 'liblinear' supports both.
logreg = LogisticRegression(solver='liblinear')
# Instantiate the GridSearchCV (5-fold cross-validation)
logreg_cv = GridSearchCV(logreg, param_grid, cv=5)
# Fit model to data
logreg_cv.fit(X_train, y_train)
# See the performance of the model
print("Tuned LogisticRegression params: {}".format(logreg_cv.best_params_))
print("Tuned LogisticRegression Accuracy is: {}".format(logreg_cv.best_score_))
y_pred = logreg_cv.predict(X_test)
# Evaluate Model: confusion_matrix expects (y_true, y_pred)
cm = confusion_matrix(y_test, y_pred)
# Annotated & Normalized Heat Map
#sns.heatmap(cm/np.sum(cm), annot=True, fmt='.2%', cmap='Blues');
sns.heatmap(cm, annot=True, cmap='Blues');
# Print performance metrics
print(classification_report(y_test, y_pred))
# + [markdown] id="PWlDkrYyLW5j"
# See: [Elastic Net Regularization](https://en.wikipedia.org/wiki/Elastic_net_regularization)
# + colab={"base_uri": "https://localhost:8080/"} id="eJCsm3O8wUrd" outputId="8d942302-a184-44f3-878e-221e587de42b"
# ElasticNet Classifier
# Import frequent needed libraries
import pandas as pd
import numpy as np
import matplotlib.pyplot as plt
import seaborn as sns
from scipy.stats import randint
# Import libraries from Scikit-Learn for Classification problems
from sklearn.linear_model import ElasticNet
from sklearn.model_selection import GridSearchCV
from sklearn.model_selection import train_test_split
from sklearn.metrics import mean_squared_error
from sklearn.metrics import classification_report
# Load Iris dataset from Scikit-Learn
from sklearn.datasets import load_iris
iris_dataset = load_iris()
# Step 1: Split data into (70%) train and (30%) test sets.
X = iris_dataset['data']
y = iris_dataset['target']
X_train, X_test, y_train, y_test = train_test_split(X, y,
test_size=0.3, random_state=0)
# Simple steps for building a machine learning model
# Define a grid in hyperparameter space
l1_space = np.linspace(0,1,30)
param_grid = {'l1_ratio' : l1_space}
# Instantiate the model
elastic_net = ElasticNet()
# Instantiate the GridSearchCV
elnet_cv = GridSearchCV(elastic_net, param_grid, cv=5)
# Fit model to data
elnet_cv.fit(X_train, y_train)
y_pred = elnet_cv.predict(X_test)
r2 = elnet_cv.score(X_test, y_test)
mse = mean_squared_error(y_test, y_pred)
# See the performance of the model
print("Tuned ElasticNet params: {}".format(elnet_cv.best_params_))
print("R squared: {}".format(r2))
print("Mean squared error: {}".format(mse))
# Evaluate Model
#cm = confusion_matrix(y_pred, y_test)
# Annotated & Normalized Heat Map
#sns.heatmap(cm/np.sum(cm), annot=True, fmt='.2%', cmap='Blues');
#sns.heatmap(cm, annot=True, cmap='Blues');
# Print performance metrics
#print(classification_report(y_test, y_pred))
# + [markdown] id="_Lt7mNxtMoK5"
#
# + colab={"base_uri": "https://localhost:8080/"} id="RAAc4ijtNnqv" outputId="4e7b7708-1077-4be2-894f-60985a02f743"
# Scaling in a Pipeline
# Import frequent needed libraries
import pandas as pd
import numpy as np
import matplotlib.pyplot as plt
import seaborn as sns
from scipy.stats import randint
# Import libraries from Scikit-Learn for Classification problems
from sklearn.neighbors import KNeighborsClassifier
from sklearn.preprocessing import StandardScaler
from sklearn.pipeline import Pipeline
from sklearn.model_selection import train_test_split
from sklearn.metrics import mean_squared_error
from sklearn.metrics import accuracy_score
# Load Iris dataset from Scikit-Learn
from sklearn.datasets import load_iris
iris_dataset = load_iris()
# Step 1: Split data into (70%) train and (30%) test sets.
X = iris_dataset['data']
y = iris_dataset['target']
X_train, X_test, y_train, y_test = train_test_split(X, y,
test_size=0.3, random_state=0)
# Simple steps for building a machine learning model
steps = [('scaler', StandardScaler()),
('knn', KNeighborsClassifier())]
pipeline = Pipeline(steps)
# Instantiate and fit the model
knn_scaled = pipeline.fit(X_train, y_train)
y_pred = pipeline.predict(X_test)
print("Accuracy score (scaled): {}".format(accuracy_score(y_test, y_pred)))
# Instantiate and fit the model without scaling
knn_unscaled = KNeighborsClassifier().fit(X_train, y_train)
print("Accuracy score (unscaled): {}".format(knn_unscaled.score(X_test, y_test)))
# + colab={"base_uri": "https://localhost:8080/", "height": 496} id="_jIoyqNVNoBL" outputId="62ffc613-d167-4e0b-cffb-32a7902a7b98"
# Cross validation, scaling and hyperparameter tuning in a pipeline
# Scaling in a Pipeline
# Import frequent needed libraries
import pandas as pd
import numpy as np
import matplotlib.pyplot as plt
import seaborn as sns
from scipy.stats import randint
# Import libraries from Scikit-Learn for Classification problems
from sklearn.neighbors import KNeighborsClassifier
from sklearn.preprocessing import StandardScaler
from sklearn.model_selection import GridSearchCV
from sklearn.pipeline import Pipeline
from sklearn.model_selection import train_test_split
from sklearn.metrics import mean_squared_error
from sklearn.metrics import accuracy_score
# confusion_matrix was used below but never imported in this cell.
from sklearn.metrics import confusion_matrix
from sklearn.metrics import classification_report
# Load Iris dataset from Scikit-Learn
from sklearn.datasets import load_iris
iris_dataset = load_iris()
# Step 1: Split data into (80%) train and (20%) test sets.
X = iris_dataset['data']
y = iris_dataset['target']
X_train, X_test, y_train, y_test = train_test_split(X, y,
                                                    test_size=0.2, random_state=21)
#neighbors = np.arange(1, 16)
neighbors = list(range(1, 16))
# The 'knn__' prefix routes the parameter to the 'knn' step of the pipeline.
param_grid = dict(knn__n_neighbors=neighbors)
# Simple steps for building a machine learning model
steps = [('scaler', StandardScaler()),
         ('knn', KNeighborsClassifier())]
pipeline = Pipeline(steps)
# Instantiate and fit the model
cv = GridSearchCV(pipeline, param_grid)
cv.fit(X_train, y_train)
y_pred = cv.predict(X_test)
print("Best parameters: {}".format(cv.best_params_))
print("Score: {}".format(cv.score(X_test, y_test)))
print(classification_report(y_test, y_pred))
# Evaluate Model: confusion_matrix expects (y_true, y_pred)
cm = confusion_matrix(y_test, y_pred)
# Annotated & Normalized Heat Map
#sns.heatmap(cm/np.sum(cm), annot=True, fmt='.2%', cmap='Blues');
sns.heatmap(cm, annot=True, cmap='Blues');
# + [markdown] id="lqmXnZ2iYjzb"
# **Exercise:** Can you do a similar analysis with the [Penguins dataset](https://raw.githubusercontent.com/clizarraga-UAD7/Datasets/main/penguins/penguins_size.csv)?
#
#
#
# + colab={"base_uri": "https://localhost:8080/"} id="V32WSXpmwUz2" outputId="3a95a438-7d34-4f9d-ebd6-2f71e8a02b0d"
neighbors
# + [markdown] id="UJC5oLuHqmsU"
# **More datasets**
# + colab={"base_uri": "https://localhost:8080/", "height": 317} id="At9UYHA3lCMy" outputId="5a48e4f4-08e9-4bcf-8fd8-5b4da512cbe5"
import warnings
warnings.filterwarnings("ignore")
# Generate a dataset
# generate dataset
X, y = mglearn.datasets.make_forge()
# plot dataset
mglearn.discrete_scatter(X[:, 0], X[:, 1], y)
plt.legend(["Class 0", "Class 1"], loc=4)
plt.xlabel("First feature")
plt.ylabel("Second feature")
print("X.shape:", X.shape)
# + [markdown] id="AytB54HhrFY9"
# ### K-Nearest Neighbors
# + colab={"base_uri": "https://localhost:8080/", "height": 285} id="g3MEDdkYqz0i" outputId="95c51b0a-f2eb-4f76-ba94-19461b2ccdb8"
mglearn.plots.plot_knn_classification(n_neighbors=1)
# + colab={"base_uri": "https://localhost:8080/", "height": 285} id="_7ZHg4WcrM_m" outputId="40a13b5e-5598-4d2b-e23e-8692ff0c22f6"
mglearn.plots.plot_knn_classification(n_neighbors=3)
# + id="9pO2vtDWrp7-"
from sklearn.model_selection import train_test_split
X, y = mglearn.datasets.make_forge()
X_train, X_test, y_train, y_test = train_test_split(X, y, test_size=0.3, random_state=41)
# + id="s5w-_BcGr9C0"
from sklearn.neighbors import KNeighborsClassifier
clf = KNeighborsClassifier(n_neighbors=3)
# + colab={"base_uri": "https://localhost:8080/"} id="p_rMPX5nsC-f" outputId="31021ba8-281d-480f-98a1-98889d68002d"
clf.fit(X_train, y_train)
# + colab={"base_uri": "https://localhost:8080/"} id="91JR_r0CsHYp" outputId="1a7808ef-7b01-4dcf-b174-e0332ae8d5f0"
print("Test set predictions:", clf.predict(X_test))
# + colab={"base_uri": "https://localhost:8080/"} id="zZU5QwaWsMHq" outputId="0d056e31-c0a8-45cc-a474-db7ac2bcd29f"
print("Test set accuracy: {:.2f}".format(clf.score(X_test, y_test)))
# + [markdown] id="7oJMDW85sYQ0"
# **Analyzing the K-Neighbors Classifier**
#
# + colab={"base_uri": "https://localhost:8080/", "height": 244} id="aPIJnkhQsSww" outputId="af857e84-0ae2-4faf-e0f0-005235374d9a"
fig, axes = plt.subplots(1, 3, figsize=(10, 3))
for n_neighbors, ax in zip([1, 3, 9], axes):
# the fit method returns the object self, so we can instantiate
# and fit in one line
clf = KNeighborsClassifier(n_neighbors=n_neighbors).fit(X, y)
mglearn.plots.plot_2d_separator(clf, X, fill=True, eps=0.5, ax=ax, alpha=.4)
mglearn.discrete_scatter(X[:, 0], X[:, 1], y, ax=ax)
ax.set_title("{} neighbor(s)".format(n_neighbors))
ax.set_xlabel("feature 0")
ax.set_ylabel("feature 1")
axes[0].legend(loc=3);
# + [markdown] id="QbZ3hzXbs6Ds"
# **How do we decide the number of n_neighbors?**
#
# + colab={"base_uri": "https://localhost:8080/", "height": 409} id="HB1PaZp0snp_" outputId="5e8ccca8-c906-46de-f41d-3c47574fa52b"
# Sweep n_neighbors from 1 to 10 on the breast-cancer dataset and plot
# training vs. test accuracy to visualize the bias/variance trade-off.
from sklearn.datasets import load_breast_cancer
cancer = load_breast_cancer()
# stratify keeps the benign/malignant class ratio equal in train and test
X_train, X_test, y_train, y_test = train_test_split(
    cancer.data, cancer.target, stratify=cancer.target, test_size=0.25, random_state=66)
training_accuracy = []
test_accuracy = []
# try n_neighbors from 1 to 10
neighbors_settings = range(1, 11)
for n_neighbors in neighbors_settings:
    # build the model
    clf = KNeighborsClassifier(n_neighbors=n_neighbors)
    clf.fit(X_train, y_train)
    # record training set accuracy
    training_accuracy.append(clf.score(X_train, y_train))
    # record generalization accuracy
    test_accuracy.append(clf.score(X_test, y_test))
# Training accuracy is highest at k=1 (memorization) and falls as k grows,
# while test accuracy typically peaks at an intermediate k.
plt.figure(figsize=(10, 6))
plt.plot(neighbors_settings, training_accuracy, label="training accuracy")
plt.plot(neighbors_settings, test_accuracy, label="test accuracy")
plt.ylabel("Accuracy")
plt.xlabel("n_neighbors")
plt.legend();
# + [markdown] id="6VonLttpvJM5"
# ## Linear Models for Classification
#
#
# + colab={"base_uri": "https://localhost:8080/", "height": 408} id="JR-plqyCtPJK" outputId="649b79c2-317e-4a03-a7f1-d580b951025d"
from sklearn.linear_model import LogisticRegression
from sklearn.svm import LinearSVC
X, y = mglearn.datasets.make_forge()
fig, axes = plt.subplots(1, 2, figsize=(12, 6))
for model, ax in zip([LinearSVC(), LogisticRegression()], axes):
clf = model.fit(X, y)
mglearn.plots.plot_2d_separator(clf, X, fill=False, eps=0.5,
ax=ax, alpha=.7)
mglearn.discrete_scatter(X[:, 0], X[:, 1], y, ax=ax)
ax.set_title(clf.__class__.__name__)
ax.set_xlabel("Feature 0")
ax.set_ylabel("Feature 1")
axes[0].legend();
# + colab={"base_uri": "https://localhost:8080/", "height": 288} id="iuspXjeWvR47" outputId="31c0c300-4aef-496a-8b4d-8322b3f1af0f"
mglearn.plots.plot_linear_svc_regularization()
# + colab={"base_uri": "https://localhost:8080/"} id="L8wEW7n2vrGM" outputId="8237aac2-a46a-4781-f1c9-87de20dc8b0b"
from sklearn.datasets import load_breast_cancer
cancer = load_breast_cancer()
X_train, X_test, y_train, y_test = train_test_split(
cancer.data, cancer.target, stratify=cancer.target, test_size=0.25, random_state=42)
logreg = LogisticRegression().fit(X_train, y_train)
print("Training set score: {:.3f}".format(logreg.score(X_train, y_train)))
print("Test set score: {:.3f}".format(logreg.score(X_test, y_test)))
# + colab={"base_uri": "https://localhost:8080/"} id="hyc8rpQ5wIdY" outputId="07902a42-3617-40a4-8d5e-6aa27ae97416"
logreg100 = LogisticRegression(C=100).fit(X_train, y_train)
print("Training set score: {:.3f}".format(logreg100.score(X_train, y_train)))
print("Test set score: {:.3f}".format(logreg100.score(X_test, y_test)))
# + colab={"base_uri": "https://localhost:8080/"} id="M028GHjuwTxA" outputId="189d4a1e-7c1c-47c4-dd65-0cfb51f997a8"
logreg001 = LogisticRegression(C=0.01).fit(X_train, y_train)
print("Training set score: {:.3f}".format(logreg001.score(X_train, y_train)))
print("Test set score: {:.3f}".format(logreg001.score(X_test, y_test)))
# + colab={"base_uri": "https://localhost:8080/", "height": 517} id="qotGAYnLwaYt" outputId="b20ad89b-1a3d-49d6-b29b-768479bc815d"
# Plot scores for each feature
plt.figure(figsize=(10, 6))
plt.plot(logreg.coef_.T, 'o', label="C=1")
plt.plot(logreg100.coef_.T, '^', label="C=100")
plt.plot(logreg001.coef_.T, 'v', label="C=0.001")
plt.xticks(range(cancer.data.shape[1]), cancer.feature_names, rotation=90)
xlims = plt.xlim()
plt.hlines(0, xlims[0], xlims[1])
plt.xlim(xlims)
plt.ylim(-5, 5)
plt.xlabel("Feature")
plt.ylabel("Coefficient magnitude")
plt.legend();
# + [markdown] id="U-VYyiKJxlqG"
# ## Linear models for multilabel classification
#
# + colab={"base_uri": "https://localhost:8080/", "height": 317} id="0j-EzKsPw5og" outputId="4ba99345-aba5-468f-eac5-f7f6de81616f"
from sklearn.datasets import make_blobs
X, y = make_blobs(random_state=42)
mglearn.discrete_scatter(X[:, 0], X[:, 1], y)
plt.xlabel("Feature 0")
plt.ylabel("Feature 1")
plt.legend(["Class 0", "Class 1", "Class 2"])
plt.figure(figsize=(10, 6))
plt.show();
# + colab={"base_uri": "https://localhost:8080/"} id="DZknhTX3xves" outputId="1c0bd444-555a-4746-ca96-9c7288188daa"
linear_svm = LinearSVC().fit(X, y)
print("Coefficient shape: ", linear_svm.coef_.shape)
print("Intercept shape: ", linear_svm.intercept_.shape)
# + colab={"base_uri": "https://localhost:8080/", "height": 321} id="orR8DsmuyUJ4" outputId="88cd6341-da0e-48ed-d3d3-f42df9b47e96"
mglearn.discrete_scatter(X[:, 0], X[:, 1], y)
line = np.linspace(-15, 15)
for coef, intercept, color in zip(linear_svm.coef_, linear_svm.intercept_,
mglearn.cm3.colors):
plt.plot(line, -(line * coef[0] + intercept) / coef[1], c=color)
plt.ylim(-10, 15)
plt.xlim(-10, 8)
plt.xlabel("Feature 0")
plt.ylabel("Feature 1")
plt.legend(['Class 0', 'Class 1', 'Class 2', 'Line class 0', 'Line class 1',
'Line class 2'], loc=(1.01, 0.3))
plt.figure(figsize=(10, 6))
plt.show();
# + colab={"base_uri": "https://localhost:8080/", "height": 300} id="gSezycRNybXH" outputId="7372792e-ece6-4aa7-b484-3786aad4a79d"
mglearn.plots.plot_2d_classification(linear_svm, X, fill=True, alpha=.7)
mglearn.discrete_scatter(X[:, 0], X[:, 1], y)
line = np.linspace(-15, 15)
for coef, intercept, color in zip(linear_svm.coef_, linear_svm.intercept_,
mglearn.cm3.colors):
plt.plot(line, -(line * coef[0] + intercept) / coef[1], c=color)
plt.legend(['Class 0', 'Class 1', 'Class 2', 'Line class 0', 'Line class 1',
'Line class 2'], loc=(1.01, 0.3))
plt.xlabel("Feature 0")
plt.ylabel("Feature 1")
# + [markdown] id="KfYi4gbny7e9"
# **strength, weakness and parameters**
#
# + id="GII9VriFyvKc"
# instantiate model and fit it in one line
logreg = LogisticRegression().fit(X_train, y_train)
# + id="Y2KsqPlqzFkZ"
logreg = LogisticRegression()
y_pred = logreg.fit(X_train, y_train).predict(X_test)
# + id="rvQJwUADzQg7"
y_pred = LogisticRegression().fit(X_train, y_train).predict(X_test)
# + id="001CrNd9zWc6"
# + [markdown] id="A-NCeJz10Q_2"
# ## Decision Trees
# + colab={"base_uri": "https://localhost:8080/"} id="TK2gTZOg0W1i" outputId="bb5b2cc5-44d4-404b-9fd0-552bf3286bf6"
import sys
sys.path
# + colab={"base_uri": "https://localhost:8080/", "height": 268} id="hg3JlGOH0YBB" outputId="f2960b54-0e94-4578-f76e-09d985e07ce3"
# Example of a Decision Tree
mglearn.plots.plot_animal_tree()
# + [markdown] id="Jl4vpk1R0uBn"
# **Building Decision Trees**
# + colab={"base_uri": "https://localhost:8080/", "height": 1000} id="ijPjHvnF0dJ2" outputId="d425785a-0266-4663-fd1a-731d3a05621e"
mglearn.plots.plot_tree_progressive()
# + [markdown] id="VQsidNYJ1F11"
# Controlling complexity of Decision Trees
#
# + colab={"base_uri": "https://localhost:8080/"} id="q7Pv5qqI0zkf" outputId="61be49d4-e8b8-49b6-9dd4-ca77cd9e989d"
from sklearn.tree import DecisionTreeClassifier
cancer = load_breast_cancer()
X_train, X_test, y_train, y_test = train_test_split(
cancer.data, cancer.target, stratify=cancer.target, random_state=42)
tree = DecisionTreeClassifier(random_state=0)
tree.fit(X_train, y_train)
print("Accuracy on training set: {:.3f}".format(tree.score(X_train, y_train)))
print("Accuracy on test set: {:.3f}".format(tree.score(X_test, y_test)))
# + colab={"base_uri": "https://localhost:8080/"} id="G-1jaYzk1LMX" outputId="d5c4048f-74ca-4420-de08-c080bdd22032"
tree = DecisionTreeClassifier(max_depth=4, random_state=0)
tree.fit(X_train, y_train)
print("Accuracy on training set: {:.3f}".format(tree.score(X_train, y_train)))
print("Accuracy on test set: {:.3f}".format(tree.score(X_test, y_test)))
# + id="fW11J7oj1U52"
| ML_Classification.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# language: python
# name: python3
# ---
# # NTDS'18 milestone 1: network collection and properties
# [<NAME>](https://lts4.epfl.ch/simou), [EPFL LTS4](https://lts4.epfl.ch)
# ## Students
#
# * Team: `<your team number>`
# * Students: `<the name of all students in the team>`
# * Dataset: `<the dataset you used to complete the milestone>`
# ## Rules
#
# * Milestones have to be completed by teams. No collaboration between teams is allowed.
# * Textual answers shall be short. Typically one to three sentences.
# * Code has to be clean.
# * You cannot import any other library than we imported.
# * When submitting, the notebook is executed and the results are stored. I.e., if you open the notebook again it should show numerical results and plots. We won't be able to execute your notebooks.
# * The notebook is re-executed from a blank state before submission. That is to be sure it is reproducible. You can click "Kernel" then "Restart & Run All" in Jupyter.
# ## Objective
# The purpose of this milestone is to start getting acquainted to the network that you will use for this class. In the first part of the milestone you will import your data using [Pandas](http://pandas.pydata.org) and you will create the adjacency matrix using [Numpy](http://www.numpy.org). This part is project specific. In the second part you will have to compute some basic properties of your network. **For the computation of the properties you are only allowed to use the packages that have been imported in the cell below.** You are not allowed to use any graph-specific toolboxes for this milestone (such as networkx and PyGSP). Furthermore, the aim is not to blindly compute the network properties, but to also start to think about what kind of network you will be working with this semester.
import numpy as np
import pandas as pd
import matplotlib.pyplot as plt
# ## Part 1 - Import your data and manipulate them.
# ### A. Load your data in a Panda dataframe.
# First, you should define and understand what are your nodes, what features you have and what are your labels. Please provide below a Panda dataframe where each row corresponds to a node with its features and labels. For example, in the the case of the Free Music Archive (FMA) Project, each row of the dataframe would be of the following form:
#
#
# | Track | Feature 1 | Feature 2 | . . . | Feature 518| Label 1 | Label 2 |. . .|Label 16|
# |:-------:|:-----------:|:---------:|:-----:|:----------:|:--------:|:--------:|:---:|:------:|
# | | | | | | | | | |
#
# It is possible that in some of the projects either the features or the labels are not available. This is OK, in that case just make sure that you create a dataframe where each of the rows corresponds to a node and its associated features or labels.
# +
# Your code here.
features = # the pandas dataframe with the features and labels
# -
# ### B. Create the adjacency matrix of your network.
# Remember that there are edges connecting the attributed nodes that you organized in the dataframe above. The connectivity of the network is captured by the adjacency matrix $W$. If $N$ is the number of nodes, the adjacency matrix is an $N \times N$ matrix where the value of $W(i,j)$ is the weight of the edge connecting node $i$ to node $j$.
#
# There are two possible scenarios for your adjacency matrix construction, as you already learned in the tutorial by Benjamin:
#
# 1) The edges are given to you explicitly. In this case you should simply load the file containing the edge information and parse it in order to create your adjacency matrix. See how to do that in the [graph from edge list]() demo.
#
# 2) The edges are not given to you. In that case you will have to create a feature graph. In order to do that you will have to chose a distance that will quantify how similar two nodes are based on the values in their corresponding feature vectors. In the [graph from features]() demo Benjamin showed you how to build feature graphs when using Euclidean distances between feature vectors. Be curious and explore other distances as well! For instance, in the case of high-dimensional feature vectors, you might want to consider using the cosine distance. Once you compute the distances between your nodes you will have a fully connected network. Do not forget to sparsify by keeping the most important edges in your network.
#
# Follow the appropriate steps for the construction of the adjacency matrix of your network and provide it in the Numpy array ``adjacency`` below:
# +
# Your code here
adjacency = # the adjacency matrix
n_nodes = # the number of nodes in the network
# -
# ## Part 2
# Execute the cell below to plot the (weighted) adjacency matrix of your network.
plt.spy(adjacency, markersize=1)
plt.title('adjacency matrix')
# ### Question 1
#
# What is the maximum number of links $L_{max}$ in a network with $N$ nodes (where $N$ is the number of nodes in your network)? How many links $L$ are there in your collected network? Comment on the sparsity of your network.
# +
# Your code here.
# -
# **Your answer here.**
# ### Question 2
#
# Is your graph directed or undirected? If it is directed, convert it to an undirected graph by symmetrizing the adjacency matrix.
# **Your answer here.**
# +
# Your code here.
# -
# ### Question 3
#
# In the cell below save the features dataframe and the **symmetrized** adjacency matrix. You can use the Pandas ``to_csv`` to save the ``features`` and Numpy's ``save`` to save the ``adjacency``. We will reuse those in the following milestones.
# +
# Your code here.
# -
# ### Question 4
#
# Are the edges of your graph weighted?
# **Your answer here.**
# ### Question 5
#
# What is the degree distribution of your network?
# +
degree = # Your code here. It should be a numpy array.
assert len(degree) == n_nodes
# -
# Execute the cell below to see the histogram of the degree distribution.
weights = np.ones_like(degree) / float(n_nodes)
plt.hist(degree, weights=weights);
# What is the average degree?
# +
# Your code here.
# -
# ### Question 6
#
# Comment on the degree distribution of your network.
# **Your answer here.**
# ### Question 7
#
# Write a function that takes as input the adjacency matrix of a graph and determines whether the graph is connected or not.
def connected_graph(adjacency):
    """Determines whether a graph is connected.

    Runs a depth-first traversal from node 0 over the binarized adjacency
    matrix; the graph is connected iff every node is reached. Uses only
    numpy, per the milestone's import restrictions.

    Parameters
    ----------
    adjacency: numpy array
        The (weighted) adjacency matrix of a graph.

    Returns
    -------
    bool
        True if the graph is connected, False otherwise.
    """
    adjacency = np.asarray(adjacency)
    n = adjacency.shape[0]
    if n == 0:
        return True  # the empty graph is vacuously connected
    # Any non-zero weight counts as an edge (hop connectivity).
    binary = adjacency != 0
    visited = np.zeros(n, dtype=bool)
    stack = [0]
    visited[0] = True
    while stack:
        node = stack.pop()
        for neighbor in np.flatnonzero(binary[node]):
            if not visited[neighbor]:
                visited[neighbor] = True
                stack.append(neighbor)
    connected = bool(visited.all())
    return connected
# Is your graph connected? Run the ``connected_graph`` function to determine your answer.
# +
# Your code here.
# -
# ### Question 8
#
# Write a function that extracts the connected components of a graph.
def find_components(adjacency):
    """Find the connected components of a graph.

    Repeated depth-first traversals over the binarized adjacency matrix
    partition the nodes into components; for each component the original
    (weighted) sub-adjacency matrix is returned.

    Parameters
    ----------
    adjacency: numpy array
        The (weighted) adjacency matrix of a graph.

    Returns
    -------
    list of numpy arrays
        A list of adjacency matrices, one per connected component.
    """
    adjacency = np.asarray(adjacency)
    n = adjacency.shape[0]
    binary = adjacency != 0
    visited = np.zeros(n, dtype=bool)
    components = []
    for start in range(n):
        if visited[start]:
            continue
        # Collect every node reachable from `start`.
        stack = [start]
        visited[start] = True
        members = [start]
        while stack:
            node = stack.pop()
            for neighbor in np.flatnonzero(binary[node]):
                if not visited[neighbor]:
                    visited[neighbor] = True
                    stack.append(neighbor)
                    members.append(neighbor)
        members = np.sort(np.array(members))
        # Keep the original edge weights within the component.
        components.append(adjacency[np.ix_(members, members)])
    return components
# How many connected components is your network composed of? What is the size of the largest connected component? Run the ``find_components`` function to determine your answer.
# +
# Your code here.
# -
# ### Question 9
#
# Write a function that takes as input the adjacency matrix and a node (`source`) and returns the length of the shortest path between that node and all nodes in the graph using Dijkstra's algorithm. **For the purposes of this assignment we are interested in the hop distance between nodes, not in the sum of weights.**
#
# Hint: You might want to mask the adjacency matrix in the function ``compute_shortest_path_lengths`` in order to make sure you obtain a binary adjacency matrix.
def compute_shortest_path_lengths(adjacency, source):
    """Compute the shortest path length between a source node and all nodes.

    The adjacency matrix is masked to a binary one (hop distance only, as
    required); with unit edge costs Dijkstra's algorithm reduces to a
    breadth-first search, which is what is implemented here.

    Parameters
    ----------
    adjacency: numpy array
        The (weighted) adjacency matrix of a graph.
    source: int
        The source node. A number between 0 and n_nodes-1.

    Returns
    -------
    list of ints
        The length of the shortest path from source to all nodes. Returned
        list is of length n_nodes; unreachable nodes get np.inf.
    """
    binary = np.asarray(adjacency) != 0
    n = binary.shape[0]
    shortest_path_lengths = [np.inf] * n  # np.inf marks "not yet reached"
    shortest_path_lengths[source] = 0
    # Level-by-level BFS: `frontier` holds all nodes at distance `distance`.
    frontier = [source]
    distance = 0
    while frontier:
        distance += 1
        next_frontier = []
        for node in frontier:
            for neighbor in np.flatnonzero(binary[node]):
                if shortest_path_lengths[neighbor] == np.inf:
                    shortest_path_lengths[neighbor] = distance
                    next_frontier.append(neighbor)
        frontier = next_frontier
    return shortest_path_lengths
# ### Question 10
#
# The diameter of the graph is the length of the longest shortest path between any pair of nodes. Use the above developed function to compute the diameter of the graph (or the diameter of the largest connected component of the graph if the graph is not connected). If your graph (or largest connected component) is very large, computing the diameter will take very long. In that case downsample your graph so that it has 1.000 nodes. There are many ways to reduce the size of a graph. For the purposes of this milestone you can chose to randomly select 1.000 nodes.
# +
# Your code here.
# -
# ### Question 11
#
# Write a function that takes as input the adjacency matrix, a path length, and two nodes (`source` and `target`), and returns the number of paths of the given length between them.
def compute_paths(adjacency, source, target, length):
    """Compute the number of paths of a given length between a source and target node.

    Entry (i, j) of the k-th power of the binary adjacency matrix counts
    the walks of length k from i to j, which is exactly what is returned.

    Parameters
    ----------
    adjacency: numpy array
        The (weighted) adjacency matrix of a graph.
    source: int
        The source node. A number between 0 and n_nodes-1.
    target: int
        The target node. A number between 0 and n_nodes-1.
    length: int
        The path length to be considered.

    Returns
    -------
    int
        The number of paths.
    """
    # Binarize so weights do not distort the count; int64 avoids overflow
    # for moderate path lengths.
    binary = (np.asarray(adjacency) != 0).astype(np.int64)
    power = np.linalg.matrix_power(binary, length)
    n_paths = int(power[source, target])
    return n_paths
# Test your function on 5 pairs of nodes, with different lengths.
print(compute_paths(adjacency, 0, 10, 1))
print(compute_paths(adjacency, 0, 10, 2))
print(compute_paths(adjacency, 0, 10, 3))
print(compute_paths(adjacency, 23, 67, 2))
print(compute_paths(adjacency, 15, 93, 4))
# ### Question 12
#
# How many paths of length 3 are there in your graph? Hint: calling the `compute_paths` function on every pair of node is not an efficient way to do it.
# +
# Your code here.
# -
# ### Question 13
#
# Write a function that takes as input the adjacency matrix of your graph (or of the largest connected component of your graph) and a node and returns the clustering coefficient of that node.
def compute_clustering_coefficient(adjacency, node):
    """Compute the clustering coefficient of a node.

    The coefficient is 2*L / (k*(k-1)), where k is the node's degree and
    L the number of edges among its neighbors. Nodes with degree < 2 get
    0 by the usual convention.

    Parameters
    ----------
    adjacency: numpy array
        The (weighted) adjacency matrix of a graph.
    node: int
        The node whose clustering coefficient will be computed. A number between 0 and n_nodes-1.

    Returns
    -------
    float
        The clustering coefficient of the node. A number between 0 and 1.
    """
    binary = (np.asarray(adjacency) != 0).astype(np.int64)
    neighbors = np.flatnonzero(binary[node])
    degree = len(neighbors)
    if degree < 2:
        return 0.0  # no possible triangle through this node
    # Edges among the neighbors; each undirected edge is counted twice
    # in the symmetric submatrix, hence the division by 2.
    subgraph = binary[np.ix_(neighbors, neighbors)]
    links = subgraph.sum() / 2
    clustering_coefficient = 2.0 * links / (degree * (degree - 1))
    return float(clustering_coefficient)
# ### Question 14
#
# What is the average clustering coefficient of your graph (or of the largest connected component of your graph if your graph is disconnected)? Use the function ``compute_clustering_coefficient`` to determine your answer.
# +
# Your code here.
| milestones/1_network_properties.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# language: python
# name: python3
# ---
# +
# # %load standard_import.txt
import pandas as pd
import numpy as np
import matplotlib.pyplot as plt
from mpl_toolkits.mplot3d import axes3d
import seaborn as sns
from sklearn.preprocessing import scale
import sklearn.linear_model as skl_lm
from sklearn.metrics import mean_squared_error, r2_score
import statsmodels.api as sm
import statsmodels.formula.api as smf
# %matplotlib inline
plt.style.use('seaborn-white')
# -
# # Some Stats
# - Expectation:
# $$\begin{eqnarray}\mu_X & = & E[X] \\ & = & \sum w_i \cdot x_i\end{eqnarray}$$ where $x_i$ are samples of $X$, $w_i$ is a weight, or probability, of $x_i$ showing up, and $\sum w_i = 1$.
#
# Usually, we look at $\mu_X$ as just the mean, and $w_i=\frac{1}{N}$, so $\mu_X = \frac{1}{N}\sum x_i$. Below, $\mu_X$ refers to the mean.
#
# - Covariance:
# $$\begin{eqnarray}\sigma_{XY}^2 & = & E[(X-E[X])\cdot(Y-E[Y])] \\ & = & E[XY] - E[X]E[Y] \\ & = & \mu_{XY} - \mu_X\mu_Y\end{eqnarray}$$
#
# - Variance:
# $$\begin{eqnarray}\sigma_X^2 & = & Var[X] \\ & = & E[(X-E[X])^2] \\ & = & E[X^2]-E[X]^2 \\ & = & \mu_{X^2}-\mu_X^2\end{eqnarray}$$
#
# - Standard deviation: Square root of variance, i.e., $\sigma_X$.
#
# - Correlation:
# $$r_{XY}=\frac{\sigma_{XY}^2}{\sigma_X\cdot\sigma_Y}$$
wage = pd.read_csv('Data/Wage.csv')
wage.describe()
x=wage["age"]
y=wage["wage"]
# The full nelson for $r_{xy}$!
((x*y).mean()-x.mean()*y.mean())/(np.sqrt(((x-x.mean())**2).mean())*np.sqrt(((y-y.mean())**2).mean()))
# A bit simpler this way...
np.cov(x,y)[0,1]/(x.std()*y.std())
# ... or this way!
wage.corr()
# It is sometimes nice to look at these variables as "standard normal", and we achieve this by "normalizing" these sample vectors:
# ... as standard normal variables...
xx=(x-x.mean())/x.std()
yy=(y-y.mean())/y.std()
sns.scatterplot(data=wage, x="age", y="logwage", alpha=0.1);
# Distribution visualizations tell the full story though...
sns.distplot(xx, label='xx');
sns.distplot(yy, label='yy');
plt.legend();
# When in standard normal form, the correlation coefficient is easier to think about: $E[xx\cdot yy]$
n=len(xx)
1/(n-1)*np.dot(xx,yy)
np.correlate(xx,yy)
np.cov(x,y)
| Notebooks/Notes 2.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# language: python
# name: python3
# ---
# +
# BipartiteGeneral
from PyQuantum.BipartiteGeneral.Cavity import Cavity
from PyQuantum.BipartiteGeneral.Hamiltonian import Hamiltonian
from PyQuantum.BipartiteGeneral.WaveFunction import WaveFunction
from PyQuantum.BipartiteGeneral.Evolution import run_wf
# +
# Common
from PyQuantum.Common.STR import *
from PyQuantum.Common.LoadPackage import *
from PyQuantum.Common.Tools import mkdir
from PyQuantum.Common.PlotBuilder3D import PlotBuilder3D
# +
import PyQuantum.BipartiteGeneral.config as config
mkdir(config.path)
# +
cavity = Cavity(n=config.n, wc=config.wc, wa=config.wa, g=config.g)
cavity.print()
# +
H = Hamiltonian(capacity=config.capacity, cavity=cavity)
H.print_states()
if __debug__:
H.to_csv(filename=config.H_csv)
H.print_bin_states()
H.print_html(filename=config.H_html)
# +
w_0 = WaveFunction(states=H.states, init_state=config.init_state)
w_0.print()
# -
run_wf(w_0=w_0, H=H, dt=config.dt, nt=config.nt,
config=config, fidelity_mode=True)
# +
# Choose a vertical scale factor for the 3-D plot based on the total
# simulation time T (compared against multiples of config.mks).
y_scale = 1
if config.T < 0.5 * config.mks:
    y_scale = 0.1
elif config.T == 0.5 * config.mks:
    y_scale = 0.01
elif config.T == 1 * config.mks:
    y_scale = 7.5
    # y_scale = 10
elif config.T == 5 * config.mks:
    y_scale = 1
# NOTE(review): `plt` is rebound here to a PlotBuilder3D instance;
# it is unrelated to matplotlib.pyplot.
plt = PlotBuilder3D()
# Build an HTML-formatted title summarising the run configuration
# (atom count, extra photons, initial state, frequencies, couplings).
# title = ""
title = "<b>"
title += "n = " + str(config.n)
if config.capacity - config.n > 0:
    title += "<br>" + str(config.capacity - config.n) + \
        " photons in cavity"
title += "<br>atoms state: " + str(config.init_state)
title += "<br>"
title += "<br>w<sub>c</sub> = " + wc_str(config.wc)
title += "<br>w<sub>a</sub> = " + \
    "[" + ", ".join([wa_str(i) for i in config.wa]) + "]"
title += "<br>g = " + "[" + ", ".join([g_str(i) for i in config.g]) + "]"
title += "</b>"
# Configure axes and render the evolution from the CSV files written by
# run_wf above.
plt.set_title(title)
plt.set_xaxis("states")
plt.set_yaxis("time, " + T_str_mark(config.T))
plt.set_zaxis("prob.")
plt.set_yscale(y_scale)
plt.set_width(900)
plt.set_height(650)
plt.iplot(
    x_csv=config.path + "/" + "x.csv",
    y_csv=config.path + "/" + "t.csv",
    z_csv=config.path + "/" + "z.csv",
    # t_coeff=20000 / 1000 * (config.T / 1e-6),
    online=False,
    path=config.path,
    filename="BipartiteGeneral",
)
# -
| .ipynb_checkpoints/bpg-checkpoint.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# language: python
# name: python3
# ---
# +
# %matplotlib inline
# %load_ext autoreload
# %autoreload 2
import pandas as pd
import os
import pickle as pkl
from os.path import join as oj
from io import StringIO
from IPython.display import Image
import warnings
import matplotlib.pyplot as plt
from sklearn import metrics
from sklearn.tree import DecisionTreeClassifier, plot_tree, export_graphviz
import pydotplus
#from dtreeviz.trees import dtreeviz - not loaded...
import sys
sys.path.append('../../../../')
from rulevetting.api import validation
from rulevetting.projects.tbi_pecarn.dataset import Dataset
from rulevetting.projects.tbi_pecarn.graph import barplot
from sklearn.ensemble import AdaBoostClassifier
from logitboost import LogitBoost
from sklearn.metrics import roc_auc_score
MODELS_DIR = './models'
os.makedirs(MODELS_DIR, exist_ok=True)
outcome_def = 'outcome' # output
data_path = '../../../../data/' # path to raw csv - change to processed...
import matplotlib as mpl
import matplotlib.pyplot as plt
import seaborn as sns
# Set plotting font sizes and properties
TINY_SIZE = 10
SMALL_SIZE = 12
MEDIUM_SIZE = 14
BIGGER_SIZE = 16
MARKER_SIZE = 6
LINE_SIZE = 4
plt.rc("font", size=SMALL_SIZE) # controls default text sizes
plt.rc("axes", titlesize=BIGGER_SIZE) # fontsize of the axes title
plt.rc("axes", labelsize=MEDIUM_SIZE) # fontsize of the x and y labels
plt.rc("xtick", labelsize=SMALL_SIZE) # fontsize of the tick labels
plt.rc("ytick", labelsize=SMALL_SIZE) # fontsize of the tick labels
plt.rc("legend", fontsize=TINY_SIZE) # legend fontsize
plt.rc("figure", titlesize=BIGGER_SIZE) # fontsize of the figure title
plt.rc("lines", markersize=MARKER_SIZE) # marker size
plt.rc("lines", linewidth=LINE_SIZE) # line width
mpl.rcParams["figure.dpi"] = 180
# Height and width per row and column of subplots
FIG_HEIGHT = 20
FIG_WIDTH = 18
fig_fcn = lambda kwargs: plt.figure(figsize=(FIG_WIDTH, FIG_HEIGHT), **kwargs)
color_list = sns.color_palette("colorblind")
# +
df = Dataset().clean_data(data_path = data_path)
df = Dataset().preprocess_data(df)
df_train, df_tune, _ = Dataset().split_data(df)
X_train = df_train.drop(columns=outcome_def)
y_train = df_train[outcome_def].values
X_tune = df_tune.drop(columns=outcome_def)
y_tune = df_tune[outcome_def].values
processed_feats = df_train.keys().values.tolist()
feature_names = list(X_train)
def predict_and_save(model, model_name='decision_tree'):
    '''Evaluate a fitted model on the train and tune splits, plot the
    tune-split curve, pickle all statistics, and return the last split's
    stats and thresholds.
    '''
    results = {'model': model}
    splits = [(X_train, y_train, '_train'), (X_tune, y_tune, '_tune')]
    for x, y, suffix in splits:
        probs = model.predict_proba(x)[:, 1]
        # Only the tune split gets plotted.
        stats, threshes = validation.all_stats_curve(y, probs,
                                                     plot=(suffix == '_tune'))
        for stat_name, stat_value in stats.items():
            results[stat_name + suffix] = stat_value
        results['threshes' + suffix] = threshes
    # Persist model + metrics together for later comparison.
    pkl.dump(results, open(oj(MODELS_DIR, model_name + '.pkl'), 'wb'))
    # Returns the stats of the last split iterated (the tune set).
    return stats, threshes
# +
simple_var_list = ['InjuryMech_Assault',
'InjuryMech_Bicyclist struck by automobile',
'InjuryMech_Bike collision/fall', 'InjuryMech_Fall down stairs',
'InjuryMech_Fall from an elevation',
'InjuryMech_Fall to ground standing/walking/running',
'InjuryMech_Motor vehicle collision',
'InjuryMech_Object struck head - accidental',
'InjuryMech_Other mechanism', 'InjuryMech_Other wheeled crash',
'InjuryMech_Pedestrian struck by moving vehicle', 'InjuryMech_Sports',
'InjuryMech_Walked/ran into stationary object',
'High_impact_InjSev_High', 'High_impact_InjSev_Low',
'High_impact_InjSev_Moderate', 'High_impact_InjSev_No', 'Amnesia_verb_No',
'Amnesia_verb_Pre/Non-verbal', 'Amnesia_verb_Yes',
'LOCSeparate_No', 'LOCSeparate_Suspected', 'LOCSeparate_Yes',
'Seiz', 'ActNorm', 'HA_verb_No', 'HA_verb_Pre/Non-verbal', 'HA_verb_Yes',
'Vomit', 'Intubated', 'Paralyzed', 'Sedated', 'AMS', 'SFxPalp_No', 'SFxPalp_Unclear', 'SFxPalp_Yes',
'FontBulg', 'Hema', 'Clav', 'NeuroD', 'OSI', 'Drugs', 'AgeTwoPlus', 'Gender', 'outcome']
df_simp = df[simple_var_list + ["GCSGroup"]]
df_simp = df_simp.loc[df_simp["GCSGroup"] != 1]
df_simp = df_simp.drop(columns=["GCSGroup"])
df_simp_train, df_simp_tune, _ = Dataset().split_data(df_simp)
X_simp_train = df_simp_train.drop(columns=outcome_def)
y_simp_train = df_simp_train[outcome_def].values
X_simp_tune = df_simp_tune.drop(columns=outcome_def)
y_simp_tune = df_simp_tune[outcome_def].values
# -
# SANITY CHECK
df_simp.iloc[0, :]
# # Useful Functions and Parameters
# +
def fit_eval_boosted(model, X_train, y_train, X_val, y_val, title_str):
    """Fit a staged boosting model and plot its learning curves.

    Fits `model` on (X_train, y_train), then uses `staged_predict_proba`
    to trace accuracy and ROC-AUC on both splits as a function of the
    number of weak learners, plots those curves and the model's Gini
    feature importances, and returns the fitted model plus the metrics.

    NOTE(review): the importance bar plot labels bars with the
    module-level `simple_var_list`, so the labels are only meaningful
    when the passed X matrices use those columns — confirm for new data.
    """
    # Fit model
    model.fit(X_train, y_train)
    # Find accuracies on train/val sets
    # This takes ~2 minutes to run
    auc_train = []
    acc_train = []
    auc_val = []
    acc_val = []
    with warnings.catch_warnings():
        # Silence the per-stage warnings emitted during the staged scan.
        warnings.simplefilter("ignore")
        train_pred = list(model.staged_predict_proba(X_train))
        val_pred = list(model.staged_predict_proba(X_val))
        for tp in train_pred:
            auc_train.append(roc_auc_score(y_train, tp[:, 1]))
            acc_train.append((y_train == tp.argmax(axis=1)).mean())
        for vp in val_pred:
            auc_val.append(roc_auc_score(y_val, vp[:, 1]))
            acc_val.append((y_val == vp.argmax(axis=1)).mean())
    # Plot ACC/AUC as function of number of weak learners
    plt.figure()
    plt.suptitle(title_str + " Performance", y=1.02)
    plt.subplot(1, 2, 1)
    plt.plot(acc_train, label="Train ACC", color=color_list[0])
    plt.plot(auc_train, label="Train AUC", color=color_list[1])
    plt.xlabel("Number of Weak Learners")
    plt.legend()
    plt.subplot(1, 2, 2)
    plt.plot(acc_val, label="Val ACC", color=color_list[2])
    plt.plot(auc_val, label="Val AUC", color=color_list[3])
    plt.xlabel("Number of Weak Learners")
    plt.legend()
    plt.tight_layout()
    # Plot Feature Importances
    barplot(pd.Series(dict(zip(simple_var_list[:-1],
                               model.feature_importances_))).sort_values(ascending=False),
            False, title_str + " Feature Importance (Gini)"
            )
    return (model, {"acc_train" : acc_train, "auc_train" : auc_train, "acc_val" : acc_val, "auc_val" : auc_val})
# -
# # LogitBoost
# Fit LogitBoost model
# Note that n is the number of weak learners (we could in principle tune this)
lboost = LogitBoost(n_estimators=100, random_state=0)
lboost, lboost_perf = fit_eval_boosted(lboost, X_simp_train, y_simp_train, X_simp_tune, y_simp_tune, "(Simple Data) LogitBoost")
# Fit LogitBoost model
# Note that n is the number of weak learners (we could in principle tune this)
lboost_og = LogitBoost(n_estimators=100, random_state=0)
lboost_og, lboost_og_perf = fit_eval_boosted(lboost_og, X_train, y_train, X_tune, y_tune, "(Original Data) LogitBoost")
# # AdaBoost
# Fit LogitBoost model
# Note that n is the number of weak learners (we could in principle tune this)
aboost = AdaBoostClassifier(n_estimators=100, random_state=0)
aboost, aboost_perf = fit_eval_boosted(aboost, X_simp_train, y_simp_train, X_simp_tune, y_simp_tune, "(Simple Data) AdaBoost")
# Fit LogitBoost model
# Note that n is the number of weak learners (we could in principle tune this)
aboost_og = AdaBoostClassifier(n_estimators=100, random_state=0)
aboost_og, aboost_og_perf = fit_eval_boosted(aboost_og, X_train, y_train, X_tune, y_tune, "(Original Data) AdaBoost")
| rulevetting/projects/tbi_pecarn/notebooks/boosting_estimators.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# language: python
# name: python3
# ---
# + [markdown] id="view-in-github" colab_type="text"
# <a href="https://colab.research.google.com/github/sapienhwaker/A-Priori-Algorithm/blob/main/A_Priori.ipynb" target="_parent"><img src="https://colab.research.google.com/assets/colab-badge.svg" alt="Open In Colab"/></a>
# + [markdown] id="KTadYLtNjJ35"
# ## Project-1: Distributed Association Rule Mining
# ### Author: <NAME>
# + id="zLB5jvinjJ4A" colab={"base_uri": "https://localhost:8080/", "height": 51} outputId="54ddcd58-b6ad-4c05-f083-b76480a95ac8"
# !pip install pyspark
# + id="P7tyjFqFjg_6" colab={"base_uri": "https://localhost:8080/", "height": 34} outputId="60c5c8c0-a00d-4982-ff28-cd8a432554e1"
from google.colab import drive
drive.mount('/content/drive')
# + id="gqIDzmcV_isd"
import re
from pyspark import SparkContext
sc = SparkContext("local", "Distributed Association Rule Mining")
# + id="r_8hpHna_qL3" colab={"base_uri": "https://localhost:8080/", "height": 34} outputId="311a2447-b6bc-4da8-b95b-531d71f9d9fb"
file = "/content/drive/My Drive/BIGDATA Fall2020/browsing.txt"
# reading input file
fileRDD = sc.textFile(file)
# total Baskets count
print('Total number of baskets = ', fileRDD.count())
# + id="_og__tiWJAaC"
basketsRDD = fileRDD.map(lambda line: re.split(r'\W+', line.strip()))
#for i in range(0,10):
#print(basketsRDD.collect()[i])
# + id="wxOKIa_wouvP" colab={"base_uri": "https://localhost:8080/", "height": 34} outputId="9ed9f0f0-b1c3-4022-c611-ae0567c4f712"
# function to give index to every basket
def indexing(record):
    """Pair every item in a basket with that basket's index.

    `record` is a (items, basket_index) tuple as produced by
    zipWithIndex; returns a list of (item, basket_index) pairs.
    """
    items, basket_id = record
    return [(item, basket_id) for item in items]
#every basket will be numbered from 0, 1, 2, ....
#then a table will be created for every item.
#Where item will be the key and list of baskets in which the item is present will be the value
#later this table will be used to find most frequent item sets for different k values
indexedBasketsRDD = basketsRDD.zipWithIndex().map(indexing)
tempMapRDD = indexedBasketsRDD.flatMap(lambda x: x).map(lambda x: (x[0],[x[1]]))
mapRDD = tempMapRDD.reduceByKey(lambda list1,list2: list1 + list2).map(lambda x: (x[0], frozenset(x[1])))
#for i in range(0,10):
#print(mapRDD.collect()[i])
print('Total Records = ', mapRDD.count())
# + id="4m5e-2kMqiWZ"
dictionaryRDD = mapRDD.collectAsMap()
broadDictionaryRDD = sc.broadcast(dictionaryRDD)
# + [markdown] id="tQpm-eXpMSbD"
# ***Finding frequent itemsets***
# + id="1hJZ4grDAp-f"
support = 85
k = 1
wordsRDD = fileRDD.flatMap(lambda line: re.split(r'\W+', line.strip()))
singleFrequentItemsRDD = wordsRDD.map(lambda x: (x,1)).reduceByKey(lambda a,b: a+b).filter(lambda x: x[1] >= support)
#qualifiers is a list which will collect all the qualified frequent item sets which has k=1,2,3,4 and support >= 85
qualifiers = []
qualifiers.append(singleFrequentItemsRDD.collect())
#for i in range(0,10):
#print(singleFrequentItemsRDD.collect()[i])
#print(f'Unique items with support >= 85 : ', itemsRDD.count())
# + [markdown] id="fN7RVZzPMXOD"
# ***Frequent Itemsets for k = 2,3,4***
# + id="bUiekYUIdE9I"
# function for getting eligible item sets depending on the given support value
support = 85
def eligibleItems(x):
    """Return (itemset, support_count) if the candidate itemset `x` meets
    the minimum support, else None (filtered out downstream).

    The support count is the size of the intersection of the basket-id
    sets of all items in `x`, looked up in the broadcast item->baskets
    dictionary `broadDictionaryRDD`.
    """
    first_item = True
    for item in x:
        if first_item:
            # Seed the running intersection with the first item's baskets.
            itemset = broadDictionaryRDD.value[item]
            first_item = False
        else:
            itemset = itemset.intersection(broadDictionaryRDD.value[item])
    if len(itemset) >= support:
        return x,len(itemset)
    return
# + id="zGe6WkvNvwFK"
# function for getting frequent itemsets
def frequent_itemset(prev, frequent_items, k, qualifiers):
    """Recursively compute frequent itemsets for k = 2, 3, 4.

    `prev` holds the frequent (k-1)-itemsets and `frequent_items` the
    frequent single items. Qualifying k-itemsets (those passing the
    support filter in `eligibleItems`) are appended to `qualifiers` as a
    side effect, one list per k.
    """
    if k == 2:
        # Pairs of single items: the cartesian product yields 2-tuples directly.
        cartesianRDD = prev.cartesian(frequent_items).map(lambda x: frozenset(x))
    else:
        # Extend each (k-1)-tuple with one more frequent single item.
        cartesianRDD = prev.cartesian(frequent_items).map(lambda x: frozenset(x[0]+tuple([x[1]])))
    # frozenset collapses duplicates, so candidates with repeated items
    # shrink below size k and are dropped here.
    eligibleRDD = cartesianRDD.filter(lambda x: len(x) == k).distinct().map(lambda x: tuple(x))
    #print(f'Total candidate itemsets (k = {k}) : ', eligibleRDD.count())
    # eligibleItems returns None below the support threshold; filter them out.
    mulitpleItemsRDD = eligibleRDD.map(eligibleItems).filter(lambda x: x)
    #if k == 2:
    #twoFrequentItemsRDD = sc.parallelize(mulitpleItemsRDD.collect());
    #if k == 3:
    #threeFrequentItemsRDD = sc.parallelize(mulitpleItemsRDD.collect());
    #if k == 4:
    #fourFrequentItemsRDD = sc.parallelize(mulitpleItemsRDD.collect());
    qualifiers.append(mulitpleItemsRDD.collect())
    #print(f'Total frequent itemsets (k = {k}) : ', mulitpleItemsRDD.count())
    prev = mulitpleItemsRDD.map(lambda x: x[0])
    prev.persist()
    if k < 4:
        frequent_itemset(prev, frequent_items, k+1, qualifiers)
    else:
        return
singleFrequentItemsKeysRDD = singleFrequentItemsRDD.map(lambda x: x[0])
singleFrequentItemsKeysRDD.persist()
frequent_itemset(singleFrequentItemsKeysRDD, singleFrequentItemsKeysRDD, 2, qualifiers)
# + [markdown] id="G7-EHEtf12mr"
# ***Association rule implementation***
# + id="7-gRk_a51-Jq"
import itertools as it
c = 0.9
#function to get the association
#seperate: indicates how many parameters will be there on the left hand side of the association rule
#x is single record and map is a dictionary
def get_association(x,map,seperate,k):
    """Generate association rules from one frequent k-itemset.

    x: (itemset_tuple, support_count); map: dict from frozenset of
    left-hand-side items to their support count; seperate: number of
    items on the rule's left-hand side; k: itemset size.
    Returns (x, rules), each rule being (lhs, rhs, confidence_percent),
    keeping only rules with confidence >= the module-level threshold c;
    returns None when no rule qualifies.

    NOTE(review): `map` shadows the builtin and `seperate` is a typo of
    `separate`; both are kept to preserve the existing call sites.
    """
    li = []
    for item in it.combinations(x[0],seperate):
        if k == 2:
            # Single-item LHS: look up the support of that one element.
            confidence = x[1]/map[frozenset({item[0]})]
        else:
            confidence = x[1]/map[frozenset(item)]
        if confidence >= c:
            li.append((item, tuple(set(x[0])-set(item)), confidence*100))
    if li:
        return x,li
    return
# converting rdd to the dictionary/map
singleFrequentItemsMapRDD = singleFrequentItemsRDD.map(lambda x: (frozenset({x[0]}),x[1])).collectAsMap()
# qulifier list contains list at index 1 which is a twofrequent items rdd
twoFrequentItemsRDD = sc.parallelize(qualifiers[1]).map(lambda x: (frozenset(x[0]),x[1]))
twoFrequentItemsMapRDD = twoFrequentItemsRDD.collectAsMap()
# qulifier list contains list at index 2 which is a threefrequent items rdd
threeFrequentItemsRDD = sc.parallelize(qualifiers[2]).map(lambda x: (frozenset(x[0]),x[1]))
threeFrequentItemsMapRDD = threeFrequentItemsRDD.collectAsMap()
# qulifier list contains list at index 2 which is a fourfrequentitems rdd
fourFrequentItemsRDD = sc.parallelize(qualifiers[3])
twoItemsConfiedenceRDD = twoFrequentItemsRDD.map(lambda x : get_association(x,singleFrequentItemsMapRDD,1,2)).filter(lambda x: x)
#print('Two items with confidence 90 or greater: ', twoItemsConfiedenceRDD.count())
threeItemsConfiedenceRDD = threeFrequentItemsRDD.map(lambda x : get_association(x,twoFrequentItemsMapRDD,2,3)).filter(lambda x: x)
#print('Three items with confidence 90 or greater: ', threeItemsConfiedenceRDD.count())
fourItemsConfiedenceRDD = fourFrequentItemsRDD.map(lambda x : get_association(x,threeFrequentItemsMapRDD,3,4)).filter(lambda x: x)
#print('Four items with confidence 90 or greater: ', fourItemsConfiedenceRDD.count())
#storing output to the text file
output = open("/content/drive/My Drive/BIGDATA Fall2020/association_rules.txt", "a")
# + id="8UBWTqOuLBP4" colab={"base_uri": "https://localhost:8080/", "height": 34} outputId="0a1d9ced-84be-46f3-b386-e8e0ab317e5f"
output.write("Association rule for two items\n\n")
for val in twoItemsConfiedenceRDD.collect():
#print(val[1][0][0][0], '>>', val[1][0][1][0], ' {:.2f}'.format(val[1][0][2]), '%')
line = val[1][0][0][0] + ' >> ' + val[1][0][1][0] + ' {:.2f}'.format(val[1][0][2]) + '%\n'
output.write(line)
output.write("\n\n\n")
# + id="qPuf7WpPK72p" colab={"base_uri": "https://localhost:8080/", "height": 34} outputId="bfacdfaf-2b87-4945-c3a7-fc4c6ee00a88"
output.write("Association rule for three items\n\n")
for val in threeItemsConfiedenceRDD.collect():
line = str(val[1][0][0]) + ' >> ' + val[1][0][1][0] + ' {:.2f}'.format(val[1][0][2]) + '%\n'
output.write(line)
output.write("\n\n\n")
# + id="lBc64nr_4KI9"
output.write("Association rule for four items\n\n")
for val in fourItemsConfiedenceRDD.collect():
line = str(val[1][0][0]) + ' >> ' + val[1][0][1][0] + ' {:.2f}'.format(val[1][0][2]) + '%\n'
output.write(line)
output.close()
| A_Priori.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# name: python3
# ---
# + [markdown] id="view-in-github" colab_type="text"
# <a href="https://colab.research.google.com/github/chetnashahi/100daysofML/blob/master/DPhi-%20Simple%20Linear%20Regression%20Example2.ipynb" target="_parent"><img src="https://colab.research.google.com/assets/colab-badge.svg" alt="Open In Colab"/></a>
# + [markdown] id="Df1S75-fwfrn"
# # **Simple Linear Regression:** DPhi
# The objective is to use linear regression to understand how years of experience impact Salary.
# + id="S-AXYYB_wdw3"
# importing packages
import numpy as np # to perform calculations
import pandas as pd # to read data
import matplotlib.pyplot as plt # to visualise
from sklearn import linear_model
# + id="Q8h1gyXNwjfV"
#Loading the salary dataset
data = pd.read_csv('https://raw.githubusercontent.com/dphi-official/Linear_Regression_Introduction/master/Salary_Data.csv')
# + colab={"base_uri": "https://localhost:8080/", "height": 202} id="bEGtZW3Awli9" outputId="5c2073dc-9375-41ed-a89a-eefa8ed96d85"
data.head()
# + [markdown] id="mrHSX7HYOsUl"
# # **Plotting the Data**
# Let’s plot our data points on a 2-D graph to eyeball our dataset(get a rough overview) and see if we can manually find any relationship between the data.
# + colab={"base_uri": "https://localhost:8080/", "height": 283} id="uffKTC4nNha6" outputId="14d06980-fc53-471b-f186-89274e58d30e"
# Scatter plot helps in visualising the data distribution
plt.plot(data.YearsExperience, data.Salary,'rx')
# + [markdown] id="V9CvKBMMO_-i"
# As you can see, there is a clear relationship between the years of experience and salary.
#
# # **Setting variables**
# Our next step is to divide the data into “attributes” and “labels” or as you've already known as input and target variables.
#
# In our dataset, we only have two columns. We want to predict the Salary depending upon the Years of Experience recorded. Therefore our attribute set will consist of the “YearsExperience” column which is stored in the X variable, and the label will be the “Salary” column which is stored in y variable.
# + id="7U5ad89zwxbo"
X=data['YearsExperience'].values.reshape(-1,1)
Y=data['Salary'].values.reshape(-1,1)
# + [markdown] id="IT6wumXdPb-e"
#
# # **Splitting the data**
# Next, we split 70% of the data into the training set and 30% into the test set using the code below (note `test_size=0.3`). The test_size variable is where we actually specify the proportion of the test set.
#
# By passing our X and y variables into the train_test_split method, we are able to capture the splits in data by assigning 4 variables to the result.
# + id="Z_qkdhjtPn9F"
# import SK Learn train test split
from sklearn.model_selection import train_test_split
# Assign variables to capture train test split output.
# NOTE(review): test_size=0.3 gives a 70/30 split (the markdown above says
# 80/20); random_state=47 makes the shuffle reproducible.
X_train, X_test, y_train, y_test = train_test_split(X, Y, test_size=0.3, random_state=47)
# + [markdown] id="PL5lV8WPP3SD"
# # **Understanding the working of Linear Regression**
# The term “linearity” in algebra refers to a linear relationship between two or more variables. If we draw this relationship in a two-dimensional space (between two variables), we get a straight line.
#
# If we plot the independent variable (x) on the x-axis and dependent variable (y) on the y-axis, linear regression gives us a straight line that "best fits" the data points.It’s impossible to connect all the marks with a straight line, so you use a best fitting line.
#
# The equation for this line would be the result of your simple linear regression(Remember the equation y= b0 + b1*x that we just derived?). The regression finds the best fitting line.
#
# Now, how do you find the best fitting line? Since our data points(values of x and y) will remain constant for a particular dataset, we can only alter b0 and b1.
#
# Our objective is to find the values of b0 and b1 that will best fit this data.
#
# These 2 variables/coefficients are actually called **hyperparameters**. In machine learning, a hyperparameter is a parameter whose value is used to control the learning process. And we must always try to find some optimal parameters while building a machine learning model.
#
# This line is your **regression model**.
#
# To perform Linear Regression quickly, we will be using the library scikit-learn. If you don’t have it already you can install it using pip:
#
# pip install scikit-learn
# # **Training our model**
# After splitting the data into training and testing sets, finally, the time is to train our algorithm. Firstly, importing of sklearn.linear_model is required for us to access LinearRegression. It then needs to be instantiated and model fit to our training data. This is seen below.
# + id="ip87BcemQQvh" outputId="b8248acc-2eeb-408e-b91b-a8e079b76b3d" colab={"base_uri": "https://localhost:8080/"}
# Instantiate an ordinary-least-squares linear regression model.
reg= linear_model.LinearRegression()
#fit model to our training data i.e learn coefficients b0 (intercept) and b1 (slope)
reg.fit(X_train, y_train)
# + [markdown] id="Kfx4LGj_Qdu9"
#
# # **Interpreting Coefficients**
# The coefficients(b0 and b1) will allow us to model our equation with values and find the best fit line. The linear_regressor variable (assigned to a LinearRegression object), is able to have the intercept and coefficients extracted, using the code below.
# + colab={"base_uri": "https://localhost:8080/"} id="eehn7NHcx_s5" outputId="32f90152-bd02-4fad-e1be-fb8bd66ec485"
# prints y-intercept (b0 in y = b0 + b1*x)
print(reg.intercept_)
# prints the coefficient (slope b1: salary change per extra year of experience)
print(reg.coef_)
# + [markdown] id="v8wChMhfS6UO"
# The intercept will be your b0 value; and coefficient will be b1.
#
# # **Making predictions based on your model**
# Now that we have trained our algorithm, it’s time to make some predictions. To do so, we will use our test data and see how accurately our algorithm predicts the salaries.
#
# Making predictions based on your model is as simple as using the code below: passing the predict method your test data. This will return predicted values of y given the new test X data.
# + id="97hVrjPRyHWy"
y_pred = reg.predict(X_test) # make predictions for the held-out test inputs
# + colab={"base_uri": "https://localhost:8080/", "height": 279} id="yzn7znNhxd7D" outputId="fa41bd46-f096-429c-93b7-7916bb15a179"
# Plot the held-out test points (red x) and the fitted regression line
# (black) on the same axes.
plt.xlabel('Years of Experience')
plt.ylabel('Salary')
plt.plot(X_test, y_test,'rx')
plt.plot(X_test, y_pred, color='black')
plt.show()
# + [markdown] id="ki5LjnsQTYwX"
# As you can see, the algorithm has drawn a line that passes through the maximum test data points and has the minimum distance from the others. This line is known as the "best-fit" or the regression line.
#
# Since this line has a positive slope, we can say that the salary increases as no. of years of experience increase.
# + [markdown] id="iEJCMo0zTatF"
# Using this line, you can even compute the salaries for the years of experience not present in the dataset by finding the corresponding value of y on the line.
# + [markdown] id="Z9J5lSOXTlf-"
# # **Model Evaluation**
# There are three primary metrics used to evaluate linear models. These are: Mean absolute error (MAE), Mean squared error (MSE), or Root mean squared error (RMSE).
# + id="ZCis1UCTTgbX" outputId="933c32e5-2197-47d4-9c52-7e3576179ad8" colab={"base_uri": "https://localhost:8080/"}
# import metrics library
from sklearn import metrics
# print result of MAE (mean absolute error, in salary units)
print(metrics.mean_absolute_error(y_test, y_pred))
#print result of MSE (mean squared error)
print(metrics.mean_squared_error(y_test, y_pred))
#print result of RMSE (root mean squared error = sqrt of MSE)
print(np.sqrt(metrics.mean_squared_error(y_test, y_pred)))
| DPhi- Simple Linear Regression Example2.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# language: python
# name: python3
# ---
# +
import pandas as pd
import sys, os
import numpy as np
import matplotlib.pyplot as plt
import seaborn as sns
# Add the parent directory to sys.path so the local `scripts` package
# resolves when this notebook is run in place.
module_path = os.path.abspath(os.path.join('..'))
if module_path not in sys.path:
    sys.path.append(module_path)
from scripts import project_functions
# Load and clean the raw Elo data, persist the processed copy, and fetch
# shared lookup lists (e.g. current team names) used below.
# NOTE(review): behavior of load_and_process/variables is defined in
# scripts/project_functions — confirm the returned schema there.
df = project_functions.load_and_process('../../data/raw/mlb_elo.csv')
df.to_csv(r'../../data/processed/mlb_game_data.csv')
lists = project_functions.variables()
# +
# NOTE(review): `dft` is assigned but never used again — candidate for removal.
dft = df
# Keep games where the home (resp. away) team is a current franchise,
# and persist each subset to its own processed CSV.
current_home_df = df[(df["home_team"].isin(lists["current_teams"]))]
current_away_df = df[(df["away_team"].isin(lists["current_teams"]))]
current_home_df.to_csv(r'../../data/processed/mlb_home_elo.csv')
current_away_df.to_csv(r'../../data/processed/mlb_away_elo.csv')
# +
# World Series games, analysed by each game winner's starting-pitcher rating
# adjustment. Produces df3/df4 and ../../data/processed/ws_pitcher_adj.csv.
# NOTE(review): assumes "w" in `playoff` marks World Series games and "s"
# marks untracked pitcher ratings — confirm in load_and_process.
df3 = df[df["playoff"] == "w"]
# Keep only the columns needed for the pitcher comparison.
df4 = df3.drop(["neutral", "home_prob", "away_prob", "home_post", "away_post", "home_pre", "away_pre"], axis="columns")
# Drop games whose starting-pitcher ratings were not tracked ("s").
# (The original applied this identical filter twice; once is enough.)
df4 = df4[df4["home_pitcher_rating"] != "s"]
# Positive compare_score => the home team won the game.
df4["compare_score"] = df4["home_score"] - df4["away_score"]
# Positive compare_pitchers => the home starter had the better adjustment.
df4["compare_pitchers"] = df4["home_pitcher_rating"] - df4["away_pitcher_rating"]
df4["home_win"] = df4["compare_score"] > 0
df4["away_win"] = df4["compare_score"] < 0
# Adjusted rating of the winning starting pitcher.
df4["win_pitch_adj"] = np.where(df4["home_win"], df4["home_pitcher_rating"], df4["away_pitcher_rating"])
# Rating gap from the winner's perspective: negative means the winning team's
# starter was the weaker of the two.
df4["pitcher_difference"] = np.where(df4["home_win"], df4["compare_pitchers"], -df4["compare_pitchers"])
# Adjusted rating of the losing starting pitcher; sort best-to-worst by it.
df4["lose_pitch_adj"] = np.where(df4["home_win"], df4["away_pitcher_rating"], df4["home_pitcher_rating"])
df4 = df4.sort_values(by="lose_pitch_adj", ascending=False)
# Fix: write the CSV once, after all columns exist. The original wrote the
# same path twice; the first write was immediately superseded.
df4.to_csv(r'../../data/processed/ws_pitcher_adj.csv')
print(df4)
# -
# # Describing the dataset
#
# - Who
# - What
# - Where
# - When
# - How
# - Why
# +
# df.describe(include='all')
#df4.describe("win_pitch_adj")
# -
# ## First Things First
#
# ### Let's isolate the data to only include world series games
#
# >When doing this, note that the home and away starting pitchers' adjustments to their team's rating were not tracked before 1913, so that data will be removed
#
# >we will narrow down the selected games using the above criteria with the following code
# ### This code is used earlier but just shown now
#
# df3 = df[(df["playoff"] == "w") ]
# df4 = df4.drop(["neutral", "home_prob", "away_prob", "home_post", "away_post", "home_pre", "away_pre"], axis="columns")
# df4 = df4[(df4["home_pitcher_rating"] != "s") ]
#
# print(df4)
#
#
# ## Part 2
#
# > Here we will show the average pitcher's adjustment to the team rating for those that won the game. A positive number means that the pitcher provides a boost to their team, while a negative number indicates their team has to overcome some bad pitching to win
#
# Winning pitchers sorted from worst to best adjusted rating.
dfj = df4.sort_values(by="win_pitch_adj", ascending=True)
print(dfj)
# Histogram of the winning pitcher's adjusted rating (bin width 2.5).
sns.displot(df4, x="win_pitch_adj", binwidth=2.5)
# Fix: corrected the "Picher's adjuested" typos in both label strings.
plt.xlabel("Winning Pitcher's adjusted rating")
plt.title("Winning Pitcher's adjusted rating histogram")
# >as you can see the majority of the teams to win have pitchers who have a positive impact on their team
# >we are not interested in those pitchers, we want to find the worst adjusted pitcher rating to win a World series game. That dot is way in the bottom left
# >next we will show the biggest difference between the starting pitchers' adjustments to their team and find which overcame the biggest gap to win
# +
# Scatter of the winning pitcher's adjusted rating (y) against the gap
# between the two starters' adjustments (x), coloured by that gap.
sns.set_theme(style="white")
sp = sns.color_palette("viridis", as_cmap=True)
# relplot is figure-level: it creates and sizes its own figure (height=6),
# so the old plt.figure(figsize=...) call only produced a stray blank figure
# and has been removed.
sns.relplot(x="pitcher_difference", y="win_pitch_adj",
            sizes=400, alpha=.7, palette=sp, hue = "pitcher_difference",
            height=6, data=df4)
# Fix: the axis labels were swapped relative to the x/y columns above.
plt.xlabel("Difference in opposing pitchers\n positive is for winning pitcher\n having higher adjusted rating", size=15)
plt.ylabel("Winning pitcher's adjusted rating", size=15)
plt.title("Winning pitcher adjusted rating vs \nDifference in opposing pitcher's adjusted rating", size=18)
plt.show()
# -
# Histogram of the gap between the opposing starting pitchers' adjustments.
sns.displot(df4, x="pitcher_difference", binwidth=2.5)
# Fix: completed the truncated label text ("had a big a)") and corrected the
# "differnce" typo in the title.
plt.xlabel("Difference in opposing pitchers adjusted ratings (0 means the pitchers are even; further to the right means the winning pitcher had a bigger advantage)")
plt.title("Opposing pitchers difference in Adjusted ratings", size=18)
# ## Now this is getting a little more interesting
#
# as you can see the majority have an advantage in their pitcher's adjustment to their team vs the opponent's pitcher. The top right has teams with great pitching vs opponents whose pitching puts them at a disadvantage, so it's no surprise that the majority of these are above 0 in pitcher difference.
# Once again those guys are the norm and not that interesting; the more interesting cases are the ones furthest to the bottom left. The furthest left is the biggest penalty in adjusted pitcher ratings to win a World Series Game.
#
# > Using the following code we can sort by that difference to find that game
# +
# Sort ascending so the most negative winner-minus-loser pitcher gaps
# (biggest deficits overcome to win) appear first, then drop helper columns.
df5 = df4.sort_values(by="pitcher_difference", ascending = True)
df5 = df5.drop(["compare_pitchers", "playoff"], axis="columns")
print(df5)
# Re-sort descending before persisting the processed table.
df5 = df5.sort_values(by="pitcher_difference", ascending = False)
df5.to_csv(r'../../data/processed/ws_pitcher_adj_difference.csv')
# print(df5)
# -
| analysis/Levi_Kolkind/M2.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# language: python
# name: python3
# ---
# # Funciones de Hash
# La librería PyCryptoDome tiene funciones de hash para varios algoritmos. Vamos a cargar algunas de ellas. La lista completa está en: https://pycryptodome.readthedocs.io/en/latest/src/hash/hash.html
#
# (Recuerda: MD5 está obsoleto y roto, no se tiene que utilizar en aplicaciones reales)
from Crypto.Hash import MD5, SHA256, SHA512, SHA3_256
# Tradicionalmente, una función de hash se va alimentando de datos (`update()`) hasta que necesitas el hash (también llamado "digest"). Este procedimiento de `update()` funciona, por ejemplo, si tienes que calcular el hash de un conjunto de mensajes o lees los bytes bloque a bloque de un archivo grande.
# Feed the hasher incrementally; the digest covers the concatenation of all
# update() chunks, i.e. b'hola' + b'mundo' == b'holamundo'.
hasher = SHA256.new()
hasher.update(b'hola')
hasher.update(b'mundo')
hash1 = hasher.hexdigest()
print(hash1)
# Si ya conoces el mensaje puedes hacerlo todo en una sola línea como en el ejemplo siguiente. Observa que el hash coincide con el calculado antes.
# Same digest computed in one shot over the full message.
hash2 = SHA256.new(data=b'holamundo').hexdigest()
print(hash2)
print(hash1 == hash2)  # True: chained update() equals hashing the whole message
# ¿Qué pasa si cambiamos ligeramente el mensaje? Por ejemplo, añadimos un espacio, o ponemos letras en mayúsculas, o signos de admiración...
# A one-character change (added space) yields a completely different digest.
hash2 = SHA256.new(data=b'hola mundo').hexdigest()
print(hash2)
print(hash1 == hash2)  # False
# Preguntas:
#
# - Prueba varios cambios en el mensaje "hola mundo" y apunta los hashes, verás que cambian totalmente por muy pequeños que sean los cambios: algunas mayúsculas, números, signos de puntuación...
# - ¿Cuántos mensajes existen que tengan el mismo hash que "hola mundo"?
# - ¿Podrías encontrar alguno de estos mensajes que tengan el mismo hash que "hola mundo"?
# - Calcula el valor de hash de un archivo de texto con el texto "hola mundo" en tu ordenador desde línea de comandos. ¿Coincide con el hash anterior?
# - Powershell en Windows: `Get-FileHash NOMBREDEARCHIVO`
# - Linux/OSX: `sha256sum NOMBREDEARCHIVO`
# - Cambia el nombre del archivo y calcula su hash. ¿Ha cambiado el hash al cambiar el nombre del archivo?
# # Tamaño de un resumen hash
# Fíjate: el hash SHA256 siempre tiene la misma longitud, sea como sea de largo el texto de la entrada.
#
# - SHA256: longitud 256 bits
# - SHA512: longitud 512 bits
# The digest length is fixed (256 bits for SHA256) no matter how short or
# long the input message is.
print(SHA256.new(data=b'hola').hexdigest())
print(SHA256.new(data=b'hola mundo').hexdigest())
print(SHA256.new(data=b"""Cryptographic hash functions take arbitrary binary strings as input,
and produce a random-like fixed-length output (called digest or hash value).
It is practically infeasible to derive the original input data from the digest. In other
words, the cryptographic hash function is one-way (pre-image resistance).
Given the digest of one message, it is also practically infeasible to find another
message (second pre-image) with the same digest (weak collision resistance).
Finally, it is infeasible to find two arbitrary messages with the same digest
(strong collision resistance).
Regardless of the hash algorithm, an n bits long digest is at most as secure as a
symmetric encryption algorithm keyed with n/2 bits (birthday attack).
Hash functions can be simply used as integrity checks. In combination with a
public-key algorithm, you can implement a digital signature.""").hexdigest())
# En los ejemplos anteriores hemos utilizado la función `hexdigest`, que es como tradicionalmente se presentan los hashes para poder imprimirlos. Esa es la representación hexadecimal de un número. Por ejemplo, el número 14 (decimal) se representa como '0e' (hexadecimal) y el número 254 (decimal) como 'fe' (hexadecimal). Fíjate: 8 bits son un byte, es decir, un número entre 0 y 255 (en decimal), es decir, un número entre 00 y ff (en hexadecimal). **Un byte son dos caracteres hexadecimales**.
#
# Podemos acceder a la cadena binaria de bytes, sin pasarla a hexadecimal, utilizando la función `digest()` en vez de `hexdigest()`. Pero no podríamos imprimirla.
#
# Así que:
#
# - El resumen SHA256 es de 256 bits, sea como sea el tamaño de la entrada
# - 256 bits son **32 bytes**
# - Que se representan como **64 caracteres hexadecimales**
# - Pero ambas representaciones son equivalentes. Simplemente, una podemos imprimirla y la otra no. A veces querremos imprimir hashes y por eso es común que los veamos en hexadecimal
# +
# digest() returns the raw 32 bytes; hexdigest() the same value as a
# 64-character hexadecimal string (2 hex chars per byte).
hasher = SHA256.new(data=b'hola')
hash_bytes = hasher.digest()
hash_hexa = hasher.hexdigest()
print(f'Valor de hash SHA256 en binario. Longitud={len(hash_bytes)} bytes Valor={hash_bytes}')
print(f'Valor de hash SHA256 en hexadecimal. Longitud={len(hash_hexa)} caracteres Valor={hash_hexa}')
# -
# Por tradición, se ha preferido codificar los hashes en hexadecimal y no en base64, que hubiese sido otra opción perfectamente válida.
# # Velocidades de cálculo de hash
# Vamos a calcular cuántos hashes podemos calcular por segundo.
#
# OJO: este cálculo simplemente nos vale para comparar algoritmos entre sí. Jupyter no tiene acceso a la GPU de tu ordenador, ni Pycryptodome está pensada para gran velocidad. Si exprimes tu ordenador con programas externos seguro que obtendrás números mucho mayores.
#
# La ejecución de estas líneas puede llevar desde varios segundos a un minuto. Fíjate en que el bloque empieza con `In[*]` mientras está haciendo cálculos.
# +
import timeit
# Hashes to compute per algorithm; large enough for second-scale timings.
NUM=500000
print(f'Calculando {NUM} hashes en MD5, SHA256, SHA512, SHA3_256...')
# timeit.timeit returns total seconds for `number` calls of the lambda,
# so throughput = NUM / elapsed (hashes per second).
time_md5 = timeit.timeit(lambda: MD5.new(data=b'hola').hexdigest(), number=NUM)
time_sha256 = timeit.timeit(lambda: SHA256.new(data=b'hola').hexdigest(), number=NUM)
time_sha512 = timeit.timeit(lambda: SHA512.new(data=b'hola').hexdigest(), number=NUM)
time_sha3 = timeit.timeit(lambda: SHA3_256.new(data=b'hola').hexdigest(), number=NUM)
print(f'MD5: spent={time_md5} s speed={NUM / time_md5} H/s')
print(f'SHA256: spent={time_sha256} s speed={NUM / time_sha256} H/s')
print(f'SHA512: spent={time_sha512} s speed={NUM / time_sha512} H/s')
print(f'SHA3_256: spent={time_sha3} s speed={NUM / time_sha3} H/s')
# -
# Preguntas:
#
# - ¿Cuál de los algoritmos es más rápido? ¿Cómo afecta doblar el número de bits (es decir, pasar de 256 a 512 bits)?
# - Calcula el hash SHA-256 y SHA-512 de un archivo de unos 500MB en tu ordenador (por ejemplo, una película) ¿Cuánto tiempo le lleva?
# ## Proof of work
#
# Vamos a programar una "*Proof of work*" (*pow*) muy sencilla inspirada en [Hashcat](https://en.wikipedia.org/wiki/Hashcat), como bitcoin: "encuentra un número tal que al juntarlo a un texto y calcular su hash, el hash empiece por un número determinado de ceros".
#
# La idea es que no se aceptará un correo electrónico, nuevo bloque bitcoin... lo que sea, si el emisor no incluye una "proof of work" junto con el mensaje. Es decir, que pruebe que ha estado X minutos buscando el contador que resulte en un hash correcto. De esta manera se limita el número de mensajes por minuto que puede emitir una persona. Fíjate que **encontrar** el contador es un proceso costoso, pero **comprobar** que el contador es correcto es muy rápido: solo tienes que calcular el hash y mirar si empieza con el número correcto de ceros.
#
# Nosotros vamos a utilizar el mensaje "hola" solo para ilustrar. Bitcoin funciona de una manera similar, pero el "mensaje" en bitcoin es un bloque con descripciones de transacciones y sus firmas electrónicas.
#
# Una propuesta de función (poco eficiente) sería así:
def pow(msg, effort):
    """Find a toy proof-of-work for *msg*.

    Returns (blk, h) where blk is f'{msg}:{ctr}'.encode() for the smallest
    counter ctr >= 0 whose SHA256 hex digest h starts with *effort* '0'
    characters. effort=0 returns immediately with ctr=0.

    NOTE: this name shadows the builtin ``pow``; it is kept for
    compatibility with the demo calls below.
    """
    target = '0' * effort  # required hex prefix of the digest
    ctr = 0
    while True:
        # Single place where the candidate block and its digest are built
        # (the original duplicated these two lines before and inside the loop).
        blk = f'{msg}:{ctr}'.encode()
        h = SHA256.new(data=blk).hexdigest()
        if h.startswith(target):
            return blk, h
        ctr += 1
# Y vamos a probar con dos esfuerzos: que el hash "MSG:CONTADOR" empiece por solo un cero, o que empiece por dos ceros.
# Effort 1 returns almost instantly; effort 5 takes noticeably longer
# (each extra leading hex zero multiplies the expected tries by 16).
print(pow('hola', 1))
print(pow('hola', 5))
# ¿Con qué esfuerzo tu PC necesita (más o menos) 30 segundos para encontrar el contador?
# # Firma digital de un documento
# Vamos a aprovechar lo que ya sabemos de cifrado asimétrico y hashes para ver cómo funciona una firma digital.
# ## Alice firma un documento
document = b"""Cryptographic hash functions take arbitrary binary strings as input,
and produce a random-like fixed-length output (called digest or hash value).
It is practically infeasible to derive the original input data from the digest. In other
words, the cryptographic hash function is one-way (pre-image resistance).
Given the digest of one message, it is also practically infeasible to find another
message (second pre-image) with the same digest (weak collision resistance).
Finally, it is infeasible to find two arbitrary messages with the same digest
(strong collision resistance).
Regardless of the hash algorithm, an n bits long digest is at most as secure as a
symmetric encryption algorithm keyed with n/2 bits (birthday attack).
Hash functions can be simply used as integrity checks. In combination with a
public-key algorithm, you can implement a digital signature."""
# Vamos a generar un par de claves RSA para Alice: una pública `alice_pk` y otra privada `alice_sk`. Recuerda: la clave pública la conoce todo el mundo, la clave privada solo la conoce Alice. Ya veremos cómo se distribuye esa clave pública.
#
# NOTA: En un entorno real esto se hace mucho antes de firmar: ¡el par de claves debería estar preparado y la clave pública distribuida desde meses antes de la firma! Veremos esto en el tema de PKI
# Alice's 2048-bit RSA key pair: the private key stays with Alice only,
# the public key is distributed to everyone.
from Crypto.PublicKey import RSA
alice_sk = RSA.generate(2048) # Alice's secret (private) key
alice_pk = alice_sk.publickey() # Alice's public key
# PyCryptodome ya incluye un módulo para firmar usando las recomendaciones PKCS1. Vamos a aprovechar el módulo, para aprender buenas costumbres y porque PyCryptodome no nos deja utilizar RSA de forma insegura.
#
# Ese módulo de firma:
#
# - Calcula el hash del documento utilizando el hasher que le pasemos (que será SHA256)
# - Cifra el hash del documento utilizando la clave privada de Alice
# - Todo lo hace siguiendo las recomendaciones PKCS1
# - La firma la codifica en base64, para que podamos verla por pantalla (esto no es necesario en realidad)
#
# Finalmente, Alice enviaría en un mensaje el documento y su firma. En realidad, lo más probable es que Alice además cifre el documento utilizando algún tipo de cifrado simétrico como AES para proteger su confidencialidad, pero vamos a obviar esa parte en este ejercicio.
#
# NOTA: dado que se necesita la clave privada de Alice para firmar, **solo Alice puede generar esta firma de este documento ya que solo ella conoce su clave privada**.
# +
from Crypto.Signature import pkcs1_15
from base64 import b64encode, b64decode
# Sign: hash the document with SHA256, then sign that hash with Alice's
# private key per PKCS#1 v1.5. Only the private-key holder can produce this.
hasher = SHA256.new(data=document)
signature = pkcs1_15.new(alice_sk).sign(hasher)
# Message = document plus the signature (base64 only to make it printable).
msg = dict(document=document, signature=b64encode(signature))
print(msg)
# -
# Pregunta:
#
# - ¿Por qué crees que Alice cifra **solo** el hash del mensaje con RSA, en vez de cifrar directamente **todo** el mensaje con RSA?
# ## Bob verifica la firma de Alice
#
# Bob recibe el mensaje `msg`, que incluye el documento y la firma de Alice, y ya conoce la clave pública de alice `alice_pk` de alguna manera (ver tema PKI)
#
# Así que Bob hace el proceso inverso:
#
# - Calcula el hash SHA256 del documento recibido
# - Decodifica el base64 y descifra la firma recibida utilizando la clave pública de Alice
# - Todo lo hace siguiendo las recomendaciones PKCS1
#
# Como curiosidad, la librería PyCryptodome lanza un error cuando la firma no es válida, y no hace nada si es correcta.
#
# Si la verificación de la firma con la clave pública de Alice es correcta, entonces **Bob sabe que el documento lo ha enviado Alice, y no puede haberlo enviado nadie más**.
# +
# Verify: recompute the received document's hash and check it against the
# signature with Alice's *public* key. verify() raises ValueError on mismatch
# and returns None on success, so reaching the print means the signature holds.
rcv_document = msg['document']
rcv_signature = b64decode(msg['signature'])
pkcs1_15.new(alice_pk).verify(SHA256.new(data=rcv_document), rcv_signature)
print("La firma es válida")
# -
# ¿Qué pasa si un atacante intercepta el mensaje y cambia el documento? Aquí vemos un ejemplo: el atacante ha interceptado el documento y ha puesto información falsa. ¿Puedes identificar qué parte del documento ha cambiado el atacante?
#
# Observa que ahora la firma de Alice no verifica y la función lanza un error.
# +
# Tampered copy of the signed document (an "attacker" altered part of the
# text), so its hash no longer matches and verify() raises ValueError below.
rcv_document = b"""Cryptographic hash functions take arbitrary binary strings as input,
and produce a random-like fixed-length output (called digest or hash value).
It is practically infeasible to derive the original input data from the digest. In other
words, the cryptographic hash function is one-way (pre-image resistance).
Given the digest of one message, it is also practically infeasible to find another
message (second pre-image) with the same digest (weak collision resistance).
Finally, it is infeasible to find two arbitrary messages with the same digest
(strong collision resistance).
Regardless of the hash algorithm, an n bits long digest is at most as secure as a
symmetric encryption algorithm keyed with n/3 bits (birthday attack).
Hash functions can be simply used as integrity checks. In combination with a
public-key algorithm, you can implement a digital signature."""
pkcs1_15.new(alice_pk).verify(SHA256.new(data=rcv_document), rcv_signature)
print("La firma es válida")
# -
# Podemos gestionar los errores con un `try/except` e informar al usuario.
# Wrap verification in try/except to report validity instead of crashing:
# pkcs1_15 signals an invalid signature by raising ValueError.
try:
    pkcs1_15.new(alice_pk).verify(SHA256.new(data=rcv_document), rcv_signature)
    print("La firma es válida")
except ValueError:
    print("La firma NO es válida")
| ejercicios/06/Hashes.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# language: python
# name: python3
# ---
# # Lesson 8 Practice: Seaborn
#
# Use this notebook to follow along with the lesson in the corresponding lesson notebook: [L08-Seaborn-Lesson.ipynb](./L08-Seaborn-Lesson.ipynb).
#
# ## Instructions
# Follow along with the teaching material in the lesson. Throughout the tutorial sections labeled as "Tasks" are interspersed and indicated with the icon: . You should follow the instructions provided in these sections by performing them in the practice notebook. When the tutorial is completed you can turn in the final practice notebook. For each task, use the cell below it to write and test your code. You may add additional cells for any task as needed or desired.
# ## Task 1a Setup
#
# Import the following packages:
# + seaborn as sns
# + pandas as pd
# + numpy as np
# + matplotlib.pyplot as plt
#
# Activate the `%matplotlib inline` magic.
# ## Task 2a Load Data
#
# + View available datasets by calling `sns.get_dataset_names`.
# + Choose one of those datasets and explore it.
# What is the shape?
# What are the columns?
# What are the data types?
# Are there missing values?
# Are there duplicated rows?
# For categorical columns find the unique set of categories.
# Is the data tidy?
# ## Task 2b Preview Seaborn
#
# Take some time to peruse the Seaborn [example gallery](https://seaborn.pydata.org/examples/index.html). Indicate which plot types are most interesting to you. Which do you expect will be most useful with current research projects you may be working on?
# + active=""
#
# -
# ## Task 3a Using `relplot`
#
# Experiment with the `size`, `hue` and `style` semantics by applying them to another example dataset of your choice.
#
# *You should produce three or more plots for this task.*
# ## Task 4a: Explore built-in styles
#
# Using a dataset of your choice, practice creating a plot for each of these different styles:
#
# + darkgrid
# + whitegrid
# + dark
# + white
# + ticks
# ## Task 4b
#
# Experiment with the style options and palettes introduced above. Create and demonstrate a style of your own using a dataset of your choice.
# ## Task 5a
#
# Examine the [Seaborn gallery](https://seaborn.pydata.org/examples/index.html) and find **two to four plots** types that interest you. Re-create a version of those plots using a different data set (make any other style changes you wish).
#
| Module2-Data_Analytics/L08-Seaborn-Practice.ipynb |