content
stringlengths 6
1.05M
|
|---|
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 2
# language: python
# name: python2
# ---
# %%timeit -n1
# Read one integer per line from the input file into the list x.
with open("fibo_input.txt","r") as f:
    x=f.read().splitlines()
# Convert each line to int (the comprehension variable deliberately rebinds x).
x= [int(x) for x in x]
# %%timeit -n1
# Module-level cache of Fibonacci numbers F(1)..F(n), filled in by fib().
fiblist = []

def fib(n):
    """Append the first n Fibonacci numbers (F(1)=1, F(2)=1, ...) to fiblist."""
    prev, curr = 0, 1
    for _ in range(n):
        fiblist.append(curr)
        prev, curr = curr, prev + curr
# Precompute Fibonacci numbers up to the largest requested index, then
# answer each query with a list lookup (fiblist[i-1] == F(i)).
fib(max(x))
for i in x:
    print fiblist[i-1]  # Python 2 print statement (the notebook kernel is python2)
|
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python (herschelhelp_internal)
# language: python
# name: helpint
# ---
# # EGS master catalogue
# ## Preparation of Pan-STARRS1 - 3pi Steradian Survey (3SS) data
#
# This catalogue comes from `dmu0_PanSTARRS1-3SS`.
#
# In the catalogue, we keep:
#
# - The `uniquePspsSTid` as unique object identifier;
# - The r-band position which is given for all the sources;
# - The grizy `<band>FApMag` aperture magnitude (see below);
# - The grizy `<band>FKronMag` as total magnitude.
#
# The Pan-STARRS1-3SS catalogue provides for each band an aperture magnitude defined as “In PS1, an 'optimal' aperture radius is determined based on the local PSF. The wings of the same analytic PSF are then used to extrapolate the flux measured inside this aperture to a 'total' flux.”
#
# The observations used for the catalogue were done between 2010 and 2015 ([ref](https://confluence.stsci.edu/display/PANSTARRS/PS1+Image+data+products)).
#
# **TODO**: Check if the detection flag can be used to know in which bands an object was detected to construct the coverage maps.
#
# **TODO**: Check for stellarity.
# Record provenance: library version and execution time, for reproducibility.
from herschelhelp_internal import git_version
print("This notebook was run with herschelhelp_internal version: \n{}".format(git_version()))
import datetime
print("This notebook was executed on: \n{}".format(datetime.datetime.now()))
# +
# %matplotlib inline
# #%config InlineBackend.figure_format = 'svg'
import matplotlib.pyplot as plt
plt.rc('figure', figsize=(10, 6))
from collections import OrderedDict
import os
from astropy import units as u
from astropy.coordinates import SkyCoord
from astropy.table import Column, Table
import numpy as np
from herschelhelp_internal.flagging import gaia_flag_column
from herschelhelp_internal.masterlist import nb_astcor_diag_plot, remove_duplicates
from herschelhelp_internal.utils import astrometric_correction, mag_to_flux
# +
# Output directory for the generated catalogue (overridable via $TMP_DIR).
OUT_DIR = os.environ.get('TMP_DIR', "./data_tmp")
# exist_ok=True replaces the original try/except FileExistsError idiom.
os.makedirs(OUT_DIR, exist_ok=True)

# Names of the position columns used throughout this notebook.
RA_COL = "ps1_ra"
DEC_COL = "ps1_dec"
# -
# ## I - Column selection

# +
# Map Pan-STARRS column names to the HELP naming convention: identifier,
# r-band position, then per-band aperture ("FAp") and Kron magnitudes with
# their errors, in the same order as the original hand-written mapping.
imported_columns = OrderedDict([("objID", "ps1_id"),
                                ("raMean", "ps1_ra"),
                                ("decMean", "ps1_dec")])
for band in "grizy":
    imported_columns["{}FApMag".format(band)] = "m_ap_gpc1_{}".format(band)
    imported_columns["{}FApMagErr".format(band)] = "merr_ap_gpc1_{}".format(band)
    imported_columns["{}FKronMag".format(band)] = "m_gpc1_{}".format(band)
    imported_columns["{}FKronMagErr".format(band)] = "merr_gpc1_{}".format(band)

catalogue = Table.read("../../dmu0/dmu0_PanSTARRS1-3SS/data/PanSTARRS1-3SS_EGS_v2.fits")[list(imported_columns)]

# Rename every imported column to its HELP name.
for orig_name, help_name in imported_columns.items():
    catalogue[orig_name].name = help_name

# Epoch of the observations (used for Gaia flagging below).
epoch = 2012

# Clean table metadata
catalogue.meta = None
# +
# Adding flux and band-flag columns
# For every magnitude column (m_*), convert to flux and add matching f_* /
# ferr_* columns; total (Kron) magnitudes also get a boolean flag_* column.
# NOTE(review): colnames is read once when the loop starts, so columns added
# inside the loop are presumably not re-visited — confirm with astropy docs.
for col in catalogue.colnames:
    if col.startswith('m_'):
        errcol = "merr{}".format(col[1:])

        # -999 is used for missing values
        catalogue[col][catalogue[col] < -900] = np.nan
        catalogue[errcol][catalogue[errcol] < -900] = np.nan

        flux, error = mag_to_flux(np.array(catalogue[col]), np.array(catalogue[errcol]))

        # Fluxes are added in µJy
        catalogue.add_column(Column(flux * 1.e6, name="f{}".format(col[1:])))
        catalogue.add_column(Column(error * 1.e6, name="f{}".format(errcol[1:])))

        # Band-flag column, only for the total (non-aperture) magnitudes.
        if "ap" not in col:
            catalogue.add_column(Column(np.zeros(len(catalogue), dtype=bool), name="flag{}".format(col[1:])))

# TODO: Set to True the flag columns for fluxes that should not be used for SED fitting.
# -

catalogue[:10].show_in_notebook()
# ## II - Removal of duplicated sources

# We remove duplicated objects from the input catalogues.

# +
# When several sources coincide, keep the one with the smallest magnitude
# error, trying these columns in order.
SORT_COLS = ['merr_ap_gpc1_r', 'merr_ap_gpc1_g', 'merr_ap_gpc1_i', 'merr_ap_gpc1_z', 'merr_ap_gpc1_y']
FLAG_NAME = 'ps1_flag_cleaned'

nb_orig_sources = len(catalogue)
catalogue = remove_duplicates(catalogue, RA_COL, DEC_COL, sort_col=SORT_COLS, flag_name=FLAG_NAME)
nb_sources = len(catalogue)

print("The initial catalogue had {} sources.".format(nb_orig_sources))
print("The cleaned catalogue has {} sources ({} removed).".format(nb_sources, nb_orig_sources - nb_sources))
print("The cleaned catalogue has {} sources flagged as having been cleaned".format(np.sum(catalogue[FLAG_NAME])))
# -
# ## III - Astrometry correction
#
# We match the astrometry to the Gaia one. We limit the Gaia catalogue to sources with a g band flux between the 30th and the 70th percentile. Some quick tests show that this gives the lowest dispersion in the results.

gaia = Table.read("../../dmu0/dmu0_GAIA/data/GAIA_EGS.fits")
gaia_coords = SkyCoord(gaia['ra'], gaia['dec'])

# Positions need explicit units before they can be fed to SkyCoord.
catalogue[RA_COL].unit = u.deg
catalogue[DEC_COL].unit = u.deg

# Diagnostic plot of the offsets to Gaia BEFORE correction.
nb_astcor_diag_plot(catalogue[RA_COL], catalogue[DEC_COL],
                    gaia_coords.ra, gaia_coords.dec)

# +
# Global RA/Dec offsets relative to Gaia (see
# herschelhelp_internal.utils.astrometric_correction for the details).
delta_ra, delta_dec = astrometric_correction(
    SkyCoord(catalogue[RA_COL], catalogue[DEC_COL]),
    gaia_coords
)
print("RA correction: {}".format(delta_ra))
print("Dec correction: {}".format(delta_dec))
# -

# Apply the offsets and re-plot to check the corrected astrometry.
catalogue[RA_COL] += delta_ra.to(u.deg)
catalogue[DEC_COL] += delta_dec.to(u.deg)
nb_astcor_diag_plot(catalogue[RA_COL], catalogue[DEC_COL],
                    gaia_coords.ra, gaia_coords.dec)
# ## IV - Flagging Gaia objects

# Add a column flagging sources with a Gaia counterpart (likely stars).
catalogue.add_column(
    gaia_flag_column(SkyCoord(catalogue[RA_COL], catalogue[DEC_COL]), epoch, gaia)
)

# +
# Rename the generic flag column to the survey-prefixed convention.
GAIA_FLAG_NAME = "ps1_flag_gaia"
catalogue['flag_gaia'].name = GAIA_FLAG_NAME
print("{} sources flagged.".format(np.sum(catalogue[GAIA_FLAG_NAME] > 0)))
# -

# # V - Saving to disk
catalogue.write("{}/PS1.fits".format(OUT_DIR), overwrite=True)
|
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# language: python
# name: python3
# ---
# # On-Device AI Co., Ltd.
#
# Web : https://on-device-ai.com/
# Email : yilintung@on-device-ai.com
# https://github.com/MIT-LCP/wfdb-python
# https://github.com/PIA-Group/BioSPPy
import numpy as np
import wfdb
from biosppy.signals import ecg
# MIT-BIH Arrhythmia Database
# https://physionet.org/content/mitdb/1.0.0/
# The 48 half-hour records of the MIT-BIH Arrhythmia Database.
mitdbs = [100,101,102,103,104,105,106,107,108,109,111,112,113,114,115,116,117,118,119,121,122,123,124,200,201,202,203,205,207,208,209,210,212,213,214,215,217,219,220,221,222,223,228,230,231,232,233,234]

# Beat annotations
# https://archive.physionet.org/physiobank/annotations.shtml
# Annotation symbols that mark heartbeats (non-beat annotations are skipped).
beat_annotations = ['N', 'L', 'R', 'B', 'A', 'a', 'J', 'S', 'V', 'r', 'F', 'e', 'j', 'n', 'E', '/', 'Q', '?' ]

# Beat annotations to AAMI EC57 categories
aami_normal = ['N','L','R', 'e','j']
# Fix: per ANSI/AAMI EC57 the SVEB class is {A, a, J, S}; 'Q' (unclassifiable)
# belongs to the unknown class together with the paced '/' and 'f' symbols.
aami_supraventricular_ectopic_beat = ['A','a', 'J', 'S']
aami_ventricular_ectopic_beat = ['V','E']
aami_fusion_beat = ['F']
aami_unknown_beat = ['/','f','u','Q']
# Process every record: locate the Lead II channel, detect R peaks, extract
# one fixed-length z-scored segment per annotated beat, and save per record.
for db in mitdbs :
    # Read a WFDB record
    sig, fields = wfdb.rdsamp('./mitdb/'+str(db))

    # Only using Lead II signal
    if fields['sig_name'][0] == 'MLII' :
        signal = sig[:,0]
    elif fields['sig_name'][1] == 'MLII' :
        signal = sig[:,1]
    else :
        signal = None

    # Read a WFDB annotation
    ann = wfdb.rdann('./mitdb/'+str(db), 'atr')
    annsample = ann.sample
    annsymbol = ann.symbol

    if signal is not None :
        # Convert signal (numpy array) type and values from Float64 to Float32
        signal = np.float32(signal)

        # ECG R-peak segmentation algorithm.
        # Follows the approach by P.S. Hamilton, "Open Source ECG Analysis Software Documentation", E.P.Limited, 2002.
        out = ecg.hamilton_segmenter(signal=signal, sampling_rate=fields['fs'])
        rpeaks = out['rpeaks']

        count = 0
        signal_size = len(signal)
        ann_data_size = len(annsample)
        beat_list = list()
        while count < ann_data_size:
            sample = annsample[count]
            symbol = annsymbol[count]
            if symbol in beat_annotations:
                # Find the R Peak for the beat annotation
                # (first detected peak within +/-90 samples of the annotation).
                beat_rpeak = None
                check_rpeak_start = sample - 90
                check_rpeak_end = sample + 90
                if check_rpeak_start >= 0 and check_rpeak_end < signal_size :
                    for rpeak in rpeaks :
                        if rpeak >= check_rpeak_start and rpeak <= check_rpeak_end :
                            beat_rpeak = rpeak
                            break
                # If found R Peak, set ECG heartbeat segmentation
                if beat_rpeak is not None :
                    segmentation_start = beat_rpeak - 90
                    segmentation_end = beat_rpeak + 170
                    if segmentation_start >= 0 and segmentation_end < signal_size :
                        # Segmentation : R Peak - 90 ~ R Peak + 170
                        segmentation = signal[segmentation_start:segmentation_end]
                        # z-score normalization
                        segmentation_copy = np.copy(segmentation)
                        normalized_zone = (segmentation_copy - np.mean(segmentation_copy))/np.std(segmentation_copy)
                        # AAMI categories : ANSI/AAMI EC57; 2012
                        # Class    N    SVEB    VEB    F    Q
                        # id       0    1       2      3    4
                        if symbol in aami_normal :
                            aami_id = 0
                        elif symbol in aami_supraventricular_ectopic_beat :
                            aami_id = 1
                        elif symbol in aami_ventricular_ectopic_beat :
                            aami_id = 2
                        elif symbol in aami_fusion_beat :
                            aami_id = 3
                        elif symbol in aami_unknown_beat :
                            aami_id = 4
                        else :
                            # Beat symbols outside the AAMI map fall back to 'unknown'.
                            aami_id = 4
                        # Add to list : [0] AAMI category , [1] Normalized signal
                        beat_list.append([aami_id,normalized_zone])
            # Next annotation
            count += 1
        # Save Numpy array
        # NOTE(review): placed inside the `signal is not None` branch so a record
        # without Lead II neither crashes nor re-saves the previous beat_list —
        # the flattened source's original indentation was ambiguous here; confirm.
        np.save('./beats/'+ str(db) + '.npy',{'beats':beat_list})
|
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# name: python3
# ---
# + [markdown] id="1knRwdnNvqCj"
# # BITS F312 - Neural Network and Fuzzy Logic
#
#
# + [markdown] id="2kL1ugGKAEgK"
# # NNFL Assignment 2
# + colab={"base_uri": "https://localhost:8080/"} id="Omj7b7rT_iRC" outputId="84d8f314-031a-4abc-afc0-6bdc11708e8d"
# Mount Google Drive so the dataset is reachable (Colab only).
from google.colab import drive
drive.mount('/content/drive')

# + colab={"base_uri": "https://localhost:8080/"} id="ESmgBJdcAAz5" outputId="ea25fe4a-d619-4780-e459-246ffa54923a"
# Changing directory to the directory containing dataset
# %cd drive/MyDrive/NNFL/Data_A2/

# + colab={"base_uri": "https://localhost:8080/"} id="meFTsfIpAeGP" outputId="1fb8a342-2162-4262-f703-1f2d6d6a2c49"
# listing datasets
# %ls -l

# + id="monX5050Awmc"
# libraries required
import pandas as pd
import numpy as np
import matplotlib.pyplot as plt
import os
from pprint import pprint

# + id="k4wDLjGQSaEU"
# suppressing warnings
import warnings
warnings.filterwarnings('ignore')
# + [markdown] id="cubp0wJ0Bjyz"
# # Q1
# Implement non-linear perceptron algorithm for the classification using Online Learning (Hebbian learning)
# algorithm. The dataset (data55.xlsx) contains 19 features and the last column is the output (class label).
# You can use hold-out cross-validation (70, 10, and 20%) for the selection of training, validation and test
# instances. Evaluate accuracy, sensitivity and specificity measures for the evaluation of test instances
# (Packages such as Scikitlearn, keras, tensorflow, pytorch etc. are not allowed).
# + id="YqJTKvnxC2Rg"
def sigmoid(x):
    """Logistic sigmoid 1 / (1 + e^-x); works element-wise on arrays."""
    return 1/(1+ np.exp(-x))


# + id="AHRw19JTDUUt"
def sigmoidDerivative(x):
    """Derivative of the sigmoid expressed in terms of its OUTPUT.

    `x` is expected to already be a sigmoid activation, so the
    derivative is x * (1 - x).
    """
    return x * (1 - x)


# + id="0n9z9WgODcPk"
def perceptron(X_train_data, Y_train_data, bias, W, alpha = 0.001, epochs = 20000):
    """Train a single sigmoid unit by full-batch gradient descent.

    X_train_data : (n_samples, n_features) array.
    Y_train_data : (n_samples,) array of 0/1 labels.
    bias         : per-sample bias vector (length n_samples).
    W            : initial weight vector (length n_features).
    Returns the trained (W, bias).
    """
    for _ in range(epochs):
        # Forward pass ('net' avoids shadowing the builtin `input`).
        net = np.dot(X_train_data, W) + bias
        output = sigmoid(net)

        # Gradient of the squared error through the sigmoid.
        error = output - Y_train_data
        update = error * sigmoidDerivative(output)

        # Gradient-descent step on weights and per-sample biases
        # (dead `update_bias` temporary from the original removed).
        W = W - alpha * np.dot(X_train_data.T, update)
        bias = bias - alpha * update
    return W, bias


def pred_eval(X, W, bias):
    """Forward pass for evaluation; only bias[0] is used, as a scalar bias."""
    return sigmoid(np.dot(X, W) + bias[0])
# + id="yvIBYe9qFKpx"
def resultQ1(filename = 'data55.xlsx'):
    """Q1 driver: train the non-linear perceptron on `filename` and print the
    resulting weights plus train/validation/test accuracies (70/10/20 split).
    """
    dataset = pd.read_excel(filename, header = None)
    n_rows, n_cols = dataset.shape
    feats = n_cols - 1          # last column is the class label

    # Z-score every feature column (leave the label column untouched).
    feature_mask = dataset.columns != feats
    feature_block = dataset.loc[:, feature_mask]
    dataset.loc[:, feature_mask] = (feature_block - feature_block.mean(axis=0)) / feature_block.std(axis=0)

    # Shuffle, then hold-out split into 70% train / 10% validation / 20% test.
    shuffled = dataset.sample(frac=1)
    split_points = [int(0.7 * len(dataset)), int(0.8 * len(dataset))]
    training_data, validation_data, testing_data = np.split(shuffled, split_points)

    train_arr = np.array(training_data)
    val_arr = np.array(validation_data)
    test_arr = np.array(testing_data)

    X_train, y_train = train_arr[:, :feats], train_arr[:, feats]
    X_val, y_val = val_arr[:, :feats], val_arr[:, feats]
    X_test, y_test = test_arr[:, :feats], test_arr[:, feats]

    # Random weight init; one bias entry per training sample.
    n_train, n_feat_cols = X_train.shape
    W = np.random.randn(n_feat_cols)
    bias = np.ones(n_train)
    W, bias = perceptron(X_train, y_train, bias, W)

    print("The Weights after training is as follows: \n")
    pprint(W)
    print("The Bias after training is as follows: ", bias[0])

    # Threshold the sigmoid output at 0.475 and report accuracy per split.
    for X_part, y_part, label in ((X_train, y_train, "Training"),
                                  (X_test, y_test, "Testing"),
                                  (X_val, y_val, "Validation")):
        predictions = np.where(pred_eval(X_part, W, bias) > 0.475, 1,0)
        print("{} Accuracy: ".format(label), (np.abs(np.sum(predictions == y_part))/len(y_part)))
# + [markdown] id="KVNAUdGEPonq"
# ### Output

# + colab={"base_uri": "https://localhost:8080/"} id="IAs9yjc0HZ8N" outputId="c4685485-6a66-42a1-9894-65a8cb569281"
# Run the full Q1 pipeline on the default dataset.
resultQ1()
|
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# language: python
# name: python3
# ---
# # WQU Capstone project - Short-term trading strategy on G10 currencies
# ## Notebook four - Fractional differentiation
#
# * Sergey Chigrinov - chigrinov.s.88@gmail.com
# * Dhruv Agrawal - dhruva1@stanfordalumni.org
# * Man Sing Ho - mshoalbert@gmail.com
#
# ### Jun-Aug-2020
# Financial time series are usually non-stationary. Differentiation is the most common way to solve this. However, this approach does not keep the "memory" of a series. As an alternative option, we may use fractionally differentiated features.
import sys
import os

# insert your own path or use relative path
path_to_project = os.path.realpath('..') # r'C:\WQU\Capstone\Working_files'
sys.path.append(path_to_project)

import pandas as pd
import numpy as np
import datetime as dt
from multiprocessing import cpu_count
import warnings
warnings.filterwarnings('ignore')

# Project implementation of fixed-width-window fractional differentiation.
from WQUcapstoneCode.fracdif.fracdif import frac_diff_ffd

import matplotlib as mpl
import matplotlib.pyplot as plt
import seaborn as sns
# %matplotlib inline
plt.style.use('seaborn-talk')
plt.style.use('bmh')

# Currency pair under study; the CSV was produced by the preprocessing notebook.
ticker = 'AUD/USD'
input_path = os.path.join(path_to_project, 'preprocessed_data', ''.join(ticker.split('/')) + '.csv')
features = pd.read_csv(input_path, index_col=0)

# If we use a large enough timespan, the data becomes stationary. For the purpose
# of this exercise let's exclude 33% of this data. We're going to train our model
# only on a part of the whole dataset anyway.
features = features[int(len(features)/3):]
from statsmodels.tsa.stattools import adfuller

# Augmented Dickey-Fuller test on the raw price series.
result = adfuller(features.price)
print('ADF Statistic: %f' % result[0])
print('p-value: %f' % result[1])
print('Critical Values:')
for key, value in result[4].items():
    print('\t%s: %.3f' % (key, value))

# Looking at the above, we fail to reject the null hypothesis that the data is non-stationary. Therefore, we need to transform it to make it stationary.

# Let's check if the other features are non-stationary as well (ADF p-value > 0.05).
non_stationary = {col:'non-stationary' for col in features.columns if adfuller(features[col].dropna())[1]>0.05}
non_stationary

# Ok, we have a number of non-stationary variables. To make them stationary, we may use first order differencing, as below.
{col:'non-stationary' for col in features.columns if adfuller(features[col].diff().dropna())[1]>0.05}
# However, we want to keep "memory". Therefore, we need to find a minimum value of the parameter d to make the series stationary.
def analyze_ffd(target_col):
    """Scan d in [0, 1] and chart how stationarity of the FFD-differenced
    series `features[target_col]` depends on d.

    For each d, fractionally differentiates the column (weight-truncation
    threshold 1e-5), runs the ADF test, collects the statistics in a
    DataFrame (displayed), and plots the ADF statistic against the mean
    95% confidence threshold.
    """
    cols = ['adfStat','pVal','lags','nObs','95% conf']#,'corr']
    out = pd.DataFrame(columns=cols)
    for d in np.linspace(0,1,11):
        # Distinct names: the original reused `df` for both the FFD series
        # and the ADF result tuple, which obscured the logic.
        ffd_series = frac_diff_ffd(pd.DataFrame(features[target_col].dropna()),d,thresh=1e-5)
        adf_result = adfuller(ffd_series[target_col].dropna())
        out.loc[d]=list(adf_result[:4])+[adf_result[4]['5%']]
    f,ax=plt.subplots()
    out['adfStat'].plot(ax=ax, marker='X')
    ax.axhline(out['95% conf'].mean(),lw=1,color='r',ls='dotted')
    # Title fixed: the truncation threshold actually used above is 1e-5,
    # not the 0.01 the original label claimed.
    ax.set_title('min d with thresh=1e-5')
    ax.set_xlabel('d values')
    ax.set_ylabel('adf stat');
    display(out)
analyze_ffd('price')

# From the above chart it can be seen that a d-value above 0.2 makes the price time series stationary. First-differencing is equivalent to d=1. To decide on d, we may experiment with other non-stationary columns.

analyze_ffd('upper_band')

analyze_ffd('senkou_span_b')

# Optionally, we may tweak the analyze_ffd function to return the minimum value of d that makes a series stationary. However, experiments show that d=0.35 is applicable in most cases. Therefore, we may reduce computational cost and use this constant.
d=0.35
# Apply FFD with d=0.35 to every previously non-stationary column, then re-test.
df = pd.concat([frac_diff_ffd(pd.DataFrame(features[c].dropna()), diff_amt=d, thresh=1e-5) for c in non_stationary.keys()], axis = 1)
non_stationary = {col:'non-stationary' for col in df.columns if adfuller(df[col].dropna())[1]>0.05}
non_stationary

# After the above operation, all features except for 'senkou_span_b' are stationary.

# However, as a result of this operation we lost a significant number of rows because they were used to 'smooth' the differencing.
print(f'''Initial number of rows: {features.shape[0]}
Number of rows after fracDiff: {df.dropna().shape[0]}''')

# ### Conclusion

# Fractionally differentiated features can be used in further analysis as inputs for machine learning models. However, to have enough data we may need to increase the time span of the input data.
|
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# language: python
# name: python3
# ---
import urllib.parse

# Example URL: image search for %E6%A1%9C ("桜", percent-encoded UTF-8).
url = 'https://www.google.co.jp/search?q=%E6%A1%9C&tbm=isch'
print(urllib.parse.urlparse(url))

# urlparse() exposes the raw query string via the .query attribute (a str).
qs = urllib.parse.urlparse(url).query
print(qs)
print(type(qs))

# parse_qs() decodes the query into a dict mapping each key to a LIST of values.
qs_d = urllib.parse.parse_qs(qs)
print(qs_d)
print(type(qs_d))
print(qs_d['q'])
print(type(qs_d['q']))
print(qs_d['q'][0])
print(type(qs_d['q'][0]))

# parse_qsl() returns a list of (key, value) tuples instead.
qs_l = urllib.parse.parse_qsl(qs)
print(qs_l)
print(type(qs_l))
print(qs_l[0])
print(type(qs_l[0]))
print(qs_l[0][1])
print(type(qs_l[0][1]))

# urlencode() builds a query string from a dict (quote_plus escaping by default)...
d = {'key1': 'value / one', 'key2': 'バリュー2'}
d_qs = urllib.parse.urlencode(d)
print(d_qs)
print(type(d_qs))

# ...or from a list of (key, value) tuples.
l = [('key1', 'value / one'), ('key2', 'バリュー2')]
l_qs = urllib.parse.urlencode(l)
print(l_qs)
print(type(l_qs))

# quote_via and safe control how spaces and reserved characters are escaped.
print(urllib.parse.urlencode(d))
print(urllib.parse.urlencode(d, quote_via=urllib.parse.quote))
print(urllib.parse.urlencode(d, safe='/'))
print(urllib.parse.urlencode(d, safe='/', quote_via=urllib.parse.quote))

# doseq=True expands list values into repeated key=value pairs.
print(qs_d)
print(urllib.parse.urlencode(qs_d))
print(urllib.parse.urlencode(qs_d, doseq=True))

# Naive str.replace() also works, but rewrites ANY matching substring in the URL.
print(url)
print(url.replace('isch', 'vid'))
def update_query(url, key, org_val, new_val):
    """Return `url` with query values equal to `org_val` under `key` replaced
    by `new_val`; if `key` is absent it is added with value `new_val`.

    Other values under `key`, and all other parameters, are left untouched.
    """
    parts = urllib.parse.urlparse(url)
    params = urllib.parse.parse_qs(parts.query)
    values = params.get(key)
    if values:
        params[key] = [new_val if v == org_val else v for v in values]
    else:
        # Store a list for consistency with parse_qs values (the original
        # stored a bare string here; urlencode(doseq=True) accepts both,
        # but mixing the two shapes in one dict was error-prone).
        params[key] = [new_val]
    return urllib.parse.urlunparse(parts._replace(query=urllib.parse.urlencode(params, doseq=True)))
# Demo: replace an existing value, replace a decoded-Unicode value, add a new key.
print(update_query(url, 'tbm', 'isch', 'vid'))
print(update_query(url, 'q', '桜', '梅'))
print(update_query(url, 'new-key', 'xxx', 'yyy'))
def remove_query(url, key):
    """Return `url` with every occurrence of query parameter `key` removed."""
    parsed = urllib.parse.urlparse(url)
    query_dict = urllib.parse.parse_qs(parsed.query)
    # Missing keys are a no-op thanks to pop's default.
    query_dict.pop(key, None)
    new_query = urllib.parse.urlencode(query_dict, doseq=True)
    return urllib.parse.urlunparse(parsed._replace(query=new_query))
# Demo: remove an existing key and a missing key (the latter is a no-op).
print(remove_query(url, 'tbm'))
print(remove_query(url, 'new-key'))
def remove_all_query(url):
    """Return `url` stripped of its entire query string."""
    parsed = urllib.parse.urlparse(url)
    return urllib.parse.urlunparse(parsed._replace(query=None))
# Demo: strip the whole query string.
print(remove_all_query(url))
|
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# language: python
# name: python3
# ---
# + [markdown] id="ReGlxKp0Kl6j"
# # Assignment 05 - COVID-19 Cases Prediction in Alberta
#
# The goal of this assignment is for you to create a recurrent neural network model to predict new daily cases of COVID-19 in Alberta up to seven days in the future. The deliverable for this assignment is this jupyter-notebook completed with your solution. Name the notebook as enel645_assignment05_team_(team number).ipynb. Submit the notebook to your team dropbox in the course D2L page.
#
# We will be using the University of Oxford Blavatnik School of Government dataset. They are tracking coronavirus government responses since January 1st, 2020. They have collected information across different countries and regions. For this assignment, we will focus in the province of Alberta, Canada.
#
# The dataset was already pre-processed to handle missing values, etc. You can focus just on your model development. Only edit the sections indicated in this notebook. You are free to add extra cells of text and code to this sections.
#
# At the end of the notebook, please include a short description of what each team member did in the assignment. Also include the consensus score between 0 and 3 to each team member. This score will be used to adjust the final grade of each student. Students developing the project individually do not need this description and score.
#
# You are being assessed based on:
#
# 1. Code execution - 20%
# 2. Clarity of the code (e.g., easy to follow, has pertinent comments, etc.) - 20%
# 3. Proper usage of the techniques seen in class - 30%
# 4. Accuracy of the models - 30%
#
# ## Don't edit the cells below
# + executionInfo={"elapsed": 2325, "status": "ok", "timestamp": 1616612575485, "user": {"displayName": "Roberto Medeiros de Souza", "photoUrl": "https://lh3.googleusercontent.com/a-/AOh14GjNSx6sawbAkG0C_mz5xTKcM7sE5CS_OTIRmiD3ESc=s64", "userId": "15090920763882844564"}, "user_tz": 360} id="sccKOb_2IDJH"
# %matplotlib inline
import pandas as pd
import numpy as np
import tensorflow as tf
import matplotlib.pylab as plt

# + colab={"base_uri": "https://localhost:8080/", "height": 600} executionInfo={"elapsed": 1570, "status": "ok", "timestamp": 1616613572351, "user": {"displayName": "Roberto Medeiros de Souza", "photoUrl": "https://lh3.googleusercontent.com/a-/AOh14GjNSx6sawbAkG0C_mz5xTKcM7sE5CS_OTIRmiD3ESc=s64", "userId": "15090920763882844564"}, "user_tz": 360} id="YxOBKsUIIDJL" outputId="a02b4a32-f7e0-40df-e7c4-6b9f76f84695"
# Features - information about the features can be found here:
# https://github.com/OxCGRT/covid-policy-tracker/blob/master/documentation/codebook.md
columns = ['Date', 'C1_School closing','C2_Workplace closing','C3_Cancel public events',\
           'C4_Restrictions on gatherings','C5_Close public transport','C6_Stay at home requirements',\
           'C7_Restrictions on internal movement','C8_International travel controls', 'E1_Income support',\
           'E1_Flag', 'E2_Debt/contract relief','E3_Fiscal measures', 'E4_International support',\
           'H1_Public information campaigns','H1_Flag', 'H2_Testing policy', 'H3_Contact tracing',\
           'H4_Emergency investment in healthcare', 'H5_Investment in vaccines', 'H6_Facial Coverings',\
           'H6_Flag', 'H7_Vaccination policy','H7_Flag', 'H8_Protection of elderly people', \
           'ConfirmedCases']

# Load the dataset
# NOTE(review): `error_bad_lines` was removed in pandas 2.0; on newer pandas
# this call needs `on_bad_lines='skip'` instead — confirm the pinned version.
DATA_URL = 'https://raw.githubusercontent.com/OxCGRT/covid-policy-tracker/master/data/OxCGRT_latest.csv'
df = pd.read_csv(DATA_URL,
                 parse_dates=['Date'],
                 encoding="ISO-8859-1",
                 dtype={"RegionName": str,
                        "RegionCode": str},
                 error_bad_lines=False)

# We are only interested in Alberta
df = df[(df["RegionName"] == "Alberta")]
# + colab={"base_uri": "https://localhost:8080/", "height": 583} executionInfo={"elapsed": 244, "status": "ok", "timestamp": 1616613582244, "user": {"displayName": "Roberto Medeiros de Souza", "photoUrl": "https://lh3.googleusercontent.com/a-/AOh14GjNSx6sawbAkG0C_mz5xTKcM7sE5CS_OTIRmiD3ESc=s64", "userId": "15090920763882844564"}, "user_tz": 360} id="hLO5vs8nrUMl" outputId="9e370bd5-51c3-437f-b048-0a412dfeab62"
# Keep only the columns that we want and display the last 10 samples
df = df[columns]
df.tail(10)

# + executionInfo={"elapsed": 147, "status": "ok", "timestamp": 1616612781022, "user": {"displayName": "Roberto Medeiros de Souza", "photoUrl": "https://lh3.googleusercontent.com/a-/AOh14GjNSx6sawbAkG0C_mz5xTKcM7sE5CS_OTIRmiD3ESc=s64", "userId": "15090920763882844564"}, "user_tz": 360} id="r1XVlKqIIDJN"
# Fill missing confirmed cases with 0s (initial cases) and interpolating the remaining
df["ConfirmedCases"].values[:70] = 0
df["ConfirmedCases"] = df["ConfirmedCases"].interpolate()
# Sample 159 is patched with the mean of its two neighbours.
df["ConfirmedCases"].values[159] = (df["ConfirmedCases"].values[160] + df["ConfirmedCases"].values[158])/2

# + colab={"base_uri": "https://localhost:8080/", "height": 265} executionInfo={"elapsed": 339, "status": "ok", "timestamp": 1616612781844, "user": {"displayName": "Roberto Medeiros de Souza", "photoUrl": "https://lh3.googleusercontent.com/a-/AOh14GjNSx6sawbAkG0C_mz5xTKcM7sE5CS_OTIRmiD3ESc=s64", "userId": "15090920763882844564"}, "user_tz": 360} id="OaAPSyKUb9Mm" outputId="4b24872a-6039-445e-aee1-ec29a465ebe7"
# Cumulative confirmed-case curve.
plt.figure()
plt.plot(df["ConfirmedCases"].values)
plt.xlabel("Number of days")
plt.ylabel("Total number of confirmed cases")
plt.show()

# + colab={"base_uri": "https://localhost:8080/", "height": 583} executionInfo={"elapsed": 188, "status": "ok", "timestamp": 1616612782739, "user": {"displayName": "Roberto Medeiros de Souza", "photoUrl": "https://lh3.googleusercontent.com/a-/AOh14GjNSx6sawbAkG0C_mz5xTKcM7sE5CS_OTIRmiD3ESc=s64", "userId": "15090920763882844564"}, "user_tz": 360} id="WK2vgPIXb9i2" outputId="b4018bc8-4290-4587-f7f4-5bef041fdae4"
# Derive daily new cases and their 7-day rolling average.
df["DailyChangeConfirmedCases"] = df.ConfirmedCases.diff().fillna(0)
df["Cases7DayAverage"] = df["DailyChangeConfirmedCases"].rolling(7).mean().fillna(0)

# Fill missing values with -1
for npi_column in columns:
    df[npi_column] = df[npi_column].fillna(-1)
df.tail(10)
df.columns

# + colab={"base_uri": "https://localhost:8080/", "height": 265} executionInfo={"elapsed": 396, "status": "ok", "timestamp": 1616612784518, "user": {"displayName": "Roberto Medeiros de Souza", "photoUrl": "https://lh3.googleusercontent.com/a-/AOh14GjNSx6sawbAkG0C_mz5xTKcM7sE5CS_OTIRmiD3ESc=s64", "userId": "15090920763882844564"}, "user_tz": 360} id="hBBtkYGskBYA" outputId="b8ffc482-4a51-4cc1-9514-5c64f6641c2c"
plt.figure()
plt.plot(df["DailyChangeConfirmedCases"].values, label = "Official daily count")
plt.plot(df["Cases7DayAverage"].values, label = "7-day average")
plt.xlabel("Number of days")
plt.ylabel("Daily cases")
plt.legend()
plt.show()
# +
DAYS_BACK = 20 # We look 20 days back into the 7 day average to perform our prediction
DAYS_FORWARD = 7 # We want to predict daily cases up to 7 days ahead

# Get numpy array from pandas dataframe.
# NOTE(review): df.columns[1:] already drops Date, and the extra [:,1:] drops
# the first NPI column as well — confirm this double drop is intended.
X = df[df.columns[1:]].values[:,1:]
X_static = X[DAYS_BACK:-DAYS_FORWARD,:-3] # static features
# time series features, i.e., historical daily cases (the last column of X)
X_timeseries = np.zeros((X.shape[0]- DAYS_BACK - DAYS_FORWARD,DAYS_BACK))
Y = np.zeros((X.shape[0]- DAYS_BACK - DAYS_FORWARD,DAYS_FORWARD))

# Preparing the time series data and the labels
for ii in range(DAYS_BACK, X.shape[0]- DAYS_FORWARD):
    X_timeseries[ii-DAYS_BACK,:] = X[(ii - DAYS_BACK):ii,-1]
    Y[ii-DAYS_BACK,:] = X[ii:(ii + DAYS_FORWARD),-1]

# +
# Splitting the dataset
# Development (train+validation) portion, scaled by its own maxima.
X_static_dev = X_static[45:-21]
max_static = X_static_dev.max(axis = 0,keepdims = True)
X_static_dev = X_static_dev/(max_static + 1e-10)
X_timeseries_dev = X_timeseries[45:-21]#[:,:,np.newaxis]
max_temporal = X_timeseries_dev.max()
X_timeseries_dev = X_timeseries_dev/max_temporal
Y_dev = Y[45:-21]

# Shuffle the development set (static, temporal and labels stay aligned).
indexes = np.arange(X_static_dev.shape[0])
np.random.shuffle(indexes)
X_static_dev = X_static_dev[indexes]
X_timeseries_dev = X_timeseries_dev[indexes]
Y_dev = Y_dev[indexes]

# Hold-out test windows, scaled with the development maxima.
X_static_test = X_static[-21:-1]
X_static_test = X_static_test/(max_static+1e-10)
X_timeseries_test = X_timeseries[-21:-1]/max_temporal
Y_test = Y[-21:-1]
# -
# ## Only edit the cells below for the assignment

# +
# Your model should receive these two inputs
input_A = tf.keras.layers.Input(shape=(X_static_dev.shape[1],), name="inputA")  # static NPI features
input_B = tf.keras.layers.Input(shape=(None, 1), name="inputB")  # variable-length daily-case history

# Define your model here (it must be bound to `model` before the cells below run).
# -
model.summary()

# +
# NOTE(review): `lr` is deprecated in newer Keras in favour of `learning_rate`;
# confirm the pinned TensorFlow version before changing.
opt = tf.keras.optimizers.SGD(lr= 1e-3) # Feel free to choose your optimizer
model.compile(loss='mae',optimizer=opt)

model_name = "covid_alberta_lstm.h5"
# Stop after 1300 epochs without val_loss improvement; checkpoint the best weights.
early_stop = tf.keras.callbacks.EarlyStopping(monitor='val_loss', patience = 1300)
monitor = tf.keras.callbacks.ModelCheckpoint(model_name, monitor='val_loss',\
                                             verbose=0,save_best_only=True,\
                                             save_weights_only=True,\
                                             mode='min')

# Learning rate schedule
def scheduler(epoch, lr):
    """Decay the learning rate by a factor of 1.1 every 50 epochs (epoch 0 excluded)."""
    if epoch == 0 or epoch % 50 != 0:
        return lr
    return lr / 1.1
lr_schedule = tf.keras.callbacks.LearningRateScheduler(scheduler,verbose = 0)
# -

# Train with a 20% validation split; the checkpoint keeps the best val_loss weights.
history = model.fit((X_static_dev, X_timeseries_dev),Y_dev, epochs=1500, batch_size = 24, \
                    callbacks= [early_stop, monitor, lr_schedule],shuffle = True,\
                    validation_split = 0.2)

# ## Don't edit the cell below

# Restore the best checkpoint and report the test MAE.
model.load_weights(model_name)
model.evaluate((X_static_test,X_timeseries_test),Y_test)
# ## Team members participation
# (include the description of what each team member did and the consensus score for each team member)
#
# - **Arya Stark** helped design the model and write the code for fully connected model (**score 3**)
# - **Luke Skywalker** helped to implement the data augmentation module (**score 3**)
# - ...
|
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3 (ipykernel)
# language: python
# name: python3
# ---
# # Experiment 3.1: Basic deep learning on lying videos
#
# * input: lying video
# * Output: Left/right leg amplitude/duration (4 scores)
# +
import numpy as np
import pandas as pd
from sklearn.model_selection import train_test_split
from sklearn.metrics import mean_absolute_error
import tensorflow.keras as keras
from src.helpers import read_scores
from src.data_generators import RawDataGenerator
from src.data_selection import MultipleScoreSelector
from src.ai_func import cross_validation_generator
from src.settings import LYING_VIDEOS_DATA_FOLDER, SITTING_VIDEOS_DATA_FOLDER, DATA_FOLDER
# -
# %load_ext autoreload
# %autoreload 2

# + [markdown] tags=[]
# ### Read metadata
# -

# Clinical scoring spreadsheet (presumably one row per recording — confirm).
scores_df = read_scores(DATA_FOLDER / 'data_clinical_scoring.xlsx')

# ## Definitions

# Target scores (left/right leg amplitude and duration) and the scorers to use.
SCORES_TO_USE = ['D_RLP_R_tD_pscore', 'D_LLP_R_tD_pscore', 'D_RLP_R_tA_pscore', 'D_LLP_R_tA_pscore']
SCORERS = [1, 2, 3]

# ## Pipeline for training a deep neural network

# ### Define model architecture (here: simple CNN)

# Generate some dev data to get X shape
selector = MultipleScoreSelector(scores_to_use=SCORES_TO_USE, scorer_to_use=1)
dev_selection = selector.transform(scores_df)
dev_generator = RawDataGenerator(dev_selection, videos_folder=LYING_VIDEOS_DATA_FOLDER)
X, y = dev_generator.__getitem__(0)
# A single batch fixes the (timesteps, features) input shape for the CNN below.
n_timesteps, n_features = (X.shape[1], X.shape[2])
# +
# One output unit per clinical score being predicted.
n_outputs = len(SCORES_TO_USE)

def get_model():
    """Build the simple 1-D CNN: two conv/conv/pool/dropout stages followed
    by a dense head with n_outputs linear regression outputs."""
    inputs = keras.layers.Input(shape=(n_timesteps, n_features))
    x = keras.layers.BatchNormalization()(inputs)
    # Two convolutional stages; the filter count doubles in the second stage.
    for n_filters in (32, 64):
        x = keras.layers.Conv1D(filters=n_filters, kernel_size=3, activation='relu')(x)
        x = keras.layers.Conv1D(filters=n_filters, kernel_size=3, activation='relu')(x)
        x = keras.layers.MaxPooling1D(pool_size=2)(x)
        x = keras.layers.Dropout(0.7)(x)
    x = keras.layers.Flatten()(x)
    x = keras.layers.Dense(100)(x)
    outputs = keras.layers.Dense(n_outputs)(x)
    return keras.Model(inputs=inputs, outputs=outputs)

get_model().summary()
# + [markdown] tags=[]
# ## Train network
# +
def train_model(train_generator):
    """Compile a fresh CNN and fit it on the generator for 30 epochs."""
    net = get_model()
    net.compile(optimizer=keras.optimizers.Adam(), loss='mse')
    net.fit(train_generator, epochs=30)
    return net
def train_cross_val(cross_val):
    """Train one model per CV split and predict on that split's test fold.

    Returns (concatenated test-score DataFrame, vertically stacked predictions).
    """
    preds, truths = [], []
    for i_split, (train_scores, test_scores) in enumerate(cross_val):
        print(f'Fitting for 5-fold split {i_split}')
        fitted = train_model(RawDataGenerator(train_scores, videos_folder=LYING_VIDEOS_DATA_FOLDER))
        test_generator = RawDataGenerator(test_scores, videos_folder=LYING_VIDEOS_DATA_FOLDER)
        preds.append(fitted.predict(test_generator))
        truths.append(test_scores)
    return pd.concat(truths), np.vstack(preds)
def evaluate(y_test, y_pred):
    """Return a DataFrame with the MAE of each score column in y_test
    against the matching column of the prediction array y_pred."""
    rows = [
        {'score': column,
         'mae': mean_absolute_error(y_test.iloc[:, i_score], y_pred[:, i_score])}
        for i_score, column in enumerate(y_test)
    ]
    return pd.DataFrame(rows)
# + jupyter={"outputs_hidden": true} tags=[]
# Train a full cross-validated model per human scorer and collect
# (ground truth, predictions) pairs for later evaluation.
results = []
for scorer in SCORERS:
    print(f'Training model for scorer {scorer}')
    selector = MultipleScoreSelector(scores_to_use=SCORES_TO_USE, scorer_to_use=scorer)
    selected_data = selector.transform(scores_df)
    cross_val = cross_validation_generator(selected_data)
    y_test, y_pred = train_cross_val(cross_val)
    results.append((y_test, y_pred))
# -
# Report per-score MAE for every scorer.
for scorer, (y_test, y_pred) in zip(SCORERS, results):
    print(f'results for scorer {scorer}')
    print(evaluate(y_test, y_pred))
|
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 2
# language: python
# name: python2
# ---
# ### IMPORT PACKAGES
import numpy as np
from matplotlib import pyplot as plt
from matplotlib import animation
from IPython.display import display
# NOTE(review): JSAnimation is a legacy third-party package (pre-dates
# matplotlib's built-in to_jshtml); presumably required by this old
# Python 2 notebook environment — confirm before upgrading.
from JSAnimation.IPython_display import display_animation
# %matplotlib inline
print ("PACKAGES LOADED")
# ### DEFINE DISPLAY FUNCTION
def display_frames_as_gif(frames):
    """Render a list of image arrays as a looping inline animation."""
    plt.figure(figsize=(8, 4), dpi=72)
    image_artist = plt.imshow(frames[0])
    plt.axis('on')
    title_obj = plt.title(0)
    def _update(frame_idx):
        # Swap the displayed frame and refresh the title with its index.
        image_artist.set_data(frames[frame_idx])
        plt.setp(title_obj, color='k', text=('Time: %d' % (frame_idx)))
    anim = animation.FuncAnimation(plt.gcf(), _update, frames=len(frames), interval=50)
    display(display_animation(anim, default_mode='loop'))
print ("ANIMATE FUNCTION READY")
# ### RUN
# Build 100 random 10x10 RGB frames and animate them.
imgs = []
for i in range(100):
    img = np.random.randint(0, 255, (10, 10, 3)).astype(np.ubyte)
    imgs.append(img)
print ('DONE')
display_frames_as_gif(imgs)
|
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# language: python
# name: python3
# ---
# +
resultPath = "/home/cflores/cflores_workspace/comments-retriever/results"
media = "20minutos"
scrapper = VeinteMinutosSimpleScrapper()
dates = scrapper.generateDates(start="01/01/2019", end="31/08/2019")
# Bug fix: the original comprehension repeated the literal template
# "{rootPath}/{media}/{d}-comments.json" without ever interpolating the
# placeholders, so every list entry was the same string.
# rootPath is presumed to be resultPath — TODO confirm against the
# retriever's output layout.
commentsFilesNames = [ "{rootPath}/{media}/{d}-comments.json".format(rootPath=resultPath, media=media, d=d) for d in dates ]
commentsFilesNames
# -
text = "«No vamos a presentar listas fantasma», añade.Conforme a los criterios deasdfasdfasdfasdfasdfasdfasdfasdfasdfasdf"
# Strip the trailing editorial boilerplate ("Conforme a los criterios de...").
text[:text.find("Conforme a los criterios de")]
|
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# language: python
# name: python3
# ---
# ### Problem Statement
#
# Using only the text column in the csv file, predict 2 continous variables.
# ### Import Relevant Packages
import pandas as pd
import numpy as np
import matplotlib.pyplot as plt
import seaborn as sns
import re
import os
from sklearn.model_selection import train_test_split, KFold
from sklearn.linear_model import LinearRegression
from sklearn.pipeline import Pipeline
from sklearn.feature_selection import chi2, SelectKBest
from sklearn.feature_extraction.text import CountVectorizer, TfidfVectorizer, HashingVectorizer
from sklearn.metrics import mean_absolute_error
from sklearn.preprocessing import FunctionTransformer
from lightgbm import LGBMRegressor
from catboost import CatBoostRegressor
from sklearn.ensemble import GradientBoostingRegressor, AdaBoostRegressor
from xgboost import XGBRegressor
from nltk.corpus import stopwords
# German stop words; NOTE(review): not referenced later in this notebook chunk.
german_stop_words = stopwords.words('german')
# Raw files have no header; columns are [ID, target_1, target_2, text] (renamed below).
df = pd.read_csv('raalucaa-attachments/training.txt', header=None)
test = pd.read_csv('raalucaa-attachments/validation.txt', header=None)
def rename_cols(x):
    """Rename the four positional columns of *x* in place: ID, targets, text."""
    column_names = {0: 'ID', 1: 'target_1', 2: 'target_2', 3: 'text'}
    x.rename(columns=column_names, inplace=True)
def new_csv(x, to_drop, new):
    """Write *x* to CSV file *new* (no index) with the columns in *to_drop* removed.

    The original frame is left unmodified (``drop`` returns a copy).
    """
    # list(...) accepts any iterable of labels; the previous
    # ``[i for i in to_drop]`` comprehension was a redundant copy idiom.
    x.drop(columns=list(to_drop)).to_csv(new, index=False)
rename_cols(df)
rename_cols(test)
# ### Creating new datasets based on problem statement
#
# +
### Run only once
# new_csv(df,['target_2'],'training_1.csv')
# new_csv(df,['target_1'],'training_2.csv')
# new_csv(test,['target_2'],'validation_1.csv')
# new_csv(test,['target_1'],'validation_2.csv')
# -
# One train/validation pair per target variable.
train_1 = pd.read_csv('training_1.csv')
test_1 = pd.read_csv('validation_1.csv')
train_2 = pd.read_csv('training_2.csv')
test_2 = pd.read_csv('validation_2.csv')
# ### Explore Dataset
train_1.head(3)
train_1.ID.nunique() / train_1.shape[0]
sns.boxplot(x='target_1', data=train_1)
train_1.sample(5).text
# ### Data Cleaning / Preparation
# Negated character classes: each matches the characters to REMOVE
# (anything outside ASCII alphanumerics / letters plus German umlauts).
TOKEN_ALPHANUMERIC = '[^a-zA-Z0-9äöüÄÖÜß]'
TOKEN_ALPHABETIC = '[^a-zA-ZäöüÄÖÜß-]'
mae_dict = {}
seed = 0
# lr appears only in a commented-out model line below; stop is the default
# early_stopping_rounds for KFOLD_LGBM.
lr = 0.03
stop = 100
def clean_words(df, token, col):
    '''
    Lower-case the text column, replacing every character matched by *token*
    with a single space.

    df : Dataframe
    token : regex pattern matching the characters to REMOVE from the text
            (the TOKEN_* constants are negated character classes)
    col : name of column in Dataframe that contains the text
    return : list of cleaned strings, one per row, in row order
    '''
    # Compile the pattern once and iterate the column directly instead of
    # positional .iloc lookups per row — same output, less per-row overhead.
    pattern = re.compile(token)
    return [pattern.sub(' ', text).lower() for text in df[col]]
# 1st Model Variation
# ### Model Building
# Train_1
get_clean_train = clean_words(train_1, TOKEN_ALPHANUMERIC, 'text') #since we're using the same features for training for both tasks.
target_1 = train_1.target_1.values
# Bag-of-words features, capped at the 8000 most frequent tokens.
cvec = CountVectorizer(max_features=8000)
train_vec = cvec.fit_transform(get_clean_train).toarray()
# Hyperparameters for target_1 (presumably hand-tuned — no search shown here).
lgb_params_1 = {
    'bagging_fraction': 0.9,
    'feature_fraction': 0.5,
    'learning_rate': 0.03,
    'max_depth': -1,
    'min_child_weight': 5,
    'min_split_gain': 0.03,
    'num_iterations': 2000,
    'num_leaves': 500
}
# %%time
lgb1 = LGBMRegressor(random_state=seed,
                     n_estimators = lgb_params_1['num_iterations'],
                     learning_rate = lgb_params_1['learning_rate'],
                     bagging_fraction = lgb_params_1['bagging_fraction'],
                     feature_fraction = lgb_params_1['feature_fraction'],
                     max_depth = lgb_params_1['max_depth'],
                     min_child_weight = lgb_params_1['min_child_weight'],
                     min_split_gain = lgb_params_1['min_split_gain'],
                     num_leaves = lgb_params_1['num_leaves'])
lgb1.fit(train_vec, target_1)
get_clean_test = clean_words(test_1, TOKEN_ALPHANUMERIC, 'text') #since we're making inference on the same features for the 2 tasks.
# Reuse the fitted vectorizer so test features share the training vocabulary.
test_vec = cvec.transform(get_clean_test).toarray()
p1 = lgb1.predict(test_vec)
display(mean_absolute_error(p1, test_1.target_1))
mae_dict['train_1_mae'] = (mean_absolute_error(p1, test_1.target_1))
# Train_2 — same features (train_vec/test_vec), different target and params.
target_2 = train_2.target_2
lgb_params_2 = {
    'bagging_fraction': 0.8,
    'feature_fraction': 0.4,
    'learning_rate': 0.01,
    'max_depth': -1,
    'min_child_weight': 20,
    'min_split_gain': 0.05,
    'num_iterations': 3000,
    'num_leaves': 300
}
# %%time
lgb2 = LGBMRegressor(random_state=seed,
                     n_estimators = lgb_params_2['num_iterations'],
                     learning_rate = lgb_params_2['learning_rate'],
                     bagging_fraction = lgb_params_2['bagging_fraction'],
                     feature_fraction = lgb_params_2['feature_fraction'],
                     max_depth = lgb_params_2['max_depth'],
                     min_child_weight = lgb_params_2['min_child_weight'],
                     min_split_gain = lgb_params_2['min_split_gain'],
                     num_leaves = lgb_params_2['num_leaves'])
lgb2.fit(train_vec, target_2)
p2 = lgb2.predict(test_vec)
display(mean_absolute_error(p2, test_2.target_2))
mae_dict['train_2_mae'] = (mean_absolute_error(p2, test_2.target_2))
print('Mean of absolute error for the 2 target features = {}'.format(pd.Series(mae_dict).mean()))
# 2nd Model Variation
# #### KFold Cross validation LGBM
kfold_mae_dict = {}
def KFOLD_LGBM(train, target, val, params_dict, k=5, early=stop):
    '''
    Train LightGBM regressors with k-fold cross validation and early stopping.

    train : Train array
    target : Prediction array
    val : Validation array
    params_dict : dictionary of parameters to pass to lightgbm
    k : number of splits to perform cross validation
    early : early_stopping_rounds parameter
    return : list with one validation-set prediction array per fold
    '''
    splitter = KFold(n_splits=k, shuffle=True, random_state=seed)
    predictions = []
    fold_errors = []
    X = pd.DataFrame(train)
    y = pd.DataFrame(target)
    for train_index, test_index in splitter.split(X, y):
        X_train, X_test = X.iloc[train_index], X.iloc[test_index]
        y_train, y_test = y.iloc[train_index], y.iloc[test_index]
        model = LGBMRegressor(random_state=seed,
                              n_estimators=params_dict['num_iterations'],
                              learning_rate=params_dict['learning_rate'],
                              bagging_fraction=params_dict['bagging_fraction'],
                              feature_fraction=params_dict['feature_fraction'],
                              max_depth=params_dict['max_depth'],
                              min_child_weight=params_dict['min_child_weight'],
                              min_split_gain=params_dict['min_split_gain'],
                              num_leaves=params_dict['num_leaves'],
                              eval_metric='mae')
        # Early stopping monitors the held-out fold; progress logged every 100 rounds.
        model.fit(X_train, y_train, eval_set=[(X_train, y_train), (X_test, y_test)], early_stopping_rounds=early, verbose=100)
        fold_preds = model.predict(X_test)
        print("err: ", mean_absolute_error(y_test, fold_preds))
        fold_errors.append(mean_absolute_error(y_test, fold_preds))
        predictions.append(model.predict(val))
    print('Mean of absolute error for {} folds = {}'.format(k, np.mean(fold_errors)))
    return predictions
# Train_1
# %%time
test1_kfold = KFOLD_LGBM(train_vec, target_1, test_vec, lgb_params_1, k=5)
# Average the per-fold predictions, then score against the held-out target.
display(mean_absolute_error(test_1.target_1, np.mean(test1_kfold, 0)))
kfold_mae_dict['train_1_mae'] = mean_absolute_error(test_1.target_1, np.mean(test1_kfold, 0))
# Train_2
# %%time
test2_kfold = KFOLD_LGBM(train_vec, target_2, test_vec, lgb_params_2, k=5)
# Average the per-fold predictions, then score against the held-out target.
display(mean_absolute_error(test_2.target_2, np.mean(test2_kfold, 0)))
kfold_mae_dict['train_2_mae'] = mean_absolute_error(test_2.target_2, np.mean(test2_kfold, 0))
print('Mean of absolute error for the 2 target features = {}'.format(pd.Series(kfold_mae_dict).mean()))
# Blending of Results
blend_mae_dict = {}
def blend(pred1, pred2, factor):
    '''
    Weighted average of two prediction arrays.

    pred1 : prediction to blend
    pred2 : prediction to blend
    factor : weight (0..1) given to pred1; pred2 receives (1 - factor)
    return : blended results of pred1 and pred2
    '''
    # Fixes two defects in the original cell: the function had no name
    # ("def (pred1, ...)" is a SyntaxError) even though it is called as
    # ``blend`` below, and the first term multiplied the two predictions
    # (pred1*pred2) instead of applying the weight (pred1*factor).
    return (pred1 * factor) + (pred2 * (1 - factor))
# 50/50 blend of the single-model prediction and the fold-averaged prediction.
blended_test_1 = blend(p1, np.mean(test1_kfold,0), 0.5)
display(mean_absolute_error(test_1.target_1, blended_test_1))
blend_mae_dict['test_1_mae'] = mean_absolute_error(test_1.target_1, blended_test_1)
blended_test_2 = blend(p2, np.mean(test2_kfold,0), 0.5)
display(mean_absolute_error(test_2.target_2, blended_test_2))
blend_mae_dict['test_2_mae'] = mean_absolute_error(test_2.target_2, blended_test_2)
print('Mean of absolute error for the 2 target features = {}'.format(pd.Series(blend_mae_dict).mean()))
|
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# language: python
# name: python3
# ---
# +
import sys
sys.path.append('..')
# Project helpers (set_seed etc. used below) live one directory up.
from LL_functions import *
import utils #For getting the filename
import os
import matplotlib.pyplot as plt
import matplotlib
#Set plot fonts
font = {'family' : 'sans-serif',
        'weight' : 'normal',
        'size' : 16}
matplotlib.rc('font', **font)
# ANSI escape sequence for bold terminal output.
print_bold = '\033[1m'
# -
# ## Hyperparameters
# These hyperparameters were determined by a hyperparameter search. The hyperparameters with two values correspond to \[baseline value, ps value\].
# +
#Dataset
n_workers = 24
n_bs = [2000, 1000]
n_bs_incep = [2000, 1000]
G_acc = [1, 2]
D_acc = [1, 2]
shuffle = [True, False]
#Optimization
n_D_step = [5, 5]
G_lr = [0.005477874786325012, 0.005477874786325012]
D_lr = 0.0011634802123515887
G_lr_decay = [1.00064523807154, 1.00064523807154]
D_lr_decay = 1.0001567511690173
n_epochs = 1000
#Misc
n_test_every = 50
n_save_every = 50
train_fn = ['GAN', 'PS']
#Architecture
G_ch = 8
D_ch = [28, 28]
# Latent sizes to sweep: powers of two from 2048 down to 4.
dim_z_vals = np.flip(2**(np.arange(10) + 2)) #Do largest first
#Seed
seed_vals = [0]
#Resume
resume = False
# -
# ## Useful functions
# +
#Function for formating arguments
def format_args(_n_bs, _n_bs_incep, _n_workers, _G_acc, _D_acc, _n_D_step, _G_lr, _D_lr, _n_epochs, shuffle, train_fn,
                _G_lr_decay, _D_lr_decay, _G_ch, _D_ch, _n_test_every, _n_save_every, _seed, _resume, _dim_z):
    """Assemble the argv-style option list consumed by train.py.

    Options are emitted in a fixed order: dataset (plus optional --shuffle),
    gradient accumulation, optimizer schedule, architecture, numeric
    tolerances, test/checkpoint cadence, misc; --resume is appended last.
    """
    args = ['--dataset', 'celeba_64_cc_SUB10000', '--parallel', '--num_workers', str(_n_workers),
            '--batch_size', str(_n_bs), '--inception_batchsize', str(_n_bs_incep)]
    if shuffle:
        args.append('--shuffle')
    # Gradient-accumulation steps for generator and discriminator.
    args += ['--num_G_accumulations', str(_G_acc), '--num_D_accumulations', str(_D_acc)]
    # Optimization schedule.
    args += ['--num_D_steps', str(_n_D_step), '--G_lr', str(_G_lr), '--D_lr', str(_D_lr),
             '--D_B2', '0.900', '--G_B2', '0.900', '--num_epochs', str(_n_epochs),
             '--G_lr_decay', str(_G_lr_decay), '--D_lr_decay', str(_D_lr_decay)]
    # Architecture choices.
    args += ['--G_attn', '0', '--D_attn', '0', '--G_nl', 'relu', '--D_nl', 'relu', '--G_ortho', '0.0',
             '--D_thin', '--G_init', 'xavier', '--D_init', 'xavier', '--G_eval_mode',
             '--G_ch', str(_G_ch), '--D_ch', str(_D_ch), '--dim_z', str(_dim_z)]
    # Numeric tolerances.
    args += ['--SN_eps', '1e-8', '--BN_eps', '1e-5', '--adam_eps', '1e-8']
    # Testing / checkpointing cadence.
    args += ['--test_every', str(_n_test_every), '--save_every', str(_n_save_every),
             '--num_best_copies', '5', '--num_save_copies', '2', '--seed', str(_seed)]
    # Misc.
    args += ['--name_suffix', 'SNGAN', '--data_root', '/data', '--load_in_mem', '--which_train_fn', train_fn]
    if _resume:
        args.append('--resume')
    return args
#This reads the information on the log
def get_log_info(f_log):
    """Parse <f_log>_log.jsonl into numpy arrays keyed by metric name.

    Returns a dict with 'itr', 'IS_mean', 'IS_std', 'FID' and 'time_stamp'
    arrays (one entry per log line), or None when the log file is missing.
    """
    log_path = f_log + '_log.jsonl'
    if not os.path.exists(log_path):
        return
    with open(log_path, 'r') as fid:
        # Trailing newline leaves one empty element, hence the [:-1].
        lines = fid.read().split('\n')[:-1]
    n_tests = len(lines)
    result = {
        'itr': np.zeros(n_tests, dtype=int),
        'IS_mean': np.zeros(n_tests),
        'IS_std': np.zeros(n_tests),
        'FID': np.zeros(n_tests),
        'time_stamp': np.zeros(n_tests),
    }
    for i, line in enumerate(lines):
        # Crude string-splitting "JSON parser": each value sits between its
        # key and the next comma (closing brace for the final key).
        result['itr'][i] = line.split('"itr": ')[1].split(',')[0]
        result['IS_mean'][i] = line.split('"IS_mean": ')[1].split(',')[0]
        result['IS_std'][i] = line.split('"IS_std": ')[1].split(',')[0]
        result['FID'][i] = line.split('"FID": ')[1].split(',')[0]
        result['time_stamp'][i] = line.split('"_stamp": ')[1].split('}')[0]
    return result
def get_log():
    """Resolve the model name for the current sys.argv and load its log (or None)."""
    # NOTE: parse_args() reads the global sys.argv, which the sweep loop
    # below overwrites (sys.argv = ['train.py'] + args) before each call —
    # this function is order-dependent on that mutation.
    parser = utils.prepare_parser()
    config = vars(parser.parse_args())
    f_model = utils.name_from_config(config)
    log = get_log_info(os.path.join('logs', f_model))
    return log, f_model
# -
# ## Run through seed_vals and dim_z_vals for the baseline and PS models
# +
#Initialize f_model array
f_models = []
#Do the baseline and then the ps
for ps in [0,1]:
    #Loop through each seed
    for i in range(len(seed_vals)):
        #Set seed
        seed = seed_vals[i]
        set_seed(seed)
        #Loop through each latent size
        for ii in range(len(dim_z_vals)):
            #Set dim_z
            dim_z = dim_z_vals[ii]
            #Logging
            s_viz = '------------------------------------------------------------\n'
            print(s_viz + s_viz +'SEED = ' + str(seed) + '. DIM_Z = ' + str(dim_z) + '\n' + s_viz + s_viz)
            #Populate arguments
            args = format_args(n_bs[ps], n_bs_incep[ps], n_workers, G_acc[ps], D_acc[ps], n_D_step[ps], G_lr[ps], D_lr, n_epochs, shuffle[ps], train_fn[ps],
                               G_lr_decay[ps], D_lr_decay, G_ch, D_ch[ps], n_test_every, n_save_every, seed, resume, dim_z)
            sys.argv = ['train.py'] + args
            #Get filename
            log, f_model = get_log()
            f_models.append(f_model)
            #Attempt to resume if possible: no log -> fresh run,
            #unfinished log -> resume, finished log -> skip training.
            if log is None:
                print('No log. Starting from scratch')
                args = format_args(n_bs[ps], n_bs_incep[ps], n_workers, G_acc[ps], D_acc[ps], n_D_step[ps], G_lr[ps], D_lr, n_epochs, shuffle[ps], train_fn[ps],
                                   G_lr_decay[ps], D_lr_decay, G_ch, D_ch[ps], n_test_every, n_save_every, seed, False, dim_z)
                sys.argv = ['train.py'] + args
                # Training is launched by exec-ing the training script with the
                # prepared sys.argv in this interpreter.
                exec(open("train.py").read())
            elif log['itr'][-1] < n_epochs:
                print('Log exists, but we are at iteration #', log['itr'][-1], ', continuing training to n_epochs.')
                args = format_args(n_bs[ps], n_bs_incep[ps], n_workers, G_acc[ps], D_acc[ps], n_D_step[ps], G_lr[ps], D_lr, n_epochs, shuffle[ps], train_fn[ps],
                                   G_lr_decay[ps], D_lr_decay, G_ch, D_ch[ps], n_test_every, n_save_every, seed, True, dim_z)
                sys.argv = ['train.py'] + args
                exec(open("train.py").read())
            else:
                log, f_model = get_log()
                print('Done training. FID =',np.nanmin(log['FID']))
                pass
# -
# +
# Collect the metric curves of every finished run.
FID = []
IS_mean = []
IS_std = []
for i in range(len(f_models)):
    #Get the log
    f_model = f_models[i]
    f_log = os.path.join('logs',f_models[i])
    log = get_log_info(f_log)
    #Get the performance metrics
    FID.append(log['FID'])
    IS_mean.append(log['IS_mean'])
    IS_std.append(log['IS_std'])
#Get the latent dimension
dim_z = dim_z_vals
#Get itr of any one experiment
itr = log['itr']
#Convert to np array
FID = np.array(FID)
IS_mean = np.array(IS_mean)
IS_std = np.array(IS_std)
#Start figure
plt.figure(figsize=[16,6])
# Best Inception Score per run (nanargmax over the training curve).
plt.subplot(1,2,1)
ind = np.reshape(np.nanargmax(IS_mean, axis=1),[-1,1])
plt.errorbar(dim_z_vals, np.take_along_axis(IS_mean, ind, axis=1).squeeze(),
             yerr=np.take_along_axis(IS_std, ind, axis=1).squeeze())
plt.xscale('log')
plt.title('Inception Score')
plt.ylabel('IS')
plt.xlabel('dim_z')
# Best (minimum) FID per run.
plt.subplot(1,2,2)
plt.semilogx(dim_z_vals, np.nanmin(FID, axis=1))
plt.title('Frechet Inception Distance')
plt.ylabel('FID')
plt.xlabel('dim_z')
plt.show()
# -
# Scratch cells below.
# NOTE(review): torch is used here before its import statement further down;
# presumably it is already in scope via `from LL_functions import *` — confirm.
set_seed(0)
x = torch.randn(6, 2)
# plt.errorbar(dim_z_vals, IS_mean[:,ind], yerr=IS_std[:,ind])
x.shape
dim_z
x = -1
y = 1
z = 2
if not (z == -1 or z > 0):
    print('fail')
else:
    print('pass')
import torch
import numpy as np
a = torch.randn(10)
a[0] = float('nan')
a
|
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 2
# language: python
# name: python2
# ---
# +
# %matplotlib inline
import radical.analytics as ra
import radical.pilot as rp
import radical.utils as ru
# NOTE(review): aliasing radical.entk as `re` shadows the stdlib regex
# module name for the rest of this notebook.
import radical.entk as re
import more_itertools as mit
from glob import glob
import numpy as np
from matplotlib import pyplot as plt
from matplotlib import cm
import csv
import pandas as pd
import json
from scipy.optimize import curve_fit
from scipy import stats
from pprint import pprint
from sklearn.metrics import r2_score
import matplotlib as mpl
mpl.rcParams['text.usetex'] = True
# NOTE(review): 'text.latex.unicode' is gone from newer matplotlib; this
# presumably targets the old Python 2 matplotlib stack — confirm.
mpl.rcParams['text.latex.unicode'] = True
# Named colormaps reused by the plots below.
blues = cm.get_cmap(plt.get_cmap('Blues'))
greens = cm.get_cmap(plt.get_cmap('Greens'))
reds = cm.get_cmap(plt.get_cmap('Reds'))
oranges = cm.get_cmap(plt.get_cmap('Oranges'))
purples = cm.get_cmap(plt.get_cmap('Purples'))
greys = cm.get_cmap(plt.get_cmap('Greys'))
# Widen the notebook display and silence warnings for cleaner output.
from IPython.core.display import display, HTML
display(HTML("<style>.container { width:100% !important; }</style>"))
import warnings
warnings.filterwarnings('ignore')
# !radical-stack
# +
# Collect per-image tiling times from the unit sandboxes of two sessions and
# join each image with its size from the catalogue CSV.
images = pd.read_csv('../Data/Des3Images.csv')
train_des2_tilling = pd.DataFrame(columns=['Image','Size','Time'])
sids = ['../Data/Design3Run2_2','../Data/Design1M5']
for sid in sids:
    tiling_files = glob(sid+'/pilot.0000/unit.00*/tilling*.csv')
    for tiling_file in tiling_files:
        Tilling = pd.read_csv(tiling_file)
        for index, row in Tilling.iterrows():
            try:
                image = row['Image'].split('/')[-1]
                size = int(images.loc[images['Filename'] == row['Image']]['Size'].values[0])
                time = row['End'] - row['Start']
                train_des2_tilling.loc[len(train_des2_tilling)] = [image,size,time]
            except:
                # NOTE(review): bare except silently drops rows whose image is
                # missing from the catalogue; the Python 2 print just logs them.
                print row['Image']
# Bin the samples by image size (125-wide bins) and keep per-bin statistics.
train_des2_tilling_binned = pd.DataFrame(columns=['Bin','SizeArray','SizeMean','SizeStd','SizeMin','SizeMax','TimeArray','TimeMean','TimeStd','TimeMin','TimeMax'])
for i in range(50,2800,125):
    train_des2_tilling_binned.loc[len(train_des2_tilling_binned)] = [[i, i + 125],train_des2_tilling[(i < train_des2_tilling['Size']) & (train_des2_tilling['Size']< (i + 125))]['Size'].values,
        train_des2_tilling[(i < train_des2_tilling['Size']) & (train_des2_tilling['Size']< (i + 125))]['Size'].values.mean(),
        train_des2_tilling[(i < train_des2_tilling['Size']) & (train_des2_tilling['Size']< (i + 125))]['Size'].values.std(),
        train_des2_tilling[(i < train_des2_tilling['Size']) & (train_des2_tilling['Size']< (i + 125))]['Size'].values.min(),
        train_des2_tilling[(i < train_des2_tilling['Size']) & (train_des2_tilling['Size']< (i + 125))]['Size'].values.max(),
        train_des2_tilling[(i < train_des2_tilling['Size']) & (train_des2_tilling['Size']< (i + 125))]['Time'].values,
        train_des2_tilling[(i < train_des2_tilling['Size']) & (train_des2_tilling['Size']< (i + 125))]['Time'].values.mean(),
        train_des2_tilling[(i < train_des2_tilling['Size']) & (train_des2_tilling['Size']< (i + 125))]['Time'].values.std(),
        train_des2_tilling[(i < train_des2_tilling['Size']) & (train_des2_tilling['Size']< (i + 125))]['Time'].values.min(),
        train_des2_tilling[(i < train_des2_tilling['Size']) & (train_des2_tilling['Size']< (i + 125))]['Time'].values.max()]
# +
# Same collection as above but for the prediction-stage (pred*.csv) timings;
# here the catalogue is keyed by absolute path + '.tif'.
images = pd.read_csv('../Data/Des3Images.csv')
train_des2_pred = pd.DataFrame(columns=['Image','Size','Time'])
path = '/pylon5/mc3bggp/bspitz/Seals/'
sids = ['../Data/Design1M5','../Data/Design3Run2_2']
for sid in sids:
    tiling_files = glob(sid+'/pilot.0000/unit.00*/pred*.csv')
    for tiling_file in tiling_files:
        Tilling = pd.read_csv(tiling_file)
        for index, row in Tilling.iterrows():
            try:
                image = row['Image']
                abs_name = path + row['Image'] + '.tif'
                size = int(images.loc[images['Filename'] == abs_name]['Size'].values[0])
                time = row['End'] - row['Start']
                train_des2_pred.loc[len(train_des2_pred)] = [image,size,time]
            except:
                # NOTE(review): bare except — unmatched images are only printed.
                print abs_name
# Bin by image size, mirroring the tiling binning above.
train_des2_pred_binned = pd.DataFrame(columns=['Bin','SizeArray','SizeMean','SizeStd','SizeMin','SizeMax','TimeArray','TimeMean','TimeStd','TimeMin','TimeMax'])
for i in range(50,2800,125):
    train_des2_pred_binned.loc[len(train_des2_pred_binned)] = [[i, i + 125],train_des2_pred[(i < train_des2_pred['Size']) & (train_des2_pred['Size']< (i + 125))]['Size'].values,
        train_des2_pred[(i < train_des2_pred['Size']) & (train_des2_pred['Size']< (i + 125))]['Size'].values.mean(),
        train_des2_pred[(i < train_des2_pred['Size']) & (train_des2_pred['Size']< (i + 125))]['Size'].values.std(),
        train_des2_pred[(i < train_des2_pred['Size']) & (train_des2_pred['Size']< (i + 125))]['Size'].values.min(),
        train_des2_pred[(i < train_des2_pred['Size']) & (train_des2_pred['Size']< (i + 125))]['Size'].values.max(),
        train_des2_pred[(i < train_des2_pred['Size']) & (train_des2_pred['Size']< (i + 125))]['Time'].values,
        train_des2_pred[(i < train_des2_pred['Size']) & (train_des2_pred['Size']< (i + 125))]['Time'].values.mean(),
        train_des2_pred[(i < train_des2_pred['Size']) & (train_des2_pred['Size']< (i + 125))]['Time'].values.std(),
        train_des2_pred[(i < train_des2_pred['Size']) & (train_des2_pred['Size']< (i + 125))]['Time'].values.min(),
        train_des2_pred[(i < train_des2_pred['Size']) & (train_des2_pred['Size']< (i + 125))]['Time'].values.max()]
# +
def func_lin(x, a, b):
    # Linear model fitted to the binned mean execution times.
    return a * x + b
# Fit bins 4..18 (the well-populated size range) of the tiling times.
xdata = train_des2_tilling_binned['SizeMean'].values[4:19]
ydata = train_des2_tilling_binned['TimeMean'].values[4:19]
poptTillingDes1lin, pcovTillingDes1lin = curve_fit(func_lin,xdata,ydata)
y = func_lin(xdata, *poptTillingDes1lin)
# Residual standard error with 2 fitted parameters.
s_test = np.sqrt(np.sum(np.power((train_des2_tilling_binned['TimeMean'].values[4:19] - y), 2))/(len(y)-2))
print 'Linear: R-squared', r2_score(train_des2_tilling_binned['TimeMean'].values[4:19],y), '$S_{error}$', s_test
print 'Parameter Values', poptTillingDes1lin
# -
# Box plot of per-bin tiling times with the fitted line and +/- 1 SE band.
fig,axis = plt.subplots(nrows=1,ncols=1,figsize=(11,5),sharey='row')
_ = axis.boxplot(train_des2_tilling_binned['TimeArray'].values)
_ = axis.errorbar(range(1,23),train_des2_tilling_binned['TimeMean'].values,yerr=train_des2_tilling_binned['TimeStd'].values,marker='o',label='Mean Execution Time')
_ = axis.plot(range(4,19), y, 'r-', label="Fitted Linear Curve")
_ = axis.fill_between(range(4,19),y - s_test, y + s_test, color=reds(250), alpha=0.2)
_ = axis.set_ylabel('Execution Time in seconds',fontsize=24)
_ = axis.set_xlabel('Bin Index',fontsize=24)
_ = axis.set_xticklabels(axis.get_xticks().astype('int').tolist(),fontsize=22)
_ = axis.set_yticklabels(axis.get_yticks().astype('int').tolist(),fontsize=22)
_ = axis.grid('on')
_ = axis.legend(fontsize=20)
# fig.savefig('stage_0_tx_box_des2.pdf',dpi=800,bbox_inches='tight')
# +
def func_lin(x, a, b):
    # Linear model (redefined here; identical to the one above).
    return a * x + b
# Fit the same bin range of the prediction-stage times.
xdata = train_des2_pred_binned['SizeMean'].values[4:19]
ydata = train_des2_pred_binned['TimeMean'].values[4:19]
poptPredDes1lin, pcovPredDes1lin = curve_fit(func_lin,xdata,ydata)
y = func_lin(xdata, *poptPredDes1lin)
# Residual standard error with 2 fitted parameters.
s_test = np.sqrt(np.sum(np.power((train_des2_pred_binned['TimeMean'].values[4:19] - y), 2))/(len(y)-2))
print 'Linear: R-squared', r2_score(train_des2_pred_binned['TimeMean'].values[4:19],y), '$S_{error}$', s_test
print 'Parameter Values', poptPredDes1lin
# -
# Box plot of per-bin prediction times with the fitted line and +/- 1 SE band.
fig,axis = plt.subplots(nrows=1,ncols=1,figsize=(11,5),sharey='row')
_ = axis.boxplot(train_des2_pred_binned['TimeArray'].values)
_ = axis.errorbar(range(1,23),train_des2_pred_binned['TimeMean'].values,yerr=train_des2_pred_binned['TimeStd'].values,marker='o',label='Mean Execution Time')
_ = axis.plot(range(4,19), y, 'g-', label="Fitted Linear Curve")
_ = axis.fill_between(range(4,19),y - s_test, y + s_test, color=greens(250), alpha=0.2)
_ = axis.set_ylabel('Execution Time in seconds',fontsize=24)
_ = axis.set_xlabel('Bin Index',fontsize=24)
_ = axis.set_xticklabels(axis.get_xticks().astype('int').tolist(),fontsize=22)
_ = axis.set_yticklabels(axis.get_yticks().astype('int').tolist(),fontsize=22)
_ = axis.grid('on')
_ = axis.legend(fontsize=20)
# fig.savefig('stage_1_tx_box_des2.pdf',dpi=800,bbox_inches='tight')
|
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: conda_python3
# language: python
# name: conda_python3
# ---
# ### Data Processing
# DataPreparing.ipynbで準備したデータ
#
# s3://###default bucket###/xgboost-churn-stepfunctions/xgboost-churn/churn.txt
# #### 実行権限
# Notebookインスタンスの実行Roleに AmazonEC2ContainerRegistryFullAccess をアタッチします
# 1. IAM Consoleに移動します
# 1. Roleからのnotebookの実行権限Roleを検索します
# 1. アクセス権限 tabにあるポリシーをアタッチする、をクリックします
# 1. PolicyからAmazonEC2ContainerRegistryFullAccessを検索して、アタッチします
# #### preprocessing
# +
import sagemaker
sess = sagemaker.Session()
bucket = sess.default_bucket()
# Fetch the dataset from S3
import boto3
s3 = boto3.resource('s3')
s3.Bucket(bucket).download_file('xgboost-churn-stepfunctions/xgboost-churn/churn.txt', 'churn.txt')
sagemaker.__version__
# -
# 上記セルを実行して、SageMaker Python SDK Version が 1.xx.x の場合、以下のセルのコメントアウトを解除してから実行してください。実行が完了したら、上にあるメニューから [Kernel] -> [Restart kernel] を選択してカーネルを再起動してください。
#
# 再起動が完了したら、このノートブックの一番上のセルから再度実行してください。その場合、以下のセルを実行する必要はありません。
# +
# #!pip install -U --quiet "sagemaker==2.16.1"
# -
# #### ライブラリのセットアップ
import pandas as pd
import numpy as np
import matplotlib.pyplot as plt
import io
import os
import sys
import time
import json
from IPython.display import display
from time import strftime, gmtime
import re
# NOTE(review): duplicate of the `import pandas as pd` above — harmless.
import pandas as pd
# Load the churn dataset downloaded from S3 and show all columns.
churn = pd.read_csv('./churn.txt')
pd.set_option('display.max_columns', 500)
churn
#
# データをみると 3,333 行のデータしかなく、現在の機械学習の状況から見ると、やや小さいデータセットです。各データのレコードは、ある米国の携帯電話会社の顧客のプロフィールを説明する21の属性からなります。その属性というのは、
#
# - State: 顧客が居住している米国州で、2文字の省略形で記載されます (OHとかNJのように)
# - Account Length: アカウントが利用可能になってからの経過日数
# - Area Code: 顧客の電話番号に対応する3桁のエリアコード
# - Phone: 残りの7桁の電話番号
# - Int’l Plan: 国際電話のプランに加入しているかどうか (yes/no)
# - VMail Plan: Voice mail の機能を利用しているかどうか (yes/no)
# - VMail Message: 1ヶ月の Voice mail のメッセージの平均長
# - Day Mins: 1日に通話した時間(分)の総和
# - Day Calls: 1日に通話した回数の総和
# - Day Charge: 日中の通話にかかった料金
# - Eve Mins, Eve Calls, Eve Charge: 夜間通話にかかった料金
# - Night Mins, Night Calls, Night Charge: 深夜通話にかかった料金
# - Intl Mins, Intl Calls, Intl Charge: 国際通話にかかった料金
# - CustServ Calls: カスタマーサービスに電話をかけた回数
# - Churn?: そのサービスから離反したかどうか (true/false)
#
# 最後の属性 Churn? は目的変数として知られ、MLのモデルで予測する属性になります。目的変数は2値 (binary) なので、ここで作成するモデルは2値の予測を行います。これは2値分類といわれます。
#
# それではデータを詳しく見てみます。
#
# まずはカテゴリデータごとにデータの頻度をみてみます。カテゴリデータは、State, Area code, Phone, Int’l Plan, VMail Plan, Churn?で、カテゴリを表す文字列や数値がデータとして与えられているものです。pandasではある程度自動で、カテゴリデータを認識し、objectというタイプでデータを保存します。以下では、object 形式のデータをとりだして、カテゴリごとの頻度を表示します。
#
# また describe()を利用すると各属性の統計量を一度に見ることができます。
# +
# Frequency tables for each categorical feature
for column in churn.select_dtypes(include=['object']).columns:
    display(pd.crosstab(index=churn[column], columns='% observations', normalize='columns'))
# Summary statistics and histograms for each numeric feature
display(churn.describe())
# %matplotlib inline
hist = churn.hist(bins=30, sharey=True, figsize=(10, 10))
# -
# データを見てみると以下のことに気づくと思います。
#
# - State の各頻度はだいたい一様に分布しています。
# - Phone はすべて同じ数値になっていて手がかりになりそうにありません。この電話番号の最初の3桁はなにか意味がありそうですが、その割当に意味がないのであれば、使うのは止めるべきでしょう
# - たった14%の顧客が離反しているので、インバランスなデータと言えるでしょうが、そこまで極端ではありません
# - 数値的な特徴量は都合の良い形で分布しており、多くは釣り鐘のようなガウス分布をしています。ただ、VMail Messageは例外です。
# - Area code は数値データとみなされているようなので、非数値に変換しましょう
#
# さて、実際にPhoneの列を削除して、Area codeを非数値に変換します。
# Phone numbers carry no predictive signal; Area Code is categorical, not numeric.
churn = churn.drop('Phone', axis=1)
churn['Area Code'] = churn['Area Code'].astype(object)
# それでは次に各属性の値を、目的変数の True か False か、にわけて見てみます。
# +
# Cross-tabulate each categorical feature against the churn label...
for column in churn.select_dtypes(include=['object']).columns:
    if column != 'Churn?':
        display(pd.crosstab(index=churn[column], columns=churn['Churn?'], normalize='columns'))
# ...and plot per-label histograms for each numeric feature.
for column in churn.select_dtypes(exclude=['object']).columns:
    print(column)
    hist = churn[[column, 'Churn?']].hist(by='Churn?', bins=30)
    plt.show()
# -
# データ分析の結果から、離反する顧客について、以下のような傾向が考えられます。
#
# - 地理的にもほぼ一様に分散している
# - 国際通話を利用している
# - VoiceMailを利用していない
# - 通話時間で見ると長い通話時間と短い通話時間の人に分かれる
# - カスタマーサービスへの通話が多い (多くの問題を経験した顧客ほど離反するというのは理解できる)
#
# 加えて、離反する顧客に関しては、Day Mins と Day Charge で似たような分布を示しています。しかし、話せば話すほど、通常課金されるので、驚くことではないです。もう少し深く調べてみましょう。corr() を利用すると相関係数を求めることができます。
# Pairwise correlations and scatter matrix to spot redundant features.
display(churn.corr())
pd.plotting.scatter_matrix(churn, figsize=(12, 12))
plt.show()
# いくつかの特徴は互いに100%の相関をもっています。このような特徴があるとき、機械学習のアルゴリズムによっては全くうまくいかないことがあり、そうでなくても結果が偏ったりしてしまうことがあります。これらの相関の強いペアは削除しましょう。Day Mins に対する Day Charge、Night Mins に対する Night Charge、Intl Mins に対する Intl Charge を削除します。
# Drop the *Charge columns: each is perfectly correlated with its *Mins twin.
churn = churn.drop(['Day Charge', 'Eve Charge', 'Night Charge', 'Intl Charge'], axis=1)
# ここまででデータセットの前処理は完了です。これから利用する機械学習のアルゴリズムを決めましょう。前述したように、数値の大小 (中間のような数値ではなく)で離反を予測するような変数を用意すると良さそうです。線形回帰のようなアルゴリズムでこれを行う場合は、複数の項(もしくはそれらをまとめた項)を属性として用意する必要があります。
#
# そのかわりに、これを勾配ブースティング木 (Gradient Boosted Tree)を利用しましょう。Amazon SageMaker は、マネージドで、分散学習が設定済みで、リアルタイム推論のためのホスティングも可能な XGBoost コンテナを用意しています。XGBoost は、特徴感の非線形な関係を考慮した勾配ブースティング木を利用しており、特徴感の複雑な関連性を扱うことができます。
#
# Amazon SageMaker の XGBoostは、csv または LibSVM 形式のデータを学習することができます。ここでは csv を利用します。csv は以下のようなデータである必要があります。
#
# - 1列目が予測対象のデータ
# - ヘッダ行はなし
#
# まずはじめに、カテゴリ変数を数値データに変換する必要があります。get_dummies() を利用すると数値データへの変換が可能です。
#
# そして、Churn?_Trueのデータを最初の列にもってきて、Churn?_False., Churn?_True.のデータを削除した残りのデータをconcatenate (連結) します。
# One-hot encode categoricals and move the positive churn label to column 0
# (SageMaker XGBoost expects the target in the first CSV column, no header).
model_data = pd.get_dummies(churn)
model_data = pd.concat([model_data['Churn?_True.'], model_data.drop(['Churn?_False.', 'Churn?_True.'], axis=1)], axis=1)
# ここで学習用、バリデーション用、テスト用データにわけましょう。これによって overfitting (学習用データには精度が良いが、実際に利用すると制度が悪い、といった状況) を回避しやすくなり、未知のテストデータに対する精度を確認することができます。
# 70/20/10 train/validation/test split with a fixed shuffle seed.
train_data, validation_data, test_data = np.split(model_data.sample(frac=1, random_state=1729), [int(0.7 * len(model_data)), int(0.9 * len(model_data))])
train_data.to_csv('train.csv', header=False, index=False)
validation_data.to_csv('validation.csv', header=False, index=False)
test_data.to_csv('test.csv', header=False, index=False)
# Upload the three splits to the session's default S3 bucket.
sagemaker_session = sagemaker.Session()
input_train = sagemaker_session.upload_data(path='train.csv', key_prefix='xgboost-churn-stepfunctions/xgboost-churn')
input_validation = sagemaker_session.upload_data(path='validation.csv', key_prefix='xgboost-churn-stepfunctions/xgboost-churn')
input_test = sagemaker_session.upload_data(path='test.csv', key_prefix='xgboost-churn-stepfunctions/xgboost-churn')
print(input_train)
print(input_validation)
print(input_test)
# ### 前処理をpythonスクリプトに書き出す
# ```python
# 今回のデータに必要な前処理
#
# import pandas as pd
#
# churn = pd.read_csv('./churn.txt')
# churn = churn.drop('Phone', axis=1)
# churn['Area Code'] = churn['Area Code'].astype(object)
# churn = churn.drop(['Day Charge', 'Eve Charge', 'Night Charge', 'Intl Charge'], axis=1)
# model_data = pd.get_dummies(churn)
# model_data = pd.concat([model_data['Churn?_True.'], model_data.drop(['Churn?_False.', 'Churn?_True.'], axis=1)], axis=1)
#
# train_data, validation_data, test_data = np.split(model_data.sample(frac=1, random_state=1729), [int(0.7 * len(model_data)), int(0.9 * len(model_data))])
# train_data.to_csv('train.csv', header=False, index=False)
# validation_data.to_csv('validation.csv', header=False, index=False)
# ```
# ### SageMaker Processing
# +
import boto3
# Use boto3 to look up the account ID and region needed to build the ECR repository URI.
account_id = boto3.client('sts').get_caller_identity().get('Account')
region = boto3.session.Session().region_name
print(region)
print(account_id)
ecr_repository = 'xgboost-churn-processing'
tag = ':latest'
# Full ECR image URI: <account>.dkr.ecr.<region>.amazonaws.com/<repo>:<tag>
nlpsample_repository_uri = '{}.dkr.ecr.{}.amazonaws.com/{}'.format(account_id, region, ecr_repository + tag)
# !$(aws ecr get-login --region $region --registry-ids $account_id --no-include-email)
# Create the repository.
# This command is not needed if the repository already exists.
# !aws ecr create-repository --repository-name $ecr_repository
# -
# !docker build -t xgboost-churn-processing .
# Push the docker image to ECR.
# !docker tag {ecr_repository + tag} $nlpsample_repository_uri
# !docker push $nlpsample_repository_uri
# ### localで実行の確認
# +
from sagemaker import get_execution_role
from sagemaker.processing import ScriptProcessor, ProcessingInput, ProcessingOutput
# IAM role the processing job runs under.
role = get_execution_role()
# ScriptProcessor runs processing.py inside the custom container pushed above.
# NOTE(review): image_uri hard-codes ap-northeast-1 while the rest of the
# notebook derives `region` dynamically — confirm this is intentional.
script_processor = ScriptProcessor(
    image_uri='%s.dkr.ecr.ap-northeast-1.amazonaws.com/%s:latest' % (account_id, ecr_repository),
    role=role,
    command=['python3'],
    instance_count=1,
    instance_type='ml.m5.xlarge')
# -
# Inputs/outputs are staged under /opt/ml/processing inside the container.
script_processor.run(code='processing.py',
                     inputs=[ProcessingInput(
                         source='churn.txt',
                         destination='/opt/ml/processing/input')],
                     outputs=[
                         ProcessingOutput(source='/opt/ml/processing/output/data')]
                     )
|
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# name: python3
# ---
# + [markdown] id="71vD2LrsVU0u"
# Import Libraries/ Read Data from GitHub
#
#
# + id="IVirOKYfUt7Z" executionInfo={"status": "ok", "timestamp": 1620232167054, "user_tz": 240, "elapsed": 1435, "user": {"displayName": "Bornita Das", "photoUrl": "https://lh4.googleusercontent.com/-CqYLRoBa-WA/AAAAAAAAAAI/AAAAAAAAABI/bZS9sts68Lw/s64/photo.jpg", "userId": "14595891489077571335"}}
import pandas as pd
from mlxtend.frequent_patterns import apriori
from mlxtend.frequent_patterns import association_rules
# Load the cleaned NYC arrest data for 2020.
url = 'https://raw.githubusercontent.com/duketran1996/NYC-Crime/main/clean-dataset/nypd_arrest_data_clean_2020.csv'
df = pd.read_csv(url)
# + id="IiDXa0l-VgvV" executionInfo={"status": "ok", "timestamp": 1620232169012, "user_tz": 240, "elapsed": 572, "user": {"displayName": "Bornita Das", "photoUrl": "https://lh4.googleusercontent.com/-CqYLRoBa-WA/AAAAAAAAAAI/AAAAAAAAABI/bZS9sts68Lw/s64/photo.jpg", "userId": "14595891489077571335"}}
# Load the NYC population census data (2019), used below to normalise arrest counts.
url1 = 'https://raw.githubusercontent.com/duketran1996/NYC-Crime/main/association_rule/nyc_population_census_2019.csv'
df_pop = pd.read_csv(url1)
# + id="mItT7nxV5BZe" colab={"base_uri": "https://localhost:8080/"} executionInfo={"status": "ok", "timestamp": 1620201707491, "user_tz": 240, "elapsed": 469, "user": {"displayName": "Bornita Das", "photoUrl": "https://lh4.googleusercontent.com/-CqYLRoBa-WA/AAAAAAAAAAI/AAAAAAAAABI/bZS9sts68Lw/s64/photo.jpg", "userId": "14595891489077571335"}} outputId="c37f82d8-72eb-4bf6-e508-51a127f6faa4"
# Inspect the available columns of the arrest data.
df.columns
# df.shape
# + [markdown] id="bbuDuUTBjKRE"
# Group Datasets by Borough and Race
# + id="_9UeZOVJx7OZ" executionInfo={"status": "ok", "timestamp": 1620232172301, "user_tz": 240, "elapsed": 368, "user": {"displayName": "Bornita Das", "photoUrl": "https://lh4.googleusercontent.com/-CqYLRoBa-WA/AAAAAAAAAAI/AAAAAAAAABI/bZS9sts68Lw/s64/photo.jpg", "userId": "14595891489077571335"}}
# Arrest counts per (borough, race) pair.
df_crime_race_dist = df.groupby(['ARREST_BORO','PERP_RACE'])['ARREST_KEY'].count()
df_crime_race_dist = df_crime_race_dist.to_frame()
# + id="c79ciVvQWNn_" executionInfo={"status": "ok", "timestamp": 1620232174276, "user_tz": 240, "elapsed": 327, "user": {"displayName": "Bornita Das", "photoUrl": "https://lh4.googleusercontent.com/-CqYLRoBa-WA/AAAAAAAAAAI/AAAAAAAAABI/bZS9sts68Lw/s64/photo.jpg", "userId": "14595891489077571335"}}
# Census population per (borough, race) pair.
df_pop_race_dist = df_pop.groupby(['BOROUGH','RACE'])['POPULATION'].sum()
df_pop_race_dist = df_pop_race_dist.to_frame()
# + [markdown] id="qo5Cyi14jZ0_"
# Join Datasets of Crime and Population to find normalised rate of crime by race in every borough
# + id="XDE7m6TVWT13" executionInfo={"status": "ok", "timestamp": 1620232176038, "user_tz": 240, "elapsed": 388, "user": {"displayName": "Bornita Das", "photoUrl": "https://lh4.googleusercontent.com/-CqYLRoBa-WA/AAAAAAAAAAI/AAAAAAAAABI/bZS9sts68Lw/s64/photo.jpg", "userId": "14595891489077571335"}}
df_joined = pd.concat([df_crime_race_dist, df_pop_race_dist], axis=1, join="inner")
# + [markdown] id="8W0LzGechnfz"
# ### **Normalise Crime Rate of Race by Population for each Borough**
#
# ---
#
#
#
# ---
#
#
# + id="jgxXfvAlf9M-" executionInfo={"status": "ok", "timestamp": 1620232177845, "user_tz": 240, "elapsed": 352, "user": {"displayName": "Bornita Das", "photoUrl": "https://lh4.googleusercontent.com/-CqYLRoBa-WA/AAAAAAAAAAI/AAAAAAAAABI/bZS9sts68Lw/s64/photo.jpg", "userId": "14595891489077571335"}}
normalise_race_of_crime = ((df_joined['ARREST_KEY']/df_joined['POPULATION'])*100)
# + id="0fOkfrr-hE2M" colab={"base_uri": "https://localhost:8080/"} executionInfo={"status": "ok", "timestamp": 1620232180175, "user_tz": 240, "elapsed": 328, "user": {"displayName": "Bornita Das", "photoUrl": "https://lh4.googleusercontent.com/-CqYLRoBa-WA/AAAAAAAAAAI/AAAAAAAAABI/bZS9sts68Lw/s64/photo.jpg", "userId": "14595891489077571335"}} outputId="40ee40dd-2a55-4dd7-9767-a1ced8367ef1"
normalise_race_of_crime
# + [markdown] id="hZlyxfSth28q"
# **Observation:**
# Normalised data shows black has much higher rate of crime per borough.
#
# But in my opinion this does not tell us much, as the data can be skewed.
#
# There can be 100 Black individuals arrested for 11923 crimes committed in Queens. While there could be 3445 white individual arrested for 3445 crimes in Queens.
# + [markdown] id="b-flZnEGsr2w"
# ### **ASSOCIATION RULES**
#
# ---
#
#
#
# ---
#
#
#
# Based on suggestions from: https://pbpython.com/market-basket-analysis.html
# + [markdown] id="GMwyxlCRiHj_"
# Function to one-hot encode the occurrence count of each offense.
# + id="fl_ymbMSZzys"
def encode_units(x):
    """One-hot encode an occurrence count.

    Returns 1 if the offense occurred at least once on that day, else 0.
    Fix: the original pair of `if` statements fell through and returned
    None for any value strictly between 0 and 1; this version is total
    over all numeric inputs.
    """
    return 1 if x >= 1 else 0
# + [markdown] id="NGuTchhjipN9"
# Finding association of offenses likely to occur together in **Manhattan** on a given day.
# + id="keBU8EUqswZf" colab={"base_uri": "https://localhost:8080/", "height": 540} executionInfo={"status": "ok", "timestamp": 1620185018811, "user_tz": 240, "elapsed": 533, "user": {"displayName": "Bornita Das", "photoUrl": "https://lh4.googleusercontent.com/-CqYLRoBa-WA/AAAAAAAAAAI/AAAAAAAAABI/bZS9sts68Lw/s64/photo.jpg", "userId": "14595891489077571335"}} outputId="b79c2a41-e040-4a69-9f51-e3f6d89de7f9"
# Build a daily basket for Manhattan: rows are arrest dates, columns are
# offense descriptions, values are arrest counts on that day.
basket_man = (df[df['ARREST_BORO'] =="Manhattan"]
          .groupby(['ARREST_DATE', 'OFNS_DESC'])['ARREST_KEY'].count().unstack().reset_index().fillna(0).set_index('ARREST_DATE'))
basket_man
# + [markdown] id="hZaHHD6_jh4D"
# There are a lot of zeros in the data but we also need to make sure any positive values are converted to a 1 and anything less than 1 is set to 0. This step will complete the one-hot encoding of the data.
# + id="MvkQ6eXTuk-o" colab={"base_uri": "https://localhost:8080/", "height": 540} executionInfo={"status": "ok", "timestamp": 1620184997093, "user_tz": 240, "elapsed": 303, "user": {"displayName": "Bornita Das", "photoUrl": "https://lh4.googleusercontent.com/-CqYLRoBa-WA/AAAAAAAAAAI/AAAAAAAAABI/bZS9sts68Lw/s64/photo.jpg", "userId": "14595891489077571335"}} outputId="8edbb4db-c3b6-47d4-cfa4-a8ea9258dd9d"
# One-hot encode: any positive daily count becomes 1, zero stays 0.
basket_sets_man = basket_man.applymap(encode_units)
basket_sets_man
# + id="rGVuu6zJu0yu"
frequent_itemsets_man = apriori(basket_sets_man, min_support=0.4, use_colnames=True)
# + id="U5AuePpou7JG" colab={"base_uri": "https://localhost:8080/", "height": 247} executionInfo={"status": "ok", "timestamp": 1620184948423, "user_tz": 240, "elapsed": 58774, "user": {"displayName": "Bornita Das", "photoUrl": "https://lh4.googleusercontent.com/-CqYLRoBa-WA/AAAAAAAAAAI/AAAAAAAAABI/bZS9sts68Lw/s64/photo.jpg", "userId": "14595891489077571335"}} outputId="2075633b-838b-4a8f-ee26-9335ca929091"
# Association rules between offenses, keeping rules with lift >= 1.
rules_man = association_rules(frequent_itemsets_man, metric="lift", min_threshold=1)
rules_man.head()
# + [markdown] id="T_drNGFPjnVy"
# ### Same process is repeated for each Borough
#
# ---
#
#
# + [markdown] id="mlMv75Xx-o8Y"
# Finding association of offenses likely to occur together in **Bronx** on a given day.
# + id="eqzxEYBl4_0h"
# Bronx: daily offense baskets -> one-hot encode -> frequent itemsets -> rules.
basket_brx = (df[df['ARREST_BORO'] =="Bronx"]
          .groupby(['ARREST_DATE', 'OFNS_DESC'])['ARREST_KEY'].count().unstack().reset_index().fillna(0).set_index('ARREST_DATE'))
# + id="p7Y-C76s_rq-"
basket_sets_brx = basket_brx.applymap(encode_units)
# + id="nwTWJ7VO_wec"
frequent_itemsets_brx = apriori(basket_sets_brx, min_support=0.4, use_colnames=True)
# + colab={"base_uri": "https://localhost:8080/", "height": 247} id="lE7I6jlBj9x7" executionInfo={"status": "ok", "timestamp": 1620185415357, "user_tz": 240, "elapsed": 38338, "user": {"displayName": "Bornita Das", "photoUrl": "https://lh4.googleusercontent.com/-CqYLRoBa-WA/AAAAAAAAAAI/AAAAAAAAABI/bZS9sts68Lw/s64/photo.jpg", "userId": "14595891489077571335"}} outputId="52c93eab-3edd-42e6-ab96-336c9aef7ef6"
rules_brx = association_rules(frequent_itemsets_brx, metric="lift", min_threshold=1)
rules_brx.head()
# + [markdown] id="GYxgbd1pkxKj"
# Finding association of offenses likely to occur together in **Queens** on a given day.
# + id="FG9hC_1mAEib"
basket_qns = (df[df['ARREST_BORO'] =="Queens"]
          .groupby(['ARREST_DATE', 'OFNS_DESC'])['ARREST_KEY'].count().unstack().reset_index().fillna(0).set_index('ARREST_DATE'))
# + id="XIfFxAEjECa4"
basket_sets_qns = basket_qns.applymap(encode_units)
frequent_itemsets_qns = apriori(basket_sets_qns, min_support=0.4, use_colnames=True)
# + colab={"base_uri": "https://localhost:8080/", "height": 247} id="YazZvfwnk9Ap" executionInfo={"status": "ok", "timestamp": 1620185522626, "user_tz": 240, "elapsed": 56035, "user": {"displayName": "Bornita Das", "photoUrl": "https://lh4.googleusercontent.com/-CqYLRoBa-WA/AAAAAAAAAAI/AAAAAAAAABI/bZS9sts68Lw/s64/photo.jpg", "userId": "14595891489077571335"}} outputId="385dbe00-57d1-4e97-e1d6-1355fe18cd14"
rules_qns = association_rules(frequent_itemsets_qns, metric="lift", min_threshold=1)
rules_qns.head()
# + [markdown] id="de0GsGb9lT-a"
# Finding association of offenses likely to occur together in **Brooklyn** on a given day.
# + id="2s4DscmsUsLi"
basket_brk = (df[df['ARREST_BORO'] =="Brooklyn"]
          .groupby(['ARREST_DATE', 'OFNS_DESC'])['ARREST_KEY'].count().unstack().reset_index().fillna(0).set_index('ARREST_DATE'))
# + id="ZvKctw4IUyeb"
basket_sets_brk = basket_brk.applymap(encode_units)
# NOTE(review): Brooklyn and Staten Island use min_support=0.5 while the other
# boroughs use 0.4 — confirm this asymmetry is intentional.
frequent_itemsets_brk = apriori(basket_sets_brk, min_support=0.5, use_colnames=True)
# + colab={"base_uri": "https://localhost:8080/", "height": 247} id="ugfwCD1Na3dV" executionInfo={"status": "ok", "timestamp": 1620185770036, "user_tz": 240, "elapsed": 61061, "user": {"displayName": "Bornita Das", "photoUrl": "https://lh4.googleusercontent.com/-CqYLRoBa-WA/AAAAAAAAAAI/AAAAAAAAABI/bZS9sts68Lw/s64/photo.jpg", "userId": "14595891489077571335"}} outputId="c634ff17-4af4-4b61-ce0b-ddace1ee194e"
rules_brk = association_rules(frequent_itemsets_brk, metric="lift", min_threshold=1)
rules_brk.head()
# + [markdown] id="XVK9r27AlmtT"
# Finding association of offenses likely to occur together in **Staten Island** on a given day.
# + id="ufgzWWxkdtmj"
basket_si = (df[df['ARREST_BORO'] =="Staten Island"]
          .groupby(['ARREST_DATE', 'OFNS_DESC'])['ARREST_KEY'].count().unstack().reset_index().fillna(0).set_index('ARREST_DATE'))
basket_sets_si = basket_si.applymap(encode_units)
frequent_itemsets_si = apriori(basket_sets_si, min_support=0.5, use_colnames=True)
# + colab={"base_uri": "https://localhost:8080/", "height": 247} id="dEgm8LHcd0Xy" executionInfo={"status": "ok", "timestamp": 1620185863012, "user_tz": 240, "elapsed": 327, "user": {"displayName": "Bornita Das", "photoUrl": "https://lh4.googleusercontent.com/-CqYLRoBa-WA/AAAAAAAAAAI/AAAAAAAAABI/bZS9sts68Lw/s64/photo.jpg", "userId": "14595891489077571335"}} outputId="56fd4524-99e7-4f25-fc6c-324863d12a00"
rules_si = association_rules(frequent_itemsets_si, metric="lift", min_threshold=1)
rules_si.head()
|
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# language: python
# name: python3
# ---
# %reset -f
# # Implementing KEGG API
#
# 1. REST: https://www.kegg.jp/kegg/rest/
# 2. API: https://www.kegg.jp/kegg/rest/keggapi.html
# 3. DB entry: https://www.kegg.jp/kegg/docs/dbentry.html
# 4. Weblinks: https://www.kegg.jp/kegg/docs/weblink.html
#
#
# ## Libraries Installed
#
# 1. conda install -c conda-forge notebook
# 2. conda install -c anaconda urllib3
# 3. conda install pandas
#
# ### Info:
#
# KEGG:
# kegg Kyoto Encyclopedia of Genes and Genomes
# kegg Release 97.0+/01-22, Jan 21
# Kanehisa Laboratories
# pathway 765,259 entries
# brite 266,803 entries
# module 516 entries
# orthology 23,978 entries
# genome 7,314 entries
# genes 33,358,984 entries
# compound 18,756 entries
# glycan 11,043 entries
# reaction 11,474 entries
# rclass 3,168 entries
# enzyme 7,813 entries
# network 1,443 entries
# variant 441 entries
# disease 2,478 entries
# drug 11,400 entries
# dgroup 2,310 entries
# environ 864 entries
# -----------------------------------------------------------------------------
#
# pathway KEGG Pathway Database
# path Release 97.0+/01-23, Jan 21
# Kanehisa Laboratories
# 765,259 entries
#
# linked db module
# ko
# genome
# <org>
# compound
# glycan
# reaction
# rclass
# enzyme
# network
# disease
# drug
# pubmed
#
# -----------------------------------------------------------------------------
#
# compound KEGG Compound Database
# cpd Release 97.0+/01-23, Jan 21
# Kanehisa Laboratories
# 18,756 entries
#
# linked db pathway
# brite
# module
# genome
# glycan
# reaction
# enzyme
# network
# disease
# drug
# environ
# pubchem
# chebi
#
# -----------------------------------------------------------------------------
#
# reaction KEGG Reaction Database
# rn Release 97.0+/01-23, Jan 21
# Kanehisa Laboratories
# 11,474 entries
#
# linked db pathway
# brite
# module
# ko
# compound
# glycan
# rclass
# enzyme
#
import urllib3
import io
import pandas as pd
def parseResponse(cols, *args):
    """Call a KEGG REST endpoint and parse the TSV response into a DataFrame.

    Parameters
    ----------
    cols : list of str
        Column names for the resulting DataFrame (one per TSV field).
    *args : str
        Path components appended to the KEGG REST base URL,
        e.g. ('list', 'pathway') -> http://rest.kegg.jp/list/pathway/

    Returns
    -------
    pandas.DataFrame
        One row per line of the response.
    """
    keggUrl = 'http://rest.kegg.jp/'
    for arg in args:
        keggUrl += arg + '/'
    http = urllib3.PoolManager()
    pathwayResponse = http.request('GET', keggUrl, preload_content=False)
    pathwayResponse.auto_close = False
    # Fix: collect rows in a plain list and build the DataFrame once at the
    # end — the original DataFrame.append-per-line pattern was O(n^2) and
    # DataFrame.append has been removed in pandas >= 2.0.
    rows = [line.strip('\n').split('\t') for line in io.TextIOWrapper(pathwayResponse)]
    return pd.DataFrame(rows, columns=cols)
# ## LIST
# Pathway: id -> description table for every KEGG pathway.
cols = ['pathwayId','pathwayDesc']
pathwayDF = parseResponse(cols, 'list','pathway')
print('Pathways Count: '+str(len(pathwayDF)))
pathwayDF.to_csv('data/pathway.tsv', sep='\t', index=False)
pathwayDF.head()
# Compound: id -> description table for every KEGG compound.
cols = ['compoundId','compoundDesc']
compoundDF = parseResponse(cols, 'list','cpd')
print('Compounds Count: '+str(len(compoundDF)))
compoundDF.to_csv('data/compound.tsv', sep='\t', index=False)
compoundDF.head()
# Reaction: id -> description table for every KEGG reaction.
cols = ['reactionId','reactionDesc']
reactionDF = parseResponse(cols, 'list','rn')
print('Reactions Count:' + str(len(reactionDF)))
reactionDF.to_csv('data/reaction.tsv', sep='\t', index=False)
reactionDF.head()
# KO: id -> description table for every KEGG Orthology entry.
cols = ['koId','koDesc']
koDF = parseResponse(cols, 'list','ko')
print('KO Count:' + str(len(koDF)))
koDF.to_csv('data/ko.tsv', sep='\t', index=False)
koDF.head()
# ## LINK
# Each link table maps one KEGG entity type to another; duplicates are dropped
# before writing each table to TSV.
# +
# Reaction -> Pathway
cols = ['reactionId', 'pathwayId']
reactionPathwayLinkDF = parseResponse(cols, 'link','pathway','rn')
reactionPathwayLinkDF = reactionPathwayLinkDF.drop_duplicates()
print('Reaction2Pathway Links:' + str(len(reactionPathwayLinkDF)))
reactionPathwayLinkDF.to_csv('data/reactionPathwayLink.tsv', sep='\t', index=False)
reactionPathwayLinkDF.head()
# +
# Compound <-> Reaction
cols = ['compoundId', 'reactionId']
compoundReactionLinkDF = parseResponse(cols, 'link','rn', 'cpd')
compoundReactionLinkDF = compoundReactionLinkDF.drop_duplicates()
print('Compound2Reaction Links:' + str(len(compoundReactionLinkDF)))
compoundReactionLinkDF.to_csv('data/compoundReactionLink.tsv', sep='\t', index=False)
compoundReactionLinkDF.head()
# +
# Compound <-> Pathway
cols = ['compoundId', 'pathwayId']
compoundPathwayLinkDF = parseResponse(cols, 'link','pathway', 'cpd')
compoundPathwayLinkDF = compoundPathwayLinkDF.drop_duplicates()
print('Compound2Pathway Links:' + str(len(compoundPathwayLinkDF)))
compoundPathwayLinkDF.to_csv('data/compoundPathwayLink.tsv', sep='\t', index=False)
compoundPathwayLinkDF.head()
# -
# KO <-> Pathway
cols = ['koId', 'pathwayId']
koPathwayLinkDF = parseResponse(cols, 'link','pathway', 'ko')
koPathwayLinkDF = koPathwayLinkDF.drop_duplicates()
print('KO2Pathway Links:' + str(len(koPathwayLinkDF)))
koPathwayLinkDF.to_csv('data/koPathwayLink.tsv', sep='\t', index=False)
koPathwayLinkDF.head()
# +
# KO <-> Reaction
cols = ['koId', 'reactionId']
koReactionLinkDF = parseResponse(cols, 'link','rn', 'ko')
koReactionLinkDF = koReactionLinkDF.drop_duplicates()
print('KO2Reaction Links:' + str(len(koReactionLinkDF)))
koReactionLinkDF.to_csv('data/koReactionLink.tsv', sep='\t', index=False)
koReactionLinkDF.head()
# -
# ## Reaction <=> Compound <=> Pathway
# Outer-join the two compound-keyed link tables on the compound id.
rnCpdPath = compoundReactionLinkDF.merge(compoundPathwayLinkDF,how='outer', on='compoundId', sort=True)
rnCpdPath = rnCpdPath.drop_duplicates()
print('rn <-> cpd <-> path # of rows: '+ str(len(rnCpdPath)))
rnCpdPath.head()
rnCpdPath[rnCpdPath['compoundId'] == 'cpd:C00031'] #glucose
rnCpdPath.to_csv('data/reaction-compound-pathway.tsv', sep='\t', index=False)
# ## Reaction <=> KO <=> Pathway
# Outer-join the KO->pathway and KO->reaction link tables on the KO id.
rnKoPath = koPathwayLinkDF.merge(koReactionLinkDF,how='outer', on='koId', sort=True)
# Fix: drop_duplicates() returns a new frame; the original discarded the
# result, so duplicates were never actually removed.
rnKoPath = rnKoPath.drop_duplicates()
print('rn <-> ko <-> path # of rows: '+ str(len(rnKoPath)))
rnKoPath.head()
rnKoPath.to_csv('data/reaction-KO-pathway.tsv', sep='\t', index=False)
# ## Reaction <=> KO <=> Compound <=> Pathway
# Combine the compound-centric and KO-centric tables on (reaction, pathway).
rnKoCpdPath = pd.merge(rnCpdPath, rnKoPath, how='outer', on=['reactionId', 'pathwayId'], sort=True )
# Fix: drop_duplicates() returns a new frame; the original discarded the
# result, so duplicates were never actually removed.
rnKoCpdPath = rnKoCpdPath.drop_duplicates()
print('rn <-> ko <-> cpd <-> path # of rows: '+ str(len(rnKoCpdPath)))
rnKoCpdPath.head()
rnKoCpdPath.to_csv('data/rn-KO-cpd-path.tsv', sep='\t', index=False)
# ## GET
#
# This gets back given DB entries. For e.g.:<br/>
# - For Reaction: http://rest.kegg.jp/get/rn:R00001 <br/>
# - For Coumpounds: http://rest.kegg.jp/get/cpd:C00001 <br/>
# - For Pathways: http://rest.kegg.jp/get/path:map00010
#
# ### Reaction
def hasDigit(string):
    """Return True if *string* contains at least one decimal digit."""
    return any(ch.isdigit() for ch in string)
def makeDataFrame(df, idColName):
    """Fetch the full KEGG flat-file record for every id in df[idColName]
    and parse each record into a {SECTION: value} dict.

    A line whose first token is all-caps, contains no digits and none of
    '-', ' ', ';', is longer than 3 chars, and is followed by content opens
    a new section; any other line is a continuation of the current section.

    Returns a DataFrame with one row per input id and one column per
    section name encountered.

    NOTE(review): issues one HTTP request per row (slow for full KEGG
    lists) and re-creates a PoolManager on every iteration.
    NOTE(review): if the first line of a response is not a section header,
    the continuation branch raises KeyError on dictionary[''] —
    presumably KEGG records always begin with an 'ENTRY' header; confirm.
    """
    #count = 0
    listOfDict = []                    # one parsed record dict per input row
    keggUrl = 'http://rest.kegg.jp/'
    nonColumnChars = ['-', ' ', ';']   # tokens containing these are never section names
    for row in df.iterrows():
        dictionary = {}
        lastKey = ''                   # name of the section currently being accumulated
        item = row[1][idColName]
        newKeggUrl = keggUrl + 'get/' + item
        http = urllib3.PoolManager()
        pathwayResponse = http.request('GET', newKeggUrl, preload_content=False)
        pathwayResponse.auto_close=False
        #count += 1
        for line in io.TextIOWrapper(pathwayResponse):
            lineElements = line.split(' ')
            listofElements = [x.strip() for x in lineElements if x]
            # Section header test (see docstring).
            if listofElements[0].isupper() \
            and not hasDigit(listofElements[0]) \
            and not any(map(listofElements[0].__contains__, nonColumnChars)) \
            and len(listofElements) > 1 \
            and len(listofElements[0]) > 3:
                lastKey = listofElements[0]
                if lastKey == 'ENZYME':
                    # Enzyme ids are space-separated on one line; join with ' | '.
                    dictionary[lastKey] = ' | '.join(listofElements[1:])
                elif lastKey in dictionary.keys():
                    dictionary[lastKey] += (' | '+'-'.join(listofElements[1:]))
                else:
                    dictionary[lastKey] = ' '.join(listofElements[1:])
            else:
                # Continuation line of the current section.
                if lastKey == 'COMMENT':
                    dictionary[lastKey] += (' '+' '.join(listofElements))
                else:
                    dictionary[lastKey] += (' | '+'-'.join(listofElements))
        # Strip the KEGG record terminator ('///') from the last section.
        dictionary[lastKey] = dictionary[lastKey].replace(' | ///', '')
        listOfDict.append(dictionary)
        #if count == 100:
            #print(dictionary)
            #break;
    return pd.DataFrame(listOfDict)
# +
# %%time
# Fetch and parse the full KEGG record for every reaction
# (slow: one HTTP request per entry).
keggReactionsDF = makeDataFrame(reactionDF, 'reactionId')
# -
keggReactionsDF.head()
keggReactionsDF.to_csv('data/KEGG-Reactions.tsv', sep='\t', index=False)
# ### Pathways
# +
# %%time
keggPathwaysDF = makeDataFrame(pathwayDF, 'pathwayId')
# -
keggPathwaysDF.head()
keggPathwaysDF.to_csv('data/KEGG-Pathways.tsv', sep='\t', index=False)
# ### Compounds
# +
# %%time
keggCompoundsDF = makeDataFrame(compoundDF, 'compoundId')
# -
keggCompoundsDF.head()
keggCompoundsDF.to_csv('data/KEGG-Compounds.tsv', sep='\t', index=False)
# ### KO
# +
# %%time
keggOrthologyDF = makeDataFrame(koDF, 'koId')
# -
keggOrthologyDF.head()
keggOrthologyDF.to_csv('data/KEGG-Orthology.tsv', sep='\t', index=False)
# ## CONV
# +
# %%time
# Compound <-> CHEBI id conversion table.
cols = ['compoundId', 'CHEBI']
compoundChebiConvDF = parseResponse(cols, 'conv', 'chebi', 'cpd')
print('CompoundCHEBI Conversions:' + str(len(compoundChebiConvDF)))
compoundChebiConvDF.to_csv('data/compoundChebiConv.tsv', sep='\t', index=False)
compoundChebiConvDF.head()
# -
|
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# name: python3
# ---
# + [markdown] id="Z0ORZ5iY4GS9"
# # INSTALLING SELENIUM ON COLAB
# + colab={"base_uri": "https://localhost:8080/"} id="cwILgFBGx5wB" outputId="90d9e56b-e5ff-4e6a-8790-dc59ba198582"
# !pip install selenium
# !apt-get update # to update ubuntu to correctly run apt install
# !apt install chromium-chromedriver
# !cp /usr/lib/chromium-browser/chromedriver /usr/bin
# + id="_D7bVs2GzX62"
import sys
sys.path.insert(0,'/usr/lib/chromium-browser/chromedriver')
# + [markdown] id="l9alSHuY3kiz"
# # GETTING ALL AVAILABLE LINKS FROM CREDLY SITE
#
# + id="PHLJm1ik0FMw"
from selenium import webdriver
# from webdriver_manager.chrome import ChromeDriverManager
from selenium.webdriver.support.ui import WebDriverWait # for implicit and explict waits
from selenium.common.exceptions import NoSuchElementException
from selenium.webdriver.common.keys import Keys
from time import sleep
import pandas as pd
import string
from tqdm import tqdm
def scrap_links(credit0, credit1, alpha=26):
    """Log in to credly.com and harvest badge links from the search results.

    Parameters
    ----------
    credit0 : str
        Account e-mail used to sign in.
    credit1 : str
        Account password.
    alpha : int
        How many letters of the alphabet to search (1..26); each letter is
        typed into the search bar and the resulting links are collected.

    Returns
    -------
    list of str
        Raw (possibly duplicated) list of collected hrefs.  A de-duplicated
        copy is also written to 'file_add.csv'.

    NOTE(review): uses the selenium 3.x `find_element_by_*` API, which was
    removed in selenium 4 — confirm the pinned selenium version.
    """
    print("Getting all links, you have to wait...")
    temp_list = []
    print("\nChecking driver in catch or Downloading it ...")
    # Headless Chrome options required to run inside Colab.
    option = webdriver.ChromeOptions()
    option.add_argument('--headless')
    option.add_argument('--no-sandbox')
    option.add_argument('--disable-dev-shm-usage')
    driver = webdriver.Chrome('chromedriver', options=option)
    # driver = webdriver.Chrome(ChromeDriverManager().install(),options=option)
    # Sign in with the supplied credentials.
    driver.get("https://www.credly.com/users/sign_in")
    WebDriverWait(driver, timeout=3)
    username = driver.find_element_by_name("email")
    username.send_keys(credit0)
    password = driver.find_element_by_name("password")
    password.send_keys(credit1)
    login_button = driver.find_element_by_xpath("//button[@type='submit']")
    login_button.click()
    sleep(5)
    vm_button = driver.find_element_by_xpath('//*[@id="website-header"]/div[4]/div/nav/div[1]')
    vm_button.click()
    sleep(1)
    # Search one letter at a time and collect every result link.
    for key in tqdm(string.ascii_lowercase[:alpha], position=0, leave=True):
        try:
            # Remove the previous single-character query, then type the next letter.
            searchbar = driver.find_element_by_id("search_input_2")
            searchbar.send_keys(Keys.BACKSPACE)
            searchbar.send_keys(key)
            sleep(3)
            vm_button = driver.find_element_by_xpath('/html/body/div[1]/div/div[2]/div/div/div[1]/nav/div[2]/ul/li[2]')
            vm_button.click()
            sleep(2)
            cfreq_source = driver.find_elements_by_xpath("/html/body/div[1]/div/div[2]/div/div/div[2]/div/div/div/div/a")
            for x in cfreq_source:
                temp_list.append(x.get_attribute('href'))
            sleep(3)
        except NoSuchElementException as ex:
            print("Content not found!\nErr: ", ex)
    try:
        # De-duplicate before persisting.
        df = pd.DataFrame(list(set(temp_list)))
        df.to_csv('file_add.csv', index=False, header=False)
    except OSError:
        # Fix: the original bare `except:` swallowed every exception,
        # including KeyboardInterrupt/SystemExit.
        print("Couldn't save file")
    print("quiting driver ...")
    driver.quit()
    return temp_list
# print(scrap_links('example@gmail.com','password',1))
# + [markdown] id="JImcXwZi37LS"
# # SCRAP PARAMETERED URLS
# + colab={"base_uri": "https://localhost:8080/"} id="0v_6fdEZvC9Y" outputId="6f24afe5-c978-4755-b966-18782f9cbf45"
from selenium import webdriver
# from webdriver_manager.chrome import ChromeDriverManager
from time import sleep
import pandas as pd
from tqdm import tqdm
def scrap_free(paratroops, add_list):
    """Filter collected credly links down to those whose cost/label matches.

    Parameters
    ----------
    paratroops : list of str
        Label values to keep (e.g. ['Free']).
    add_list : list of str
        Links to check; if empty, links are read from 'file_add.csv'.

    Returns
    -------
    list of str
        Matching links; a de-duplicated copy is written to 'para_list.csv'.
    """
    print("getting para links")  # fix: typo "geting" in the original message
    temp_list_free = []
    if len(add_list) > 0:
        temp_list = add_list
    else:
        try:
            df = pd.read_csv(r'file_add.csv')
            temp_list = df.iloc[:, 0].tolist()
        except Exception:
            # Fix: narrowed the original bare `except:`; exit() is meant for
            # interactive sessions, so raise SystemExit explicitly instead.
            print("Couldn't find file.")
            raise SystemExit("Dead Process")
    print("\nChecking driver in catch or Downloading it ...")
    # Headless Chrome options required to run inside Colab.
    option = webdriver.ChromeOptions()
    option.add_argument('--headless')
    option.add_argument('--no-sandbox')
    option.add_argument('--disable-dev-shm-usage')
    driver = webdriver.Chrome('chromedriver', options=option)
    # driver.maximize_window()
    for i in tqdm(temp_list, position=0, leave=True):
        driver.get(i)
        try:
            cost_exist = driver.find_elements_by_xpath("/html/body/main/div[1]/div/div/div[2]/div[1]/ul/li/span[2]")
            for x in cost_exist:
                if x.get_attribute("innerText") in paratroops:
                    print("\n", x.get_attribute("innerText"), " link: ", i)
                    temp_list_free.append(i)
        except Exception:
            # Fix: narrowed from a bare `except:`.
            print("Cost not found.")
    print("Closing browser")
    driver.close()
    try:
        if len(temp_list_free) > 0:
            df = pd.DataFrame(list(set(temp_list_free)))
            df.to_csv('para_list.csv', index=False, header=False)
    except OSError:
        # Fix: narrowed from a bare `except:`.
        print("Couldn't save file")
    return temp_list_free
# Example invocation: replace the credentials with real ones before running.
scrap_free(['Free'],scrap_links('example@gmail.com','password',26))
# Other cost/label options (choose any one):
# Validation
# Foundational
# Months
# Learning
# Intermediate
# Hours
# Experience
# Free
# Advanced
|
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# language: python
# name: python3
# ---
import numpy as np
from prediction import *
from gridworld import *
# 4x4 gridworld with discount factor 0.9.
env = GridWorld((4,4), 0.9)
# Smoke-test the environment API.
print(env.get_actions(1))
print(env.get_terminal_states())
print(env.get_random_actions([0,1,2,3]))
print(env.get_states())
print(env.reward(0, -1))
print(env.get_probability(7, -1, 3, 4))
# Build a deterministic policy that always picks the first available action.
states = env.get_states()
policy = [0] * len(states)
for s in states:
    actions = env.get_actions(s)
    # action = actions[np.random.randint(len(actions))]
    action = actions[0]
    policy[s] = action
print(policy)
# Evaluate the fixed policy, then run full policy iteration.
ipe = IterativePolicyEvaluation(env)
print(ipe.run(policy))
ipe.v
t = PolicyIteration(env)
t.run()
|
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# language: python
# name: python3
# ---
# # Developing, Training, and Deploying a TensorFlow model on Google Cloud Platform (completely within Jupyter)
#
#
# In Chapter 9 of [Data Science on the Google Cloud Platform](http://shop.oreilly.com/product/0636920057628.do), I trained a TensorFlow Estimator model to predict flight delays.
#
# In this notebook, we'll modernize the workflow:
# * Use eager mode for TensorFlow development
# * Use tf.data to write the input pipeline
# * Run the notebook as-is on Cloud using Deep Learning VM or Kubeflow pipelines
# * Deploy the trained model to AI Platform as a web service
#
# The combination of eager mode, tf.data and DLVM/KFP makes this workflow a lot easier.
# We don't need to deal with Python packages or Docker containers.
# + tags=["parameters"]
# change these to try this notebook out
# In "production", these will be replaced by the parameters passed to papermill
BUCKET = 'cloud-training-demos-ml'
PROJECT = 'cloud-training-demos'
REGION = 'us-central1'
DEVELOP_MODE = True  # True = tiny runs for fast iteration
NBUCKETS = 5 # for embeddings
NUM_EXAMPLES = 1000*1000 # assume 1 million examples
# -
import os
# Export for use by the bash cells below.
os.environ['BUCKET'] = BUCKET
os.environ['PROJECT'] = PROJECT
os.environ['REGION'] = REGION
# + language="bash"
# gcloud config set project $PROJECT
# gcloud config set compute/region $REGION
# -
# ## Creating the input data pipeline
# Sharded CSV files produced in Chapter 8 of the book.
DATA_BUCKET = "gs://cloud-training-demos/flights/chapter8/output/"
TRAIN_DATA_PATTERN = DATA_BUCKET + "train*"
VALID_DATA_PATTERN = DATA_BUCKET + "test*"
# !gsutil ls $DATA_BUCKET
# ### Use tf.data to read the CSV files
import os, json, math, shutil
import numpy as np
import tensorflow as tf
print("Tensorflow version " + tf.__version__)
# +
# Column order must match the CSV files; 'ontime' is the label.
CSV_COLUMNS = ('ontime,dep_delay,taxiout,distance,avg_dep_delay,avg_arr_delay' + \
               ',carrier,dep_lat,dep_lon,arr_lat,arr_lon,origin,dest').split(',')
LABEL_COLUMN = 'ontime'
# Per-column defaults double as type declarations for the CSV parser.
DEFAULTS = [[0.0],[0.0],[0.0],[0.0],[0.0],[0.0],\
            ['na'],[0.0],[0.0],[0.0],[0.0],['na'],['na']]
def load_dataset(pattern, batch_size=1):
    """Return a batched tf.data.Dataset of feature dicts parsed from CSVs matching *pattern*."""
    return tf.data.experimental.make_csv_dataset(pattern, batch_size, CSV_COLUMNS, DEFAULTS)
# -
# Sanity-check the pipeline by printing a few parsed batches (eager mode).
if DEVELOP_MODE:
    dataset = load_dataset(TRAIN_DATA_PATTERN)
    for n, data in enumerate(dataset):
        numpy_data = {k: v.numpy() for k, v in data.items()} # .numpy() works only in eager mode
        print(numpy_data)
        if n>3: break
# Example instances for online prediction once the model is deployed.
# %%writefile example_input.json
{"dep_delay": 14.0, "taxiout": 13.0, "distance": 319.0, "avg_dep_delay": 25.863039, "avg_arr_delay": 27.0, "carrier": "WN", "dep_lat": 32.84722, "dep_lon": -96.85167, "arr_lat": 31.9425, "arr_lon": -102.20194, "origin": "DAL", "dest": "MAF"}
{"dep_delay": -9.0, "taxiout": 21.0, "distance": 301.0, "avg_dep_delay": 41.050808, "avg_arr_delay": -7.0, "carrier": "EV", "dep_lat": 29.984444, "dep_lon": -95.34139, "arr_lat": 27.544167, "arr_lon": -99.46167, "origin": "IAH", "dest": "LRD"}
# +
def features_and_labels(features):
    """Split a feature dict into (features, label), removing the 'ontime' key in place."""
    target = features.pop('ontime')  # 'ontime' is the training target
    return features, target
def prepare_dataset(pattern, batch_size, truncate=None, mode=tf.estimator.ModeKeys.TRAIN):
    """Build the input pipeline: parse CSVs, split out the label, cache,
    and (for training) shuffle + repeat indefinitely.

    truncate: if not None, yield only that many batches (handy in DEVELOP_MODE).
    """
    dataset = load_dataset(pattern, batch_size)
    dataset = dataset.map(features_and_labels)
    dataset = dataset.cache()
    if mode == tf.estimator.ModeKeys.TRAIN:
        dataset = dataset.shuffle(1000)
        dataset = dataset.repeat()
    dataset = dataset.prefetch(1)
    if truncate is not None:
        dataset = dataset.take(truncate)
    return dataset
if DEVELOP_MODE:
    print("Calling prepare")
    one_item = prepare_dataset(TRAIN_DATA_PATTERN, batch_size=5, truncate=1)
    print(list(one_item)) # should print one batch of 2 items
# -
# ## Create TensorFlow wide-and-deep model
#
# We'll create feature columns, and do some discretization and feature engineering.
# See the book for details.
# +
import tensorflow as tf
# Real-valued (numeric) feature columns.
real = {
    colname : tf.feature_column.numeric_column(colname)
          for colname in
            ('dep_delay,taxiout,distance,avg_dep_delay,avg_arr_delay' +
             ',dep_lat,dep_lon,arr_lat,arr_lon').split(',')
}
# Categorical feature columns: explicit vocabulary for carriers,
# hash buckets for the high-cardinality airport codes.
sparse = {
      'carrier': tf.feature_column.categorical_column_with_vocabulary_list('carrier',
                  vocabulary_list='AS,VX,F9,UA,US,WN,HA,EV,MQ,DL,OO,B6,NK,AA'.split(',')),
      'origin' : tf.feature_column.categorical_column_with_hash_bucket('origin', hash_bucket_size=1000),
      'dest'   : tf.feature_column.categorical_column_with_hash_bucket('dest', hash_bucket_size=1000)
}
# Keras Input layers, one per feature, keyed by column name.
inputs = {
    colname : tf.keras.layers.Input(name=colname, shape=(), dtype='float32')
          for colname in real.keys()
}
inputs.update({
    colname : tf.keras.layers.Input(name=colname, shape=(), dtype='string')
          for colname in sparse.keys()
})
# -
# ### Feature engineering
# +
# Discretize latitude/longitude into NBUCKETS bins spanning the continental USA.
latbuckets = np.linspace(20.0, 50.0, NBUCKETS).tolist()  # USA
lonbuckets = np.linspace(-120.0, -70.0, NBUCKETS).tolist() # USA
disc = {}
disc.update({
       'd_{}'.format(key) : tf.feature_column.bucketized_column(real[key], latbuckets)
          for key in ['dep_lat', 'arr_lat']
})
disc.update({
       'd_{}'.format(key) : tf.feature_column.bucketized_column(real[key], lonbuckets)
          for key in ['dep_lon', 'arr_lon']
})
# cross columns that make sense in combination
sparse['dep_loc'] = tf.feature_column.crossed_column([disc['d_dep_lat'], disc['d_dep_lon']], NBUCKETS*NBUCKETS)
sparse['arr_loc'] = tf.feature_column.crossed_column([disc['d_arr_lat'], disc['d_arr_lon']], NBUCKETS*NBUCKETS)
sparse['dep_arr'] = tf.feature_column.crossed_column([sparse['dep_loc'], sparse['arr_loc']], NBUCKETS ** 4)
#sparse['ori_dest'] = tf.feature_column.crossed_column(['origin', 'dest'], hash_bucket_size=1000)
# embed all the sparse columns (embeddings feed the deep side of the model)
embed = {
       'embed_{}'.format(colname) : tf.feature_column.embedding_column(col, 10)
          for colname, col in sparse.items()
}
real.update(embed)
# one-hot encode the sparse columns (indicators feed the wide side of the model)
sparse = {
    colname : tf.feature_column.indicator_column(col)
          for colname, col in sparse.items()
}
if DEVELOP_MODE:
    print(sparse.keys())
    print(real.keys())
# -
# ## Train the model and evaluate once in a while
#
# Also checkpoint
model_dir='gs://{}/flights/trained_model'.format(BUCKET)
os.environ['OUTDIR'] = model_dir # needed for deployment
print('Writing trained model to {}'.format(model_dir))
# !gsutil -m rm -rf $OUTDIR
# +
# Build a wide-and-deep model.
def wide_and_deep_classifier(inputs, linear_feature_columns, dnn_feature_columns, dnn_hidden_units):
    """Build and compile a wide-and-deep binary classifier.

    The deep path passes the dense feature columns through a stack of ReLU
    layers; the wide path feeds the linear (sparse/crossed) columns in
    directly.  Both paths are concatenated and mapped to a single sigmoid
    output for binary classification.

    Args:
        inputs: dict of Keras Input tensors keyed by feature name.
        linear_feature_columns: feature columns for the wide (linear) path.
        dnn_feature_columns: feature columns for the deep (DNN) path.
        dnn_hidden_units: iterable of hidden-layer sizes for the deep stack.

    Returns:
        A compiled tf.keras.Model (Adam optimizer, binary cross-entropy,
        accuracy metric).
    """
    # Deep path: dense features followed by the hidden-layer stack.
    deep = tf.keras.layers.DenseFeatures(dnn_feature_columns, name='deep_inputs')(inputs)
    for idx, width in enumerate(dnn_hidden_units, start=1):
        deep = tf.keras.layers.Dense(width, activation='relu', name='dnn_{}'.format(idx))(deep)
    # Wide path: linear features fed in without hidden layers.
    wide = tf.keras.layers.DenseFeatures(linear_feature_columns, name='wide_inputs')(inputs)
    merged = tf.keras.layers.concatenate([deep, wide], name='both')
    output = tf.keras.layers.Dense(1, activation='sigmoid', name='pred')(merged)
    model = tf.keras.Model(inputs, output)
    model.compile(optimizer='adam', loss='binary_crossentropy', metrics=['accuracy'])
    return model
model = wide_and_deep_classifier(
inputs,
linear_feature_columns = sparse.values(),
dnn_feature_columns = real.values(),
dnn_hidden_units = [64, 32])
tf.keras.utils.plot_model(model, 'flights_model.png', show_shapes=False, rankdir='LR')
# +
# training and evaluation dataset
train_batch_size = 64
if DEVELOP_MODE:
eval_batch_size = 100
steps_per_epoch = 3
epochs = 2
else:
eval_batch_size = 10000
steps_per_epoch = NUM_EXAMPLES // train_batch_size
epochs = 10
train_dataset = prepare_dataset(TRAIN_DATA_PATTERN, train_batch_size)
eval_dataset = prepare_dataset(VALID_DATA_PATTERN, eval_batch_size, eval_batch_size*10, tf.estimator.ModeKeys.EVAL)
checkpoint_path = '{}/checkpoints/flights.cpt'.format(model_dir)
shutil.rmtree(checkpoint_path, ignore_errors=True)
cp_callback = tf.keras.callbacks.ModelCheckpoint(checkpoint_path,
save_weights_only=True,
verbose=1)
history = model.fit(train_dataset,
validation_data=eval_dataset,
epochs=epochs,
steps_per_epoch=steps_per_epoch,
callbacks=[cp_callback])
# -
print(history.history.keys())
# +
import matplotlib.pyplot as plt
nrows = 1
ncols = 2
fig = plt.figure(figsize=(10, 5))
for idx, key in enumerate(['loss', 'accuracy']):
ax = fig.add_subplot(nrows, ncols, idx+1)
plt.plot(history.history[key])
plt.plot(history.history['val_{}'.format(key)])
plt.title('model {}'.format(key))
plt.ylabel(key)
plt.xlabel('epoch')
plt.legend(['train', 'validation'], loc='upper left');
# -
# ## Export and deploy the trained model
import time
export_dir = '{}/export/flights_{}'.format(model_dir, time.strftime("%Y%m%d-%H%M%S"))
print('Exporting to {}'.format(export_dir))
tf.keras.experimental.export_saved_model(model, export_dir)
# + language="bash"
# model_dir=$(gsutil ls ${OUTDIR}/export | tail -1)
# echo $model_dir
# saved_model_cli show --dir ${model_dir} --all
# + language="bash"
# MODEL_NAME="flights"
# MODEL_VERSION="kfp"
# TFVERSION="2.0"
# MODEL_LOCATION=$(gsutil ls ${OUTDIR}/export/exporter | tail -1)
# echo "Run these commands one-by-one (the very first time, you'll create a model and then create a version)"
# #yes | gcloud ml-engine versions delete ${MODEL_VERSION} --model ${MODEL_NAME}
# #gcloud ml-engine models delete ${MODEL_NAME}
# gcloud ml-engine models create ${MODEL_NAME} --regions $REGION
# gcloud ml-engine versions create ${MODEL_VERSION} --model ${MODEL_NAME} --origin ${MODEL_LOCATION} --runtime-version $TFVERSION
# -
# !gcloud ml-engine predict --model=flights --version=kfp --json-instances=example_input.json
# Copyright 2016 Google Inc. Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. You may obtain a copy of the License at http://www.apache.org/licenses/LICENSE-2.0 Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the specific language governing permissions and limitations under the License
|
# -*- coding: utf-8 -*-
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .jl
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Julia 1.5.3
# language: julia
# name: julia-1.5
# ---
# # Lagrange Multiplier Motivation
# ## Optimization problem with equality constraint
using Optim, Calculus, LinearAlgebra, Plots
# +
# Objective function
f(x) = -exp(-(x[1] * x[2] - 3/2)^2 - (x[2]- 3 /2)^2)
# Equality constraint
h(x) = x[1] - x[2]^2;
# -
# ## Find optimal solution
# +
# Substitute the constraint into the objective function
x₁(x₂) = x₂.^2
fu(x₂) = f([x₁(x₂); x₂])
# solve unconstrained objective
res = optimize(x₂ -> fu(first(x₂)), [0.], LBFGS())
opt_x₂ = first(Optim.minimizer(res))
opt_x = [x₁(opt_x₂), opt_x₂]; min_f = f(opt_x)
println("Optimal x₁, x₂: ", opt_x)
println(" Min f: ", min_f)
# -
# ## Alignment of gradients
# Objective and constraint gradients
∇f = Calculus.gradient(f)
∇h = Calculus.gradient(h);
# +
# eval gradients at optimal point
nf = normalize!(∇f(opt_x))
nh = normalize!(∇h(opt_x));
println("nf: ", nf)
println("nh: ", nh)
# +
# plots
pyplot(xlabel="x₁", ylabel="x₂", colorbar=false, aspect_ratio=:equal)
x = y = LinRange(0, 2.5, 100)
contour(x, y, (x, y)->f([x, y]), levels=[1.1, 1, 0.75, 0.55, 0.3, 0.1]*min_f, c=:black, contour_labels=true)
contour!(x, y, (x, y)->h([x, y]), levels=[0.], c=:red)
scatter!([x₁(opt_x₂)], [opt_x₂], markersize=5, c=:white, label="optimum x")
perpend_x = opt_x + nh
tangent_x = opt_x + [-nh[2], nh[1]]
plot!([opt_x[1], perpend_x[1]], [opt_x[2], perpend_x[2]], color=:blue, label="perpendicular")
plot!([opt_x[1], tangent_x[1]], [opt_x[2], tangent_x[2]], color=:purple, label="tangent")
# -
∇f(opt_x) ./ ∇h(opt_x)
|
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# language: python
# name: python3
# ---
# + _cell_guid="b1076dfc-b9ad-4769-8c92-a6c4dae69d19" _uuid="8f2839f25d086af736a60e9eeb907d3b93b6e0e5"
# This Python 3 environment comes with many helpful analytics libraries installed
# It is defined by the kaggle/python Docker image: https://github.com/kaggle/docker-python
# For example, here's several helpful packages to load
import numpy as np # linear algebra
import pandas as pd # data processing, CSV file I/O (e.g. pd.read_csv)
# Input data files are available in the read-only "../input/" directory
# For example, running this (by clicking run or pressing Shift+Enter) will list all files under the input directory
import os
for dirname, _, filenames in os.walk('/kaggle/input'):
for filename in filenames:
print(os.path.join(dirname, filename))
# You can write up to 5GB to the current directory (/kaggle/working/) that gets preserved as output when you create a version using "Save & Run All"
# You can also write temporary files to /kaggle/temp/, but they won't be saved outside of the current session
# + _cell_guid="79c7e3d0-c299-4dcb-8224-4455121ee9b0" _uuid="d629ff2d2480ee46fbb7e2d37f6b5fab8052498a"
import warnings
warnings.filterwarnings('ignore')
# -
titanic = pd.read_csv('train.csv')
titanic.head()
## checking the head of our data set
titanic.info()
## checking info of all columns
titanic.shape
## checking shape of data set
titanic.describe()
## statistical information about numerical variable
# # **Data Quality Check**
#
# handling missing values as well
#
round(100*(titanic.isnull().sum()/len(titanic)),2)
## checking missing value percentage in all columns
titanic.drop('Cabin',axis=1,inplace=True)
## cabin almost have 77% of missing values hence remove this column from data set
age_median = titanic['Age'].median(skipna=True)
titanic['Age'].fillna(age_median,inplace=True)
## as there is 19% of missing values in age column hence it is not a good idea to remove this row wise or column wise hence impute those missing values with the median of age
titanic = titanic[titanic['Embarked'].isnull()!=True]
## as embarked has a very small amount of missing values hence remove those rows which have missing values in embarked column
titanic.shape
## checking shape after removing null values
# # duplicate check
titanic_dub = titanic.copy()
## creating copy of the data frame to check duplicate values
titanic_dub.shape
## comparing shapes of two data frames
titanic.shape
## shape of original data frame
# # checking for outliers
import seaborn as sns
import matplotlib.pyplot as plt
## importing libraries for data visualitation
# +
plt.figure(figsize=(15,5), dpi=80)
plt.subplot(1,4,1)
sns.boxplot(y=titanic['Age'])
plt.title("Outliers in 'Age'")
plt.subplot(1,4,2)
ax = sns.boxplot(y=titanic['Fare'])
ax.set_yscale('log')
plt.title("Outliers in 'Fare'")
plt.subplot(1,4,3)
sns.boxplot(y=titanic['SibSp'])
plt.title("Outliers in 'SibSp'")
plt.subplot(1,4,4)
sns.boxplot(y=titanic['Parch'])
plt.title("Outliers in 'Parch'")
#ax.set_yscale('log')
plt.tight_layout()
plt.show()
## plotting all four variables to check for outliers
## it clearly shows that all four variables has some outliers
# +
sns.catplot(x="SibSp", col = 'Survived', data=titanic, kind = 'count', palette='pastel')
sns.catplot(x="Parch", col = 'Survived', data=titanic, kind = 'count', palette='pastel')
plt.tight_layout()
plt.show()
## plotting of sibsp and parch in basis of survived and not survived
# -
# sibsp and parch basically tells us that whether a person is accompanied by someone else or not
# so we can make two category by merging them to find whether a single person is acompanied by some one else or not
def alone(x):
    """Flag whether a passenger travelled with family.

    Returns 1 when the row has any siblings/spouses (SibSp) or
    parents/children (Parch) aboard, otherwise 0.  Note that despite the
    column name, 1 means *accompanied* and 0 means travelling alone.
    """
    companions = x['SibSp'] + x['Parch']
    return 1 if companions > 0 else 0
titanic['Alone'] = titanic.apply(alone,axis=1)
## creating a function to make one variable which tells us whether a person is single or accompanied by someone on the ship
sns.catplot(x="Alone", col = 'Survived', data=titanic, kind = 'count', palette='pastel')
plt.show()
# it clearly shows that those person who are not alone survived more
## drop parch and sibsp
titanic = titanic.drop(['Parch','SibSp'],axis=1)
titanic.head()
sns.distplot(titanic['Fare'])
plt.show()
# there is some skewness in the fare column
# hence removing the skewness using log function
titanic['Fare'] = titanic['Fare'].map(lambda x: np.log(x) if x>0 else 0)
## converting fare into a logarithmic scale
sns.distplot(titanic['Fare'])
plt.show()
## again check the distribution of fare
# +
sns.catplot(x="Sex", y="Survived", col="Pclass", data=titanic, saturation=.5, kind="bar", ci=None, aspect=0.8, palette='deep')
sns.catplot(x="Sex", y="Survived", col="Embarked", data=titanic, saturation=.5, kind="bar", ci=None, aspect=0.8, palette='deep')
plt.show()
## plotting of survive on basis of pclass
# -
# females are more likely to be survived
survived_0 = titanic[titanic['Survived']==0]
survived_1 = titanic[titanic['Survived']==1]
## divided our dataset into survived or not survived to check the distribution of age in both the cases
survived_0.shape
## checking shape of the data set that contains the data of passengers who not survived
survived_1.shape
## checking shape of the data set that contains the data of passengers who survived
sns.distplot(survived_0['Age'])
plt.show()
## checking distribution of age in not survived data set
sns.distplot(survived_1['Age'])
plt.show()
## checking distribution of age in survived dataset
# young persons are survived more (age group between 20-40)
sns.boxplot(x='Survived',y='Fare',data=titanic)
plt.show()
## checking survival rate on basis of fare
# those who are survived paid more fares
# # creating dummy variables
# +
Pclass_dummy = pd.get_dummies(titanic['Pclass'],prefix='Pclass',drop_first=True)
Pclass_dummy.head()
## creating dummy variables for pclass
# -
## joing dummy variables
titanic = pd.concat([titanic,Pclass_dummy],axis=1)
titanic.head()
titanic.drop('Pclass',axis=1,inplace=True)
## as there is no use of pclass after joining the columns that contains dummy variables for pclass
Embarked_dummy = pd.get_dummies(titanic['Embarked'],drop_first=True)
Embarked_dummy.head()
## creating dummy variables for embarked and dropping first column
titanic = pd.concat([titanic,Embarked_dummy],axis=1)
titanic.drop('Embarked',axis=1,inplace=True)
## joining dummy variables
titanic.head()
## checking head of the data set after joining dummy variables
# +
def sex_map(x):
    """Encode 'male' as 1 and 'female' as 0.

    Any other value (including NaN) maps to None, matching the implicit
    fall-through of the original if/elif chain.
    """
    return {'male': 1, 'female': 0}.get(x)
titanic['Sex'] = titanic['Sex'].apply(lambda x:sex_map(x))
## creating function for convert sex into binary values
# -
from sklearn.preprocessing import StandardScaler
## import libraries for scaling data
# +
scaler = StandardScaler()
cols = ['Age','Fare']
titanic[cols] = scaler.fit_transform(titanic[cols])
titanic.head()
## using standardization method of scaling for age and fare variables
# -
# no use of passengerid name and ticket column in our prediction
titanic.drop(['Name','Ticket'],axis=1,inplace=True)
## dropping name and ticket column
titanic.head()
## checking head after converting all values
titanic.set_index('PassengerId')
## set index as passengerid
## creating heatmap for checking corelations of variables
sns.heatmap(titanic.corr(),annot=True)
plt.show()
# there are some highly co related dummy variables hence removing such columns
titanic.drop(['Pclass_2','Q'],axis=1,inplace=True)
## removing highly co related dummy variables pclass_2 and q
sns.heatmap(titanic.corr(),annot=True)
plt.show()
## again checking co relations of variables
y_train = titanic.pop('Survived')
X_train = titanic
## divided train data into x and y as independent and dependent variable
X_train = titanic[['Sex','Age','Fare','Alone','Pclass_3','S']]
X_train.head()
## selecting all columns instead of passengerid for our x
## checking head of x after that
import statsmodels.api as sm
## import stats model to build our first model
logm1 = sm.GLM(y_train,sm.add_constant(X_train),family = sm.families.Binomial())
res1 = logm1.fit()
res1.summary()
from statsmodels.stats.outliers_influence import variance_inflation_factor
vif = pd.DataFrame()
vif['Features'] = X_train.columns
vif['VIF'] = [variance_inflation_factor(X_train.values, i) for i in range(X_train.shape[1])]
vif['VIF'] = round(vif['VIF'], 2)
vif = vif.sort_values(by = "VIF", ascending = False)
vif
## removing alone as it has high p value
X_train.drop('Alone',axis=1,inplace=True)
logm2 = sm.GLM(y_train,sm.add_constant(X_train),family = sm.families.Binomial())
res2 = logm2.fit()
res2.summary()
vif = pd.DataFrame()
vif['Features'] = X_train.columns
vif['VIF'] = [variance_inflation_factor(X_train.values, i) for i in range(X_train.shape[1])]
vif['VIF'] = round(vif['VIF'], 2)
vif = vif.sort_values(by = "VIF", ascending = False)
vif
X_train.drop('Fare',axis=1,inplace=True)
logm3 = sm.GLM(y_train,sm.add_constant(X_train),family = sm.families.Binomial())
res3 = logm3.fit()
res3.summary()
vif = pd.DataFrame()
vif['Features'] = X_train.columns
vif['VIF'] = [variance_inflation_factor(X_train.values, i) for i in range(X_train.shape[1])]
vif['VIF'] = round(vif['VIF'], 2)
vif = vif.sort_values(by = "VIF", ascending = False)
vif
# our final model is done
X_train.columns
y_train_pred = res3.predict(sm.add_constant(X_train))
y_train_pred[:10]
y_train_pred = y_train_pred.values.reshape(-1)
y_train_pred[:10]
final_pred = pd.DataFrame({'Survived':y_train.values,'Survived_prob':y_train_pred})
final_pred['PassengerId'] = np.arange(1,len(final_pred)+1)
final_pred.head()
final_pred.info()
final_pred['predicted'] = final_pred['Survived_prob'].apply(lambda x: 1 if x>0.5 else 0)
final_pred.head()
# checking accuracy of the model
from sklearn import metrics
confusion = metrics.confusion_matrix(final_pred.Survived,final_pred.predicted)
print(confusion)
metrics.accuracy_score(final_pred.Survived,final_pred.predicted)
## lets define all values of confusion matrix
TN = confusion[0,0]
FP = confusion[0,1]
FN = confusion[1,0]
TP = confusion[1,1]
## lets calculate sensitivity
TP/float(TP+FN)
## lets calculate specificity
TN/float(TN+FP)
## false positive rate
FP/ float(TN+FP)
## positive predictive value
TP / float(TP+FP)
## negative predictive value
TN / float(TN+ FN)
# # plotting ROC curve
def draw_roc(actual, probs):
    """Plot the ROC curve for the given true labels and predicted probabilities.

    Computes FPR/TPR via sklearn's roc_curve, annotates the legend with the
    ROC AUC, shows the figure, and returns None.
    """
    fpr, tpr, thresholds = metrics.roc_curve(actual, probs, drop_intermediate=False)
    auc_score = metrics.roc_auc_score(actual, probs)
    plt.figure(figsize=(5, 5))
    plt.plot(fpr, tpr, label='ROC curve (area = %0.2f)' % auc_score)
    # Chance diagonal for reference.
    plt.plot([0, 1], [0, 1], 'k--')
    plt.xlim([0.0, 1.0])
    plt.ylim([0.0, 1.05])
    plt.xlabel('False Positive Rate or [1 - True Negative Rate]')
    plt.ylabel('True Positive Rate')
    plt.title('Receiver operating characteristic example')
    plt.legend(loc="lower right")
    plt.show()
    return None
fpr, tpr, thresholds = metrics.roc_curve( final_pred.Survived, final_pred.Survived_prob, drop_intermediate = False )
draw_roc(final_pred.Survived, final_pred.Survived_prob)
# # finding optimal cutoff point
numbers = [float(x)/10 for x in range(10)]
for i in numbers:
final_pred[i]= final_pred.Survived_prob.map(lambda x: 1 if x > i else 0)
final_pred.head()
# +
# Now let's calculate accuracy sensitivity and specificity for various probability cutoffs.
cutoff_df = pd.DataFrame( columns = ['prob','accuracy','sensi','speci'])
from sklearn.metrics import confusion_matrix
# TP = confusion[1,1] # true positive
# TN = confusion[0,0] # true negatives
# FP = confusion[0,1] # false positives
# FN = confusion[1,0] # false negatives
num = [0.0,0.1,0.2,0.3,0.4,0.5,0.6,0.7,0.8,0.9]
for i in num:
cm1 = metrics.confusion_matrix(final_pred.Survived, final_pred[i] )
total1=sum(sum(cm1))
accuracy = (cm1[0,0]+cm1[1,1])/total1
speci = cm1[0,0]/(cm1[0,0]+cm1[0,1])
sensi = cm1[1,1]/(cm1[1,0]+cm1[1,1])
cutoff_df.loc[i] =[ i ,accuracy,sensi,speci]
print(cutoff_df)
# -
cutoff_df.plot.line(x='prob', y=['accuracy','sensi','speci'])
plt.show()
final_pred['final_predicted'] = final_pred['Survived_prob'].apply(lambda x: 1 if x>0.3 else 0)
final_pred.head()
final_confusion = metrics.confusion_matrix(final_pred.Survived,final_pred.final_predicted)
print(final_confusion)
metrics.accuracy_score(final_pred.Survived,final_pred.final_predicted)
## lets define all values of confusion matrix
TN = final_confusion[0,0]
FP = final_confusion[0,1]
FN = final_confusion[1,0]
TP = final_confusion[1,1]
## lets calculate sensitivity
TP/float(TP+FN)
## lets calculate specificity
TN/float(TN+FP)
# Calculate false positive rate - predicting survival when the passenger did not actually survive
print(FP/ float(TN+FP))
# Positive predictive value
print (TP / float(TP+FP))
# Negative predictive value
print (TN / float(TN+ FN))
titanic_test = pd.read_csv('/kaggle/input/titanic/test.csv')
titanic_test.head()
titanic_test.info()
titanic_test['Sex'] = titanic_test['Sex'].apply(lambda x:sex_map(x))
titanic_test.head()
titanic_test.drop(['Name','SibSp','Parch','Ticket','Cabin'],axis=1,inplace=True)
titanic_test.head()
Pclass = pd.get_dummies(titanic_test['Pclass'],prefix = 'Pclass')
Pclass.head()
titanic_test = pd.concat([titanic_test,Pclass],axis=1)
titanic_test.head()
Embarked = pd.get_dummies(titanic_test['Embarked'])
titanic_test = pd.concat([titanic_test,Embarked],axis=1)
titanic_test.head()
titanic_test.drop(['Pclass','Embarked','Pclass_1','Pclass_2','C','Q'],axis=1,inplace=True)
titanic_test[['Age','Fare']] = scaler.transform(titanic_test[['Age','Fare']])
titanic_test.drop('Fare',axis=1,inplace=True)
age_median = titanic_test['Age'].median(skipna=True)
titanic_test['Age'].fillna(age_median,inplace=True)
titanic_test.info()
X_test = titanic_test[['Sex', 'Age', 'Pclass_3', 'S']]
X_test.columns
y_test_pred = res3.predict(sm.add_constant(X_test))
y_test_pred.head()
test_final = pd.DataFrame({'PassengerId': titanic_test.PassengerId,'Survived_prob':y_test_pred.values})
test_final.head()
test_final['Survived'] = test_final['Survived_prob'].apply(lambda x:1 if x>0.3 else 0)
test_final.head()
test_final.drop('Survived_prob',axis=1,inplace = True)
test_final.to_csv("prediction_titanic.csv",index=False)
# # checking model validation using ks statistics
# It stands for Kolmogorov–Smirnov which is named after Andrey Kolmogorov and Nikolai Smirnov. It compares the two cumulative distributions and returns the maximum difference between them. It is a non-parametric test which means you don't need to test any assumption related to the distribution of data. In KS Test, Null hypothesis states null both cumulative distributions are similar. Rejecting the null hypothesis means cumulative distributions are different.
# In data science, it compares the cumulative distribution of events and non-events and KS is where there is a maximum difference between the two distributions. In simple words, it helps us to understand how well our predictive model is able to discriminate between events and non-events.
final_pred.head()
ks_stat_check = final_pred.iloc[ : ,[1,14]]
ks_stat_check.shape
## using function for calculate ks statistics
def ks(data=None,target=None, prob=None):
    """Compute a decile-based Kolmogorov-Smirnov table for a binary classifier.

    data   : DataFrame holding the target and probability columns.
             NOTE(review): mutated in place — 'target0' and 'bucket' columns
             are added to the caller's frame.
    target : name of the binary event column (1 = event).
    prob   : name of the predicted-probability column used for bucketing.

    Returns the 10-row decile table; also prints it and the max KS value.
    """
    # Complement of the event flag: 1 when the row is a non-event.
    data['target0'] = 1 - data[target]
    # Split rows into 10 equal-frequency probability buckets.
    data['bucket'] = pd.qcut(data[prob], 10)
    grouped = data.groupby('bucket', as_index = False)
    kstable = pd.DataFrame()
    kstable['min_prob'] = grouped.min()[prob]
    kstable['max_prob'] = grouped.max()[prob]
    kstable['events'] = grouped.sum()[target]
    kstable['nonevents'] = grouped.sum()['target0']
    # Order deciles from highest to lowest probability before cumulating.
    kstable = kstable.sort_values(by="min_prob", ascending=False).reset_index(drop = True)
    kstable['event_rate'] = (kstable.events / data[target].sum()).apply('{0:.2%}'.format)
    kstable['nonevent_rate'] = (kstable.nonevents / data['target0'].sum()).apply('{0:.2%}'.format)
    kstable['cum_eventrate']=(kstable.events / data[target].sum()).cumsum()
    kstable['cum_noneventrate']=(kstable.nonevents / data['target0'].sum()).cumsum()
    # KS statistic per decile: gap between the two cumulative distributions.
    kstable['KS'] = np.round(kstable['cum_eventrate']-kstable['cum_noneventrate'], 3) * 100
    #Formating
    kstable['cum_eventrate']= kstable['cum_eventrate'].apply('{0:.2%}'.format)
    kstable['cum_noneventrate']= kstable['cum_noneventrate'].apply('{0:.2%}'.format)
    kstable.index = range(1,11)
    kstable.index.rename('Decile', inplace=True)
    pd.set_option('display.max_columns', 9)
    print(kstable)
    #Display KS
    from colorama import Fore
    print(Fore.RED + "KS is " + str(max(kstable['KS']))+"%"+ " at decile " + str((kstable.index[kstable['KS']==max(kstable['KS'])][0])))
    return(kstable)
mydf = ks(data=ks_stat_check,target="final_predicted", prob="Survived_prob")
# event means survived (1) and non events means not survived (0)
# this matrix used to check the discriminatory power of the model
|
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# language: python
# name: python3
# ---
# +
# %matplotlib inline
import sys
sys.path.insert(0, '../python/casadi_f16')
import f16
import control
import numpy as np
import matplotlib.pyplot as plt
import scipy.linalg
import sympy
from analysis import loop_analysis, rlocus, bode
plt.rcParams['figure.figsize'] = (10, 10)
# -
# # Pitch-Rate CAS Design-Dhruv Jain
#
p = f16.Parameters()
x0, u0 = f16.trim(x=f16.State(VT=550), p=p, phi_dot=0, theta_dot=0, psi_dot=0.349066, gam=0)
# +
def f_control(t, x):
    """Control law: hold the trim inputs and superimpose a 1-degree,
    2 Hz sinusoidal excitation on the elevator command."""
    elv_excitation = 1 * np.sin(2 * np.pi * 2 * t)
    return f16.Control(
        thtl=u0.thtl,
        ail_cmd_deg=u0.ail_cmd_deg,
        elv_cmd_deg=u0.elv_cmd_deg + elv_excitation,
        rdr_cmd_deg=u0.rdr_cmd_deg)
f_control(0, x0)
# -
# # A,B,C,D matrices
ss = f16.StateSpace
ss = f16.linearize(x0, u0, p)
print("A= ",ss.A)
print("B= ",ss.B)
print("C= ",ss.C)
print("D= ",ss.D)
res = f16.simulate(x0, f_control, p, 0, 20, 0.01)
plt.plot(res['t'], np.rad2deg(res['x'][:, f16.State().name_to_index('alpha')]))
plt.xlabel('t ,sec')
plt.ylabel(r'$\alpha$, deg')
plt.grid()
plt.title('angle of attack')
# +
res = f16.simulate(x0, f_control, p, 0, 20, 0.01)
plt.plot(res['x'][:, f16.State().name_to_index('p_E')], res['x'][:, f16.State().name_to_index('p_N')])
plt.axis('equal');
plt.xlabel('East, ft')
plt.ylabel('North, ft')
plt.grid()
plt.title('trajectory')
# -
def f_control(t, x):
    """Same sinusoidal-elevator control law as above, but echoes the state.

    NOTE(review): the print(x) below dumps the state on every call from the
    integrator — presumably debug output; confirm before long simulations.
    """
    print(x)
    return f16.Control(
        thtl=u0.thtl,
        ail_cmd_deg=u0.ail_cmd_deg,
        elv_cmd_deg=u0.elv_cmd_deg + 1*np.sin(2*np.pi*2*t),
        rdr_cmd_deg=u0.rdr_cmd_deg)
def select(n, i):
    """Build a static (gain-only) state-space system that selects output i
    from an n-vector: D is a 1×n row of zeros with a 1 at column i."""
    row = np.zeros((1, n))
    row[0, i] = 1
    return control.ss([], [], [], row)
ss = f16.linearize(x0, u0, p)
s = control.tf([1, 0], [0, 1])
G = -(180/np.pi)*ss.sub_system(x=['alpha', 'Q', 'elv_deg'],
u=['elv_cmd_deg'], y=['alpha', 'Q']).to_control()
sys3 = control.feedback(G, 0.2*10/(s+10)*select(2, 0))
# # Transfer Function
PI_c = 2.2 + 1*0.8/s
G0 = PI_c*sys3[1,0]
Gc = G0/(1+G0)
print('The Transfer Function is: ',Gc.minreal())
# +
t,y = control.step_response(Gc,T=np.linspace(0,1,1000))
#plt.figure(figsize=(l))
plt.plot(t,y)
print(max(y))
for i in range(len(t)):
if y[i]==max(y):
print(t[i])
print('The PI control satisfy the given constraint')
# -
# # 10 Deg
t,y = control.step_response(Gc,T=np.linspace(0,1,1000))
plt.plot(t,10*y)
# # 100 Deg
t,y = control.step_response(Gc,T=np.linspace(0,1,1000))
plt.plot(t,100*y)
from analysis import rlocus
H = (10/(s+10))*select(2, 0)
plt.figure()
kalpha = 0.3
rlocus('alpha', control.minreal(H*G), kvect=np.linspace(0, 10, 1000), k=kalpha);
plt.plot([0, -2], [0, 2], '--')
plt.axis([-21, 0, -8, 8])
plt.figure()
sys3 = control.feedback(G, kalpha*(10/(s+10))*select(2, 0))
rlocus('p', (s+3)/s*sys3[1, 0], kvect=np.linspace(0, 1, 1000), k=0.5)
plt.plot([0, -10], [0, 10*np.cos(0.707)], '--')
#plt.axis([-20, 0, -5, 5])
|
// -*- coding: utf-8 -*-
// ---
// jupyter:
// jupytext:
// text_representation:
// extension: .scala
// format_name: light
// format_version: '1.5'
// jupytext_version: 1.14.4
// kernelspec:
// display_name: Scala
// language: scala
// name: scala
// ---
// # Provingground - HoTT
//
// These notes concern the object _HoTT_, which has the core implementation of homotopy type theory. Implementation details are (rather, will be) in the [scaladocs](http://siddhartha-gadgil.github.io/ProvingGround/).
//
// The major components of homotopy type theory implemented in the object HoTT are
//
// * Terms, types and Universes.
// * Function and dependent function types.
// * λs.
// * Pairs and Dependent pairs.
// * Disjoint union types.
// * Types 0 and 1 and an object in the latter.
// * Identity types
//
// Inductive types, induction and recursion are in different objects as they are rather subtle. The other major way (also not in the _HoTT_ object) of constructing non-composite types is to wrap scala types, possibly including symbolic algebra.
//
// The _core_ project contains code that is agnostic to how it is run. In particular this also compiles to scala-js.
import $ivy.`io.github.siddhartha-gadgil::provingground-core-jvm:0.1.0`
// ### Universes, Symbolic types
//
// We have a family of universes, but mostly use the first one denoted by Type. Given a type, we can construct symbolic objects of that type. We construct such a type _A_.
import provingground._
repl.pprinter.bind(translation.FansiShow.fansiPrint)
import HoTT._
val A ="A" :: Type
A == Type.::("A")
// We consider a symbolic object of the type _A_
val a ="a" :: A
// ## Function types, lambdas, Identity
//
// Given types A and B, we have the function type A → B. An element of this is a function from A to B.
//
// We can construct functions using λ's. Here, for the type _A_, we construct the identity on _A_ using a lambda. We can then view this as a dependent function of _A_, giving the identity function.
//
// In this definition, two λ's are used, with the method _lmbda_ telling the compiler that the result is a (non-dependent) function.
val id = lambda(A)(lmbda(a)(a))
// The type of the identity function is a mixture of Pi-types and function types. Which of these to use is determined by checking dependence of the type of the value on the varaible in a λ-definition.
id.typ
lmbda(a)(a).typ
lmbda(a)(a).typ.dependsOn(A)
// The lambdas have the same effect at runtime. It is checked if the type of the value depends on the variable.
// The result is either _LambdaFixed_ or _Lambda_ accordingly.
val indep = lmbda(a)(a)
val dep = lambda(a)(a)
indep == dep
// ### Hygiene for λs
//
// A new variable object, which has the same toString, is created in making lambdas. This is to avoid name clashes.
val l = dep.asInstanceOf[LambdaFixed[Term, Term]]
l.variable
l.variable == a
// ## Modus Ponens
//
// We construct Modus Ponens, as an object in Homotopy Type theory. Note that A ->: B is the function type A → B.
// +
val B = "B" :: Type
val f = "f" :: (A ->: B)
val mp = lambda(A)(lambda(B)(lmbda(a)(lmbda(f)(f(a)))))
// -
// The type of Modus Ponens is again a mixture of Pi-types and function types.
mp.typ
// We can apply modus ponens with the roles of _A_ and _B_ reversed. This still works because variable clashes are avoided.
val mpBA = mp(B)(A)
mpBA.typ == B ->: (B ->: A) ->: A
// ### Equality of λs
//
// Lambdas do not depend on the name of the variable.
val aa = "aa" :: A
lmbda(aa)(aa) == lmbda(a)(a)
(lmbda(aa)(aa))(a) == a
// ## Dependent types
//
// Given a type family, we can construct the corresponding Pi-types and Sigma-types. We start with a formal type family, which is just a symbolic object of the appropriate type.
val Bs = "B(_ : A)" :: (A ->: Type)
// ### Pi-Types
//
// In addition to the case class constructor, there is an agda/shapeless-like convenience method for constructing Pi-types. Namely, given a type expression that depends on a variable _a : A_, we can construct the Pi-type corresponding to the obtained λ-expression.
//
// Note that the !: method just claims and checks a type, and is useful (e.g. here) for documentation.
val fmly = (a !: A) ~>: (Bs(a) ->: A)
// ### Sigma-types
//
// There is also a convenience method for defining Sigma types using λs.
Sgma(a !: A, Bs(a))
Sgma(a !: A, Bs(a) ->: Bs(a) ->: A)
// ## Pair types
//
// Like functions and dependent functions, pairs and dependent pairs can be handled together. The _mkPair_ function assignes the right type after checking dependence, choosing between pair types, pairs and dependent pairs.
val ba = "b(a)" :: Bs(a)
val b = "b" :: B
mkPair(A, B)
mkPair(a, b)
mkPair(a, b).typ
mkPair(a, ba).typ
mkPair(A, B).asInstanceOf[ProdTyp[Term, Term]]
// ## Plus types
//
// We can also construct the plus type _A plus B_, which comes with two inclusion functions.
val AplusB = PlusTyp(A, B)
AplusB.incl1(a)
AplusB.incl2
// In the above, a λ was used, with a variable automatically generated. These have names starting with $ to avoid collision with user defined ones.
// ## Identity type
//
// We have an identity type associated to a type _A_, with reflexivity giving terms of this type.
val eqAa = IdentityTyp(A, a, a)
val ref = Refl(A, a)
ref.typ == eqAa
// ## The Unit and the Nought
//
// Finally, we have the types corresponding to _True_ and _False_
Unit
Zero
Star !: Unit
|
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# language: python
# name: python3
# ---
import pandas as pd
def create_table_is_heritage_data(heritage_data, index_name):
    """
    Pivot one indicator out of the heritage dataset into a country-by-year table.

    heritage_data : DataFrame with 'Country name', 'year' and indicator columns
    index_name    : name of the indicator column to pivot
    return        : DataFrame indexed by 'Country' (one numeric column per year)

    Bug fix: the original body pivoted the global `whr_data` instead of the
    `heritage_data` argument, so the parameter was silently ignored.
    """
    index = heritage_data.pivot_table(index='Country name', columns='year', values=index_name)
    index = index.rename_axis('Country', axis=0)
    index = index.rename_axis(None, axis=1)
    # Coerce every year column to numeric; unparseable values become NaN.
    for item in index.columns:
        index[item] = pd.to_numeric(index[item], errors='coerce')
    return index
def convert_whr_data(whr_data, csv_file):
    """Write World Happiness Report data to a CSV file.

    whr_data - pandas DataFrame holding the World Happiness Report data
    csv_file - destination path for the CSV file

    Returns None (DataFrame.to_csv returns None when a path is supplied).
    """
    return whr_data.to_csv(csv_file)
# +
#convert_whr_data(create_table_is_heritage_data(whr_data, 'Life Ladder'), './dataset/index_life_ladder.csv')
#convert_whr_data(create_table_is_heritage_data(whr_data, 'Log GDP per capita'), './dataset/index_gdp_per_capita.csv')
#convert_whr_data(create_table_is_heritage_data(whr_data, 'Social support'), './dataset/index_social_support.csv')
#convert_whr_data(create_table_is_heritage_data(whr_data, 'Healthy life expectancy at birth'), './dataset/index_healthy_life.csv')
# -
|
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# language: python
# name: python3
# ---
# # Imports
# +
import requests
import pandas as pd
import numpy as np
from datetime import datetime, timedelta
import re
import os
from PIL import Image, ImageDraw, ImageFont
import matplotlib.pyplot as plt
from ipywidgets import interact, interactive, fixed, interact_manual
from io import BytesIO
import geopandas as gpd
pd.options.mode.chained_assignment = None
pd.options.display.max_columns = None
classifications = ["lcWestClassifications", "lcSouthClassifications", "lcEastClassifications", "lcNorthClassifications"]
url_entries = ["lcDownwardPhotoUrl", "lcEastPhotoUrl", "lcUpwardPhotoUrl", "lcNorthPhotoUrl", "lcSouthPhotoUrl", "lcWestPhotoUrl"]
# -
# # Downloading Data
# ## Parameters
#
# - "get_app_only": Set to True to only download data collected from the GLOBE Observer App
# - "get_site_coordinates": Set to True to set the latitude/longitude of the data from the MGRS System to the coordinates collected by the GPS
# - "start_date" and "end_date": Time frame for the data
# +
# get app data
get_app_only = True
# measured at or site coordinates
get_site_coordinates = False
# dates
start_date = "2020-06-01"
end_date = "2020-07-01"
# +
def switch_coordinates(df):
    """Switch latitude and longitude of a Landcover DataFrame from their current MGRS values to the GPS's measurement
    Arguments:
        df: pd.DataFrame, A Pandas DataFrame Containing GLOBE Observer Landcover Data from the GLOBE API
    Returns:
        pd.DataFrame, a Landcover DataFrame with updated latitude and longitude columns
    """
    # BUG FIX: the original filtered the longitude with a mask built from the
    # unfiltered `df` and applied it to the already-filtered frame, forcing
    # pandas to reindex the boolean key (a deprecated, warning-emitting
    # pattern). Build one mask over the original frame instead.
    has_coords = df["lcMeasurementLatitude"].notnull() & df["lcMeasurementLongitude"].notnull()
    coordinates_df = df[has_coords].copy()
    coordinates_df["latitude"] = coordinates_df["lcMeasurementLatitude"].tolist()
    coordinates_df["longitude"] = coordinates_df["lcMeasurementLongitude"].tolist()
    return coordinates_df
def app_only(df):
    """Restrict a Landcover DataFrame to observations made with the GLOBE Observer App.

    Arguments:
        df: pd.DataFrame, GLOBE Observer Landcover data from the GLOBE API
    Returns:
        pd.DataFrame, the subset of rows whose data source is the GLOBE Observer App
    """
    from_app = df["lcDataSource"] == "GLOBE Observer App"
    return df.loc[from_app]
# Build download URL
url = f"https://api.globe.gov/search/v1/measurement/protocol/measureddate/?protocols=land_covers&startdate={start_date}&enddate={end_date}&geojson=FALSE&sample=FALSE"
# Get and unpack GLOBE API Data
response = requests.get(url)
results = response.json()["results"]
df = pd.DataFrame(results)
# Expand the 'data' column by listing the contents and passing as a new dataframe
df = pd.concat([df, pd.DataFrame(list(df['data']))], axis=1)
# Drop the previously nested data column.
# FIX: `df.drop('data', 1)` relied on the positional `axis` argument, which
# was deprecated in pandas 1.0 and removed in pandas 2.0; use the keyword form.
df = df.drop(columns='data')
# Replace "landcovers" in column name with "lc" to reduce verbosity
df.columns = [col_name.replace("landcovers", "lc") for col_name in df.columns]
# Additional (optional) data cleaning scripts
if get_app_only:
    df = app_only(df)
if get_site_coordinates:
    df = switch_coordinates(df)
df
# -
# # Convert To Local Times
# +
def convert_times(date, longitude):
    """Convert a UTC measurement timestamp to the entry's approximate local date.
    Adapted from Matt Bandel's Mosquito Habitat Mapper Notebook: https://github.com/IGES-Geospatial/globe-mosquitoes-notebook
    Arguments:
        date: str, the recorded datetime for the measurement
        longitude: float, the measured longitude for the observation
    Returns:
        (int, int, int, int) tuple, contains: year, month, day, day of the year (1-366)
    """
    # Strip any fractional-second suffix, then parse the ISO-like timestamp.
    timestamp = re.sub(r"\..*", "", date)
    utc_time = datetime.strptime(timestamp, "%Y-%m-%dT%H:%M:%S")
    # Approximate the timezone offset from longitude (15 degrees per hour).
    offset_hours = int(round(longitude * 24 / 360, 0))
    local_time = utc_time + timedelta(hours=offset_hours)
    parts = local_time.timetuple()
    return parts.tm_year, parts.tm_mon, parts.tm_mday, parts.tm_yday
# Vectorize the scalar converter so it can be broadcast over whole columns.
vectorized_convert = np.vectorize(convert_times)
# Derive local-time components from the UTC timestamp and measured longitude.
df["year"], df["month"], df["day"], df["day of year"] = vectorized_convert(df['lcMeasuredAt'].to_numpy(), df['longitude'].to_numpy())
df
# -
# # Image Resolution, Size, and Score
# +
def get_resolution(df):
    """Gets the resolutions for images in a Landcover DataFrame
    Arguments:
        df: pd.DataFrame, a Pandas DataFrame Containing GLOBE Observer Landcover Data from the GLOBE API
    Returns:
        pd.DataFrame, an updated Landcover DataFrame with columns for image resolutions in all image directions
    """
    # helper function
    def get_entry_resolution(row):
        """Adds columns to a Landcover DataFrame row containing image resolution, file size, and content score.
        Content score is file size divided by pixels.
        Arguments:
            row: pd.Series, A Pandas DataFrame Row Containing GLOBE Observer Landcover Data from the GLOBE API
        Returns:
            pd.Series, An updated row with columns for image resolutions in all image directions
        """
        def calc_data(row, entry):
            """Adds columns to Landcover DataFrame Row containing image resolution, file size, and content score
            Arguments:
                row: pd.Series, A Pandas DataFrame Row Containing GLOBE Observer Landcover Data from the GLOBE API
                entry: str, the column key for the url entry of the row
            Returns:
                pd.Series, An updated row with columns for the specified direction
            """
            # Only fetch entries that actually hold an https URL.
            if row[entry] and "https" in row[entry]:
                response = requests.get(row[entry], stream=True)
                resolution = Image.open(BytesIO(response.content)).size
                size = len(response.content)
                row[entry.replace("Url", "Rez")] = resolution
                row[entry.replace("Url", "Size")] = size / 1000000  # convert to megabytes
                row[entry.replace("Url", "ContentScore")] = size / (resolution[0] * resolution[1])  # bytes / pixel
            return row
        # Retry each download exactly once before giving up on that direction.
        for entry in url_entries:
            try:
                row = calc_data(row, entry)
            except Exception as e:
                # NOTE(review): `e` is unused here; only the retry's exception is reported.
                print(f"{row[entry]} failed, retrying...")
                try:
                    row = calc_data(row, entry)
                    print("retry succeded")
                except Exception as e:
                    print(f"{row[entry]} failed: {repr(e)}")
        return row
    # Add in columns into DataFrame with default null value
    for entry in url_entries:
        df[entry.replace("Url", "Rez")] = np.nan
        df[entry.replace("Url", "Size")] = np.nan
        df[entry.replace("Url", "ContentScore")] = np.nan
    # Apply helper function to all DataFrame Rows
    return df.apply(get_entry_resolution, axis=1)
# %time df = get_resolution(df)
# -
df
# # Download Photos
# The option variable decides between three options for downloading photos:
#
# 1. "original": Downloads the original photo
# 2. "1080p": Resizes the original photo into 1920x1080 resolution
# 3. "overlay": Overlays an image collar at the bottom of every image containing photo ID, direction, siteID, date and MUC classification
option = "1080p"
def download_photo(row, option):
    """Downloads photos in a Landcover DataFrame with 3 download options: original, 1080p, and overlay.
    Arguments:
        row: pd.Series, A Pandas DataFrame Row Containing GLOBE Observer Landcover Data from the GLOBE API
        option: str, 1 of 3 possible strings: original (original photo), 1080p (scale photo to 1080p), and overlay (add image data as image collar).
    Returns:
        None
    """
    # 3 different helper functions
    def resize(img, info):
        """Resizes image to 1080p and saves it in the Image Directory with relevant data in its filename
        Arguments:
            img: Image, an Image object
            info: str, a string containing relevant data for the image
        Returns:
            None
        """
        img.resize((1920, 1080)).save(f"Images/{info}.jpg")
    def overlay(img, info):
        """Downloads image and saves it in the Image Directory with relevant data in an image collar at the bottom of the image.
        Arguments:
            img: Image, an Image object
            info: str, a string containing relevant data for the image
        Returns:
            None
        """
        draw = ImageDraw.Draw(img)
        # White band along the bottom edge to carry the text collar.
        draw.line([(0,img.size[1]), (img.size[0], img.size[1])], width = 200, fill="#FFFFFF")
        fnt = ImageFont.truetype("arial.ttf", 40)
        draw.text((0,img.size[1] - 100), info, font = fnt, fill = "#000000", align = "right")
        # Filename keeps only the photo-id prefix of `info` (text before "-<direction>-").
        img.save(f"Images/{re.search(r'.*(?=-[a-zA-Z]*-)', info).group(0)}.jpg")
    def original(img, info):
        """Saves image in the Image Directory with relevant data in its filename
        Arguments:
            img: Image, an Image object
            info: str, a string containing relevant data for the image
        Returns:
            None
        """
        img.save(f"Images/{info}.jpg")
    def get_img(url):
        """Downloads an image from a url
        Arguments:
            url: str, an image URL for a GLOBE Landcover Image
        Returns:
            None
        """
        # One retry before giving up; returns None when both attempts fail.
        try:
            return Image.open(requests.get(url, stream=True).raw)
        except Exception as e:
            print(f"{url} failed, retrying...")
            try:
                img = Image.open(requests.get(url, stream=True).raw)
                print("retry successful")
                return img
            except Exception as e:
                print(f"{url} failed: {repr(e)}")
                return None
    # NOTE(review): this inner function shadows the outer `download_photo` name.
    def download_photo(url, direction, siteid, date, muc, option):
        """Downloads a photo with relevant data saved either with the filename or overlayed on the image
        Arguments:
            url: str, the URL for the image
            direction: str, the direction (Upwards, Downwards, North, East, South, and West) the image was taken in
            siteid: str, the siteID for the image
            date: str, the date the image was taken
            muc: str, the MUC classification code
            option: str, one of three photo downloading options: "original", "1080p", or "overlay"
        Returns:
            None
        """
        if url and "https" in url:
            # Photo id is the URL path segment after the yyyy/mm/dd prefix.
            photo_id = re.search(r'(?<=\d\d\d\d\/\d\d\/\d\d\/).*(?=\/)', url).group(0)
            img_info = f"{photo_id}-{direction}-{siteid}-{date}-{muc}"
            img = get_img(url)
            # Dispatch to the save strategy selected by `option`.
            options = {"original" : original, "1080p": resize, "overlay": overlay}
            options[option](img, img_info)
    # Makes Image directory if it doesn't exist
    if not os.path.exists("Images"):
        os.makedirs("Images")
    # Downloads Images
    for entry in url_entries:
        download_photo(row[entry], entry.replace("lc", "").replace("Url", ""), row["siteId"], row["measuredDate"], row['lcMucCode'], option)
# %time out = df.apply(download_photo, option=option, axis=1)
# # Plot Count over time
#
# Plots a bar graph of datapoints collected for each day specified in date range
# +
# Count observations per measurement date and plot the daily totals.
time_series = df.groupby("measuredDate").size()
fig = plt.figure(figsize = (10, 5))
time_series.plot.bar()
fig.autofmt_xdate()  # slant the date labels so they stay readable
# -
# # Classifications analysis
#
# +
def get_classifications(*args):
    """Figures out the primary and secondary landcover type and percentage for each valid set of classifications.
    Arguments:
        *args: list of strings, classifications for each direction.
    Returns:
        (str, float, str, float) tuple, contains: primary lc type, primary lc percentage, secondary lc type, secondary lc percentage.
    """
    # generate classification list
    # FIX: compare to None with `is not None` (PEP 8) instead of `!= None`.
    valid_entries = [entry for entry in args if entry is not None and "%" in entry]
    # Each entry looks like "60%Trees; 40%Grass": split on ";" then on "%".
    classification_list = [obj.split("%") for sublist in valid_entries for obj in sublist.split(";")]
    # set primary, secondary default values to None
    primary, secondary = (None, None), (None, None)
    # checks if there are valid classifications
    if len(classification_list) > 0:
        # adds in all unique classifications and updates percentages
        classification_dict = {}
        for classification in classification_list:
            classification[1] = classification[1].lstrip()
            if classification_dict.get(classification[1], 0) == 0:
                classification_dict[classification[1]] = int(classification[0])
            else:
                classification_dict[classification[1]] += int(classification[0])
        # average the summed percentages for each entry over all directions passed
        # (invalid/missing directions still count toward the denominator)
        for key, value in classification_dict.items():
            classification_dict[key] = value / len(args)
        # sort the dict to get primary and secondary types on front
        classification_dict = {k: v for k, v in sorted(classification_dict.items(), key=lambda item: item[1], reverse=True)}
        # try and pull primary and secondary values
        iterator = iter(classification_dict.items())
        primary = next(iterator)
        try:
            secondary = next(iterator)
        except StopIteration:
            pass
    return primary[0], primary[1], secondary[0], secondary[1]
# Vectorize the scalar classifier, then derive primary/secondary landcover
# type and percentage columns from the four horizontal direction columns.
classification_getter = np.vectorize(get_classifications)
df["primary_type"], df["primary_percentage"], df["secondary_type"], df["secondary_percentage"] = classification_getter(
    df["lcEastClassifications"].to_numpy(),
    df["lcNorthClassifications"].to_numpy(),
    df["lcSouthClassifications"].to_numpy(),
    df["lcWestClassifications"].to_numpy(),
)
# -
df
# # Identify Repeats
#
# Returns a data frame of suspected repeat entrys and removes them from the DataFrame (keeping the first entry). It does this by finding entries made in the same location within a certain time frame. This time frame can be specified as explained below.
#
# ## Usage:
#
# "same_day": If set to True, the program only flags entrys made within the same day, if set to False it will only take location into account (any coincident observations within the dataset's date range will be flagged)
same_day = True  # True: flag repeats only within the same day; False: any coincident location

# Collect suspected duplicate observations (same place, optionally same day),
# then keep only the first occurrence in the working DataFrame.
if same_day:
    suspect_repeats = df.groupby(by = ["measuredDate", "lcMeasurementLatitude", "lcMeasurementLongitude"]).filter(lambda x: len(x) > 1)
    df = df.drop_duplicates(subset = ["measuredDate", "lcMeasurementLatitude", "lcMeasurementLongitude"], keep ="first")
else:
    suspect_repeats = df.groupby(by = ["lcMeasurementLatitude", "lcMeasurementLongitude"]).filter(lambda x: len(x) > 1)
    df = df.drop_duplicates(subset = ["lcMeasurementLatitude", "lcMeasurementLongitude"], keep ="first")
suspect_repeats
df
# # Get Decimal Precision
#
# Counts trailing decimals for latitude longitude measurements and appends the data to the dataframe under the "x_decimal" (longitude) and "y_decimal" (latitude) columns.
# +
def get_precision(latitude, longitude):
    """Count the decimal digits of latitude and longitude measurements.

    Arguments:
        latitude: float, a latitude value
        longitude: float, a longitude value
    Returns:
        (int, int) tuple, contains: longitude precision and latitude precision
    """
    def _decimal_digits(value):
        # Digits after the decimal point in the value's string form; 0 if none.
        pieces = str(value).split(".")
        return len(pieces[1]) if len(pieces) > 1 else 0

    # Note the return order: longitude precision first, latitude second.
    return _decimal_digits(longitude), _decimal_digits(latitude)
# Vectorize and record coordinate decimal precision for every observation.
vectorized_precision = np.vectorize(get_precision)
df["x_decimal"], df["y_decimal"] = vectorized_precision(df["lcMeasurementLatitude"].to_numpy(), df["lcMeasurementLongitude"].to_numpy())
# -
df
# # Count Valid
#
# Counts the number of valid photo entries ("photo_count") and classifications ("classification_count") for each row in the dataset.
# +
def count_valid(identifier, *args):
    """Count how many of the supplied entries are valid.

    Arguments:
        identifier: str, a string that all valid entries should contain
        *args: list of strings, collected data entries that may or may not be valid
    Returns:
        int, the number of valid data entries
    """
    # An entry is valid when it is truthy (not None/empty) and contains the marker.
    return sum(1 for value in args if value and identifier in value)
vectorize_count = np.vectorize(count_valid)
# Number of usable photo URLs (out of the 6 directions) per observation.
df["photo_count"] = vectorize_count(
    "http",
    df["lcDownwardPhotoUrl"].to_numpy(),
    df["lcUpwardPhotoUrl"].to_numpy(),
    df["lcEastPhotoUrl"].to_numpy(),
    df["lcNorthPhotoUrl"].to_numpy(),
    df["lcSouthPhotoUrl"].to_numpy(),
    df["lcWestPhotoUrl"].to_numpy(),
)
# Number of usable classification strings (out of the 4 directions) per observation.
df["classification_count"] = vectorize_count(
    "%",
    df["lcEastClassifications"].to_numpy(),
    df["lcNorthClassifications"].to_numpy(),
    df["lcSouthClassifications"].to_numpy(),
    df["lcWestClassifications"].to_numpy(),
)
# -
df
# # Completeness Score
#
# Generates a completeness score for each row which is essentially a count for all the values in the row that are filled out.
# +
def completeness_score(row):
    """Count the filled-out values of a Landcover DataFrame row and store the
    total in a "completeness" column on the row.

    Arguments:
        row: pd.Series, a Landcover DataFrame Row
    Returns:
        pd.Series, the same row with a "completeness" column added
    """
    def _is_filled(value):
        # Non-strings survive dropna(), so they always count as filled.
        if type(value) is not str:
            return True
        # Strings count unless rejected, marked "(none)", or empty.
        return "rejected" not in value and "(none)" not in value and bool(value)

    filled = sum(1 for _, value in row.dropna().items() if _is_filled(value))
    row["completeness"] = filled
    return row
df = df.apply(completeness_score, axis = 1)
# -
df
# # Plot
#
# Plots the collected data based off of their location.
# +
# Build a GeoDataFrame from the measured coordinates and plot observation locations.
gdf = gpd.GeoDataFrame(
    df, geometry = gpd.points_from_xy(df.longitude, df.latitude)
)
gdf.set_crs(epsg=4326, inplace=True)  # WGS84 lat/lon
gdf.plot()
# -
# # Preview Each row of Data
#
# Select and view different rows using the Row Dropdown menu
# +
rows = df.index.tolist()

def view_row(row = 0):
    """Displays a Landcover DataFrame Row
    Arguments:
        row: int, the row index
    Returns:
        None
    """
    # `display` is the IPython rich-display helper available in notebooks.
    display(df.loc[row].to_frame())

# Interactive dropdown over all row indices.
interactive(view_row, row = rows)
# -
# # Download Data as CSV
# Downloads the data as a CSV file.
#
# "get_raw": Setting Get Raw to true will get the raw data without QA scripts applied. The QA scripts disregard data that doesn't meet the following criteria:
#
# - Contains photos for all directions or atleast the north photo
# - Contains classifications for all directions
get_raw = False
# +
def qa_filter(entry):
    """Determines if the passed entry is valid
    Arguments:
        entry: str or None, an entry in a Landcover DataFrame
    Returns:
        bool, True when the entry is present and is neither rejected nor a "(none)" placeholder
    """
    # FIX: compare to None with `is not None` (PEP 8) instead of `!= None`.
    return entry is not None and ("rejected" not in entry and "none" not in entry)
vectorized_qa_filter = np.vectorize(qa_filter)
# Ensure the output directory exists before writing.
if not os.path.exists("Data"):
    os.makedirs("Data")
date = datetime.now().strftime("%d-%m-%Y")
download_type = "-RAW" if get_raw else ""
path = f"Data/LC-{date}{download_type}.csv"
if not get_raw:
    # only gets data that has all 6 images or north image
    six_photos = np.all(vectorized_qa_filter(df[url_entries].to_numpy()), axis = 1)
    north_photos = vectorized_qa_filter(df[url_entries[3]].to_numpy())  # url_entries[3] is lcNorthPhotoUrl
    good_photo_data = np.logical_or(six_photos, north_photos)
    # data that has all classifications
    good_classification = np.all(vectorized_qa_filter(df[classifications].to_numpy()), axis = 1)
    good_data = np.logical_and(good_photo_data, good_classification)
    df[good_data].to_csv(path)
else:
    df.to_csv(path)
df
# NOTE(review): `good_data` only exists when get_raw is False — the next line
# raises NameError when the raw export path was taken.
df[good_data]
# # TODO
# https://stackoverflow.com/questions/27934885/how-to-hide-code-from-cells-in-ipython-notebook-visualized-with-nbviewer
# - Surface Conditions?
|
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# language: python
# name: python3
# ---
# # 02-1데이터 집합 불러오기
# ## 데이터 분석의 시작은 데이터 불러오기부터
# 데이터 분석을 위해 가장 먼저 해야 할 일은 무엇일까요? 바로 데이터를 불러오는 것입니다. 이때 불러오는 데이터를 '데이터 집합'이라고 합니다. 그러면 데이터 집합을 불러오는 방법과 데이터를 간단히 살펴보는 방법에 대해 알아보겠습니다. 우리가 처음 불러올 데이터 집합은 갭마인더입니다. '02_practice'를 주피터 노트북으로 열어 실습을 시작해 볼까요?
# ## 갭마인더 데이터 집합 불러오기
#
# 1. 판다스의 여러 기능을 사용하려면 판다스 라이브러리를 불러와야 합니다. 다음과 같이 입력하여 판다스 라이브러리를 불러오세요.
import pandas
# 2. 갭마인더 데이터 집합을 불러오려면 read_csv메서드를 사용해야 합니다. read_csv메서드는 기본적으로 쉽표(,)로 열어 구분되어 있는 데이터를 불러옵니다. 하지만 갭마인더는 열이 탭으로 구분되어 있기 때문에 read_csv 메서드를 호 출할 때 열이 탭으로 구분되어 있따고 미리 알려주어야 합니다. sep 속성값으로 \t를 지정하세요
df = pandas.read_csv('data/gapminder.tsv',sep='\t')
# 3. 판다스에 있는 메서드를 호출하려면 pandas와 점(.) 연산자를 사용해야 합니다. 그런데 매번 pandas라고 입력하려면 번거롭겠죠. 그래서 이를 해결하기 위해 관습적으로 pandas를 pd로 줄여 사용합니다. 다음과 같이 입력하면 pandas를 pd로 줄여 사용할 수 있습니다. 앞으로는 이 방법을 사용하겠습니다.
import pandas as pd
df = pd.read_csv('data/gapminder.tsv',sep='\t')
# ## 시리즈와 데이터프레임
# 갭마인더 데이터 집합을 잘 불러왔나요? 이번에는 판다스에서 사용되는 자료형을 알아볼 차례입니다. 판다스는 데이터를 효율적으로 다루기 위해 시리즈와 데이터프레임이라는 자료형을 사용합니다. 데이터프레임은 엑셀에서 볼 수 있는 시트와 동일한 개념이며 시리즈는 시트의 열 1개를 의미합니다. 파이썬으로 비유하여 설명하면 데이터프레임은 시리즈들이 각 요소가 되는 딕셔너리라고 생각하면 됩니다.
# ### 불러온 데이터 집합 살펴보기.
# 1. read_csv 메서드는 데이터 집합을 읽어 들여와 데이터프레임이라는 자료형으로 반환합니다. 데이터프레임에는 데이터 분석에 유용한 여러 메서드가 미리 정의되어 있습니다. 데이터 프레임의 데이터를 확인하는 용도로 자주 사용하는 head 메서드에 대해 먼저 알아보겠습니다. head 메서드는 데이터프레임에서 가장 앞에 있는 5개의 행을 출력하므로 내가 불러온 데이터가 어떤 값을 가지고 있는지 살펴보기에 안성맞춤이죠.
print(df.head())
# 2. 이번에는 df에 저장된 값이 정말 데이터프레임이라는 자료형인지 확인해 보겠습니다. 실행 결과를 보면 판다스의 데이터프레임이라는 것을 알 수 있습니다. type 메서드는 자료형을 출력해 줍니다. 앞으로 자주 사용할 메서드이므로 꼭 기억해 두기 바랍니다.
print(type(df))
# 3. 데이터프레임은 자신이 가지고 있는 데이터의 행과 열의 크기에 대한 정보를 shape라는 속성에 저장하고 있습니다. 다음을 입력하여 실행하면 갭마인더의 행과 열의 크기를 확인할 수 있습니다. 1번째 값은 행의 크기이고 2번째 값은 열의 크기 입니다.
print(df.shape)
# 4.이번에는 갭마인더에 어떤 정보가 들어 있는지 알아보겠습니다. 먼저 열을 살펴보겠습니다. 과정 3에서 shape 속성을 사용했던 것처럼 columns속성을 사용하면 데이터 프레임의 열 이름을 확인할 수 있습니다. 갭마인더를 구성하는 열 이름은 각각 country,continent,year,lifeExp,pop, gdpPercap 입니다.
print(df.columns)
# 5. 데이터프레임을 구성하는 값의 자료형은 데이터프레임의 dtypes 속성이나 info 메서드로 쉽게 확인할 수 있습니다.
print(df.dtypes)
print(df.info())
# ## 판다스와 파이썬 자료형 비교
# 다음 표에 앞으로 판다스를 공부하며 자주 다루게 될 자료형을 정리했습니다. 그런데 판다스와 파이썬은 같은 자료형도 다르게 인식합니다. 예를 들어 판다스는 문자열 자료형을 object라는 이름으로 인식하고 파이썬은 string이라는 이름으로 인식합니다. 같은 자료형이라도 판다스, 파이썬이 서로 다른 이름으로 인식한다는 점을 주의 깊게 살펴보고 다음으로 넘어가세요.
# 판다스 자료형$\qquad$파이썬 자료형$\qquad$$\qquad$설명<br>
# object$\qquad$$\qquad$$\quad$string$\qquad$$\qquad$$\qquad$문자열<br>
# int64$\qquad$$\qquad$$\quad$$\;$$\;$$\;$int$\qquad$$\qquad$$\qquad$$\;$$\;$정수<br>
# float64$\qquad$$\qquad$$\quad$float$\qquad$$\qquad$$\qquad$소수점을 가진 숫자<br>
# datetime64$\qquad$$\;$$\;$$\;$$\;$datetime$\qquad$$\qquad$$\;$$\;$$\;$$\;$$\;$파이썬 표준 라이브러리인 datetime이 반환하는 자료형
# # 02-2 데이터 추출하기
# 지금까지 데이터프레임의 크기와 자료형을 살펴보는 방법에 대해 알아보았습니다. 앞에서 head 메서드를 이용해 데이터프레임에서 가장 앞에 있는 5개의 데이터를 추출하여 출력했던 것을 기억하나요? 이번에는 데이터프레임에서 데이터를 열 단위로 추출하는 방법과 행 단위로 추출하는 방법을 알아보겠습니다. 먼저 열 단위로 데이터를 추출하는 방법을 알아보겠습니다.
# ### 열 단위 데이터 추출하기
# 데이터프레임에서 데이터를 열 단위로 추출하려면 대괄호와 열 이름을 사용해야 합니다. 이때 열 이름은 꼭 작은따옴표를 사용해서 지정해야 하고 추출한 열은 변수에 저장해서 사용할 수도 있습니다. 이때 1개의 열만 추출하면 시리즈를 얻을 수 있고 2개 이상의 열을 추출하면 데이터프레임을 얻을 수 있습니다.
# #### 열 단위로 데이터 추출하기
# 1. 다음은 데이터프레임에서 열 이름이 country인 열을 추출하여 country_df에 저장한 것입니다. type 메서드를 사용하면 country_df에 저장된 데이터의 자료형이 시리즈라는 것을 확인할 수 있습니다. 시리즈도 head,tail 메서드를 가지고 있기 때문에 head, tail 메서드로 가장 앞이나 뒤에 있는 5개의 데이터를 출력할 수 있습니다.
country_df=df['country']
print(type(country_df))
print(country_df.head())
print(country_df.tail())
# 2.리스트에 열 이름을 전달하면 여러 개의 열을 한 번에 추출할 수 있습니다. 다음은 열 이름이 country,continent,year인 열을 추출하여 변수 subset에 저장한 것입니다. 이때 1개의 열이 아니라 2개 이상의 열을 추출했기 때문에 시리즈가 아니라 데이터프레임을 얻을 수 있습니다.
# Select three columns at once; passing a list of names yields a DataFrame.
subset=df[['country','continent','year']]
print(type(subset))
print(subset.head())
# BUG FIX: the original printed `subset.tail` (the bound method object)
# instead of calling it; tail() returns the last five rows.
print(subset.tail())
# ## 행단위 데이터 추출하기
# 이번에는 데이터를 행 당위로 추출하는 방법에 대해 알아보겠습니다. 데이터를 행 단위로 추출하려면 loc,iloc 속성을 사용해야 합니다. 밑에 두 속성을 간단하게 정리한 표입니다.<br>
# 속성$\quad$$\quad$$\quad$설명<br>
# loc$\quad$$\quad$$\quad$인덱스를 기준으로 행 데이터 추출<br>
# iloc$\quad$$\quad$$\quad$행 번호를 기준으로 행 데이터 추출
# 표의 설명을 보면 인덱스와 행 번호라는 것이 있습니다. 파이썬을 공부한 독자라면 리스트같은 자료형에 저장된 데이터의 순서를 인덱스라고 알고 있을 것입니다. 하지만 판다스에서는 이런 개념을 행 번호라고 부릅니다. 다음예제를 실습하면서 판다스에서 말하는 인덱스와 행 번호가 무엇인지 알아보겠습니다.
# ## 인덱스와 행 번호 개념 알아보기
# 다음은 갭마인더 데이터 집합을 불러온 다음 head메서드를 실행한 결과입니다.
print(df.head())
# 왼쪽에 번호가 보이나요? 바로 이것이 인덱스입니다. 인덱스는 보통 0부터 시작하지만 행 데이터가 추가, 삭제되면 언제든지 변할 수 있으며 숫자가 아니라 문자열을 사용할 수도 있습니다. 즉, 인덱스는 first, second,third와 같은 문자열로 지정할 수도 있습니다. 반면에 행 번호는 데이터의 순서를 따라가기 때문에 정수만으로 데이터를 조회하거나 추출할 수 있으며 실제 데이터프레임에서는 확인할 수 없는 값입니다.
print(df.loc[0])
print(df.loc[99])
# 2. 만약 데이터프레임의 마지막 행 데이터를 추출하려면 어떻게 해야 할까요? 마지막 행데이터의 인덱스를 알아내야 합니다. shape[0]에 행 크기(1704)가 저장되어 있다는 점을 이용하여 마지막 행의 인덱스를 구하면 됩니다. 다음은 shape[0]에서 1을 뺀 값으로(1704-1=1703)마지막 행 데이터를 추출한 것입니다.
number_of_rows=df.shape[0]
last_row_index=number_of_rows -1
print(df.loc[last_row_index])
# 3. 데이터프레임의 마지막 행 데이터를 추출하는 또 다른 방법으로는 tail메서드를 사용하는 방법이 있습니다. 다음과 같이 tail 메서드의 인자 n에 1을 전달하면 마지막 행의 데이터를 출출할 수 있습니다. 이방법이 조금 더 유용하겠죠?
print(df.tail(n=1))
# 4. 만약 인덱스가 0,99,999인 데이터를 한 번에 추출하려면 리스트에 원하는 인덱스를 담아 loc 속성에 전달하면 됩니다.
print(df.loc[[0,99,999]])
# ### tail메서드와 loc 속성이 반환하는 자료형은 서로 달라요!
# tail 메서드와 loc 속성이 반환하는 데이터의 자료형은 다릅니다. 다음은 tail 메서드와 loc 속성으로 추출한 데이터의 자료형을 type메서드로 확인한 것입니다. loc속성이 반환한 데이터 자료형은 시리즈이고 tail 메서드가 반환한 데이터 자료형은 데이터프레임입니다.
# +
subset_loc=df.loc[0]
subset_tail=df.tail(n=1)
print(type(subset_loc))
print(type(subset_tail))
# -
# ### iloc 속성으로 행 데이터 추출하기
# 1. 이번에는 iloc속성으로 행 데이터를 추출하는 방법에 대해 알아보겠습니다. loc속성은 데이터프레임의 인덱스를 사용하여 데이터를 추출했지만 iloc 속성은 데이터 순서를 의미하는 행 번호를 사용하여 데이터를 추출합니다.지금은 인덱스와 행 번호가 동일하여 동일한 결괏값이 출력됩니다. 다음은 iloc속성에 1을 전달하여 데이터를 추출한 것입니다.
print(df.iloc[1])
print(df.iloc[99])
# 2. iloc 속성은 음수를 사용해도 데이터를 추출할 수 있습니다. 다음은 -1을 전달하여 마지막 행 데이터를 추출한 것입니다. 하지만 데이터프레임에 아예 존재하지 않는 행 번호를 전달하면 오류가 발생합니다.
print(df.iloc[-1])
# 3. iloc 속성도 여러 데이터를 한 번에 추출할 수 있습니다. loc 속성을 사용했던 것처럼 원하는 데이터의 행 번호를 리스트에 담아 전달하면 됩니다.
print(df.iloc[[0,99,999]])
# ## loc, iloc 속성 자유자재로 사용하기
# loc, iloc속성을 좀더 자유자재로 사용하려면 추출할 데이터의 행과 열을 지정하는 방법을 알아야 합니다. 두속성 모두 추출할 데이터의 행을 먼저 지정하고 그런 다음 열을 지정하는 방법으로 데이터를 추출합니다. 즉 df.loc[[행],[열]]이나 df.iloc[[행],[열]]과 같은 방법으로 코드를 작성하면 됩니다. <br> 이때 행과 열을 지정하는 방법은 슬라이싱 구문을 사용하는 방법과 range 메서드를 사용하는 방법이 있습니다. 먼저 슬라이싱 구문으로 원하는 데이터를 추출하는 방법을 알아보겠습니다.
# ### 데이터 추출하기--슬라이싱 구문, range메서드
# #### 1.슬라이싱 구문으로 데이터 추출하기
# 다음은 모든 행(:)의 데이터에 대해 tear,pop열을 추출하는 방법입니다. 이때 loc와 iloc속성에 전달하는 열 지정값은 반드시 형식에 맞게 전달해야 합니다. 예를 들어 loc 속성의 열 지정값에 정수 리스트를 전달하면 오류가 발생합니다.
subset=df.loc[:,['year','pop']]
print(subset.head())
subset=df.iloc[:,[2,4,-1]]
print(subset.head())
# #### 2. range메서드로 데이터 추출하기
# 이번에는 iloc 속성과 파이썬 내장 메서드인 range를 응용하는 방법을 알아보겠습니다. range 메서드는 지정한 구간의 정수 리스트를 반환해 줍니다. iloc속성의 열 지정값에는 정수 리스트를 전달해야 한다는 점과 range메서드의 반환값이 정수 리스트인 점을 이용하여 원하는 데이터를 추출하는 것이죠<br> 그런데 range 메서드는 조금 더 정확하게 말하면 지정한 범위의 정수 리스트를 반환하는 것이 아니라 제네레이터를 반환합니다. iloc속성은 제네레이터로 데이터 추출을 할 수 없죠. 다행이 제네레이터는 간단하게 리스트로 변환할 수 있습니다. 다음은 range(5)가 반환한 제네레이터를 정숫값을 가진 리스트 [0,1,2,3,4]로 변환하여 iloc의 열 지정값에 전달한 것입니다. 자주 사용하는 방법은 아니지만 알아두면 유용할 것입니다.|
small_range=list(range(5))
print(small_range)
print(type(small_range))
subset=df.iloc[:,small_range]
print(subset.head())
small_range=list(range(3,6))
print(small_range)
subset=df.iloc[:,small_range]
print(subset.head())
# #### 3. range 메서드에 대해 조금 더 알아볼까요? range 메서드에 range(0,6,2)와 같은 방법으로 3개의 인자를 전달하면 어떻게 될까요? 0부터 5까지 2만큼 건너뛰는 제네레이터를 생성합니다. 이 네네레이터를 리스트로 변환하면 번위는 0~5이고 짝수로 된 정수 리스트를 얻을 수 있죠.
small_range=list(range(0,6,2))
subset=df.iloc[:,small_range]
print(subset.head())
# #### 4.슬라이싱 구문과 range 메서드 비교하기
# 그런데 실무에서는 range 메서드보다는 간편하게 사용할 수 있는 파이썬 슬라이싱 구문을 더 선호합니다. range메서드가 반환한 제네레이터를 리스트로 변환하는 등의 과정을 거치지 않아도 되기 때문이죠. 예를 들어 list(range(3))과 [:3]의 결괏값은 동일합니다.
subset=df.iloc[:,:3]
print(subset.head())
# #### 5. 0:6:2를 열징정값에 전달하면 과정 3에서 얻은 결괏값과 동일한 결괏값을 얻을수 있습니다. range메서드와 슬라이싱 구문을 비교해 보세요.
subset=df.iloc[:,0:6:2]
print(subset.head())
# #### 6. loc,iloc 속성 자유자재로 사용하기
# 만약 iloc 속성으로 0,99,999번째 행의 0,3,5번째 열 데이터를 추출하려면 다음과 같이 코드를 작성하면 됩니다.
print(df.iloc[[0,99,999],[0,3,5]])
# #### 7. iloc 속성의 열 지정값으로 정수 리스트를 전달하는 것이 간편해 보일 수 있지만 이렇게 작성한 코드는 나중에 어떤 데이터를 추출하기 위한 코드인지 파악하지 못 할 수도 있습니다. 그래서 보통은 다음과 같은 방법으로 loc 속성을 이용하여 열 지정값으로 열 이름을 전달합니다.
print(df.loc[[0,99,999],['country','lifeExp','gdpPercap']])
# #### 8. 앞에서 배운 내용을 모두 응용하여 데이터를 추출해 볼까요? 다음은 인덱스가 10인 행부터 13인 행의 country,lifeExp,gdpPercap열 데이터를 추출하는 코드입니다.
print(df.loc[10:13,['country','lifeExp','gdpPercap']])
# # 02-3 기초적인 통계 계산하기
# 지금까지는 데이터를 추출하는 방법에 대해 알아보았습니다. 이번에는 추출한 데이터를 가지고 몇 가지 기초적인 통계 계산을 해보겠습니다. 다음은 갭마인더 데이터 집합에서 0~9번째 데이터를 추출하여 출력한 것입니다.
print(df.head(n=10))
# ### 그룹화한 데이터의 평균 구하기
# #### 1. lifeExp열을 연도별로 그룹화하여 평균 계산하기
# 예를 들어 연도별 lifeExp 열의 평균을 계산하려면 어떻게 해야 할까요? 데이터를 year열로 그룹화하고 lifeExp 열의 평균을 구하면 됩니다. 다음은 데이터프레임의 groupby 메서드에 year 열을 전달하여 연도별로 그룹화한 다음 lifeExp 열을 지정하여 mean 메서드로 평균을 구한 것입니다.
print(df.groupby('year')['lifeExp'].mean())
# #### 2. 과정 1에서 작성한 코드가 조금 복잡해서 어리둥절할 수도 있을 것입니다. 어떤 일이 벌어진 것일까요? 과정 1에서 작성한 코드를 작은 단위로 나누어 살펴보겠습니다. 먼저 데이터프레임을 연도별로 그룹화한 결과를 살펴보겠습니다. groupby 메서드에 year열 이름을 전달하면 연도별로 그룹화한 country, continent,.....gdpPercap 열을 모은 데이터프레임을 얻을 수 있습니다.
grouped_year_df=df.groupby('year')
print(type(grouped_year_df))
# #### 3. groupde_year_df를 출력하면 과정 2에서 얻은 데이터프레임이 저장된 메모리의 위치를 알수 있습니다. 이결과를 통해 연도별로 그룹화한 데이터는 데이터프레임 형태로 현재 메모리의 0x7fa9f012e700이라는 위치에 저장되어 있음을 알 수 있습니다.
print(grouped_year_df)
# #### 4. 이어서 lifeExp 열을 추출한 결과를 살펴보겠습니다. 그룹화한 데이터프레임에서 lifeExp 열을 추출하면 그룹화한 시리즈를 얻을 수 있습니다. 즉, 연도별로 그룹화한 lifeExp 열을 얻을 수 있습니다.
grouped_year_df_lifeExp=grouped_year_df['lifeExp']
print(type(grouped_year_df_lifeExp))
# #### 5. 마지막으로 평군을 구하는 mean 메서드를 사용한 결과를 살펴보겠습니다. 과정 4에서 연도별로 그룹화한 lifeExp에 mean 메서드를 사용했기 때문에 각 연도별 lifeExp 열의 평균값을 얻을 수 있습니다.
mean_lifeExp_by_year=grouped_year_df_lifeExp.mean()
print(mean_lifeExp_by_year)
# #### 6. lifeExpm gdpPercap 열의 평균값을 연도, 지역별로 그룹화하여 한 번에 계산하기.
# 다음은 과정 1~4를 응용한 코드입니다. year, continent 열로 그룹화한 그룹 데이터프레임에서 lifeExp, gdpPercap 열만 추출하여 평균값을 구한 것입니다.
multi_group_var=df.groupby(['year','continent'])[['lifeExp','gdpPercap']].mean()
print(multi_group_var)
print(type(multi_group_var))
# #### 7. 그룹화한 데이터 개수 세기
# 이번에는 그룹화한 데이터의 개수가 몇 개인지 알아보겠습니다. 이를 통계에서는 '빈도수'라고 부릅니다. 데이터의 빈도수는 nunique 메서드를 사용하면 쉽게 구할 수 있습니다. 다음은 continent를 기준으로 데이터프레임을 만들고 country 열만 추출하여 데이터의 빈도수를 계산할 것입니다.
print(df.groupby('continent')['country'].nunique())
# # 02-4 그래프 그리기
# 그래프와 같은 데이터의 시각화는 데이터 분석 과정에서 가장 중요한 요소입니다. 데이터를 시각화하면 데이터를 이해하거나 추이를 파악하는 등의 작업을 할 때 많은 도움이 됩니다. 여기에서는 간단한 그래프를 그려보고 데이터 시각화가 무엇인지 알아보겠습니다. 자세한 내용은 04장에서 더 자세히 설명하겠습니다.
# ### 그래프 그리기
# #### 1.먼저 그래프와 연관된 라이브러리를 불러옵니다.
# %matplotlib inline
import matplotlib.pyplot as plt
# #### 2. 그런 다음 year 열을 기준으로 그릅화한 데이터프레임에서 lifeExp 열만 추출하여 평균 값을 구합니다.
global_yearly_life_expectancy=df.groupby('year')['lifeExp'].mean()
print(global_yearly_life_expectancy)
# #### 3. 과정 2에서 구한 값에 plot메서드를 사용하면 다음과 같은 그래프가 그려집니다.
global_yearly_life_expectancy.plot()
# ### 마무리하며
# 이 장에서는 데이터 집합을 불러오는 방법과 데이터를 추출하는 방법 등을 알아보았습니다. 판다스가 무엇인지 감이 좀 잡혔나요? 다음 장에서는 판다스의 기본 자료형인 데이터프레임과 시리즈를 좀 더 자세히 알아보겠습니다.
# 출처 : "판다스"
|
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: behavenet
# language: python
# name: behavenet
# ---
# ## Analyze a PS-VAE model
# Because the PS-VAEs currently require significant computation time (generally ~5 hours on a GPU) the data downloaded in the previous notebook also contains already trained PS-VAEs, which we will analyze here.
#
# There are a variety of files that are automatically saved during the fitting of a PS-VAE, which can be used for later analyses such as those below. Some of these files (many of which are common to all BehaveNet models, not just the PS-VAE):
# * `best_val_model.pt`: the best PS-VAE (not necessarily from the final training epoch) as determined by computing the loss on validation data
# * `meta_tags.csv`: hyperparameters associated with data, computational resources, and model
# * `metrics.csv`: metrics computed on dataset as a function of epochs; the default is that metrics are computed on training and validation data every epoch (and reported as a mean over all batches) while metrics are computed on test data only at the end of training using the best model (and reported per batch).
# * `[lab_id]_[expt_id]_[animal_id]_[session_id]_latents.pkl`: list of np.ndarrays of PS-VAE latents (both supervised and unsupervised) computed using the best model
# * `session_info.csv`: sessions used to fit the model
#
# To fit your own PS-VAEs, see additional documentation [here](https://behavenet.readthedocs.io/en/latest/source/user_guide.html).
#
# <br>
#
# ### Contents
# * [Plot validation losses as a function of epochs](#Plot-losses-as-a-function-of-epochs)
# * [Plot label reconstructions](#Plot-label-reconstructions)
# * [Plot latent traversals](#Plot-latent-traversals)
# * [Make latent traversal movie](#Make-latent-traversal-movie)
# * [Make frame reconstruction movie](#Make-reconstruction-movies)
# ## imports
# +
import os
from behavenet import get_user_dir
from behavenet.plotting.cond_ae_utils import plot_psvae_training_curves
from behavenet.plotting.cond_ae_utils import plot_label_reconstructions
from behavenet.plotting.cond_ae_utils import plot_latent_traversals
from behavenet.plotting.cond_ae_utils import make_latent_traversal_movie
# ONLY NEED TO CHANGE THIS LINE
# 'head-fixed': IBL data
# 'mouse-face': dipoppa data
# 'two-view': musall data
# 'freely-moving': rodriguez data
# which of the four demo datasets to analyze (see mapping in the comments above)
dataset = 'head-fixed'
# whether figures/movies are written to the user's BehaveNet figure directory
save_outputs = True  # true to save figures/movies to user's figure directory
file_ext = 'pdf'  # figure format ('png' | 'jpeg' | 'pdf'); movies saved as mp4
# -
# ### define dataset parameters
# +
# parameters common to all datasets
# number of unsupervised latents
# number of unsupervised latents learned alongside the supervised (label) latents
n_latents = 2
# some models trained with 50% of training data to speed up fitting
train_frac = 1 if dataset == 'freely-moving' else 0.5
# test-tube experiment name
experiment_name = 'demo-run'
# set dataset-specific parameters; every branch defines the same set of
# variables (lab/expt/animal/session identifiers, label metadata, "best"
# model hyperparameters, label-reconstruction plotting options, and latent
# traversal options) so all downstream cells are dataset-agnostic
if dataset == 'head-fixed':
    lab = 'ibl'
    expt = 'angelakilab'
    animal = 'IBL-T4'
    session = '2019-04-23-001'
    n_labels = 4
    label_names = ['L paw (x)', 'R paw (x)', 'L paw (y)', 'R paw (y)']
    # define "best" model
    # alpha/beta/gamma are the PS-VAE loss-term weights — presumably label
    # reconstruction / KL / orthogonality; TODO confirm against the paper
    best_alpha = 1000
    best_beta = 5
    best_gamma = 500
    best_rng = 0
    # label reconstructions
    label_recon_trials= [229, 289, 419]  # good validation trials; also used for frame recon
    xtick_locs= [0, 30, 60, 90]
    frame_rate= 60
    scale= 0.4
    # latent traversal params
    label_min_p = 35  # lower bound of label traversals
    label_max_p = 85  # upper bound of label traversals
    ch = 0  # video channel to display
    n_frames_zs = 4  # n frames for supervised static traversals
    n_frames_zu = 4  # n frames for unsupervised static traversals
    label_idxs = [1, 0]  # horizontally move left/right paws
    crop_type = None  # no image cropping
    crop_kwargs = None  # no image cropping
    # select base frames for traversals
    trial_idxs = [11, 4, 0, None, None, None, None]  # trial index wrt to all test trials
    trials = [None, None, None, 169, 129, 429, 339]  # trial index wrt to *all* trials
    batch_idxs = [99, 99, 99, 16, 46, 11, 79]  # batch index within trial
    n_cols = 3  # width of traversal movie
    text_color = [1, 1, 1]  # text color for labels
elif dataset == 'mouse-face':
    lab = 'dipoppa'
    expt = 'pupil'
    animal = 'MD0ST5'
    session = 'session-3'
    n_labels = 3
    label_names = ['Pupil area', 'Pupil (y)', 'Pupil (x)']
    # define "best" model
    best_alpha = 1000
    best_beta = 20
    best_gamma = 1000
    best_rng = 0
    # label reconstructions
    label_recon_trials= [43, 83, 73]  # good validation trials; also used for frame recon
    xtick_locs= [0, 30, 60, 90, 120, 150]
    frame_rate= 30
    scale= 0.45
    # latent traversal params
    label_min_p = 5  # lower bound of label traversals
    label_max_p = 95  # upper bound of label traversals
    ch = 0  # video channel to display
    n_frames_zs = 4  # n frames for supervised static traversals
    n_frames_zu = 4  # n frames for unsupervised static traversals
    label_idxs = [1, 2]  # pupil location
    crop_type = 'fixed'  # crop around eye
    crop_kwargs = {'y_0': 48, 'y_ext': 48, 'x_0': 192, 'x_ext': 64}
    # select base frames for traversals
    trial_idxs = [11, None, 21]  # trial index wrt to all test trials
    trials = [None, 393, None]  # trial index wrt to *all* trials
    batch_idxs = [60, 27, 99]  # batch index within trial
    n_cols = 3  # width of traversal movie
    text_color = [0, 0, 0]  # text color for labels
elif dataset == 'two-view':
    lab = 'musall'
    expt = 'vistrained'
    animal = 'mSM36'
    session = '05-Dec-2017-wpaw'
    n_labels = 5
    label_names = ['Levers', 'L Spout', 'R Spout', 'R paw (x)', 'R paw (y)']
    # define "best" model
    best_alpha = 1000
    best_beta = 1
    best_gamma = 1000
    best_rng = 1
    # label reconstructions
    label_recon_trials= [9, 19, 29]  # good validation trials; also used for frame recon
    xtick_locs= [0, 60, 120, 180]
    frame_rate= 30
    scale= 0.25
    # latent traversal params
    label_min_p = 5  # lower bound of label traversals
    label_max_p = 95  # upper bound of label traversals
    ch = 1  # video channel to display
    n_frames_zs = 3  # n frames for supervised static traversals
    n_frames_zu = 3  # n frames for unsupervised static traversals
    label_idxs = [3, 4]  # move right paw
    crop_type = None  # no image cropping
    crop_kwargs = None  # no image cropping
    # select base frames for traversals
    trial_idxs = [11, 11, 11, 5]  # trial index wrt to all test trials
    trials = [None, None, None, None]  # trial index wrt to *all* trials
    batch_idxs = [99, 0, 50, 180]  # batch index within trial
    n_cols = 2  # width of traversal movie
    text_color = [1, 1, 1]  # text color for labels
elif dataset == 'freely-moving':
    lab = 'rodriguez'
    expt = 'open-field'
    animal = 'B125'
    session = 'session-1'
    n_labels = 8
    label_names = [
        'L ear (x)', 'R ear (x)', 'Back (x)', 'Tail base (x)', 'Nose (x)',
        'R ear (y)', 'Back (y)', 'Nose (y)']
    # define "best" model
    best_alpha = 100
    best_beta = 5
    best_gamma = 1000
    best_rng = 0
    # label reconstructions
    label_recon_trials = [9, 19, 29]  # good validation trials; also used for frame recon
    xtick_locs = [0, 30, 60, 90, 120]
    frame_rate = 30
    scale = 0.4
    # latent traversal params
    label_min_p = 35  # lower bound of label traversals
    label_max_p = 85  # upper bound of label traversals
    ch = 0  # video channel to display
    n_frames_zs = 4  # n frames for supervised static traversals
    n_frames_zu = 4  # n frames for unsupervised static traversals
    label_idxs = [2, 4]  # move back, nose
    crop_type = None  # no image cropping
    crop_kwargs = None  # no image cropping
    # select base frames for traversals
    trial_idxs = [None, None, None, None]  # trial index wrt to all test trials
    trials = [106, 60, 98, 32]  # trial index wrt to *all* trials
    batch_idxs = [38, 24, 29, 123]  # batch index within trial
    n_cols = 5  # width of traversal movie
    text_color = [0, 0, 0]  # text color for labels
else:
    raise ValueError(
        'Invalid dataset; must choose "head-fixed", "mouse-face", "two-view", or "freely-moving"')
# -
# ## Plot losses as a function of epochs
# The PS-VAE loss function contains many individual terms; this function plots each term separately (as well as the overall loss) to better understand model performance. Note that this function can also be used to plot training curves for multiple models simultaneously; see function documentation.
#
# Panel info (see paper for mathematical descriptions):
# * loss=loss: total PS-VAE loss
# * loss=loss_data_mse: mean square error on frames (actual loss function uses log-likelihood, a scaled version of the MSE)
# * loss=label_r2: $R^2$ (per trial) of the label reconstructions (actual loss function uses log-likelihood)
# * loss=loss_zs_kl: Kullback-Leibler (KL) divergence of supervised latents
# * loss=loss_zu_mi: index-code mutual information of unsupervised latents
# * loss=loss_zu_tc: total correlation of unsupervised latents
# * loss=loss_zu_dwkl: dimension-wise KL of unsupervised latents
# * loss=loss_AB_orth: orthogonality between supervised/unsupervised subspaces
#
# [Back to contents](#Contents)
# +
# Destination for the training-curve figure, tagged with the hyperparameters
# of the model being inspected.
fig_name = 'training_curves' + '_alpha={}_beta={}_gamma={}_rng={}_latents={}'.format(
    best_alpha, best_beta, best_gamma, best_rng, n_latents)
save_file_new = os.path.join(
    get_user_dir('fig'), lab, expt, animal, session, 'ps-vae', fig_name)
# NOTE(review): gamma appears in the filename but is not passed to the
# plotting call — presumably the function uses a default; confirm.
plot_psvae_training_curves(
    lab=lab, expt=expt, animal=animal, session=session, alphas=[best_alpha],
    betas=[best_beta], n_ae_latents=[n_latents],
    rng_seeds_model=[best_rng], experiment_name=experiment_name,
    n_labels=n_labels, train_frac=train_frac,
    save_file=save_file_new, format=file_ext)
# -
# ## Plot label reconstructions
# Plot the original labels and their reconstructions from the supervised subspace of the PS-VAE.
#
# [Back to contents](#Contents)
# +
# Destination for the label-reconstruction figures.
recon_fig_file = os.path.join(
    get_user_dir('fig'), lab, expt, animal, session, 'ps-vae', 'label_recon')
# Plot original labels vs. reconstructions from the supervised subspace.
plot_label_reconstructions(
    lab=lab, expt=expt, animal=animal, session=session, n_ae_latents=n_latents,
    experiment_name=experiment_name, n_labels=n_labels,
    trials=label_recon_trials, version=None,
    alpha=best_alpha, beta=best_beta, rng_seed_model=best_rng,
    train_frac=train_frac, add_r2=False,
    save_file=recon_fig_file, format=file_ext)
# -
# ## Plot latent traversals
# Latent traversals provide a qualitative way to assess the quality of the learned PS-VAE representation. We generate these traversals by changing the latent representation one dimension at a time and visually compare the outputs. If the representation is sufficiently interpretable we should be able to easily assign semantic meaning to each latent dimension.
#
# [Back to contents](#Contents)
# +
# Re-stated here so this cell can be tweaked/re-run standalone.
n_latents = 2
# for trial, trial_idx, batch_idx in zip(trials, trial_idxs, batch_idxs):
# just plot traversals for single base frame
trial = trials[0]
trial_idx = trial_idxs[0]
batch_idx = batch_idxs[0]
# build a tag identifying the base frame for the output filename
if trial is not None:
    trial_str = 'trial-%i-%i' % (trial, batch_idx)
else:
    trial_str = 'trial-idx-%i-%i' % (trial_idx, batch_idx)
save_file = os.path.join(
    get_user_dir('fig'), lab, expt, animal, session, 'ps-vae',
    'traversals_alpha={}_beta={}_gamma={}_rng={}_latents={}_{}'.format(
        best_alpha, best_beta, best_gamma, best_rng, n_latents, trial_str))
plot_latent_traversals(
    lab=lab, expt=expt, animal=animal, session=session, model_class='ps-vae',
    # fix: was hard-coded to 2, silently diverging from n_latents above
    alpha=best_alpha, beta=best_beta, n_ae_latents=n_latents,
    rng_seed_model=best_rng, experiment_name=experiment_name,
    n_labels=n_labels, label_idxs=label_idxs,
    label_min_p=label_min_p, label_max_p=label_max_p, channel=ch,
    n_frames_zs=n_frames_zs, n_frames_zu=n_frames_zu, trial_idx=trial_idx,
    trial=trial, batch_idx=batch_idx, crop_type=crop_type, crop_kwargs=crop_kwargs,
    # png (not file_ext): traversal grids are raster-heavy — TODO confirm intent
    train_frac=train_frac, save_file=save_file, format='png')
# -
# ## Make latent traversal movie
# A dynamic version of the traversals above; these typically provide a richer look at the traversal results.
#
# [Back to contents](#Contents)
# +
n_frames = 10  # number of sample frames per dimension
model_class = 'ps-vae'  # 'sss-vae' | 'vae'
# Hand-assigned semantic names for the unsupervised dimensions (chosen by
# inspecting the static traversals above; quotes distinguish them from the
# supervised label dims), plus a per-dataset panel ordering for the movie.
_unsupervised_info = {
    'head-fixed': (['"Jaw"', '"L paw config"'], [0, 1, 4, 2, 3, 5]),
    'mouse-face': (['"Whisker pad"', '"Eyelid"'], [2, 1, 0, 3, 4]),
    'two-view': (['"Chest"', '"Jaw"'], [1, 2, 3, 4, 0, 5, 6]),
    'freely-moving': (
        ['"Body posture 0"', '"Body posture 1"'],
        [0, 1, 2, 3, 4, 5, 6, 7, 8, 9]),
}
if dataset not in _unsupervised_info:
    raise NotImplementedError
if model_class == 'ps-vae':
    unsup_titles, order_idxs = _unsupervised_info[dataset]
    panel_titles = label_names + unsup_titles
elif model_class == 'vae':
    # plain VAE dims carry no semantic labels; keep natural order
    panel_titles = ['Latent %i' % i for i in range(n_labels + n_latents)]
    order_idxs = list(range(n_labels + n_latents))
else:
    raise NotImplementedError
save_file = os.path.join(
    get_user_dir('fig'), lab, expt, animal, session, model_class,
    'traversals_alpha={}_beta={}_gamma={}_rng={}_latents={}'.format(
        best_alpha, best_beta, best_gamma, best_rng, n_latents))
make_latent_traversal_movie(
    lab=lab, expt=expt, animal=animal, session=session, model_class=model_class,
    alpha=best_alpha, beta=best_beta, n_ae_latents=n_latents,
    rng_seed_model=best_rng, experiment_name=experiment_name,
    n_labels=n_labels, trial_idxs=trial_idxs, batch_idxs=batch_idxs, trials=trials,
    panel_titles=panel_titles, label_min_p=label_min_p,
    label_max_p=label_max_p, channel=ch, n_frames=n_frames, crop_kwargs=crop_kwargs,
    n_cols=n_cols, movie_kwargs={'text_color': text_color}, order_idxs=order_idxs,
    train_frac=train_frac, save_file=save_file)
# -
# ## Make reconstruction movies
# Compare original frames to VAE and PS-VAE reconstructions.
#
# [Back to contents](#Contents)
# #### helper function
# +
import numpy as np
from behavenet.plotting.ae_utils import make_reconstruction_movie
from behavenet.plotting.cond_ae_utils import get_model_input
from behavenet.fitting.eval import get_reconstruction
from behavenet.fitting.utils import get_best_model_and_data, get_lab_example
from behavenet.plotting import concat, save_movie
def make_reconstruction_movie_wrapper(
        hparams, save_file, model_info, trial_idxs=None, trials=None, sess_idx=0,
        max_frames=400, frame_rate=15, layout_pattern=None):
    """Produce movie with original video and reconstructed videos.
    This is a high-level function that loads the model described in the hparams dictionary
    and produces the necessary predicted video frames.

    NOTE: `hparams` is mutated — its 'experiment_name' and 'model_class' keys
    are overwritten for each model and left at the last model's values.

    Parameters
    ----------
    hparams : :obj:`dict`
        needs to contain enough information to specify an autoencoder
    save_file : :obj:`str`
        full save file (path and filename)
    model_info : :obj:`list`
        each entry is a dict that contains model-specific parameters; must include
        'title', 'model_class'
    trial_idxs : :obj:`list`, optional
        list of test trials to construct videos from; each element is index into
        test trials only; one of `trial_idxs` or `trials` must be
        specified; `trials` takes precedence over `trial_idxs`
    trials : :obj:`list`, optional
        list of test trials to construct videos from; each element is index into all
        possible trials (train, val, test); one of `trials` or `trial_idxs` must be
        specified; `trials` takes precedence over `trial_idxs`
    sess_idx : :obj:`int`, optional
        session index into data generator
    max_frames : :obj:`int`, optional
        maximum number of frames to animate from a trial
    frame_rate : :obj:`float`, optional
        frame rate of saved movie
    layout_pattern : :obj:`array-like`, optional
        boolean entries specify which panels are used to display frames
    """
    n_labels = hparams['n_labels']
    n_latents = hparams['n_ae_latents']
    expt_name = hparams['experiment_name']
    # set up panel titles, one per model plus the raw video
    titles = ['Original']
    for model in model_info:
        titles.append(model['title'])
    # prepend a placeholder entry for the original video WITHOUT mutating the
    # caller's list (fix: the previous in-place `insert(0, ...)` modified
    # `model_info`, so calling this function twice with the same list
    # mis-aligned models and titles)
    model_info = [{'model_class': None}] + list(model_info)
    ims_recon = [[] for _ in titles]
    latents = [[] for _ in titles]
    # allow the caller to specify only one of the two trial-index schemes
    if trial_idxs is None:
        trial_idxs = [None] * len(trials)
    if trials is None:
        trials = [None] * len(trial_idxs)
    for i, model in enumerate(model_info):
        if i == 0:
            # slot 0 is the original video; frames are collected in the i == 1
            # pass below
            continue
        # further specify model
        version = model.get('version', 'best')
        hparams['experiment_name'] = model.get('experiment_name', expt_name)
        hparams['model_class'] = model['model_class']
        model_ae, data_generator = get_best_model_and_data(hparams, None, version=version)
        # get images
        for trial_idx, trial in zip(trial_idxs, trials):
            # get model inputs
            ims_orig_pt, ims_orig_np, _, labels_pt, _, labels_2d_pt, _ = get_model_input(
                data_generator, hparams, model_ae, trial_idx=trial_idx, trial=trial,
                sess_idx=sess_idx, max_frames=max_frames, compute_latents=False,
                compute_2d_labels=False)
            # get model outputs
            ims_recon_tmp, latents_tmp = get_reconstruction(
                model_ae, ims_orig_pt, labels=labels_pt, labels_2d=labels_2d_pt,
                return_latents=True)
            ims_recon[i].append(ims_recon_tmp)
            latents[i].append(latents_tmp)
            # add a couple black frames to separate trials
            final_trial = True
            if (trial_idx is not None and (trial_idx != trial_idxs[-1])) or \
                    (trial is not None and (trial != trials[-1])):
                final_trial = False
            n_buffer = 5
            if not final_trial:
                _, n, y_p, x_p = ims_recon[i][-1].shape
                ims_recon[i].append(np.zeros((n_buffer, n, y_p, x_p)))
                # NaN latents render as gaps in any latent traces
                latents[i].append(np.nan * np.zeros((n_buffer, n_latents)))
            if i == 1:  # deal with original frames only once
                ims_recon[0].append(ims_orig_np)
                latents[0].append([])
                # add a couple black frames to separate trials
                if not final_trial:
                    _, n, y_p, x_p = ims_recon[0][-1].shape
                    ims_recon[0].append(np.zeros((n_buffer, n, y_p, x_p)))
    # flatten per-trial chunks into one array per panel
    for i, (ims, zs) in enumerate(zip(ims_recon, latents)):
        ims_recon[i] = np.concatenate(ims, axis=0)
        latents[i] = np.concatenate(zs, axis=0)
    if layout_pattern is None:
        # default grid layouts for up to 6 panels
        if len(titles) < 4:
            n_rows, n_cols = 1, len(titles)
        elif len(titles) == 4:
            n_rows, n_cols = 2, 2
        elif len(titles) > 4:
            n_rows, n_cols = 2, 3
        else:
            # NOTE(review): unreachable — the `> 4` branch above catches
            # everything; >6 panels would overflow the 2x3 grid silently
            raise ValueError('too many models')
    else:
        assert np.sum(layout_pattern) == len(ims_recon)
        n_rows, n_cols = layout_pattern.shape
        # pad panel lists with empty slots where the layout mask is False
        count = 0
        for pos_r in layout_pattern:
            for pos_c in pos_r:
                if not pos_c:
                    ims_recon.insert(count, [])
                    titles.insert(count, [])
                count += 1
    make_reconstruction_movie(
        ims=ims_recon, titles=titles, n_rows=n_rows, n_cols=n_cols,
        save_file=save_file, frame_rate=frame_rate)
# +
# set model info
# set model info
# minimal hparams needed to locate the trained models on disk;
# get_lab_example fills in the remaining dataset-specific options below
hparams = {
    'data_dir': get_user_dir('data'),
    'save_dir': get_user_dir('save'),
    'n_labels': n_labels,
    'n_ae_latents': n_latents + n_labels,  # total dims: supervised + unsupervised
    'experiment_name': None,
    'model_type': 'conv',
    'conditional_encoder': False,
}
# programmatically fill out other hparams options
get_lab_example(hparams, lab, expt)
# compare vae/ps-vae reconstructions (both use the same total latent count)
total_latents = n_latents + n_labels
model_info = []
for cls_key, cls_label in (('ps-vae', 'PS-VAE'), ('vae', 'VAE')):
    model_info.append({
        'model_class': cls_key,
        'experiment_name': 'demo-run',
        'title': '%s (%i latents)' % (cls_label, total_latents),
        'version': 0,
    })
save_file = os.path.join(
    get_user_dir('fig'), lab, expt, animal, session, model_class,
    'reconstructions_alpha={}_beta={}_gamma={}_rng={}_latents={}'.format(
        best_alpha, best_beta, best_gamma, best_rng, n_latents))
make_reconstruction_movie_wrapper(
    hparams, save_file=save_file, trial_idxs=None, trials=label_recon_trials,
    model_info=model_info, frame_rate=15)
|
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 2
# language: python
# name: python2
# ---
# + [markdown] hide_input=true
# # Complex query
#
# ## Query the database using a provenance relationship with the QueryBuilder
#
# Time: 3 mins
#
# ##### In this example, we query the calculations in our database that are part of specific groups, and analyze the output. We want to get the magnetization of each structure that we computed. We are also interested in the smearing contribution to the total energy as an indicator of the existence and magnitude of the bandgap.
# -
# <div class="alert alert-box alert-info">
# To run this example you need to have imported first the sample database provided with the demos.
# Make sure to have done it otherwise you will get zero results.
# </div>
# + hide_input=true
import sys, numpy as np
from argparse import ArgumentParser
from matplotlib import gridspec, pyplot as plt
from aiida import load_dbenv, is_dbenv_loaded
if not is_dbenv_loaded():
load_dbenv()
from aiida.orm import CalculationFactory, QueryBuilder, load_node
from aiida.orm.data.structure import StructureData
from aiida.orm.data.parameter import ParameterData
from aiida.orm.group import Group
from notebook_helpers import generate_query_graph
PwCalculation = CalculationFactory('quantumespresso.pw')
# + hide_input=false
# Each group of calculations that are of interests has the string: "tutorial_"+pseudo,
# where pseudo is lda, pbe or pbesol
group_basename = 'tutorial_%'
# -
# #### Start building the query
# Instantiate QB:
qb = QueryBuilder()
# Append the Group to the entities returned, with a LIKE filter on the name:
qb.append(Group, filters={'name':{'like':group_basename}}, project='name', tag='group')
# #### Visualize the query so far
# + hide_input=true
from IPython.display import Image
# -
generate_query_graph(qb.get_json_compatible_queryhelp(), 'query1.png')
Image(filename='query1.png')
# #### Append the calculations that are members of each group
# I want every PwCalculation that is a member of the specified groups:
qb.append(PwCalculation, tag='calculation', member_of='group')
# #### Visualize the current status of the query
generate_query_graph(qb.get_json_compatible_queryhelp(), 'query2.png')
Image(filename='query2.png')
# #### Append the structures that are input of the calculation. Project the id of the structure and the formula, stored in the extras under the key 'formula'.
# The first time you run this, the extras.formula is not yet set, so it will return all `None`. Later we'll see how to amend this.
qb.append(StructureData, project=['id', 'extras.formula'], tag='structure', input_of='calculation')
# #### Visualize the current status of the query
generate_query_graph(qb.get_json_compatible_queryhelp(), 'query3.png')
Image(filename='query3.png')
# Append the parameters that are an output of the calculation.
#
# Project:
# * The smearing contribution and the units
# * The magnetization and the units.
qb.append(ParameterData,tag='results',
            project=['attributes.energy_smearing', 'attributes.energy_smearing_units',
                'attributes.total_magnetization', 'attributes.total_magnetization_units',
            ], output_of='calculation'
        )
# #### Visualize the final query
generate_query_graph(qb.get_json_compatible_queryhelp(), 'query4.png')
Image(filename='query4.png')
# #### Print the query results
# Each row: [group_name, structure_id, formula, smearing, smearing_units, magnetization, magnetization_units]
results = qb.all()
for item in results:
    print ', '.join(map(str, item))
# The first time you run this query, the third column (`extras.formula` of the Structure) will be `None`, because the extras are not set. For those we now add the extras, and re-run the query.
# pk of every structure whose 'formula' extra is still unset
# (res[1] = structure pk, res[2] = projected extras.formula)
missing_formulas_pk = set([res[1] for res in results if res[2] is None])
print "{} structures still do not have an extra.formulas set.".format(len(missing_formulas_pk))
if missing_formulas_pk:
    print "We will set this extra now."
# store each formula in the node extras so future queries can project it directly
for structure_pk in missing_formulas_pk:
    structure = load_node(structure_pk)
    formula = structure.get_formula()
    structure.set_extra('formula', formula)
print "Extra added to {} structures.".format(len(missing_formulas_pk))
# We now run again the query to make sure to get also the formula.
#
# Note that to run the query again, we have to create it again (we already ran it, so we cannot just modify it).
# The line below replaces qb with a new query, with the same appended filters and projections.
qb = QueryBuilder(**qb.get_json_compatible_queryhelp())
results = qb.all()
for item in results:
    print ', '.join(map(str, item))
# #### Plot the results
# Getting a long list is not always helpful. We prepared a function that visualizes in a nice, graphical format the results of the query.
#
# Don't get scared, most of the code below is to get a nice appearance in matplotlib - you already got the results in the point above!
# + hide_input=true
def plot_results(query_res):
    """
    Plot smearing energy (top panel) and total magnetization (bottom panel)
    as grouped bar charts: one bar group per formula, one color per pseudo
    family.

    :param query_res: The result of an instance of the QueryBuilder; each row
        must be [pseudo_family, structure_pk, formula, smearing,
        smearing_units, magnetization, magnetization_units]
    """
    smearing_unit_set,magnetization_unit_set,pseudo_family_set = set(), set(), set()
    # Storing results:
    results_dict = {}
    for pseudo_family, structure_pk, formula, smearing, smearing_units, mag, mag_units in query_res:
        if formula not in results_dict:
            results_dict[formula] = {}
        # Storing the results:
        results_dict[formula][pseudo_family] = (smearing, mag)
        # Adding to the unit sets (checked for consistency below):
        smearing_unit_set.add(smearing_units)
        magnetization_unit_set.add(mag_units)
        pseudo_family_set.add(pseudo_family)
    # Sorting by formula:
    sorted_results = sorted(results_dict.items())
    # NOTE: subscripting zip(...) is Python-2-only (py3 zip is lazy)
    formula_list = zip(*sorted_results)[0]
    nr_of_results = len(formula_list)
    # Checks that I have not more than 3 pseudo families.
    # If more are needed, define more colors
    #pseudo_list = list(pseudo_family_set)
    if len(pseudo_family_set) > 3:
        raise Exception('I was expecting 3 or less pseudo families')
    colors = ['b', 'r', 'g']
    # Plotting:
    plt.clf()
    fig=plt.figure(figsize=(16, 9), facecolor='w', edgecolor=None)
    gs = gridspec.GridSpec(2,1, hspace=0.01, left=0.1, right=0.94)
    # Defining barwidth
    barwidth = 1. / (len(pseudo_family_set)+1)
    offset = [-0.5+(0.5+n)*barwidth for n in range(len(pseudo_family_set))]
    # Axis labels with units:
    yaxis = ("Smearing energy [{}]".format(smearing_unit_set.pop()),
            "Total magnetization [{}]".format(magnetization_unit_set.pop()))
    # If more than one unit was specified, I will exit:
    if smearing_unit_set:
        raise Exception('Found different units for smearing')
    if magnetization_unit_set:
        raise Exception('Found different units for magnetization')
    # Making two plots, upper for the smearing, the lower for magnetization
    for index in range(2):
        ax=fig.add_subplot(gs[index])
        # NOTE(review): iterating a set gives nondeterministic family->color
        # assignment across runs — confirm whether a fixed order is wanted
        for i,pseudo_family in enumerate(pseudo_family_set):
            X = np.arange(nr_of_results)+offset[i]
            # assumes every formula has a result for every pseudo family,
            # otherwise this raises KeyError — TODO confirm
            Y = np.array([thisres[1][pseudo_family][index] for thisres in sorted_results])
            # NOTE(review): bar width is hard-coded to 0.2 although `barwidth`
            # is computed above — confirm whether width=barwidth was intended
            ax.bar(X, Y, width=0.2, facecolor=colors[i], edgecolor=colors[i], label=pseudo_family)
        ax.set_ylabel(yaxis[index], fontsize=14, labelpad=15*index+5)
        ax.set_xlim(-0.5, nr_of_results-0.5)
        ax.set_xticks(np.arange(nr_of_results))
        if index == 0:
            # top panel: hide lowest y label, move x ticks on top, add legend
            plt.setp(ax.get_yticklabels()[0], visible=False)
            ax.xaxis.tick_top()
            ax.legend(loc=3, prop={'size': 18})
        else:
            plt.setp(ax.get_yticklabels()[-1], visible=False)
        # shade every other formula column for readability
        for i in range(0, nr_of_results, 2):
            ax.axvspan(i-0.5, i+0.5, facecolor='y', alpha=0.2)
        ax.set_xticklabels(list(formula_list),rotation=90, size=14, ha='center')
    plt.show()
# -
plot_results(results)
|
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# language: python
# name: python3
# ---
# %matplotlib inline
#
# 单词级语言模型RNN
# =====================================
# **教程作者**: `Antares博士 <http://www.studyai.com/antares>`_
#
# 本教程在语言建模任务上训练多层RNN(Elman、GRU或LSTM)。默认情况下,训练脚本使用提供的Wikitext-2数据集。
# 然后,生成脚本(generate script)可以使用经过训练的模型生成新文本。
#
# 该模型使用 `nn.RNN` 模块(及其姊妹模块 `nn.GRU` 和 `nn.LSTM` ),如果在安装了cuDNN的CUDA上运行,
# 该模块将自动使用cuDNN后端。
#
# 在训练期间,如果接收到键盘中断(Ctrl-C),则停止训练,并根据测试数据集评估当前模型。
#
#
#
# 创建数据集:语料库
# -----------------------
#
#
# +
import os
from io import open
import torch
class Dictionary(object):
    """Bidirectional word <-> integer-id mapping for a token vocabulary."""

    def __init__(self):
        # word -> id
        self.word2idx = {}
        # id -> word; a word's id equals its position in this list
        self.idx2word = []

    def add_word(self, word):
        """Register *word* if unseen and return its integer id."""
        if word not in self.word2idx:
            self.word2idx[word] = len(self.idx2word)
            self.idx2word.append(word)
        return self.word2idx[word]

    def __len__(self):
        # vocabulary size
        return len(self.idx2word)
class Corpus(object):
    """Train/valid/test token streams of a dataset.

    Builds a single shared ``Dictionary`` over all three splits and stores
    each split as a 1-D ``torch.LongTensor`` of word ids.
    """

    def __init__(self, path):
        self.dictionary = Dictionary()
        self.train = self.tokenize(os.path.join(path, 'train.txt'))
        self.valid = self.tokenize(os.path.join(path, 'valid.txt'))
        self.test = self.tokenize(os.path.join(path, 'test.txt'))

    def tokenize(self, path):
        """Tokenize a text file into a 1-D LongTensor of word ids.

        Each line is whitespace-split and terminated with an ``<eos>`` token;
        the shared dictionary is extended with any unseen words.
        """
        # fix: raise explicitly instead of `assert` (stripped under -O) and
        # drop the stray debug `print(path)` that polluted stdout
        if not os.path.exists(path):
            raise FileNotFoundError(path)
        # First pass: grow the dictionary and count tokens so the output
        # tensor can be allocated in one shot (avoids holding a huge Python
        # list of ints for large corpora)
        with open(path, 'r', encoding="utf8") as f:
            tokens = 0
            for line in f:
                words = line.split() + ['<eos>']
                tokens += len(words)
                for word in words:
                    self.dictionary.add_word(word)
        # Second pass: fill the tensor with the word ids
        with open(path, 'r', encoding="utf8") as f:
            ids = torch.LongTensor(tokens)
            token = 0
            for line in f:
                words = line.split() + ['<eos>']
                for word in words:
                    ids[token] = self.dictionary.word2idx[word]
                    token += 1
            return ids
# -
# 创建网络模型
# -----------------------
#
#
# +
import torch.nn as nn
class RNNModel(nn.Module):
    """Container module with an encoder, a recurrent module, and a decoder."""

    def __init__(self, rnn_type, ntoken, ninp, nhid, nlayers, dropout=0.5, tie_weights=False):
        super(RNNModel, self).__init__()
        self.drop = nn.Dropout(dropout)
        self.encoder = nn.Embedding(ntoken, ninp)
        if rnn_type in ['LSTM', 'GRU']:
            # LSTM/GRU share a constructor signature; pick the class by name
            self.rnn = getattr(nn, rnn_type)(ninp, nhid, nlayers, dropout=dropout)
        else:
            try:
                nonlinearity = {'RNN_TANH': 'tanh', 'RNN_RELU': 'relu'}[rnn_type]
            except KeyError:
                raise ValueError( """An invalid option for `--model` was supplied,
                                 options are ['LSTM', 'GRU', 'RNN_TANH' or 'RNN_RELU']""")
            self.rnn = nn.RNN(ninp, nhid, nlayers, nonlinearity=nonlinearity, dropout=dropout)
        self.decoder = nn.Linear(nhid, ntoken)
        # Optionally tie weights as in:
        # "Using the Output Embedding to Improve Language Models" (Press & Wolf 2016)
        # https://arxiv.org/abs/1608.05859
        # and
        # "Tying Word Vectors and Word Classifiers: A Loss Framework for Language Modeling" (Inan et al. 2016)
        # https://arxiv.org/abs/1611.01462
        if tie_weights:
            if nhid != ninp:
                raise ValueError('When using the tied flag, nhid must be equal to emsize')
            self.decoder.weight = self.encoder.weight
        self.init_weights()
        self.rnn_type = rnn_type
        self.nhid = nhid
        self.nlayers = nlayers

    def init_weights(self):
        """Uniformly initialize embeddings and decoder; zero the decoder bias."""
        span = 0.1
        self.encoder.weight.data.uniform_(-span, span)
        self.decoder.bias.data.zero_()
        self.decoder.weight.data.uniform_(-span, span)

    def forward(self, input, hidden):
        """Map (seq_len, batch) token ids to (seq_len, batch, ntoken) logits."""
        embedded = self.drop(self.encoder(input))
        rnn_out, hidden = self.rnn(embedded, hidden)
        rnn_out = self.drop(rnn_out)
        # flatten time/batch so the linear decoder sees a 2-D input,
        # then restore the (seq_len, batch, ntoken) layout
        seq_len, bsz = rnn_out.size(0), rnn_out.size(1)
        logits = self.decoder(rnn_out.view(seq_len * bsz, rnn_out.size(2)))
        return logits.view(seq_len, bsz, logits.size(1)), hidden

    def init_hidden(self, bsz):
        """Return a zeroed initial hidden state (a (h, c) pair for LSTM)."""
        weight = next(self.parameters())
        shape = (self.nlayers, bsz, self.nhid)
        if self.rnn_type == 'LSTM':
            return (weight.new_zeros(*shape), weight.new_zeros(*shape))
        return weight.new_zeros(*shape)
# -
# 创建参数解析器
# -----------------------
#
#
# +
import argparse
import time
import math
# Command-line hyperparameters; defaults reproduce the standard
# Wikitext-2 LSTM baseline.
parser = argparse.ArgumentParser(description='PyTorch Wikitext-2 RNN/LSTM Language Model')
parser.add_argument('--data', type=str, default='./data/wikitext-2',
                    help='location of the data corpus')
parser.add_argument('--model', type=str, default='LSTM',
                    help='type of recurrent net (RNN_TANH, RNN_RELU, LSTM, GRU)')
parser.add_argument('--emsize', type=int, default=200,
                    help='size of word embeddings')
parser.add_argument('--nhid', type=int, default=200,
                    help='number of hidden units per layer')
parser.add_argument('--nlayers', type=int, default=2,
                    help='number of layers')
parser.add_argument('--lr', type=float, default=20,
                    help='initial learning rate')
parser.add_argument('--clip', type=float, default=0.25,
                    help='gradient clipping')
parser.add_argument('--epochs', type=int, default=40,
                    help='upper epoch limit')
parser.add_argument('--batch_size', type=int, default=20, metavar='N',
                    help='batch size')
parser.add_argument('--bptt', type=int, default=35,
                    help='sequence length')
parser.add_argument('--dropout', type=float, default=0.2,
                    help='dropout applied to layers (0 = no dropout)')
parser.add_argument('--tied', action='store_true',
                    help='tie the word embedding and softmax weights')
parser.add_argument('--seed', type=int, default=1111,
                    help='random seed')
parser.add_argument('--cuda', action='store_true',
                    help='use CUDA', default=True)
parser.add_argument('--log-interval', type=int, default=200, metavar='N',
                    help='report interval')
parser.add_argument('--save', type=str, default='model.pt',
                    help='path to save the final model')
parser.add_argument('--onnx-export', type=str, default='',
                    help='path to export the final model in onnx format')
# fix: parse_known_args ignores the extra arguments that Jupyter/IPython
# kernels inject into sys.argv (e.g. '-f <connection file>'), which made
# parse_args() abort with SystemExit when this script runs in a notebook;
# valid command-line usage is unaffected
args, _ = parser.parse_known_args()
# Set the random seed manually for reproducibility.
torch.manual_seed(args.seed)
device = torch.device("cuda" if torch.cuda.is_available() else "cpu")
# -
# 加载数据
# -----------------------
#
#
# +
corpus = Corpus(args.data)
# Starting from sequential data, batchify arranges the dataset into columns.
# For instance, with the alphabet as the sequence and batch size 4, we'd get
# ┌ a g m s ┐
# │ b h n t │
# │ c i o u │
# │ d j p v │
# │ e k q w │
# └ f l r x ┘.
# These columns are treated as independent by the model, which means that the
# dependence of e. g. 'g' on 'f' can not be learned, but allows more efficient
# batch processing.
def batchify(data, bsz):
    """Reshape a 1-D token stream into ``bsz`` parallel columns.

    Trailing tokens that do not fill a complete row are dropped; the result
    has shape (nbatch, bsz) and is moved to the global ``device``.
    """
    # complete rows available per column
    rows = data.size(0) // bsz
    # trim off any remainder that would not fill a full row
    trimmed = data.narrow(0, 0, rows * bsz)
    # lay the stream out column-wise: each column is an independent sequence
    columns = trimmed.view(bsz, -1).t().contiguous()
    return columns.to(device)
# evaluation uses a smaller batch size than training
eval_batch_size = 10
train_data = batchify(corpus.train, args.batch_size)
val_data = batchify(corpus.valid, eval_batch_size)
test_data = batchify(corpus.test, eval_batch_size)
# -
# 构建模型
# -----------------------
#
#
# +
# vocabulary size sets both the embedding rows and the decoder output width
ntokens = len(corpus.dictionary)
model = RNNModel(args.model, ntokens, args.emsize, args.nhid, args.nlayers, args.dropout, args.tied).to(device)
# CrossEntropyLoss applies log-softmax internally, so the model emits raw logits
criterion = nn.CrossEntropyLoss()
# -
# 训练与评估模型
# -----------------------
#
#
# +
def repackage_hidden(h):
    """Return *h* detached from its autograd history.

    LSTM state is a tuple of tensors while GRU/RNN state is a single
    tensor, so recurse over tuples and detach at the tensor leaves.
    """
    if isinstance(h, torch.Tensor):
        return h.detach()
    return tuple(repackage_hidden(part) for part in h)
# get_batch subdivides the source data into chunks of length args.bptt.
# If source is equal to the example output of the batchify function, with
# a bptt-limit of 2, we'd get the following two Variables for i = 0:
# ┌ a g m s ┐ ┌ b h n t ┐
# └ b h n t ┘ └ c i o u ┘
# Note that despite the name of the function, the subdivison of data is not
# done along the batch dimension (i.e. dimension 1), since that was handled
# by the batchify function. The chunks are along dimension 0, corresponding
# to the seq_len dimension in the LSTM.
def get_batch(source, i):
    """Slice one BPTT-length chunk of *source* starting at row *i*.

    Returns (input, target) where target is the input shifted by one
    step and flattened for the cross-entropy loss.  Relies on the
    module-level ``args.bptt`` and ``device``.
    """
    chunk = min(args.bptt, len(source) - 1 - i)
    inputs = source[i:i + chunk].to(device)
    labels = source[i + 1:i + 1 + chunk].view(-1).to(device)
    return inputs, labels
def evaluate(data_source):
    """Compute the mean per-token loss of ``model`` over ``data_source``.

    Uses the module-level ``model``, ``corpus``, ``criterion``, ``args``
    and ``eval_batch_size``.  Each chunk's loss is weighted by its
    length, so the result is an exact per-token average.
    """
    # Turn on evaluation mode which disables dropout.
    model.eval()
    total_loss = 0.
    ntokens = len(corpus.dictionary)
    hidden = model.init_hidden(eval_batch_size)
    with torch.no_grad():
        for i in range(0, data_source.size(0) - 1, args.bptt):
            data, targets = get_batch(data_source, i)
            output, hidden = model(data, hidden)
            output_flat = output.view(-1, ntokens)
            # Weight by len(data): the final chunk may be shorter than bptt.
            total_loss += len(data) * criterion(output_flat, targets).item()
            hidden = repackage_hidden(hidden)
    return total_loss / (len(data_source) - 1)
def train():
    """Run one epoch of training over the module-level ``train_data``.

    Uses plain SGD (a manual parameter update with the global learning
    rate ``lr``) instead of a torch.optim optimizer, and truncated
    backpropagation through time: the hidden state is detached between
    batches so gradients never flow past a batch boundary.
    """
    # Turn on training mode which enables dropout.
    model.train()
    total_loss = 0.
    start_time = time.time()
    ntokens = len(corpus.dictionary)
    hidden = model.init_hidden(args.batch_size)
    for batch, i in enumerate(range(0, train_data.size(0) - 1, args.bptt)):
        data, targets = get_batch(train_data, i)
        # Starting each batch, we detach the hidden state from how it was previously produced.
        # If we didn't, the model would try backpropagating all the way to start of the dataset.
        hidden = repackage_hidden(hidden)
        model.zero_grad()
        output, hidden = model(data, hidden)
        loss = criterion(output.view(-1, ntokens), targets)
        loss.backward()
        # `clip_grad_norm` helps prevent the exploding gradient problem in RNNs / LSTMs.
        torch.nn.utils.clip_grad_norm_(model.parameters(), args.clip)
        for p in model.parameters():
            # SGD step: p <- p - lr * grad.  The (tensor, alpha=...) form
            # replaces the deprecated two-tensor add_(-lr, grad) overload.
            p.data.add_(p.grad.data, alpha=-lr)
        total_loss += loss.item()
        if batch % args.log_interval == 0 and batch > 0:
            cur_loss = total_loss / args.log_interval
            elapsed = time.time() - start_time
            print('| epoch {:3d} | {:5d}/{:5d} batches | lr {:02.2f} | ms/batch {:5.2f} | '
                  'loss {:5.2f} | ppl {:8.2f}'.format(
                      epoch, batch, len(train_data) // args.bptt, lr,
                      elapsed * 1000 / args.log_interval, cur_loss, math.exp(cur_loss)))
            total_loss = 0
            start_time = time.time()
def export_onnx(path, batch_size, seq_len):
    """Export the trained ``model`` to ONNX at *path*.

    Traces the model with a dummy all-zero token batch of shape
    (seq_len, batch_size) plus a fresh initial hidden state.
    """
    print('The model is also exported in ONNX format at {}'.
format(os.path.realpath(args.onnx_export)))
    model.eval()
    # Dummy (seq_len, batch_size) LongTensor of token ids, all zero.
    dummy_input = torch.LongTensor(seq_len * batch_size).zero_().view(-1, batch_size).to(device)
    hidden = model.init_hidden(batch_size)
    torch.onnx.export(model, (dummy_input, hidden), path)
import copy
# Loop over epochs.
lr = args.lr
best_val_loss = None
best_model = None
# At any point you can hit Ctrl + C to break out of training early.
try:
    for epoch in range(1, args.epochs+1):
        epoch_start_time = time.time()
        train()
        val_loss = evaluate(val_data)
        print('-' * 89)
        print('| end of epoch {:3d} | time: {:5.2f}s | valid loss {:5.2f} | '
              'valid ppl {:8.2f}'.format(epoch, (time.time() - epoch_start_time),
                                         val_loss, math.exp(val_loss)))
        print('-' * 89)
        # Save the model if the validation loss is the best we've seen so far.
        if not best_val_loss or val_loss < best_val_loss:
            # deepcopy first, THEN move the copy to the CPU.  The original
            # called copy.deepcopy(model.cpu()), and .cpu() moves the *live*
            # model off the GPU in place, breaking subsequent CUDA epochs.
            best_model = copy.deepcopy(model).cpu()
            best_val_loss = val_loss
#             with open(args.save, 'wb') as f:
#                 torch.save(model, f)
        else:
            # Anneal the learning rate if no improvement has been seen in the validation dataset.
            lr /= 4.0
except KeyboardInterrupt:
    print('-' * 89)
    print('Exiting from training early')
# Restore the best model seen during training.  The original opened
# args.save here, but the torch.load was commented out and the handle was
# never read, so the (possibly missing) file is not actually required.
# model = torch.load(open(args.save, 'rb'))
model = best_model.to(device)
# after load the rnn params are not a continuous chunk of memory
# this makes them a continuous chunk, and will speed up forward pass
model.rnn.flatten_parameters()
# Run on test data.
test_loss = evaluate(test_data)
print('=' * 89)
print('| End of training | test loss {:5.2f} | test ppl {:8.2f}'.format(
    test_loss, math.exp(test_loss)))
print('=' * 89)
if len(args.onnx_export) > 0:
    # Export the model in ONNX format.
    export_onnx(args.onnx_export, batch_size=1, seq_len=args.bptt)
|
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# language: python
# name: python3
# ---
from stanfordcorenlp import StanfordCoreNLP

# NOTE(review): machine-specific path; parameterize if this is reused.
nlp = StanfordCoreNLP("/home/judson/Documents/stanford-corenlp-full-2018-02-27")
sentence = 'Guangdong University of Foreign Studies is located in Guangzhou.'
# Python 3 print() call — the original used a Python 2 print statement,
# which is a SyntaxError under the notebook's declared Python 3 kernel.
print('Dependency Parsing:', nlp.dependency_parse(sentence))

import spacy
nlp = spacy.load('en_core_web_sm')
doc = nlp(u"The Federal Magistrate found that the Tribunal's decision was unaffected by jurisdictional error and therefore a privative clause decision.")
from spacy import displacy
op = {'compact': True, 'bg': '#d7f23e', 'color': 'black'}
#svg=displacy.render(doc,style="dep",jupyter=True,options=op)
svg = displacy.render(doc, style="dep", options=op)
fname = "/home/judson/dep.svg"
from pathlib import Path
outpath = Path(fname)
# Context manager closes the handle; the original left the file open.
with outpath.open('w', encoding='utf-8') as out:
    out.write(svg)
|
// ---
// jupyter:
// jupytext:
// text_representation:
// extension: .java
// format_name: light
// format_version: '1.5'
// jupytext_version: 1.14.4
// kernelspec:
// display_name: Java
// language: java
// name: java
// ---
// # Exception
//
// In Java, an exception is a mechanism to signal that the
// execution of a method can not be performed
// by example, trying to get a value of a list with an index equals to -1
//
// Returns the element of list at index; list.get() itself throws
// IndexOutOfBoundsException when index is outside [0, list.size()).
String valueAt(List<String> list, int index) {
  return list.get(index);
}
valueAt(List.of("hello"), -1);
// an exception has
// - a type that indicate the kind of error
// - an error message that explains the issue in English
// - a stacktrace which indicates where the exception was raised
// and the methods to reach that point
//
// In our example, `java.lang.IndexOutOfBoundsException` is the type,
// `Index: -1 Size: 1` is the message and
// ```
// at ImmutableCollections$AbstractImmutableList.outOfBounds (ImmutableCollections.java:201)
// at ImmutableCollections$List12.get (ImmutableCollections.java:418)
// at valueAt (#1:2)
// at (#2:1)
// ```
// is the stacktrace
//
// ## `throw`
// You can create (with `new`) and raise your own exception using the keyword `throw`
//
// Same lookup, but validates the index up front and throws
// IllegalArgumentException (naming the offending index) instead of
// letting list.get() raise IndexOutOfBoundsException.
String valueAt(List<String> list, int index) {
  if (index < 0 || index >= list.size()) {
    throw new IllegalArgumentException("invalid index " + index);
  }
  return list.get(index);
}
valueAt(List.of("hello"), -1);
// The stacktrace is populated automatically when you create the exception
// not where you throw it so it's a good idea to create the exception
// not too far from where you throw it.
// In the following example, the stacktrace will say that the exception
// is created at `notTooFar (#5:2)`, on the second line, not at `notTooFar (#5:4)`.
//
// Demonstrates that the stacktrace records where an exception is
// *created* (the `new` below), not where it is thrown.
void notTooFar() {
  var exception = new RuntimeException("i'm created here");
  // an empty line
  throw exception;
}
notTooFar();
// ## Use existing exceptions
// While you can create your own exception (see below),
// usually we are re-using already existing exceptions.
//
// Exceptions commonly used in Java
// - NullPointerException if a reference is null
// - IllegalArgumentException if an argument of a method is not valid
// - IllegalStateException if the object state doesn't allow to proceed,
// by example if a file is closed, you can not read it
// - AssertionError if a code that should not be reached has been reached
//
// By example
//
// A closed set of states; testState() below must cover every constant.
enum State { OK, NOT_OK }
void testState(State state) {
  switch(state) {
    case OK -> System.out.println("Cool !");
    case NOT_OK -> System.out.println("Not cool");
    // unreachable unless State later gains a constant this switch misses
    default -> { throw new AssertionError("Danger, Will Robinson"); }
  }
}
// here the AssertionError can only be thrown if the code of testState()
// and the enum State disagree on the set of possible values
// By example, if a new state is added
//
// Redefining State with a new constant makes the default branch reachable.
enum State { OK, NOT_OK, UNKNOWN }
testState(State.UNKNOWN);
// ## Recovering from an exception
// In Java, you can recover from an exception using a `try/catch` block.
//
// Recovering with try/catch: fall back to a default URI when parsing fails.
URI uri;
try {
  uri = new URI("http://i'm a malformed uri");
} catch(URISyntaxException e) {
  // if the URI is malformed, use google by default
  uri = new URI("http://www.google.fr");
}
System.out.println(uri);
// A common mistake is to write a `try/catch` in a method with an empty catch
// or a catch that log/print a message instead of actually recovering from the
// exception
//
// As a rule of thumb, if you can not write something meaningful in the catch
// block then you should not use a `try/catch`.
//
// ## Fighting with the compiler
// For the compiler, there are two kinds of exceptions that are handled differently
// - unchecked exception, you can throw them anywhere you want
// - checked exception, you can only throw them if
// - you are inside a method that declare to throws that exception (or a supertype)
// - you are inside a try/catch block on that exception (or a supertype)
//
// In Java, an exception that inherits from `RuntimeException` or `Error` are
// unchecked exceptions, all the others are checked exceptions
//
// so this code doesn't compile because `IOException` inherits from `Exception`
// and not `RuntimeException`.
//
/*
void hello() {
Files.delete(Path.of("I don't exist"));
}
*/
// A way to fix the issue is to use the keywords `throws` to ask the caller
// of the method to deal with the exception, again the caller will have,
// either by propagating it with a `throws` or recover from it with a `try/catch`.
//
// Propagates the checked IOException to the caller via `throws`.
void hello() throws IOException {
  Files.delete(Path.of("I don't exist"));
}
// As a rule of thumb, 99% of the time you want to propagate the exception,
// and keep the number of `try/catch` as low as possible in your program,
// so prefer `throws` to `try/catch`.
//
// ### When you can not use `throws`, wrap the exception
//
// If a method has its signature fixed because it overrides a method of an interface,
// then you can not use `throws`
//
// The following example doesn't compile because the method `run` of a `Runnable`
// doesn't declare to `throws` `IOException` so the only solution seems to be
// to use a `try/catch`.
//
/*
var aRunnable = new Runnable() {
public void run() {
Files.delete(Path.of("I don't exist"));
}
};
*/
// So here, we have to use a `try/catch` but we still want to propagate the exception.
// The trick is to wrap the checked exception in an unchecked exception.
// This trick is so common that the Java API already comes with existing
// classes to wrap common checked exceptions. For `IOException`, the unchecked
// equivalent is `UncheckedIOException`.
//
// Runnable.run() cannot declare checked exceptions, so the checked
// IOException is wrapped in its unchecked twin, UncheckedIOException.
var aRunnable = new Runnable() {
  public void run() {
    try {
      Files.delete(Path.of("I don't exist"));
    } catch(IOException e) {
      // the way to recover, is to propagate it as an unchecked
      throw new UncheckedIOException(e);
    }
  }
};
aRunnable.run();
// The exception `UndeclaredThrowableException` is used as the generic unchecked exception
// to wrap any checked exception which do not have an unchecked equivalent.
//
// ## Create your own Exception
//
// You can create your own exception by creating a class that inherits from `RuntimeException`
// You should provide at least two constructors, one with a message and one with a message
// and a cause.
//
// A custom unchecked exception: extends RuntimeException and mirrors its
// two conventional constructors (message, and message + cause).
public class MyException extends RuntimeException {
  public MyException(String message) {
    super(message);
  }
  public MyException(String message, Throwable cause) {
    super(message, cause);
  }
}
throw new MyException("This is my exception");
// But in general, don't ! Reuse existing commonly used exceptions.
//
|
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# language: python
# name: python3
# ---
# # Cleaning the master metadata
#
# #### standardizing authors / titles
#
# The authors and titles I got from HathiTrust have a few rough edges. Titles sometimes include a statement about authorship preceded by ```$c```. I don't usually want to treat that as part of the title.
#
# Authors' names may be preceded by "Sir" or "Mrs"; generally I want to move that sort of honorific to the end of the name, so that last name always comes first. (Important for deduplication.)
#
# #### volume-part inference
#
# Commonly, a multi-volume set of *Works* will have a "contents" statement that enumerates the sub-title of each volume. With a bit of careful parsing, we can assign titles to individual volumes, so we have *Ivanhoe* instead of the less informative *Works of Scott,* vol 7.
#
# #### date correction
#
# The routine I used to infer ```inferreddate``` gave up a little too easily in some cases, and there are zeroes where we could make a better guess. Also, I'd like to add a column for "last possible date of composition." Using information about an author's date of death (!!), or in some cases copyright date, we can infer that some volumes are reprints of much earlier publications.
#
# +
# a few useful imports
import pandas as pd
import re
# +
# read the raw data
# Tab-separated master metadata from HathiTrust, indexed by document id.
meta = pd.read_csv('mergedficmetadata.tsv', sep = '\t', index_col = 'docid', low_memory = False)
meta.head()
# +
# Let's create some new columns. Two of them will be blank.
# One will contain just volume numbers. To that end, let's
# define a function that translates enumcrons to vol
# numbers.
def justvolnumbers(enum):
    ''' Returns strictly the numeric part of an enumcron,
    getting rid of the nonstandard 'v. ' or 'V.' It doesn't
    return anything for enums that are like 'c. 2' or
    'copy 2'--that's not a volume number.

    Returns an int in (0, 200), or the empty string when no
    sensible volume number can be extracted.
    '''
    if pd.isnull(enum) or len(enum) < 1:
        return ''
    # 'c. 2' / '(copy 2)' style enums denote copies, not volumes.
    if enum.startswith('c') or enum.startswith('(c'):
        return ''
    # Raw string avoids the invalid-escape warning '\d' triggers on
    # recent Pythons.
    matches = re.findall(r'\d+', enum)
    if not matches:
        return ''
    volnum = int(matches[0])
    # Sanity bound: real multi-volume sets never reach 200 volumes.
    return volnum if 0 < volnum < 200 else ''
# Derived columns: a numeric volume number, plus two placeholders that
# the title-inference pass below fills in.
meta['volnum'] = meta['enumcron'].map(justvolnumbers)
meta['shorttitle'] = ''
meta['parttitle'] = ''
# +
# Just to test what we produced:
meta.head()
# -
# ### volume-part inference
#
# Basically, we want to be able to translate a contents statement, and convert it into a dictionary where volume numbers map to titles of individual volumes, like so:
#
# 
#
# That's not terribly hard, with a regex:
def volmap(contents):
    ''' A function that turns a "contents" statement into a dictionary
    of titles.

    Returns a dict mapping int volume number -> title string; empty when
    the contents field is missing or too short to parse.
    '''
    themap = dict()
    if pd.isnull(contents):
        return themap
    if len(contents) < 4:
        return themap
    # Convert Roman numerals to Arabic.  Replacement order matters:
    # longer numerals must go before their substrings (e.g. 'XVI.'
    # before 'XV.', 'IX.' before 'X.', 'IV.' before 'V.').
    contents = contents.replace('XVI.', '16')
    contents = contents.replace('XV.', '15')
    contents = contents.replace('XIV.', '14')
    contents = contents.replace('XIII.', '13')
    contents = contents.replace('XII.', '12')
    contents = contents.replace('XI.', '11')
    contents = contents.replace('IX.', '9')
    contents = contents.replace('X.', '10')
    contents = contents.replace('VIII.', '8')
    contents = contents.replace('VII.', '7')
    contents = contents.replace('VI.', '6')
    contents = contents.replace('IV.', '4')
    contents = contents.replace('V.', '5')
    contents = contents.replace('III.', '3')
    contents = contents.replace('II.', '2')
    contents = contents.replace('I.', '1')
    sequence = re.findall(r'\D+|\d+', contents)
    # The regex above does most of the work in this function, translating the
    # contents statement into a sequence of alternating alphabetic and numeric
    # sections.
    if len(sequence) < 3:
        return themap
    started = False
    hyphen = False
    for s in sequence:
        if s.isdigit() and not started:
            # First volume number seen: open the current volume group.
            started = True
            nextvols = [int(s)]
            expectation = int(s) + 1
        elif not started:
            pass
        elif s == '-':
            # Possible start of a range like '3-5'.
            hyphen = True
        elif s.isdigit() and hyphen:
            if int(s) < expectation:
                # Implausible range end (e.g. a date); discard the hyphen.
                hyphen = False
                pass
            elif len(nextvols) == 1:
                # Expand the range: volumes nextvols[0]..s share one title.
                # (The range start is re-appended; harmless because the
                # dict assignment below is idempotent.)
                for i in range(nextvols[0], int(s) + 1):
                    nextvols.append(i)
                expectation = int(s) + 1
                hyphen = False
            else:
                hyphen = False
                pass
        elif s.isdigit():
            if int(s) == expectation:
                # Next sequential volume number: start a new group.
                nextvols = [int(s)]
                expectation = int(s) + 1
            else:
                # Out-of-sequence number (page count, date, ...): ignore.
                pass
        else:
            # Alphabetic chunk: it is the title of the pending volume(s).
            for n in nextvols:
                themap[n] = s.strip('., -v[]()')
    return themap
# We also need to clean up titles, by getting rid of the part after "$c," along with various extra punctuation characters.
def short_title(longtitle):
    """Strip the '$c' authorship statement and stray punctuation from a title."""
    # Everything after '$c' is an authorship note, not part of the title.
    justtitle = longtitle.split("$c")[0] if "$c" in longtitle else longtitle
    return justtitle.strip('| /.,').replace(' | ', ' ')
# Now let's actually do the work.
# +
# Group volumes by bibliographic record; within each record, parse the
# longest contents statement to infer per-volume titles.
grouped = meta.groupby('recordid')
ctr = 0
for record, group in grouped:
    ctr += 1
    if ctr % 100 == 1:
        # progress indicator
        print(ctr)
    maxlen = 0
    longest = ''
    for cont in group.contents:
        if pd.isnull(cont):
            continue
        elif len(cont) > maxlen:
            maxlen = len(cont)
            longest = cont
    themap = volmap(longest)
    for idx in group.index:
        volnum = group.loc[idx, 'volnum']
        if type(volnum) == int and volnum in themap:
            meta.loc[idx, 'parttitle'] = themap[volnum]
            meta.loc[idx, 'shorttitle'] = themap[volnum]
        else:
            # No per-volume title: fall back to the cleaned record title.
            meta.loc[idx, 'shorttitle'] = short_title(meta.loc[idx, 'title'])
# -
# ### Author standardization
#
# Move those honorifics to the end of the name.
#
# Also, while we're at it, let's redress a couple of historical injustices that affect prominent authors in ways that would complicate deduplication.
# +
def flip_honorific(auth):
    """Move honorifics (Sir/Mrs/Lady, parenthesized notes) to the end of
    an author name, and normalize a few specific authors whose names are
    recorded inconsistently."""
    if pd.isnull(auth):
        return ''
    if auth in ('Ward, Humphry, Mrs', "Mrs. Humphry Ward",
                "Mrs., Ward, Humphry", 'Ward, Humphry'):
        return "Ward, Mary Augusta"
    if auth in ('Wood, Henry, Mrs', "Mrs. Henry Wood",
                "Mrs., Wood, Henry", 'Wood, Henry'):
        return "Wood, Ellen"
    # yes, in principle that's unfair to the real Humphry Ward and Henry Wood
    # however, in practice ...
    for honorific in ('Sir', 'Mrs', 'Lady'):
        if auth.startswith(honorific):
            return auth[len(honorific):].strip('. ,') + ', ' + honorific
    if auth.startswith('(') and ')' in auth:
        pieces = auth.split(')')
        surname_first = pieces[1].strip('., ')
        return surname_first + ' ' + pieces[0] + ')'
    peers = {
        'Baron, Dunsany, Edward John Moreton Drax Plunkett':
            'Dunsany, Edward John Moreton Drax Plunkett',
        'Baron, Lytton, Edward Bulwer Lytton':
            'Lytton, Edward Bulwer Lytton',
        'Baroness, Orczy, Emmuska Orczy':
            'Orczy, Emmuska Orczy',
    }
    return peers.get(auth, auth)
# Apply the honorific/author fixes to every row.
meta['cleanauth'] = meta['author'].map(flip_honorific)
# -
# ### Date correction
#
# Fixing a few inferred dates, adding a column for last possible date of composition.
# +
# Add a "last possible date of composition" column, and repair zero
# inferreddate values from startdate/enddate when those look plausible.
meta['latestcomp'] = ''
for idx in meta.index:
    infer = meta.loc[idx, 'inferreddate']
    if int(infer) == 0:
        # Fall back to startdate, then enddate, keeping only sane years.
        try:
            newdate = int(meta.loc[idx, 'startdate'])
            if newdate > 1699 and newdate < 2100:
                meta.loc[idx, 'inferreddate'] = newdate
            else:
                # (the original had a harmless doubled 'newdate =' here)
                newdate = int(meta.loc[idx, 'enddate'])
                if newdate > 1699 and newdate < 2100:
                    meta.loc[idx, 'inferreddate'] = newdate
        except (ValueError, TypeError):
            # missing / non-numeric date fields: leave inferreddate at zero
            pass
    # An author cannot compose after death; parse the death year from
    # 'authordate' strings shaped like '1771-1832'.
    authdate = meta.loc[idx, 'authordate']
    death = 3000
    if not pd.isnull(authdate):
        authdate = authdate.strip(',.')
        if '-' in authdate and len(authdate) > 6:
            try:
                death = int(authdate[-4: ])
            except (ValueError, TypeError):
                death = 3000
        else:
            death = 3000
    # For copyright/reprint date types, enddate records first publication.
    datetype = meta.loc[idx, 'datetype']
    if datetype == 'c' or datetype == 't' or datetype == 'r':
        try:
            firstpub = int(meta.loc[idx, 'enddate'])
        except (ValueError, TypeError):
            firstpub = 3000
    else:
        firstpub = 3000
    infer = int(meta.loc[idx, 'inferreddate'])
    # Treat implausibly early values as "unknown" (sentinel 2100) so they
    # never win the min() below.
    if infer < 1700:
        infer = 2100
    if death < 1700:
        death = 2100
    if firstpub < 1700:
        firstpub = 2100
    meta.loc[idx, 'latestcomp'] = min(death, infer, firstpub)
# -
# #### now write to file
# Final column order for the cleaned metadata.
cols_in_order = ['author', 'cleanauth', 'authordate', 'inferreddate', 'latestcomp', 'datetype', 'startdate', 'enddate', 'imprint',
'imprintdate', 'contents', 'genres', 'subjects', 'geographics', 'locnum', 'oclc', 'place', 'recordid',
'enumcron', 'volnum', 'title', 'parttitle', 'shorttitle']
outmeta = meta[cols_in_order]
# NOTE(review): outmeta is a slice of meta; sorting it in place can trigger
# pandas' SettingWithCopyWarning -- consider meta[cols_in_order].copy().
outmeta.sort_values(by = ['inferreddate', 'recordid', 'volnum'], inplace = True)
outmeta.to_csv('masterficmetadata.tsv', sep = '\t')
|
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3 (ipykernel)
# language: python
# name: python3
# ---
# # CMPS3660 - Project0
#
# This assignment is intended to ensure that you are able to edit and submit Jupyter Notebooks.
#
# The goal is to write a function to count the frequency of different characters in a given string. For example: character_frequency('google') should return a `dict`: {'g': 2, 'o': 2, 'l': 1, 'e': 1} and then you should add some code to display some summary statistics about some data that you bring in!
#
# There are many tutorials on the web that can show you how to use the Python data structures. Quite a few examples at: https://docs.python.org/3/tutorial/datastructures.html
#
# If you have any trouble with this at all (and we are sure at least some of you will), don't worry and stop by our office hours.
#
# You should edit the function definition below, make sure to press 'Shift + Enter' to execute it, and then execute the two function calls to check that the output is correct.
def character_frequency(s):
    """Count occurrences of each character in *s*.

    Returns a dict mapping character -> count, keyed in order of first
    appearance, e.g. character_frequency('google') ->
    {'g': 2, 'o': 2, 'l': 1, 'e': 1}.
    """
    ret = dict()
    # Iterate the characters directly instead of indexing via range(len(s)).
    for ch in s:
        ret[ch] = ret.get(ch, 0) + 1
    return ret
character_frequency('google')
character_frequency('This assignment is intended to ensure that you are able to edit and submit Jupyter Notebooks.')
# Now in the cell below you should create two string variables and inside them put a different quotes (at least 10 words long) from a favorite book, movie, tv show, or any other media.
movie1 = "You were the chosen one! It was said that you would destroy the Sith, not join them! Bring balance to the force, not leave it in darkness! You were my brother Anakin! I loved you!"
movie2 = "Señores, Les voy a decir quién soy Yo soy Pablo Emilio Escobar Gaviria Mis ojos están en todos lados o sea ustedes no pueden hacer una puta sola mierda en el departamento de Antioquia que yo me entere. Sí, señores. No pueden mover un dedo. Un día, yo voy a ser presidente de la República de Colombia. Y bien, me gano la vida haciendo negocios. Así que pues, fresco. Tranquilo. Uds pueden aceptar mi negocio o aceptar las consecuencias. Plata o pomo Ustedes eligen"
# For each of the above quotes add code below to print out the character frequency and then give a set of summary statistics for each one, specifically the letter(s) that occur the fewest number of times, the letter(s) that occur the most times, and the average count for all the letters. You should display this data neatly.
# +
def getPercents(data):
    """Print each character's count and its percentage share of *data*.

    data: dict mapping character -> count (as built by character_frequency).
    """
    total = sum(data.values())
    print("There were a total of",total,"characters comprising of",len(data),"unique characters.")
    for key in data:
        # Truncate (not round) the percentage to at most 4 characters.
        percent = str(100*data[key]/total)[:4]
        print("\tCharacter '",key,"' had ",data[key]," appeareances and comprised ",percent,"% of the dataset.",sep="")
def analysis(data):
    """Print the (up to) three most and three least frequent characters.

    data: dict mapping character -> count.  The dict is mutated (the
    reported entries are removed), which is why callers pass a copy.
    The min(3, len(data)) guards let inputs with fewer than six unique
    characters print what they can instead of raising ValueError from
    max()/min() on an empty dict.
    """
    print("The three most frequent characters were:")
    for _ in range(min(3, len(data))):
        key = max(data, key=data.get)
        freq = data[key]
        del data[key]
        print("\tThe character '",key,"' appeared ",freq," times.",sep="")
    print("The three least frequent characters were:")
    for _ in range(min(3, len(data))):
        key = min(data, key=data.get)
        freq = data[key]
        del data[key]
        print("\tThe character '",key,"' only appeared ",freq," times.",sep="")
def overall_analysis(dataArr):
    """Merge several frequency dicts and report on the combined counts."""
    # Fold every dict into one combined tally.
    combined = {}
    for freq in dataArr:
        for ch, count in freq.items():
            combined[ch] = combined.get(ch, 0) + count
    # analysis() mutates its argument, so hand each consumer its own copy.
    analysis(combined.copy())
    getPercents(combined.copy())
data1 = character_frequency(movie1)
print("Star Wars' Quote:",data1)
analysis(data1.copy())
data2 = character_frequency(movie2)
print("Narcos' Quote:", data2)
analysis(data2.copy())
print("_______________________________________________\nCombining all of the data\n_ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _")
overall_analysis([data1.copy(),data2.copy()])
# -
|
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .r
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernel_info:
# name: ir
# kernelspec:
# display_name: R
# language: R
# name: ir
# ---
# # Simple Stock Statistics
# + outputHidden=false inputHidden=false
library(quantmod)
# + outputHidden=false inputHidden=false
getSymbols('AAPL', src = 'yahoo', from = as.Date('2016-01-01'), to = as.Date('2019-01-01'))
# + outputHidden=false inputHidden=false
head(AAPL)
# + outputHidden=false inputHidden=false
AD <- AAPL$AAPL.Adjusted
# + outputHidden=false inputHidden=false
# Average
mean(AD)
# Find mean dropping NA values.
mean(AD,na.rm = TRUE)
# + outputHidden=false inputHidden=false
median(AD, na.rm = FALSE)
# + outputHidden=false inputHidden=false
# Create the function.
# Statistical mode of x: the most frequent value (first seen wins ties).
# NOTE(review): this masks base R's mode(), which reports an object's
# storage mode -- rename if that matters downstream.
mode <- function(x) {
  mode_x <- unique(x)
  mode_x[which.max(tabulate(match(x, mode_x)))]
}
# + outputHidden=false inputHidden=false
mode(AD)
# + outputHidden=false inputHidden=false
# Standard Deviation
sd(AD)
# + outputHidden=false inputHidden=false
# Variance
var(AD)
# + outputHidden=false inputHidden=false
min(AD)
# + outputHidden=false inputHidden=false
max(AD)
# + outputHidden=false inputHidden=false
# Range
max(AD) - min(AD)
# + outputHidden=false inputHidden=false
# Interquartile Range
IQR(AD)
# + outputHidden=false inputHidden=false
# Percentile
quantile(AD, c(.10, .25, .50, .75, .99))
# + outputHidden=false inputHidden=false
ADm <- mean(AD)
ADsd <- sd(AD)
# + outputHidden=false inputHidden=false
# probability distribution
dnorm(AD, ADm, ADsd)
# + outputHidden=false inputHidden=false
y <- dnorm(AD, ADm, ADsd)
plot(y)
# + outputHidden=false inputHidden=false
hist(y, main='Probability Distribution ')
# + outputHidden=false inputHidden=false
length(min(dnorm(AD, ADm, ADsd)))
length(max(dnorm(AD, ADm, ADsd)))
# + outputHidden=false inputHidden=false
min(dnorm(AD, ADm, ADsd))
max(dnorm(AD, ADm, ADsd))
# + outputHidden=false inputHidden=false
# probability of a normally distributed
pnorm(AD, ADm, ADsd)
# + outputHidden=false inputHidden=false
y <- pnorm(AD, ADm, ADsd)
plot(y)
# + outputHidden=false inputHidden=false
hist(y, main = "Normal Distribution")
# + outputHidden=false inputHidden=false
library(e1071)
# + outputHidden=false inputHidden=false
skewness(AD)
# + outputHidden=false inputHidden=false
kurtosis(AD)
|
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# language: python
# name: python3
# ---
# +
import numpy as np
import matplotlib.pyplot as plt
# Simulation parameters: number of walkers dropped on the top row,
# lattice width and height, and the RNG seed for reproducibility.
nwalkers=150
width=300
height=150
seed=123
def neighbor(matrix, i, j):
    """Return True if any of the 8 cells adjacent to (i, j) is occupied.

    The cell (i, j) itself is ignored, and out-of-bounds neighbours are
    skipped rather than wrapped.
    """
    rows, cols = matrix.shape
    for di in (-1, 0, 1):
        for dj in (-1, 0, 1):
            if di == 0 and dj == 0:
                continue
            ni, nj = i + di, j + dj
            if 0 <= ni < rows and 0 <= nj < cols and matrix[ni, nj] > 0:
                return True
    return False

def update(matrix, i, width):
    """Advance every walker in row *i* of the lattice by one step.

    Walkers with at least one occupied neighbour freeze where they are;
    each remaining walker steps to row i+1 and randomly one column left
    or right, disappearing when the step leaves [0, width).  Mutates and
    returns *matrix*.
    """
    # Collect frozen walkers first; a set gives O(1) membership tests
    # below (the original used a list, O(n) per lookup).
    frozen = set()
    for j in range(width):
        if matrix[i, j] > 0 and neighbor(matrix, i, j):
            frozen.add((i, j))
    for j in range(width):
        if matrix[i, j] > 0 and (i, j) not in frozen:
            for _ in range(matrix[i, j]):
                # One random diagonal step down: column +/-1, row +1.
                new_j = j + np.random.binomial(1, 0.5) * 2 - 1
                if 0 <= new_j < width:
                    matrix[i + 1, new_j] += 1
            matrix[i, j] = 0
    return matrix
def dla(nwalkers, width, height, seed):
    """Run the diffusion-limited-aggregation walk and return the lattice.

    Walkers start at random columns of row 0, then each row is advanced
    in turn by update(); the extra final row catches walkers stepping
    past the last height index.
    """
    np.random.seed(seed)
    lattice = np.zeros((height + 1, width), dtype=int)
    # Scatter the walkers over row 0 (several may share a column).
    starts = np.random.randint(0, width, nwalkers)
    np.add.at(lattice[0], starts, 1)
    for row in range(height):
        lattice = update(lattice, row, width)
    return lattice
# Run the simulation and display the resulting aggregate.
matrix = dla(nwalkers, width, height, seed)
plt.imshow(matrix)
pass
|
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python [conda env:venv]
# language: python
# name: conda-env-venv-py
# ---
# +
# download literature from LitCovid
# https://www.ncbi.nlm.nih.gov/research/coronavirus/
# https://ftp.ncbi.nlm.nih.gov/pub/lu/LitCovid/
# -
# !pip install bioc
# !wget https://ftp.ncbi.nlm.nih.gov/pub/lu/LitCovid/litcovid2pubtator.json.gz -P ../data/LitCovid/
# !gunzip ../data/LitCovid/litcovid2pubtator.json.gz
# %load_ext autoreload
# %autoreload 2
import parse_data
# Paths: input PubTator JSON dump and the output directory.
covid_f = '../data/LitCovid/litcovid2pubtator.json'
# covid_f = '../data/LitCovid/litcovid2BioCJSON'
output_d = '../data/LitCovid/'
import json
with open(covid_f, encoding='utf-8') as f:
    data = json.load(f)
# +
# Spot-check one document's parsed text (record index 7414).
t = parse_data.parse_doc(data[1][7414])[2]
print(t)
# NOTE(review): str.replace/encode return new objects; these calls discard
# their results, so they only matter as the cell's display value.
t.replace("\u2019", "'")
# t.encode('latin1', 'replace')
# .decode("utf-8")
t.encode('utf8')
# -
parse_data.parse_doc(data[1][7414])
tar_text = ''
import importlib
importlib.reload(parse_data)
parse_data.parse_doc(t)
# NOTE(review): sample PubTator/BioC record pasted inline for inspection; it
# shadows the `t` parsed above and is not used afterwards in this notebook.
# NOTE(review): as exported, this cell is not valid Python -- the title 'text'
# is a bytes literal and the abstract 'text' contains a raw line break inside
# a single-quoted string, most likely artifacts of the notebook export.
# Confirm against the original cell before running.
t = {'_id': '32644403|None',
 'id': '32644403',
 'infons': {},
 'passages': [{'infons': {'journal': '; 2020 01 ',
    'year': '2020',
    'type': 'title',
    'authors': 'Hodgens A, Gupta V, ',
    'section': 'Title',
    'section_type': 'TITLE'},
   'offset': 0,
   'text': b'Severe Acute Respiratory Syndrome (SARS)',
   'annotations': [{'id': '2',
     'infons': {'identifier': 'MESH:D045169', 'type': 'Disease'},
     'text': 'Severe Acute Respiratory Syndrome',
     'locations': [{'offset': 0, 'length': 33}]},
    {'id': '3',
     'infons': {'identifier': 'MESH:D045169', 'type': 'Disease'},
     'text': 'SARS',
     'locations': [{'offset': 35, 'length': 4}]}]},
  {'infons': {'type': 'abstract', 'section': 'Abstract'},
   'offset': 41,
   'text': 'A new and rapidly progressive respiratory syndrome termed severe acute respiratory syndrome (SARS) was identified by the World Health Organization (WHO) in the Guangdong Province of China as a global threat in March of 2003. SARS went on to spread globally over the following months to over 30 countries and became the 1st pandemic of the 21st century.\xa0It showed that the dissemination of an infectious microbe could be drastically increased in the era of globalization and increased international travel. The decade preceding the SARS outbreak featured the emergence of multiple novel pathogens, including H5N1 influenza, Hantavirus, Nipah virus, and Avian flu. However, SARS was unique among these as it had the ability for efficient person-to-person transmission.[1]\xa0By the end of the outbreak in July 2003, 8096 cases were reported leading to 774 deaths with a case fatality rate of over 9.6%.[2][3] SARS\xa0showed a unique predilection for healthcare workers,\xa0with 21% of cases occurring in these individuals.[4] The WHO, along with its international partners, including the Centers for Disease Control and Prevention (CDC), was able to identify within 2 weeks the etiologic agent.[5][6] The agent was a novel coronavirus and was given the name SARS coronavirus (SARS-CoV). It was isolated in a number of SARS patients and suspected as the causative agent before ultimately being sequenced and fulfilling Koch’s postulates confirming it as the cause.[7]\xa0 The number of secondary cases produced by one SARS patient is thought to be in the range of two to four though a few patients, including the original index case, were\xa0suspected to be “super-spreaders” spreading to up to hundreds of others. The mode of transmission for the virus was largely through respiratory inhalation of droplets. Treatment was primarily supportive, and no specific anti-virals were effective.\xa0Since mid-2004, no new cases of SARS have been reported. 
Until the recent COVID-19 pandemic, the global reach of SARS had been matched only by the 2009 H1N1 Influenza pandemic.[8] Lessons learned from the SARS pandemic are currently used as a blueprint to fight the pandemic of COVID19.',
   'annotations': [{'id': '28',
     'infons': {'identifier': 'MESH:D012120', 'type': 'Disease'},
     'text': 'respiratory syndrome',
     'locations': [{'offset': 71, 'length': 20}]},
    {'id': '29',
     'infons': {'identifier': 'MESH:D012120', 'type': 'Disease'},
     'text': 'acute respiratory syndrome',
     'locations': [{'offset': 106, 'length': 26}]},
    {'id': '30',
     'infons': {'identifier': 'MESH:D045169', 'type': 'Disease'},
     'text': 'SARS',
     'locations': [{'offset': 134, 'length': 4}]},
    {'id': '31',
     'infons': {'identifier': 'MESH:D045169', 'type': 'Disease'},
     'text': 'SARS',
     'locations': [{'offset': 266, 'length': 4}]},
    {'id': '32',
     'infons': {'identifier': 'MESH:D045169', 'type': 'Disease'},
     'text': 'SARS',
     'locations': [{'offset': 572, 'length': 4}]},
    {'id': '33',
     'infons': {'identifier': '102793', 'type': 'Species'},
     'text': 'H5N1',
     'locations': [{'offset': 648, 'length': 4}]},
    {'id': '34',
     'infons': {'identifier': '121791', 'type': 'Species'},
     'text': 'Nipah virus',
     'locations': [{'offset': 676, 'length': 11}]},
    {'id': '35',
     'infons': {'identifier': 'MESH:D045169', 'type': 'Disease'},
     'text': 'SARS',
     'locations': [{'offset': 713, 'length': 4}]},
    {'id': '36',
     'infons': {'identifier': 'MESH:D003643', 'type': 'Disease'},
     'text': 'deaths',
     'locations': [{'offset': 892, 'length': 6}]},
    {'id': '37',
     'infons': {'identifier': 'MESH:D045169', 'type': 'Disease'},
     'text': 'SARS',
     'locations': [{'offset': 945, 'length': 4}]},
    {'id': '38',
     'infons': {'identifier': '2697049', 'type': 'Species'},
     'text': 'novel coronavirus',
     'locations': [{'offset': 1247, 'length': 17}]},
    {'id': '39',
     'infons': {'identifier': '694009', 'type': 'Species'},
     'text': 'SARS coronavirus',
     'locations': [{'offset': 1288, 'length': 16}]},
    {'id': '40',
     'infons': {'identifier': '694009', 'type': 'Species'},
     'text': 'SARS-CoV',
     'locations': [{'offset': 1306, 'length': 8}]},
    {'id': '41',
     'infons': {'identifier': 'MESH:D045169', 'type': 'Disease'},
     'text': 'SARS',
     'locations': [{'offset': 1348, 'length': 4}]},
    {'id': '42',
     'infons': {'identifier': '9606', 'type': 'Species'},
     'text': 'patients',
     'locations': [{'offset': 1353, 'length': 8}]},
    {'id': '43',
     'infons': {'identifier': 'MESH:D045169', 'type': 'Disease'},
     'text': 'SARS',
     'locations': [{'offset': 1544, 'length': 4}]},
    {'id': '44',
     'infons': {'identifier': '9606', 'type': 'Species'},
     'text': 'patient',
     'locations': [{'offset': 1549, 'length': 7}]},
    {'id': '45',
     'infons': {'identifier': '9606', 'type': 'Species'},
     'text': 'patients',
     'locations': [{'offset': 1615, 'length': 8}]},
    {'id': '46',
     'infons': {'identifier': 'MESH:D045169', 'type': 'Disease'},
     'text': 'SARS',
     'locations': [{'offset': 1945, 'length': 4}]},
    {'id': '47',
     'infons': {'identifier': 'MESH:C000657245', 'type': 'Disease'},
     'text': 'COVID-19',
     'locations': [{'offset': 1987, 'length': 8}]},
    {'id': '48',
     'infons': {'identifier': 'MESH:D045169', 'type': 'Disease'},
     'text': 'SARS',
     'locations': [{'offset': 2026, 'length': 4}]},
    {'id': '49',
     'infons': {'identifier': '114727', 'type': 'Species'},
     'text': 'H1N1',
     'locations': [{'offset': 2065, 'length': 4}]},
    {'id': '50',
     'infons': {'identifier': 'MESH:D045169', 'type': 'Disease'},
     'text': 'SARS',
     'locations': [{'offset': 2118, 'length': 4}]},
    {'id': '51',
     'infons': {'identifier': 'MESH:C000657245', 'type': 'Disease'},
     'text': 'COVID19',
     'locations': [{'offset': 2191, 'length': 7}]}]}],
 'pmid': 32644403,
 'pmcid': None,
 'created': {'$date': 1600357501642},
 'accessions': ['disease@MESH:C000657245',
  'species@694009',
  'disease@MESH:D003643',
  'species@114727',
  'species@121791',
  'disease@MESH:D012120',
  'species@2697049',
  'species@9606',
  'species@102793',
  'disease@MESH:D045169'],
 'journal': '',
 'year': 2020,
 'authors': ['Hodgens A', 'Gupta V'],
 'tags': ['LitCovid']}
# Inspect the metadata of the first document, then convert the whole dump.
data[1][0]['passages'][0]['infons']
parse_data.parse_data_json_f_hd(covid_f, output_d)
# !python evaluate_renet2_ft.py --raw_data_dir ../data/LitCovid/ --model_dir ../models/ft_models/ --gda_fn_d ../data/LitCovid/ --models_number 10 --batch_size 60
# NOTE(review): the bare literal below has no effect -- it is a leftover
# notebook cell echo (presumably an expected document count).
73651
|
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: 'Python 3.7.7 64-bit (''.venv'': venv)'
# name: python_defaultSpec_1594574675805
# ---
# # TITLE
# We downloaded the files from: <br>
# accidentalidad.csv: http://medata.gov.co/dataset/accidentalidad
# <br>
# AccidentalidadgeoreferenciadaYYYY:
# <br>
# http://medata.gov.co/dataset/accidentalidad-geo-referenciada-2019
# http://medata.gov.co/dataset/accidentalidad-geo-referenciada-2018
# http://medata.gov.co/dataset/accidentalidad-geo-referenciada-2017
# http://medata.gov.co/dataset/accidentalidad-geo-referenciada-2016
# http://medata.gov.co/dataset/accidentalidad-geo-referenciada-2015
# http://medata.gov.co/dataset/accidentalidad-geo-referenciada-2014
# <br>
#
# To ingest the datasets above we decided to use two different dataframes: the accidentalidad dataframe ```a_df``` aggregates the different files by year and includes the incident time, which is not present in the accidentalidad_georreferenciada dataframe ```ag_df```. We use these two dataframes to verify the integrity of the datasets and build one dataset with all the information we need.
#
# In the following code we clean and merge the datasets:
# # Download all the data
#
# + tags=[]
import requests
import pandas as pd
import geopandas as gpd
from geopandas.tools import sjoin
from shapely import geometry
import numpy as np
from ast import literal_eval
from unidecode import unidecode
import holidays
import random
pd.options.display.max_columns = None
# #!brew install spatialindex #needs rtree to work
#pip install rtree
# +
# url_incidentes = 'https://ds4a.blob.core.windows.net/ds4a/Incidentes2014_2020.xlsx'
# +
# r =requests.get(url_incidentes, allow_redirects=True)
# open('./data/Incidentes2014_2020.xlsx', 'wb').write(r.content)
# -
# Load the consolidated incidents workbook and normalise column names to
# English (note: 'Desing' keeps the original misspelling because downstream
# cells may reference it).
i_df = pd.read_excel('./data/Incidentes2014_2020.xlsx')
i_df.shape
i_df.columns
i_df.drop(columns=['Comuna'],inplace=True)
i_df.rename(columns={"NRO_RADICADO": "Radicado", "CLASE_ACCIDENTE":"Type", "DIRECCION":"Address", "CBML":"Cbml", "Barrio":"Borough", "nombre comuna":"Comuna", "codigo comuna":"codigo_comuna", "Diseño":"Desing", "Longitud":"Longitude", "Latitud":"Latitude", "Gravedad Incidente":"Severity", "FECHA_ACCIDENTE":"Date", "Año":"Year", "HORA_ACCIDENTE":"Time"},inplace=True)
# FIX: the original cell contained the incomplete expression `i_df.co`, which
# raises AttributeError; it was evidently meant to inspect the columns.
i_df.columns
i_df.codigo_comuna.value_counts()
# + tags=[]
# Report every column that still contains NaNs and how many.
for i in i_df.isna().any()[i_df.isna().any() == True].index:
    print(i,' : ' ,i_df[i_df[i].isna()][i].size,' NaN')
# + tags=[]
# Fill missing case numbers (Radicado) with random 7-digit surrogates so the
# column can be used as a join key.
# NOTE(review): random surrogates could collide with real radicados -- confirm
# this is acceptable.
i_df.loc[(i_df.Radicado.isna()),'Radicado'] = i_df[i_df.Radicado.isna()].Radicado.apply(lambda x :str(int(round(random.random() * 9999999,0))))
i_df.Radicado.value_counts()
# + tags=[]
# Re-check NaN counts after the fill.
for i in i_df.isna().any()[i_df.isna().any() == True].index:
    print(i,' : ' ,i_df[i_df[i].isna()][i].size,' NaN')
# -
# Postal-code <-> borough lookup table (all columns as strings).
url_postal_codes_with_boroughs = 'https://ds4a.blob.core.windows.net/ds4a/postalCodesBoroughs.json'
pcb_df = pd.read_json(url_postal_codes_with_boroughs, dtype=str)
pcb_df
# Postal-code polygon layer.
url_postal_codes = "https://ds4a.blob.core.windows.net/ds4a/MedPostalCode.geojson"
pc_df = gpd.read_file(url_postal_codes)
pc_df
# Borough (barrio) polygon layer; rename its columns to the English names
# used throughout the rest of the notebook.
url = "https://ds4a.blob.core.windows.net/ds4a/Barrios.geojson"
borough_df = gpd.read_file(url)
borough_df.rename(columns={'NOMBRE_COMUNA':'Comuna','CODIGO': 'Cbml','NOMBRE_BARRIO':'Borough' },inplace=True)
borough_df.drop(columns=['OBJECTID', 'SHAPEAREA', 'SHAPELEN', 'COMUNA', 'BARRIO'],inplace=True)
b_df = borough_df[['Cbml', 'Borough', 'Comuna']].copy()
b_df
# Attach borough names to incidents by CBML code (left join keeps all rows).
i_df = i_df.merge(b_df,how='left', on='Cbml', suffixes=('_original','')).drop_duplicates()
i_df
# + tags=[]
# Build a GeoDataFrame of incident points (only rows that have coordinates).
temp_i_df = i_df[(~i_df.Latitude.isna())][['Radicado','Latitude', 'Longitude']]
i_gdf = gpd.GeoDataFrame(
    temp_i_df, geometry=gpd.points_from_xy(temp_i_df.Longitude, temp_i_df.Latitude))[['Radicado','geometry']]
i_gdf
# -
# Point-in-polygon join: find the borough polygon containing each incident.
# NOTE(review): `op=` is deprecated in newer geopandas (use predicate='within').
i_gdf = gpd.sjoin(i_gdf, borough_df,op='within', how='left').drop_duplicates()
i_gdf
i_gdf.dropna(inplace=True)
i_gdf.drop(columns=['geometry','index_right'],inplace=True)
# +
# Bring the spatially-derived borough columns back onto i_df; the columns that
# existed before this merge get the '_standard' suffix, the spatial ones keep
# the plain names (so Cbml is now the spatial result, Cbml_standard the old one).
i_df = i_df.merge(i_gdf, on='Radicado', how='left', suffixes=('_standard', '')).drop_duplicates()
# i_df.join(i_gdf.set_index('Radicado'), on='Radicado', how='left', lsuffix='_old').drop_duplicates()
# -
i_df
# + tags=[]
# import geocoder
# import json
# to_clean = i_df[i_df.Cbml != i_df.Cbml_standard].copy()
# batch = [[x,y] for x, y in zip(to_clean.Latitude,to_clean.Longitude) ]
# len(batch)
# FIX: the statements below were left active while their setup (the
# `import geocoder` above) is commented out -- within this notebook `geocoder`
# is never imported, so they raised NameError.  They are commented out to
# match the rest of this abandoned experiment.
# SECURITY: a Bing Maps API key is hard-coded below; it should be revoked and
# loaded from an environment variable instead.
# g = geocoder.bing([[40.7943, -73.970859], [48.845580, 2.321807]], method='batch_reverse',key='An8rGhIvWo0WthZ813a1cKK7gUjQ_6c2OFAlqLvi3pP9El8P0s_k7beCQEW2EzR1')
# for x in g:
#     print(x)
# with open('batch_reverse.json', 'w') as outfile:
#     json.dump(g, outfile, ensure_ascii=False, indent=4)
# -
# FIX: `to_clean` is first defined in the next cell; referencing it here
# raised NameError as well.
# to_clean.shape
# + tags=[]
from geopy.geocoders import Bing
from geopy.distance import distance
# SECURITY: a Bing Maps API key is hard-coded and committed; revoke it and
# load it from an environment variable instead.
g = Bing('An8rGhIvWo0WthZ813a1cKK7gUjQ_6c2OFAlqLvi3pP9El8P0s_k7beCQEW2EzR1')
# Rows whose source CBML disagrees with the spatially-joined CBML; geocode
# each one both ways and compare the resulting locations.
to_clean = i_df[i_df.Cbml != i_df.Cbml_standard].copy()
collistFromAddress =['postalCodeFromAddress','stateFromAddress','cityFromAddress','locTypeFromAddress','boroughsFromAddress','otherBoroughsFromAddress']
for col in collistFromAddress:
    to_clean[col] = ''
collistFromCoord =['postalCodeFromCoord','stateFromCoord','cityFromCoord','locTypeFromCoord','boroughsFromCoord','otherBoroughsFromCoord']
for col in collistFromCoord:
    to_clean[col] = ''
currentIndex = 1
oks = 0
for i, row in to_clean.iterrows():
    # Forward-geocode the street address and reverse-geocode the coordinates.
    loc_from_address = g.geocode(row.Address+',Medellin,Colombia')
    loc_from_coord = g.reverse(str(row.Latitude)+','+str(row.Longitude))
    # FIX: p_from_coord was previously assigned only inside the second branch
    # but used in the first one, raising NameError on the first iteration;
    # define it up front.
    p_from_coord = (row.Latitude, row.Longitude)
    # FIX: d_from_address was only set inside the first branch, so the
    # comparison at the bottom either raised NameError or reused a stale value
    # from a previous row; reset it on every iteration.
    d_from_address = float('inf')
    # NOTE(review): `'raw' in loc_from_address` looks like it was meant as a
    # None-check on the geopy result -- confirm against geopy's Location API.
    if 'raw' in loc_from_address:
        pc_from_address = loc_from_address.raw['address']['postalCode'] if 'postalCode' in loc_from_address.raw['address'] else ''
        if pc_from_address != '' and not pcb_df[pcb_df.postalCode==pc_from_address].empty:
            to_clean.loc[(to_clean.Radicado == row.Radicado),collistFromAddress] = pcb_df[pcb_df.postalCode==pc_from_address].values
        to_clean.loc[to_clean.Radicado == row.Radicado,'PostalCodeFromAddress'] = pc_from_address
        to_clean.loc[to_clean.Radicado == row.Radicado,'AddressFromAddress'] = loc_from_address.raw['name']
        to_clean.loc[to_clean.Radicado == row.Radicado,'ConfidenceFromAddress'] = loc_from_address.raw['confidence']
        to_clean.loc[to_clean.Radicado == row.Radicado,'LocalityFromAddress'] = loc_from_address.raw['address']['locality']
        to_clean.loc[to_clean.Radicado == row.Radicado,'MatchCodesFromAddress'] = ','.join(loc_from_address.raw['matchCodes'])
        p_from_address = (loc_from_address.latitude, loc_from_address.longitude)
        d_from_address = distance(p_from_address, p_from_coord).km
        to_clean.loc[to_clean.Radicado == row.Radicado,'DistanceFromAddress'] = d_from_address
    if 'raw' in loc_from_coord:
        pc_from_coord = loc_from_coord.raw['address']['postalCode'] if 'postalCode' in loc_from_coord.raw['address'] else ''
        if pc_from_coord != '' and not pcb_df[pcb_df.postalCode==pc_from_coord].empty:
            to_clean.loc[to_clean.Radicado == row.Radicado,collistFromCoord] = pcb_df[pcb_df.postalCode==pc_from_coord].values
        to_clean.loc[to_clean.Radicado == row.Radicado,'PostalCodeFromCoord'] = pc_from_coord
        to_clean.loc[to_clean.Radicado == row.Radicado,'AddressFromCoord'] = loc_from_coord.raw['name']
        to_clean.loc[to_clean.Radicado == row.Radicado,'ConfidenceFromCoord'] = loc_from_coord.raw['confidence']
        to_clean.loc[to_clean.Radicado == row.Radicado,'LocalityFromCoord'] = loc_from_coord.raw['address']['locality']
        to_clean.loc[to_clean.Radicado == row.Radicado,'MatchCodesFromCoord'] = ','.join(loc_from_coord.raw['matchCodes'])
        d_from_coord = distance((loc_from_coord.latitude,loc_from_coord.longitude), p_from_coord).km
        to_clean.loc[to_clean.Radicado == row.Radicado,'DistanceFromCoord'] = d_from_coord
        # Accept the address geocode when it lands within 1 km of the
        # reported coordinates (inf when the address branch did not run).
        if d_from_address < 1:
            oks = oks + 1
            print(oks,currentIndex)
            print(round(oks/currentIndex,2)*100,'%')
            print(to_clean.loc[to_clean.Radicado == row.Radicado].Radicado.values)
            to_clean.loc[to_clean.Radicado == row.Radicado ,'DistanceAnalysis'] = 'Ok'
        else:
            to_clean.loc[to_clean.Radicado == row.Radicado ,'DistanceAnalysis'] = 'Nok'
    currentIndex = currentIndex + 1
to_clean
# -
# Rows that passed the <1 km distance sanity check above.
to_clean[to_clean.DistanceAnalysis=='Ok']
# +
#"Source", "Objectid", "Radicado", "Type", "Address", "Cbml", "Borough", "Comuna", "Geocod", "Desing", "Longitude", "Latitude", "X_magnamed", "Y_magnamed", "Severity", "SeverityIndex", "Date", "Year", "MonthNum", "Month", "Quarter", "Week", "Day", "WeekDayNum", "WeekDay", "Holiday", "IsHoliday", "Time", "Hour", "Motorcycle"
# i_df['Date'] = i_df.Date.dt.strftime('%d/%m/%Y')
# i_df['Time'] = i_df.Time.dt.strftime('%H:%M:%S')
# i_df['Hour'] = pd.to_datetime(i_df.Time, format='%Y-%m-%d %H:%M:%S').dt.hour
# i_df['Date'] = pd.to_datetime(i_df.Date,format='%d/%m/%Y')
# + tags=[]
# Back-fill Borough/Comuna/Cbml for rows that have coordinates but no borough,
# via point-in-polygon lookup against the borough layer.
# NOTE(review): find_borough is defined elsewhere in this notebook; it is
# assumed to return the borough_df row containing the point, or None.
for i, row in i_df[(~i_df.Latitude.isna()) & (i_df.Borough.isna())].iterrows():
    p1 = geometry.Point(float(row.Longitude), float(row.Latitude))
    borough_row = find_borough(borough_df, p1)
    if borough_row is not None:
        # FIX: the original assigned to whole columns (i_df.Comuna = ...),
        # overwriting every row with the last match; assign only row i.
        # FIX: borough_df's columns were renamed earlier (NOMBRE_COMUNA ->
        # Comuna, CODIGO -> Cbml, NOMBRE_BARRIO -> Borough), so the old
        # attribute names raised AttributeError; use the renamed ones.
        i_df.loc[i, 'Comuna'] = borough_row.Comuna
        i_df.loc[i, 'Cbml'] = borough_row.Cbml
        i_df.loc[i, 'Borough'] = borough_row.Borough
    i_df[(~i_df.Latitude.isna()) & (i_df.Borough.isna())]
# -
i_df[(~i_df.Latitude.isna()) & (i_df.Borough.isna())]
# +
# Derive calendar features from the incident date.
i_df['MonthNum']= i_df.Date.dt.month
i_df['Month']= i_df.Date.dt.month_name()
i_df['Quarter'] = i_df.Date.dt.quarter
i_df['Week'] = i_df.Date.dt.week  # NOTE(review): .dt.week is deprecated; newer pandas needs .dt.isocalendar().week
i_df['Day'] = i_df.Date.dt.day
i_df['WeekDayNum'] = i_df.Date.dt.dayofweek
i_df['WeekDay'] = i_df.Date.dt.day_name()
# FIX: `i_df.Time.hour` raises AttributeError (a Series has no .hour);
# convert to datetime first and use the .dt accessor, as in the commented
# cell earlier in this notebook.  TODO confirm the Time column's exact format
# against the source workbook.
i_df['Hour'] = pd.to_datetime(i_df.Time.astype(str), errors='coerce').dt.hour
# Build a holiday lookup table for Colombia (2014-2020).
col_holidays = holidays.COL(years = list(range(2014,2021)))
h_df = pd.DataFrame([col_holidays.keys(), col_holidays.values()]).T
h_df.rename(columns={0:'Date',1:'Holiday'},inplace=True)
h_df['Date']= pd.to_datetime(h_df.Date,format='%Y/%m/%d')
h_df
#
#i_df['Holiday'] = i_df.
# i_df['WeekDay'].value_counts().sort_index()
# -
# Left-join the holiday table on Date; non-holidays get Holiday = NaN.
i_df = i_df.merge(h_df,on='Date',how='left')
i_df
i_df[i_df.Cbml== '03070040016']
# NOTE(review): `x is np.nan` relies on identity with the np.nan singleton;
# pd.isna(x) would be the robust check.
i_df['IsHoliday'] = i_df.Holiday.apply(lambda x : 0 if x is np.nan else 1)
i_df['Date'] = i_df.Date.dt.strftime('%d/%m/%Y')
i_df.drop(columns='codigo_comuna',inplace=True)
i_df['Hour']
# +
# dont forget to import event
from sqlalchemy import event, create_engine
# SECURITY: database credentials are hard-coded and committed; rotate them and
# load them from environment variables / a secrets manager instead.
DB_USERNAME = 'postgres@psql-ds4a-prod'
DB_PASSWORD = 'FliFUDlbO72cq2h9AaFF'
HOST = 'psql-ds4a-prod.postgres.database.azure.com'
engine=create_engine(f'postgresql://{DB_USERNAME}:{DB_PASSWORD}@{HOST}/ds4a', connect_args={'sslmode':'require'}, max_overflow=20)
# Persist the cleaned incidents table into the processed schema, in batches.
i_df.to_sql(name='incidents', con=engine,schema='processed',if_exists='append',index=False,chunksize=1000, method='multi')
# -
# ```pwsh
# Install-Module -Name AzureRM.profile
# Install-Module -Name MSAL.PS
#
# https://docs.microsoft.com/en-us/sharepoint/dev/sp-add-ins/add-in-permissions-in-sharepoint
#
# https://celerik-admin.sharepoint.com/_layouts/15/appinv.aspx
#
# <AppPermissionRequests AllowAppOnlyPolicy="true">
# <AppPermissionRequest Scope="http://sharepoint/content/sitecollection/web" Right="Read" />
# <AppPermissionRequest Scope="http://sharepoint/content/sitecollection/web/list" Right="Read"/>
# </AppPermissionRequests>
#
# https://login.microsoftonline.com/celerik.onmicrosoft.com/.well-known/openid-configuration
#
# $tenantId = "65de2287-e076-4804-9fa6-ddbd24d2d0ff"
# $authority = "https://login.microsoftonline.com/$tenantId"
# $resource = "https://graph.microsoft.com"
# $clientId = "f768001f-0170-455a-a3da-cdd617efe34b"
# $clientSecret = '/KlFUYi9ZI0Jye0/ZY39w0uKARlZSIUHovkh6lYOjtY='
# $secClientSecret = (ConvertTo-SecureString '/KlFUYi9ZI0Jye0/ZY39w0uKARlZSIUHovkh6lYOjtY=' -AsPlainText -Force)
# $redirectUri = 'https://celerik.com'
#
# $token = Get-MsalToken -clientID $clientId -clientSecret $secClientSecret -tenantID $tenantId
# $token.AccessToken
#
# $clientCreds = New-Object Microsoft.IdentityModel.Clients.ActiveDirectory.ClientCredential($clientId, $clientSecret);
#
# $authUrl = 'https://login.microsoftonline.com/{0}/oauth2/token' -f $tenantId
#
# $authContext = New-Object Microsoft.IdentityModel.Clients.ActiveDirectory.AuthenticationContext -ArgumentList $authUrl,$false
#
#
# $authResult = $authContext.AcquireTokenAsync($resource, $clientCreds);
#
# $authResult.Wait()
#
#
# $authResult.Result.AccessToken
#
# # Server name, database name and the connection string that will be used to open connection
# $sqlServerUrl = "fcobo.database.windows.net"
# $database = "TESTDB"
# $connectionString = "Server=tcp:$sqlServerUrl,1433;Initial Catalog=$database;Persist Security Info=False;MultipleActiveResultSets=False;Encrypt=True;TrustServerCertificate=False;"
#
# # Create the connection object
# $connection = New-Object System.Data.SqlClient.SqlConnection($connectionString)
#
# # Set AAD generated token to SQL connection token
# $connection.AccessToken = $authResult.Result.AccessToken
# $connection.AccessToken
#
# # prints the connection information, it's still closed at this point, will open later on. The token is already attached.
# $connection
#
# # Query that will be sent when the connection is open. I had a 4,000 record table and I was able to truncate with this script
# $query = "TRUNCATE TABLE TEST1"
#
# # Opens connection to Azure SQL Database and executes a query
# $connection.Open()
# # After this, the token is no longer there, I believe this is because the authentication went through already, so it gets rid of it.
# $connection
# $command = New-Object -Type System.Data.SqlClient.SqlCommand($query, $connection)
# $command.ExecuteNonQuery()
# $connection.Close()
#
#
# Client Secret:
#
#
# /KlFUYi9ZI0Jye0/ZY39w0uKARlZSIUHovkh6lYOjtY=
#
#
# Title:
#
#
# test
#
# App Domain:
#
#
# www.celerik.com
# Example: "www.contoso.com"
#
# Redirect URI:
#
#
# https://www.celerik.com
#
#
#
#
# $url = "https://login.microsoftonline.com/celerik/oauth2/token";
# $body = @{
# resource='https://celerik.sharepoint.com/sites/idlink'
# client_id='f768001f-0170-455a-a3da-cdd617efe34b'
# grant_type=code
# username={userName}
# password={password}
# }
# $json = $body | ConvertTo-Json
#
# ```
# +
# Source URLs for the raw accident / infrastructure datasets.
urls = {'url_accidentalidad' : 'http://medata.gov.co/sites/default/files/medata_harvest_files/accidentalidad.csv',
        'url_accidentalidad_georreferenciada_2019' : 'https://opendata.arcgis.com/datasets/e132c88e875b429db4c1a3c80b7977da_20.csv',
        'url_accidentalidad_georreferenciada_2018' : 'https://opendata.arcgis.com/datasets/e4b4645cc47b462495305db0574982b5_16.csv',
        'url_accidentalidad_georreferenciada_2017' : 'https://opendata.arcgis.com/datasets/b7f7158734f64e7f849df6ac4a6b10f1_12.csv',
        'url_accidentalidad_georreferenciada_2016' : 'https://opendata.arcgis.com/datasets/a65c3aff0ef34973a2441b6cd0fbc24a_8.csv',
        'url_accidentalidad_georreferenciada_2015' : 'https://opendata.arcgis.com/datasets/025f3be66fbd48b888b779cf00928ae8_4.csv',
        'url_accidentalidad_georreferenciada_2014' : 'https://opendata.arcgis.com/datasets/505e89d2ade143a684d51b60236ba285_0.csv',
        'url_accidentalidad_motos':'https://www.datos.gov.co/api/views/b7ik-2upt/rows.csv?accessType=DOWNLOAD',
        'url_camaras_cctv' : 'https://opendata.arcgis.com/datasets/9721295ac3644bfbb076795b36912037_9.csv',
        'url_camaras_fotodeteccion' : 'https://opendata.arcgis.com/datasets/bfcd5e4e3e364957b1aeeff0ee022815_7.csv',
        'url_cruces_semafóricos' : 'https://opendata.arcgis.com/datasets/072ed4e620b849d6bdbb058c3cb77922_0.csv',
        'url_limite_comunas' : 'https://opendata.arcgis.com/datasets/283d1d14584641c9971edbd2f695e502_6.csv',
        'url_limite_barrios' : 'https://opendata.arcgis.com/datasets/1a6dbf15865b4357aa559699ea90c5b9_7.csv'}
# Download every dataset into the working directory as <name>.csv.
for key in urls:
    r = requests.get(urls[key], allow_redirects=True)
    # FIX: the file handle was previously opened and never closed; use a
    # context manager so the data is flushed and the handle released.
    with open(key.replace('url_','')+'.csv', 'wb') as out_f:
        out_f.write(r.content)
# -
# ### Concatenate all the accidentalidad_georreferenciada files
# +
# ag_df: yearly file
import os  # FIX: os is used below (os.listdir / os.path.join) but was never imported in this notebook
# Concatenate every downloaded accidentalidad_georreferenciada_<year> file,
# lower-casing column names and recording the source file of each row.
ag_df = pd.DataFrame()
path = "./"
for file in os.listdir(path):
    if 'accidentalidad_georreferenciada_' in file:
        df_tmp = pd.read_csv(os.path.join(path, file))
        df_tmp.columns = [c.lower() for c in df_tmp.columns]
        df_tmp['file'] = file
        ag_df = pd.concat([ag_df, df_tmp])
ag_df.to_csv("../raw/accidentalidad_georreferenciada.csv",index=False)
# -
# ### Check the columns
# # Delete 'radicados' duplicates from three sources:
# - accidentalidad.csv
# - accidentalidad_georreferenciada.csv
# - accidentalidad_motos.csv
#
# We found some duplicate values in the "radicado" field, so we proceeded to drop them.
#
# +
# ag_df: geo-referenced incidents (concatenated above); a_df: master file
# (no header row -- columns referenced by position, three renamed below);
# am_df: motorcycle incidents, tagged with moto=1 for the later merge.
ag_df = pd.read_csv("../raw/accidentalidad_georreferenciada.csv")
a_df = pd.read_csv('./accidentalidad.csv', skiprows=1,header=None,sep=';')
a_df.rename(columns={18:'fecha_hora',22: 'objectid', 23:'radicado'},inplace=True)
am_df = pd.read_csv('./accidentalidad_motos.csv')
am_df['moto']=1
# -
# Keep only the first record per radicado (case number), sorted for
# deterministic selection.
ag_df = ag_df.sort_values(by=["radicado", "objectid"])
ag_df.head(5)
ag_df[ag_df['radicado']=='62196']
ag_df.drop_duplicates(subset=["radicado"], keep="first", inplace=True)
ag_df[ag_df['radicado']=='62196']
ag_df['gravedad'].value_counts()
ag_df[ag_df['objectid']==507563]
# FIX: the line below referenced `df`, which is not defined anywhere before
# this point, raising NameError; it also duplicated (with .upper() instead of
# .lower()) the clase normalisation applied at the end of this notebook.
# df.clase = df.clase.apply(lambda x: unidecode(x).upper().strip().replace(' de ',' '))
from ast import literal_eval
# Inspect the keys of the dict stored as a string in column 4 of a_df.
[l.lower() for l in list(literal_eval(list(a_df.loc[a_df[0]=="1.0", 4][0:1])[0]).keys())]
a_df = a_df.sort_values(by=['radicado', 'objectid'])
a_df['radicado'].value_counts()
a_df.drop_duplicates(subset=['radicado'], keep="first", inplace=True)
am_df = am_df.sort_values(by=['NRO_RADICADO'])
am_df['NRO_RADICADO'].value_counts()
ag_df = ag_df[~ag_df.radicado.isna()]
a_df=a_df[~a_df.radicado.isna()]
# Normalise radicado to a clean string key: cast to str, strip whitespace,
# drop a trailing ".0" left over from float parsing, remove leading zeros.
dtype = dict(radicado=str)
ag_df.radicado = ag_df.radicado.astype(dtype)
a_df.radicado = a_df.radicado.astype(dtype)
ag_df.radicado = ag_df.radicado.str.strip()
# FIX: the pattern previously used an unescaped dot -- "(\d+)(.0)*" -- where
# `.` matches any character, so a trailing "<any-char>0" (e.g. "1234x0")
# would also be stripped; escape it so only a literal ".0" suffix is removed,
# and pass regex=True explicitly (relying on the old implicit default is
# deprecated in pandas).
ag_df.radicado = ag_df.radicado.str.replace(r"(\d+)(\.0)*", r"\1", regex=True) #.dtype #.sort_values(by="radicado", ascending=False)
ag_df.radicado = ag_df.radicado.str.replace(r"^(0+)(\d+)", r"\2", regex=True)
a_df.radicado = a_df.radicado.str.strip()
a_df.radicado = a_df.radicado.str.replace(r"(\d+)(\.0)*", r"\1", regex=True)
am_df.drop_duplicates(subset=['NRO_RADICADO'], keep="first", inplace=True)
# Merge accidentes geo with accidentes, and motos
ag_df = ag_df.merge(a_df[['fecha_hora','radicado']],on='radicado',how='left')
# +
am_df.rename(columns={'NRO_RADICADO':'radicado'},inplace=True)
# Left-join the motorcycle flag onto the geo-referenced incidents.
ag_df = ag_df.merge(am_df[['radicado','moto']],on='radicado',how='left')
# +
import numpy as np
ag_df['fecha'] = ag_df['fecha'].apply(lambda x : x.replace(' 00:00:00+00',''))
ag_df['fecha'] = pd.to_datetime(ag_df['fecha'],format='%Y/%m/%d')
# FIX: the original used chained indexing --
# ag_df[mask]['moto'].fillna(0, inplace=True) -- which fills a temporary copy
# and leaves ag_df unchanged; assign back through .loc instead.
mask = ag_df['fecha'].dt.year > 2014
ag_df.loc[mask, 'moto'] = ag_df.loc[mask, 'moto'].fillna(0)
# -
ag_df[ag_df['radicado'].isin(am_df['radicado'])].groupby(pd.Grouper(key='fecha',freq='Y')).count()
# ## How did you merge the two DataFrames (aggregated data from 2014-2019 and accidentalidad.csv)
# + tags=[]
# Report remaining NaNs per frame before the final checks.
print(a_df.isna().any())
print(ag_df.isna().any())
print(am_df.isna().any())
# -
ag_df[ag_df.radicado.isna()] # check the nulls
ag_df.clase.value_counts(dropna=False)
ag_df.cbml.value_counts(dropna=False)
ag_df.radicado.value_counts()[0:21]
# NOTE(review): radicado was cast to str above, so comparing to the float
# 1638618.0 matches nothing -- confirm the intended lookup.
ag_df[ag_df.radicado==1638618.0]
a_df.radicado.value_counts(dropna=False)[0:30]
# NOTE(review): the two lookups below reference `df`, which is never created
# in this notebook as exported -- they likely targeted a_df in an earlier
# revision; running them as-is raises NameError.
df[df.radicado=="1638618"]
df[df.radicado=="1638617"]
# We found duplicate values in the "radicado" field, so we drop them.
# NOTE(review): from here on the cells operate on `df` and `agg_df`, which are
# not created anywhere in this notebook as exported -- they look like the
# counterparts of a_df / ag_df from an earlier revision.  Confirm which
# dataframes these cells were meant to run against.
df = df.sort_values(by=["radicado", "id"])
df.drop_duplicates(subset=["radicado"], keep="first", inplace=True)
# To have date and time in separate columns, derive them from "fecha".
df["fecha_incidente"] = df.fecha.str[0:10]
df["hora_incidente"] = df.fecha.str[11:13]
#
# drop duplicates by "radicado", "objectid"
agg_df = agg_df.sort_values(by=["radicado", "objectid"])
agg_df.drop_duplicates(subset=["radicado"], keep="first", inplace=True)
agg_df = agg_df[(~agg_df.radicado.isna()) & (agg_df.radicado != "0")]
df.radicado
df[df.radicado.isna()] # check the number of nulls
agg_df = agg_df[~agg_df.radicado.isna()]
df=df[~df.radicado.isna()]
# Normalise radicado to a clean string key, as done for a_df / ag_df above.
dtype = dict(radicado=str)
agg_df.radicado = agg_df.radicado.astype(dtype)
df.radicado = df.radicado.astype(dtype)
agg_df.radicado = agg_df.radicado.str.strip()
# FIX: the pattern previously used an unescaped dot -- "(\d+)(.0)*" -- where
# `.` matches any character, so a trailing "<any-char>0" would also be
# stripped; escape it so only a literal ".0" suffix is removed, and pass
# regex=True explicitly (the implicit default is deprecated in pandas).
agg_df.radicado = agg_df.radicado.str.replace(r"(\d+)(\.0)*", r"\1", regex=True) #.dtype #.sort_values(by="radicado", ascending=False)
agg_df.radicado = agg_df.radicado.str.replace(r"^(0+)(\d+)", r"\2", regex=True)
df.radicado = df.radicado.str.strip()
df.radicado = df.radicado.str.replace(r"(\d+)(\.0)*", r"\1", regex=True)
df.fecha.max()
df.shape
df
# r = df[(df.cbml.isna()) & (df.x_long_y_lat == '[-75.7037762763, 6.22141524356]') ][['x_long_y_lat','barrio','comuna','direccion','direccion_enc','cbml']]
# r
# Normalise the accident class label: strip accents, lower-case, trim, and
# collapse ' de ' to a single space.
df.clase = df.clase.apply(lambda x: unidecode(x).lower().strip().replace(' de ',' '))
df.to_csv("../processed/preprocessed_accidents.csv",index=False)
|
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# language: python
# name: python3
# ---
import pandas as pd
# Load the two fake-news-detection dumps and stack them into one frame.
df = pd.read_csv("FND_1")
data = pd.read_csv("FND_2")
# FIX: DataFrame.append is deprecated (removed in pandas 2.0); pd.concat
# produces the same stacked frame.
df = pd.concat([df, data])
df.head(5)
# Drop bookkeeping columns that carry no signal for classification.
df = df.drop(columns=["created_utc","Unnamed: 0","score","url","created","d_"])
# +
# df.
# -
df = df.dropna()
df.head(20)
df.shape
# +
import numpy as np
# Posts whose body was moderated away appear as the literal "[removed]";
# mark those as missing and drop every row that still contains a NaN.
df = df.replace("[removed]", np.nan).dropna()
print(len(df))
# +
from sklearn.feature_extraction.text import TfidfVectorizer
from sklearn.naive_bayes import MultinomialNB
from sklearn.pipeline import make_pipeline
# TF-IDF features feeding a multinomial naive Bayes classifier.
model = make_pipeline(TfidfVectorizer(), MultinomialNB())
# +
from sklearn.model_selection import train_test_split
# Predict the subreddit from the post body (80/20 split, fixed seed).
X = df['selftext']
y = df['subreddit']
X_train, X_test, y_train, y_test = train_test_split(X, y, test_size=0.20, random_state=1000)
# -
X_train.shape, X_test.shape, y_train.shape, y_test.shape
model.fit(X_train, y_train)
labels = model.predict(X_test)
# +
from sklearn.metrics import confusion_matrix
import matplotlib.pyplot as plt
import seaborn as sns; sns.set()
# FIX: the tick labels were previously built from df['title'], which is
# unrelated to the predicted classes, and set() ordering is arbitrary between
# runs; confusion_matrix orders its rows/columns by the sorted class labels
# (the subreddits), so derive the labels the same way.
labels_names = sorted(set(df['subreddit']))
mat = confusion_matrix(y_test, labels)
sns.heatmap(mat.T, square=True, annot=True, fmt='d', cbar=False,
            xticklabels=labels_names, yticklabels=labels_names)
plt.xlabel('true label')
plt.ylabel('predicted label');
# -
from sklearn.metrics import classification_report
# Per-class precision/recall/F1 for the naive Bayes predictions.
y_true = y_test
y_pred = labels
target_names = labels_names
print(classification_report(y_true, y_pred, target_names=target_names))
# +
import nltk
from nltk.tokenize import word_tokenize
# Tokenise every 'Effects' entry and collect all tokens into one flat list.
# NOTE(review): no 'Effects' column is created anywhere in this notebook --
# confirm which dataframe this cell was meant to run against.
effect_types = [token
                for effect in df['Effects']
                for token in word_tokenize(effect)]
# -
# Deduplicate the tokens (order is arbitrary after set()).
effect_types = list(set(effect_types))
len(effect_types)
effect_types
# +
from sklearn.feature_extraction.text import CountVectorizer, TfidfVectorizer
from sklearn.metrics.pairwise import cosine_similarity
from sklearn.neighbors import NearestNeighbors
from sklearn.decomposition import PCA
import spacy
from spacy.tokenizer import Tokenizer
# Fit a TF-IDF document-term matrix on the training posts and index it with a
# ball-tree nearest-neighbour model.
tfidf = TfidfVectorizer(stop_words = 'english')
sparse = tfidf.fit_transform(X_train)
# send the matrix to a DataFrame
# NOTE(review): get_feature_names() is removed in sklearn >= 1.2; newer
# versions need get_feature_names_out().
tfidf_dtm = pd.DataFrame(sparse.todense(), columns = tfidf.get_feature_names())
# Instantiate nearest neighbors model
nn = NearestNeighbors(n_neighbors=5, algorithm='ball_tree')
nn.fit(tfidf_dtm)
# -
X_test[2268]
# +
# test
fake = pd.Series(X_test[2268])
# Transform
fake_sparse = tfidf.transform(fake)
# Densify
fake_tfidf = pd.DataFrame(fake_sparse.todense())
# The bottom array is the index of the recommendations
# (kneighbors returns (distances, indices)).
nn.kneighbors([fake_tfidf][0], n_neighbors=10)
# -
results = (nn.kneighbors([fake_tfidf][0], n_neighbors=10))
# results[0] holds distances, results[1] holds neighbour indices.
results[0][0][0]
import tqdm
from tqdm import tqdm
# +
# For every row, query the fitted nearest-neighbour model and record the five
# smallest distances (kneighbors returns (distances, indices)).
# NOTE(review): this reads a 'Description' column that is not created in this
# notebook -- confirm the intended dataframe.
classfications1 = []
classfications2 = []
classfications3 = []
classfications4 = []
classfications5 = []
for _, row in tqdm(df.iterrows()):
    doc = pd.Series(row['Description'])
    doc_dense = pd.DataFrame(tfidf.transform(doc).todense())
    dists = nn.kneighbors(doc_dense, n_neighbors=10)[0][0]
    classfications1.append(dists[0])
    classfications2.append(dists[1])
    classfications3.append(dists[2])
    classfications4.append(dists[3])
    classfications5.append(dists[4])
# -
# Collect the recorded distances into a frame, one column per neighbour rank.
df_class = pd.DataFrame()
df_class['classfications1'] = classfications1
df_class['classfications2'] = classfications2
df_class['classfications3'] = classfications3
df_class['classfications4'] = classfications4
df_class['classfications5'] = classfications5
df_class.head()
# +
multiple_class = []
# Flag rows whose nearest distance equals the 2nd or 3rd nearest one.
# NOTE(review): this compares floating-point distances with == -- confirm an
# exact tie is really what is intended (math.isclose may be safer).
df_class['multiple_class'] = [1 if row['classfications1'] == row['classfications2'] or row['classfications1'] == row['classfications3'] else 0 for index,row in df_class.iterrows()]
# -
df_class.head()
class_count = df_class['multiple_class'].value_counts()
class_count
# Export Pickle File
import pickle  # FIX: pickle is used here but was never imported in this notebook
filename = 'knn_01.pkl'
# FIX: use a context manager so the file handle is closed after writing
# (the original open(...) handle was never closed).
with open(filename, 'wb') as model_file:
    pickle.dump(nn, model_file)
# +
from sklearn.pipeline import Pipeline
from sklearn.ensemble import RandomForestClassifier
from sklearn.model_selection import GridSearchCV
from sklearn.feature_extraction.text import TfidfVectorizer
from sklearn.naive_bayes import MultinomialNB
from sklearn.pipeline import make_pipeline
# Alternative model: TF-IDF features into a random forest; the grid search
# over this pipeline is sketched (commented out) in the next cell.
vect = TfidfVectorizer(stop_words='english')
rfc = RandomForestClassifier()
pipe = Pipeline([
    #Vectorizer
    ('vect', vect),
    # Classifier
    ('clf', rfc)
])
# +
# parameters = {
# 'vect__max_df': ( 0.75, 1.0),
# 'vect__min_df': (.02, .05),
# 'vect__max_features': (500,1000),
# 'clf__n_estimators':(5, 10,),
# 'clf__max_depth':(15,20)
# }
# grid_search = GridSearchCV(pipe,parameters, cv=5, n_jobs=-1, verbose=1)
# grid_search.fit(df['selftext'], df['subreddit'])
# +
# grid_search.best_score_
# -
|
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# name: python3
# ---
# + id="dPhcz7TKdVe1"
import pandas as pd
import matplotlib.pyplot as plt
from sklearn.model_selection import train_test_split
from sklearn.preprocessing import StandardScaler
from sklearn.neighbors import KNeighborsClassifier
from sklearn.metrics import classification_report, confusion_matrix
from sklearn.model_selection import GridSearchCV
from sklearn.linear_model import LogisticRegression
from sklearn.metrics import accuracy_score
import numpy as np
# + colab={"base_uri": "https://localhost:8080/", "height": 204} id="DsJ5Bkcuk2xV" outputId="83c5dd48-b4eb-430a-8fe4-f0fc27fc6899"
# Column names assigned to the spreadsheet columns on read.
names = ["Registration Number", "Badge", "Superbadge", "Department", "Batch", "Examly status now"]
# NOTE(review): this read runs before drive.mount() below — it only succeeds if Drive
# was mounted earlier in the session; consider running the mount cell first.
df = pd.read_excel("/content/drive/MyDrive/student-list_12-Mar-2022 08_19-12-Mar-2022 08_19.xlsx", names=names)
df.head()
# + colab={"base_uri": "https://localhost:8080/"} id="_Vkz207TCkgn" outputId="c3be9f5b-6f3d-4ea4-c887-2fc66523714a"
# Mount Google Drive so the spreadsheet above is reachable under /content/drive.
from google.colab import drive
drive.mount('/content/drive')
# + colab={"base_uri": "https://localhost:8080/"} id="vjo0fE08DQ_6" outputId="c28baf41-78e3-4774-a943-c85bd67bee68"
df.shape  # (rows, columns) sanity check
# + colab={"base_uri": "https://localhost:8080/"} id="uzvyG2yAIFiz" outputId="7b93345f-bc5c-4ad1-c0c2-b9895ac448ba"
df.columns  # confirm the column names assigned on read
# + id="L_Bib4X4H_7e"
# Features: the badge-count columns; target: Department.
X = df.drop(columns=['Registration Number', 'Department', 'Batch', 'Examly status now'], axis=1)
Y = df['Department']
# + id="iJrGpDeXJkSB"
print(X)
print(Y)
# + id="aWrbVRnoE7iC"
# Regularisation strengths to grid-search for logistic regression.
params = [{'C': [0.0001,0.001,0.01,0.1,1,10,100,1000,10000]}]
# shuffle=False keeps the original row order for the 70/30 split.
X_train, X_test, Y_train, Y_test = train_test_split(X, Y, test_size = 0.3, shuffle = False)
# + colab={"base_uri": "https://localhost:8080/"} id="ADbqgK_ZKd1W" outputId="2b4250c6-d8e6-40e0-f2d0-5982a76ab41c"
print(X.shape, X_train.shape, X_test.shape)  # sanity-check the split sizes
# + id="EL2-X9UgKsEQ"
# Cross-validated (default 5-fold) grid search over C, scored on accuracy.
model = GridSearchCV(LogisticRegression(), params, scoring = 'accuracy')
model.fit(X_train, Y_train)
# + colab={"base_uri": "https://localhost:8080/"} id="z_opYFy8NXKT" outputId="9322dd3a-e7a3-4a50-8d2c-3befcf336c99"
print(model.best_estimator_)  # logistic regression refit with the best C found
# + colab={"base_uri": "https://localhost:8080/"} id="43c623FENkOx" outputId="d8ac7c34-eebb-4070-ae7b-9dfc586a4cd4"
X_train_prediction = model.predict(X_train)
# NOTE: accuracy_score's signature is (y_true, y_pred) — the arguments are swapped
# here, but accuracy is symmetric so the value is unaffected.
training_data_accuracy = accuracy_score(X_train_prediction, Y_train)
print('The accuracy of training data is:', training_data_accuracy)
# + [markdown] id="yjpoRWcTSKTS"
# NOTE: the following cell throws an error (left in place for debugging).
# + id="wCJr4NRxR9TM"
X_test_prediction = model.predict(X_test)
test_data_accuracy = accuracy_score(X_test_prediction, Y_test)
print('The accuracy of testing data is:', test_data_accuracy)
# + [markdown] id="lzyGjlEbRgbp"
# Yatharth's version
# + colab={"base_uri": "https://localhost:8080/"} id="Src0T-vlqM5T" outputId="3337f520-480b-459a-e7fd-86f4b98319a1"
print(type(df["Badge"][1]))  # inspect the dtype of a Badge value
# + colab={"base_uri": "https://localhost:8080/", "height": 265} id="glUCNi6ynLEX" outputId="27b999a5-7733-49f4-edce-a00a83d70e1c"
# One scatter point per student: badge count vs superbadge count.
for i in range(len(df)):
    plt.scatter(x = df["Badge"][i], y = df["Superbadge"][i])
plt.grid()
# + id="Jbmwi58xldZs"
# Assign values to the X and y variables:
# NOTE(review): X skips the first two rows (iloc[2:]) while y does not — the arrays
# differ in length by 2, so train_test_split below will raise; both should presumably
# use the same row slice.
X = df.iloc[2:, :-1].values
y = df.iloc[:, 4].values
# Split dataset into random train and test subsets:
X_train, X_test, y_train, y_test = train_test_split(X, y, test_size=0.20)
# Standardize features by removing mean and scaling to unit variance:
scaler = StandardScaler()
scaler.fit(X_train)
X_train = scaler.transform(X_train)
X_test = scaler.transform(X_test)
# Use the KNN classifier to fit data:
classifier = KNeighborsClassifier(n_neighbors=5)
classifier.fit(X_train, y_train)
# Predict y data with classifier:
y_predict = classifier.predict(X_test)
# Print results:
print(confusion_matrix(y_test, y_predict))
print(classification_report(y_test, y_predict))
# + colab={"base_uri": "https://localhost:8080/"} id="m8fQcqzYfMXE" outputId="04bd4767-d1f4-4208-e66d-070cb3162459"
url = "https://raw.githubusercontent.com/BollywoodData/Bollywood-Data/master/wikipedia-data/female_mentions_centrality.csv"
import requests
import io
download = requests.get(url).content
# Reading the downloaded content and turning it into a pandas dataframe
df = pd.read_csv(io.StringIO(download.decode('utf-8')))
# Sort by total centrality, highest first.
# (NOTE: several column names in this CSV carry a leading space, e.g. " TOTAL CENTRALITY".)
sorted_by_avg = df.sort_values(by=" TOTAL CENTRALITY", ascending=False)
# print (df.head())
# print(df.columns)
print(sorted_by_avg)
# + colab={"base_uri": "https://localhost:8080/"} id="QOzbglmnifka" outputId="643007f5-357b-48b1-8319-eb05525cb3ca"
print(df.columns)
# Keep only the numeric columns needed for clustering.
new_df = df.filter([' MENTIONS', ' TOTAL CENTRALITY', 'COUNT'])
new_df.dropna(inplace=True)
# NOTE(review): indices_to_keep is computed but never used, and .any(1) relies on the
# deprecated positional axis argument — prefer .any(axis=1).
indices_to_keep = ~new_df.isin([np.nan, np.inf, -np.inf]).any(1)
# + colab={"base_uri": "https://localhost:8080/"} id="Mzwek6GuiC2s" outputId="7ecb58f3-fc8c-4fa2-ed8a-4b679c581ecb"
from sklearn.cluster import KMeans
# Cluster the films into 10 groups on the three numeric features.
km = KMeans(n_clusters=10)
km.fit(new_df)
clusters = km.cluster_centers_
print(f"The cluster centres are {clusters}")
|
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# language: python
# name: python3
# ---
import numpy as np
import matplotlib.pyplot as plt
import pandas as pd
from sklearn.preprocessing import Binarizer, LabelEncoder, OneHotEncoder, MinMaxScaler
from sklearn.cluster import KMeans
import seaborn as sns
from sklearn.mixture import GaussianMixture as GMM
# ## Gaussian Mixture Modeling
# Overcomes the limitations of k-means clustering by making soft assignments of the data points. A Gaussian mixture will almost always fit better than k-means: the clustering will mimic the data cloud better and with a smaller k. k-means is useful primarily because it’s very fast, so might be more easily fit to very large data sets with largish dimensions.
# Wine measurements with a known 'class' column used only for comparison plots.
data = pd.read_csv('Wine2.csv')
# +
## scale the data
# Min-max scale every feature column; leave the 'class' labels untouched.
mms = MinMaxScaler()
data.loc[:, data.columns != 'class'] = mms.fit_transform(data.loc[:, data.columns != 'class'])
# -
# Fit k-means with one cluster per known class.
# NOTE(review): the fitted kmeans object is never used afterwards — only the GMM below is plotted.
kmeans = KMeans(n_clusters= 3)
kmeans.fit(data.drop('class',axis=1))
# Create a colormap
# Plot the ground-truth classes for reference.
plt.scatter(data['Alcohol'], data['Malic_acid'], c= data['class'], s=40)
plt.title('Actual Classification')
plt.show()
# Fit a 3-component Gaussian mixture and colour the points by its predicted labels.
gmm = GMM(n_components=3).fit(data.drop('class',axis=1))
labels = gmm.predict(data.drop('class',axis=1))
plt.scatter(data['Alcohol'], data['Malic_acid'], c=labels, s=40)
plt.title('GMM Classification')
plt.show()
# Cross-tabulate true classes vs GMM clusters. Cluster numbering is arbitrary, so a
# good fit shows one dominant cell per row/column rather than a diagonal.
pd.crosstab(data['class'], labels, rownames=['Actual Result'], colnames=['Predicted Result'])
|
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# language: python
# name: python3
# ---
# # PYTHON com API de LOGIN e envio de SMS
#
# ### Para envio de SMS precisaremos de LOGIN para usar a api
#
# Requisitos:
#
# 1. ID da conta
# 2. Token
# 3. Número de Envio
#
# - API https://www.twilio.com/docs/libraries/python
# +
from twilio.rest import Client
# Account SID from the Twilio console (twilio.com/console).
# NOTE(review): the SID/token here are redacted placeholders — never hard-code real
# credentials; load them from environment variables instead.
account_sid = '***'
# Auth token for the Twilio account.
auth_token = '****'
client = Client(account_sid, auth_token)
# Send one SMS: `from_` must be a Twilio-owned number, `to` the destination number.
message = client.messages.create(to='+***', from_='+12565675179', body='Testando envio de SMS via twilio.')
print(message.sid)
|
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# language: python
# name: python3
# ---
# # Addition of Two Sample Objects
# +
# %reset -f
# Operator Overloading that return int as sum of args
class Sample:
    """Pair of numbers whose ``+`` yields a plain number, not a new object.

    Adding a Sample to any operand exposing a ``num`` attribute returns
    ``real + img + num``.
    """

    def __init__(self, a, b):
        # Keep both components on the instance via tuple unpacking.
        self.real, self.img = a, b

    def __add__(self, e):
        # Combine both of our components with the other operand's single value.
        return self.real + self.img + e.num
class Sample2:
    """Thin wrapper exposing a single value through the ``num`` attribute."""

    def __init__(self, a):
        # Stored untouched; read by operators expecting a .num right-hand operand.
        self.num = a
# Demo: Sample.__add__ accepts a Sample2 and returns a plain number (20 + 120 + 60).
s1 = Sample(20, 120)
s2 = Sample2(60)
print(s1+s2)
type(s1+s2)  # -> int: addition here produces a number, not a new object
# +
# %reset -f
# Operator Overloading that integer as sum of args
class Sample:
    """Complex-number-like pair: ``real`` + ``img``·j.

    ``+`` returns a new Sample; ``str()`` renders it as ``Number : a + bj``.
    """

    def __init__(self, a, b):
        self.real = a  # real component
        self.img = b   # imaginary component

    def __add__(self, e):
        # Component-wise addition, as for complex numbers.
        # Fixed: the real part previously added e.img instead of e.real (the bug was
        # masked in the original demo because its operand had real == img == 60).
        return Sample(self.real + e.real, self.img + e.img)

    def __str__(self):
        return f"Number : {self.real} + {self.img}j"
# Demo: component-wise addition of two Samples.
s1 = Sample(20, 120)
s2 = Sample(60, 60)
print(s1+s2)
s3 = s1 + s2
type(s3)  # -> Sample: __add__ returns a new instance here
# -
# # Operator Overloading
# +
# Arithmatic between two objects
class Arithmatic:
    '''Wraps a single number and overloads binary and unary operators against
    other Arithmatic-like operands (anything exposing a ``real`` attribute).'''

    def __init__(self, a):
        self.real = a

    # --- binary operators: combine with the other operand's .real ---
    def __add__(self, e):
        return self.real + e.real

    def __sub__(self, e):
        return self.real - e.real

    def __mul__(self, e):
        return self.real * e.real

    def __truediv__(self, e):
        return self.real / e.real

    def __floordiv__(self, e):
        # Fixed: previously performed true division (/) instead of floor division.
        return self.real // e.real

    def __mod__(self, e):
        # Fixed: previously performed true division (/) instead of modulo.
        return self.real % e.real

    def __xor__(self, integer):
        # ``^`` is deliberately repurposed as exponentiation in this demo.
        # Plain ints also work, since int exposes a .real attribute.
        return self.real**integer.real

    # --- unary operators (demo semantics, not true numeric negation/inversion) ---
    def __neg__(self):
        return self.real-1

    def __pos__(self):
        return self.real+1

    def __invert__(self):
        return self.real*-1
# Exercise each overloaded operator on two wrapped numbers.
E1 = Arithmatic(140)
E2 = Arithmatic(50)
E3 = Arithmatic(2)  # NOTE(review): unused in the demo below
print("\nBinary Operator := ")
print(E1 + E2)
print(E1 - E2)
print(E1 * E2)
print(E1 / E2)
print(E1 // E2)
print(E1 % E2)
print(E1 ^ 2)  # ^ is overloaded as power; a plain int works because int has .real
print("\nUnary Operator := ")
print(-E1)  # real - 1
print(+E1)  # real + 1
print(~E1)  # real * -1
# +
# Arithmatic objects and real numbers
class Arithmatic:
    '''Wraps a single number and overloads operators against plain numbers
    (the right-hand operand ``e`` is used directly, not ``e.real``).'''

    def __init__(self, a):
        self.real = a

    def __add__(self, e):
        return self.real + e

    def __sub__(self, e):
        return self.real - e

    def __mul__(self, e):
        return self.real * e

    def __truediv__(self, e):
        return self.real / e

    def __floordiv__(self, e):
        # Fixed: previously performed true division (/) instead of floor division.
        return self.real // e

    def __mod__(self, e):
        # Fixed: previously performed true division (/) instead of modulo.
        return self.real % e

    def __xor__(self, e):
        # ``^`` repurposed as exponentiation for this demo.
        return self.real**e

    # unary operators (demo semantics)
    def __neg__(self):
        return self.real-1

    def __pos__(self):
        return self.real+1

    def __invert__(self):
        return self.real*-1
# Exercise each operator against plain numbers.
E1 = Arithmatic(140)
print("\nBinary Operator := ")
print(E1 + 5)
print(E1 - 5)
print(E1 * 5)
print(E1 / 5)
print(E1 // 5)
print(E1 % 5)
print(E1 ^ 2)  # ^ is overloaded as power
print("\nUnary Operator := ")
print(-E1)  # real - 1
print(+E1)  # real + 1
print(~E1)  # real * -1
# -
# # Overloading extended assignment operator (+=) in Python
# +
class Arithmatic:
    """Demonstrates the augmented-assignment hooks (+=, -=, *=, /=, //=, %=, ^=).

    Each hook updates ``self.real`` in place and then returns a fresh
    Arithmatic wrapping the updated value.
    """

    def __init__(self, real):
        self.real = real

    def __str__(self):
        return f"Number: ({self.real})"

    def _snapshot(self):
        # Shared tail of every augmented operator: wrap the (already mutated)
        # current value in a brand-new instance.
        return Arithmatic(self.real)

    def __iadd__(self, e):
        self.real += e.real
        return self._snapshot()

    def __isub__(self, e):
        self.real -= e.real
        return self._snapshot()

    def __imul__(self, e):
        self.real *= e.real
        return self._snapshot()

    def __itruediv__(self, e):
        self.real /= e.real
        return self._snapshot()

    def __ifloordiv__(self, e):
        self.real //= e.real
        return self._snapshot()

    def __imod__(self, e):
        self.real %= e.real
        return self._snapshot()

    def __ixor__(self, e):
        # ^= is repurposed as in-place exponentiation in this demo.
        self.real **= e.real
        return self._snapshot()
# Exercise each augmented operator on fresh (90, 50) operand pairs.
E1, E2 = Arithmatic(90), Arithmatic(50)
E1 += E2
print(E1)  # Number: (140)
E1, E2 = Arithmatic(90), Arithmatic(50)
E1 -= E2
print(E1)  # Number: (40)
E1, E2 = Arithmatic(90), Arithmatic(50)
E1 *= E2
print(E1)  # Number: (4500)
E1, E2 = Arithmatic(90), Arithmatic(50)
E1 /= E2
print(E1)  # Number: (1.8)
E1, E2 = Arithmatic(90), Arithmatic(50)
E1 //= E2
print(E1)  # Number: (1)
E1, E2 = Arithmatic(90), Arithmatic(50)
E1 %= E2
print(E1)  # Number: (40)
E1, E2 = Arithmatic(90), Arithmatic(50)
E1 ^= E2
print(E1)  # Number: (90**50 — ^= is overloaded as power here)
# -
|
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# language: python
# name: python3
# ---
# + [markdown] id="NGV-JwlmtXKG"
# ## **Regressão Logística no conjunto de dados de dígitos**
# + [markdown] id="8dCGyl12tiQV"
# **Carregando os Dados**
# + id="sB3qEGUzsMUJ"
# Load the bundled 8x8 handwritten-digits dataset.
from sklearn.datasets import load_digits
digits = load_digits()
# + colab={"base_uri": "https://localhost:8080/"} executionInfo={"elapsed": 421, "status": "ok", "timestamp": 1631210693443, "user": {"displayName": "Leidiana Iza Andrade Freitas", "photoUrl": "", "userId": "01064954486875378846"}, "user_tz": 180} id="Zf3o-fn-t4hF" outputId="47edc57e-acf5-4e89-cdda-773ab0ada9b4"
# Shows there are 1797 images (8x8 images, i.e. a dimensionality of 64).
print("Image Data shape", digits.data.shape)
# Shows there are 1797 labels (integers from 0 to 9).
print("Label Data Shape", digits.target.shape)
# + [markdown] id="cNAYx9javAhg"
# **Mostrando as imagens e rótulos (conjunto de dados e dígitos)**
# + colab={"base_uri": "https://localhost:8080/", "height": 163} executionInfo={"elapsed": 1690, "status": "ok", "timestamp": 1631211172809, "user": {"displayName": "Leidiana Iza Andrade Freitas", "photoUrl": "", "userId": "01064954486875378846"}, "user_tz": 180} id="HMXdeg4SuUtf" outputId="bee5034d-7579-491e-ed78-829f1830a4b0"
# This section just shows what the images and labels look like; it helps
# visualise the data we are working with.
import numpy as np
import matplotlib.pyplot as plt
plt.figure (figsize = (20,4))
for index, (image, label) in enumerate (zip(digits.data[0:5], digits.target[0:5])):
    plt.subplot(1,5, index + 1)
    # Each sample is a flat 64-vector; reshape to 8x8 for display.
    plt.imshow(np.reshape(image, (8,8)), cmap=plt.cm.gray)
    plt.title('Training: %i\n' % label, fontsize = 20)
# + [markdown] id="uIwSp0CLx2cf"
# **Dividindo os dados em conjuntos de treino e teste(cojuntos de dígitos)**
# + [markdown] id="0cXn4C_pzpMm"
# *A criação de conjuntos de treino e teste possibilitam que o treinamento do modelo de classificação generalize bem quando trabalhar com outros dados*
# + id="OTGeEQYAwScq"
from sklearn.model_selection import train_test_split
# Hold out 25% of the digits for testing; fixed seed for reproducibility.
x_train, x_test, y_train, y_test = train_test_split(digits.data, digits.target, test_size=0.25, random_state=0)
# + [markdown] id="yGEuEeR1z7-9"
# **Scikit-learn 4-step modelling pattern (digits dataset)**
# + id="W7J2_2KgyW19"
# Step 1 - Import the model class we are going to use
from sklearn.linear_model import LogisticRegression
# + id="ELKLdjGf0ZL-"
# Step 2 - Instantiate the model
# All unspecified parameters are left at their defaults
logisticRegr = LogisticRegression()
# + colab={"base_uri": "https://localhost:8080/"} executionInfo={"elapsed": 653, "status": "ok", "timestamp": 1631212512519, "user": {"displayName": "Leidiana Iza Andrade Freitas", "photoUrl": "", "userId": "01064954486875378846"}, "user_tz": 180} id="3wcVprZg1V63" outputId="9cbc2123-6b5a-42d9-923e-2fcdceebaff8"
# Step 3 - Train the model on the data, storing what it learns.
# The model learns the relationship between 'x_train' and the 'y_train' labels.
logisticRegr.fit(x_train, y_train)
# + colab={"base_uri": "https://localhost:8080/"} executionInfo={"elapsed": 426, "status": "ok", "timestamp": 1631212950168, "user": {"displayName": "Leidiana Iza Andrade Freitas", "photoUrl": "", "userId": "01064954486875378846"}, "user_tz": 180} id="zlCVw_IN16Bb" outputId="c0d3a5bf-f718-494d-999b-02f405827661"
# Step 4 - Predict labels for new data (new images) using what the model
# learned during training. Returns a numpy array.
# Predict for a single observation (image).
logisticRegr.predict(x_test [0].reshape(1, -1))
# + colab={"base_uri": "https://localhost:8080/"} executionInfo={"elapsed": 418, "status": "ok", "timestamp": 1631213029696, "user": {"displayName": "Leidiana Iza Andrade Freitas", "photoUrl": "", "userId": "01064954486875378846"}, "user_tz": 180} id="DLXN2kta3Vhd" outputId="4f716b98-57c9-4a73-ab2f-f1cf5da69151"
# Predict multiple observations (images) at once.
logisticRegr.predict(x_test [0:10])
# + id="mx15JzX034WE"
# Make predictions on the entire test set.
predictions = logisticRegr.predict(x_test)
# + [markdown] id="fFhSDmFh4Z-J"
# **Medindo o desempenho do modelo (conjunto de dados de dígitos)**
# + [markdown] id="o5E4lQ3m5HBz"
# *Embora existam outras maneiras de medir o desempenho do modelo (precisão, recall, pontuação F1, curva ROC , etc), vamos manter isso simples e usar a precisão como nossa métrica.
# Para fazer isso, veremos o desempenho do modelo nos novos dados (conjunto de teste)
# a precisão é definida como:
# (fração de previsões corretas): previsões corretas / número total de pontos de dados*
# + colab={"base_uri": "https://localhost:8080/"} executionInfo={"elapsed": 418, "status": "ok", "timestamp": 1631213553653, "user": {"displayName": "Leidiana Iza Andrade Freitas", "photoUrl": "", "userId": "01064954486875378846"}, "user_tz": 180} id="IOlE8diy4Vrm" outputId="cb3eba9c-3f43-4825-8007-82375aa5cec3"
# Use the score method to obtain the model's accuracy
# (fraction of correct predictions on the held-out test set).
score = logisticRegr.score(x_test, y_test)
print (score)
# + [markdown] id="25275Kwa6AmC"
# **Matriz de Confusão (conjuto de dados de dígitos)**
# + [markdown] id="wBws2BvH6u7_"
# *Uma matriz de confusão é uma tabela frequentemente usada para descrever o desempenho de um modelo de classificação (ou “classificador”) em um conjunto de dados de teste para os quais os verdadeiros valores são conhecidos. Nesta seção,são mostrados apenas dois pacotes python (Seaborn e Matplotlib) para tornar as matrizes de confusão mais compreensíveis e visualmente atraentes.*
# + id="AgtSErVd6mkS"
import matplotlib.pyplot as plt
import seaborn as sns
from sklearn import metrics
# + [markdown] id="dBYYiDXs7Wiq"
# *A matriz de confusão abaixo não é visualmente super informativa ou visualmente atraente.*
# + colab={"base_uri": "https://localhost:8080/"} executionInfo={"elapsed": 1245, "status": "ok", "timestamp": 1631213991984, "user": {"displayName": "Leidiana Iza Andrade Freitas", "photoUrl": "", "userId": "01064954486875378846"}, "user_tz": 180} id="MCPwZ9RF6Pt5" outputId="eaa52b8f-afa5-4f7c-c07e-55be09fd5575"
# Confusion matrix: rows = true digit, columns = predicted digit.
cm = metrics.confusion_matrix(y_test, predictions)
print(cm)
# + [markdown] id="OXNBn9zu7vz6"
# **Método 1 (Seaborn)**
# + [markdown] id="nknBtnsI79En"
# *Esse método produz uma matriz de confusão mais compreensível e visualmente legível usando o seaborn .*
# + colab={"base_uri": "https://localhost:8080/", "height": 543} executionInfo={"elapsed": 1158, "status": "ok", "timestamp": 1631215401824, "user": {"displayName": "Leidiana Iza Andrade Freitas", "photoUrl": "", "userId": "01064954486875378846"}, "user_tz": 180} id="leTWi6-27jE9" outputId="406e79ae-bb1a-43c6-da91-2b2225298fdb"
# Render the confusion matrix as an annotated seaborn heatmap.
plt.figure(figsize=(9,9))
sns.heatmap(cm, annot=True, fmt=".3f", linewidths=.5, square = True, cmap = 'Blues_r');
plt.ylabel('Rótulo atual');
plt.xlabel('Rótulo previsto');
all_sample_title = 'Accuracy Score: {0}'. format(score)
plt.title(all_sample_title, size = 15)
# + [markdown] id="MWucoPcu90Ss"
# **Método 2 (Matplotlib)**
# + [markdown] id="YHDZxpiX-Dxw"
# *O método 2 é para mostrar que pode ser feito o passo anterior como Matplotlib, no entanto a quantidade de código é maior*
# + colab={"base_uri": "https://localhost:8080/", "height": 657} executionInfo={"elapsed": 1592, "status": "ok", "timestamp": 1631230168917, "user": {"displayName": "Leidiana Iza Andrade Freitas", "photoUrl": "", "userId": "01064954486875378846"}, "user_tz": 180} id="Z0gHbSy38-rk" outputId="2f50fc75-ea65-439b-81c5-42e8b3841b8d"
# Same confusion matrix rendered with plain Matplotlib (more code than seaborn).
plt.figure(figsize=(9,9))
plt.imshow(cm, interpolation='nearest', cmap='Pastel1')
plt.title('Matriz de Confusão', size=15)
plt.colorbar()
tick_marks = np.arange(10)
plt.xticks(tick_marks, ["0", "1", "2", "3", "4", "5", "6", "7", "8", "9"], rotation=45, size=10)
plt.yticks(tick_marks, ["0", "1", "2", "3", "4", "5", "6", "7", "8", "9"], rotation=45, size=10)
plt.tight_layout()
plt.ylabel('Rótulo atual', size=15)
plt.xlabel('Rótulo previsto', size = 15)
width, height = cm.shape
# Write each cell's count at its position in the image (note xy=(col, row)).
for x in range (width):
    for y in range (height):
        plt.annotate(str(cm[x][y]), xy=(y, x),
            horizontalalignment='center',
            verticalalignment='center')
# + [markdown] id="VbXf4JhtGDVp"
# ## **Regressão Logística (conjunto de dados MNIST_784)**
# + [markdown] id="cZ33gt78J56W"
# *O dataset MNIST não vem com o scikit-lean, portanto é necessário importar do fetch_openml*
# + id="3ZJvZDZp-nhB"
# Load the dataset. MNIST is not bundled with scikit-learn, so fetch it from OpenML.
from sklearn.datasets import fetch_openml
mnist = fetch_openml('mnist_784')
# + colab={"base_uri": "https://localhost:8080/"} executionInfo={"elapsed": 440, "status": "ok", "timestamp": 1631218158804, "user": {"displayName": "Leidiana Iza Andrade Freitas", "photoUrl": "", "userId": "01064954486875378846"}, "user_tz": 180} id="1rs4qWrWHAk6" outputId="a5ea3bcf-4a31-4c38-c898-7e210e7d2b44"
# Display the image array shape.
# There are 70,000 images (28x28 images, i.e. a dimensionality of 784).
print(mnist.data.shape)
# + colab={"base_uri": "https://localhost:8080/"} executionInfo={"elapsed": 642, "status": "ok", "timestamp": 1631218120283, "user": {"displayName": "Leidiana Iza Andrade Freitas", "photoUrl": "", "userId": "01064954486875378846"}, "user_tz": 180} id="5hazU-6KLCUq" outputId="b0c274a9-0651-4b58-d1f6-5aa7b2a0cfda"
# Display the labels shape.
print(mnist.target.shape)
# + [markdown] id="nyxoUE-TLgyj"
# ## **Dividindo o dataset (MNIST_784) em conjunto de treino e teste**
# + id="9Tqe9o26LTG_"
# test_size=1/7.0 yields a training set of 60,000 images and a test set of 10,000.
from sklearn.model_selection import train_test_split
train_img, test_img, train_lbl, test_lbl = train_test_split(
    mnist.data, mnist.target, test_size = 1/7.0, random_state = 0)
# + [markdown] id="uAz-dns-M9uS"
# **Exibindo imagens e rótulos (MNIST_874)**
# + colab={"base_uri": "https://localhost:8080/", "height": 162} executionInfo={"elapsed": 1107, "status": "ok", "timestamp": 1631219628570, "user": {"displayName": "Leidiana Iza Andrade Freitas", "photoUrl": "", "userId": "01064954486875378846"}, "user_tz": 180} id="9VPtUiARMCIt" outputId="437455cb-a39a-4f33-dfdc-2fac09d89b84"
import numpy as np
import matplotlib.pyplot as plt
# Show the first five training images with their labels.
plt.figure(figsize=(20,4))
for index, (image, label) in enumerate (zip(train_img[0:5], train_lbl[0:5])):
    plt.subplot(1,5, index + 1)
    # Each MNIST sample is a flat 784-vector; reshape to 28x28 for display.
    plt.imshow(np.reshape(image, (28,28)), cmap=plt.cm.gray)
    plt.title('Treinamento: %s\n' % label, fontsize = 20)
# + [markdown] id="mRSBlUWzRisp"
# **Padrão de modelagem de 4 etapas Scikit-learn (MNIST_784)**
# + id="aYjfGroXOmFU"
# Step 1 - Import the model we want to use.
# In sklearn, all machine-learning models are implemented as Python classes.
from sklearn.linear_model import LogisticRegression
# + id="7qz3en4pSaUa"
# Step 2 - Instantiate the model (explicit lbfgs solver).
logisticRegr = LogisticRegression (solver = 'lbfgs')
# + colab={"base_uri": "https://localhost:8080/"} executionInfo={"elapsed": 47123, "status": "ok", "timestamp": 1631220714932, "user": {"displayName": "Leidiana Iza Andrade Freitas", "photoUrl": "", "userId": "01064954486875378846"}, "user_tz": 180} id="7r46WMyRUgci" outputId="ad5dcba5-9940-461c-85f1-bc56a95f4919"
# Step 3 - Train the model on the data, storing what it learns.
# The model learns the relationship between x (digit images) and y (labels).
logisticRegr.fit(train_img, train_lbl)
# + colab={"base_uri": "https://localhost:8080/"} executionInfo={"elapsed": 423, "status": "ok", "timestamp": 1631221339842, "user": {"displayName": "Leidiana Iza Andrade Freitas", "photoUrl": "", "userId": "01064954486875378846"}, "user_tz": 180} id="93lcCO-GVBOF" outputId="63ac0f7b-591f-4e13-899e-dddaec5bb0e6"
# Step 4 - Predict labels for new data (new images) using what the model
# learned during training. Returns a numpy array.
# Prediction for a single observation (one image).
logisticRegr.predict(test_img[0].reshape(1,-1))
# + colab={"base_uri": "https://localhost:8080/"} executionInfo={"elapsed": 15, "status": "ok", "timestamp": 1631221440979, "user": {"displayName": "Leidiana Iza Andrade Freitas", "photoUrl": "", "userId": "01064954486875378846"}, "user_tz": 180} id="iB32iiWqXlMM" outputId="0e2c4854-f6a9-42a2-c8b0-8951828db244"
# Prediction for multiple observations (several images) at once.
logisticRegr.predict(test_img[0:10])
# + id="W2PG1Aj-X3-X"
#
|
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# language: python
# name: python3
# ---
import checklist
from checklist.editor import Editor
import random
editor = Editor()
# Checklist ships with a set of built-in lexicons (first names, cities, ...).
print (editor.lexicons.keys())
# +
# As per requirement we can add more lexicons to checklist as shown below
editor.lexicons['profession'] = ['writer', 'author', 'engineer', 'teacher', 'physician', 'accountant',
    'architect', 'lawyer', 'electrician', 'librarian', 'cook', 'secretary',
    'dentist', 'dietitian', 'mechanic', 'chef', 'actor', 'plumber', 'firefighter',
    'aviator', 'designer', 'police officer', 'journalist', 'hairdresser', 'artist',
    'economist', 'tailor', 'butler', 'farmer', 'judge', 'lecturer', 'professor',
    'model', 'nurse', 'photographer', 'scientist']
# The 'first_name' lexicon contains 200 names. For illustration let's just use 20
editor.lexicons['name'] = editor.lexicons['first_name'][:20]
# +
# example of ordered resolution (boolean) template with label entailment
out = editor.template('P: {name1} and {name2} are {a:profession1} and {a:profession2} respectively H: {name1} is {a:profession1}')
# editor.template creates a cross product of all choices for placeholders. Let's sample 10 examples from this
# NOTE(review): random is never seeded, so the sampled examples differ between runs.
random.shuffle(out.data)
examples = out.data[:10]
for i in range(len(examples)):
    print ('entailment ' + examples[i])
# similarly the above template can be modified to create contradiction
# (the hypothesis now asserts name1 holds name2's profession).
out = editor.template('P: {name1} and {name2} are {a:profession1} and {a:profession2} respectively H: {name1} is {a:profession2}')
random.shuffle(out.data)
examples = out.data[:10]
for i in range(len(examples)):
    print ('contradiction ' + examples[i])
# +
# It is not necessary to use the editor.template construct. One can simply create templates shown below
data = []
for i in range(10):
    name1, name2 = random.sample(editor.lexicons['name'], 2)
    # Distinct coin counts; random.sample guarantees n1 != n2, so the else-branch
    # below always means n1 > n2.
    n1, n2 = random.sample(range(1,21), 2)
    if n1 < n2:
        data.append('P: {:s} has {:d} coins. {:s} has {:d} coins. H: {:s} has more coins than {:s}'.format(name1, n1, name2, n2, name2, name1))
    else:
        data.append('P: {:s} has {:d} coins. {:s} has {:d} coins. H: {:s} has more coins than {:s}'.format(name1, n1, name2, n2, name1, name2))
for i in range(len(data)):
    print ('entailment ' + data[i])
# +
# (adjective, comparative, superlative) triples used by the templates below.
editor.lexicons['tuple'] = [
    ('smart','smarter','smartest'),
    ('pretty','prettier','prettiest'),
    ('tall','taller','tallest'),
    ('generous', 'more generous', 'most generous'),
]
# Transitivity: A > B and B > C entails A > C.
data = []
for i in range(10):
    name1, name2, name3 = random.sample(editor.lexicons['name'], 3)
    # NOTE(review): adj and sup_adj are unpacked but unused in this loop.
    (adj, com_adj, sup_adj) = random.choice(editor.lexicons['tuple'])
    data.append('P: {:s} is {:s} than {:s}. {:s} is {:s} than {:s}. H: {:s} is {:s} than {:s}'.format(name1, com_adj, name2, name2, com_adj, name3, name1, com_adj, name3))
for i in range(len(data)):
    print ('entailment ' + data[i])
# A > B and A > C says nothing about B vs C: neutral.
data = []
for i in range(10):
    name1, name2, name3 = random.sample(editor.lexicons['name'], 3)
    (adj, com_adj, sup_adj) = random.choice(editor.lexicons['tuple'])
    data.append('P: {:s} is {:s} than {:s}. {:s} is {:s} than {:s}. H: {:s} is {:s} than {:s}'.format(name1, com_adj, name2, name1, com_adj, name3, name2, com_adj, name3))
for i in range(len(data)):
    print ('neutral ' + data[i])
# Being the superlative among three entails every pairwise comparative.
data = []
for i in range(10):
    name1, name2, name3 = random.sample(editor.lexicons['name'], 3)
    (adj, com_adj, sup_adj) = random.choice(editor.lexicons['tuple'])
    data.append('P: Among {:s}, {:s} and {:s}, {:s} is the {:s}. H: {:s} is {:s} than {:s}'.format(name1, name2, name3, name1, sup_adj, name1, com_adj, name3))
for i in range(len(data)):
    print ('entailment ' + data[i])
# -
|
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python [conda env:lipoenv]
# language: python
# name: conda-env-lipoenv-py
# ---
# # Proposal
# ## 1. Executive Summary
# Subclinical lipohypertrophy is traditionally evaluated based on visual inspection or palpation. However, recent work has shown that lipohypertrophy may be detected by ultrasound imaging. The criteria used to classify lipohypertrophy using ultrasound imaging is only familiar to and implemented by a small group of physicians. In an effort to improve the accessibility and efficiency of this method of detection, our capstone partner has asked us to explore the possibility of using supervised machine learning to detect lipohypertrophy on ultrasound images.
#
# In this project, we will be creating a convolutional neural network to detect the presence of lipohypertrophy in ultrasound images. We will be testing a variety of image preprocessing and transfer learning methodologies to select our finalized machine learning pipeline. Our proposed data product is a Python application that can intake new ultrasound images and make accurate predictions on the presence or absence of lipohypertrophy.
# <br>
# ## 2. Introduction
# Lipohypertrophy is a common complication for diabetic patients who inject insulin {cite}`kapeluto2018ultrasound`. It is defined as the growth of fat cells and fibrous tissue with lowered vascularity in the skin following repeated trauma of insulin injection in the same area. Our focus is on subclinical hypertrophy which forms in the subcutaneous layer (the deepest layer of the skin) {cite}`boundless`. It is critical that insulin is not injected into areas of lipohypertrophy as it reduces the effectiveness of the insulin such that patients are unable to manage their blood sugar levels and may require more insulin to achieve the same therapeutic benefits {cite}`kapeluto2018ultrasound`. Fortunately, research by Kapeluto et al. (2018) {cite}`kapeluto2018ultrasound` has found ultrasound imaging techniques are more accurate in finding these masses than a physical examination of the body by a healthcare professional. But, currently, the criteria to classify lipohypertrophy using ultrasound imaging is only implemented by a small group of physicians {cite}`madden_2021`. To expand the usability of this criteria to a larger set of healthcare professionals, the capstone partner is interested in seeing if we could leverage supervised machine learning techniques to accurately classify the presence of lipohypertrophy given an ultrasound image.
# ### 2.1 Anticipated Data Product
# Our objective as the data science team is to build a binary convolutional neural network (CNN) classifier that assigns an ultrasound image to one of two classes: lipohypertrophy present or absent. A CNN is a common technique for classifying image data into binary categories. A secondary objective of this work is to use object detection techniques to locate the lipohypertrophy, if present, on an ultrasound image.
#
#
# There are three identified levels of technical abstraction for the final data product:
#
# 1. The first level of abstraction and the minimum viable product for this capstone project is a well-documented and reproducible machine learning pipeline. The source code from this analysis acts as a proof-of-concept for using a supervised learning approach to address the problem of detecting subclinical lipohypertrophy.
#
# 2. The second level of abstraction includes deploying the model to a cloud-based service and creating an interface where users will be able to interact with it to obtain predictions on unseen data.
#
# 3. The final level of abstraction is creating a python application using tools such as `pyinstaller`. This would allow healthcare workers to easily interact with our final data product without any prior knowledge of programming.
# <br>
# ## 3. Data Science Techniques
# A previous study {cite}`kapeluto2018ultrasound` classified ultrasound images as containing hypertrophy ("positive") or not containing lipohypertrophy ("negative") based on expert knowledge and objectively-developed criteria. These images and mappings will be used to develop the CNN. Efforts were made to crop the images to discard their borders and other annotated information, keeping only the core ultrasound image.
# *Code adapted from {cite}`barzinm_2016,shoeb3_2018`*
# + tags=["hide-input"]
from matplotlib.pyplot import figure, imshow, axis, figtext
from matplotlib.image import imread

# One example ultrasound image per class label.
files = {"Negative": 'lipo_negative.png', "Positive": 'lipo_positive.png'}

# Draw the two examples side by side, titled with their class label.
fig = figure(figsize=(16, 12))
for idx, (label, path) in enumerate(files.items()):
    panel = fig.add_subplot(1, len(files), idx + 1)
    panel.set_title(label)
    imshow(imread(path))
    axis('off')

# Shared caption under both panels.
txt="Figure 1: Some examples of ultrasound images. The lipohypertrophy is characterized with \n the filled light grey circle on the positive image."
figtext(0.5, 0.20, txt, horizontalalignment='center', fontsize=16)
# -
# The processed data consists of 263 images, a scarce yet fairly balanced dataset. The counts and proportion of positive and negative observations are shown in the table below.
#
# | | Positive | Negative |
# | --- | --- | --- |
# | Count | 135 | 128 |
# | Proportion | 51% | 49% |
#
# Due to the relatively small size of the dataset, data augmentation (the process of creating additional samples as augmented versions of original samples) was performed, using techniques such as randomly flipping and/or rotating the images. Data augmentation can help expand the data available to train a model and hence improve model performance. We plan on further investigating additional data augmentation techniques such as contrast and brightness adjustment, scaling, and shifting.
#
# Furthermore, we have created a Python script to split the data into train, validation, and test sets. This is standard practice when developing machine learning models and facilitates data reading into PyTorch and allows the team to have holdout samples to validate model results.
#
# The processed data will be used to train a CNN model using a technique called transfer learning. Transfer learning is the process of applying an existing pre-trained model to a new dataset, and is common practice in deep learning. A popular choice in deep learning applications is the *densenet* model which will serve as our baseline model.
#
# In our preliminary application, the baseline model is trained on the pre-processed data, yielding a validation accuracy score of 82% after 20 epochs (i.e. 20 passes of the entire training dataset). We further explored the baseline model’s performance by examining the feature importances on several observations. Below is an example of a positive observation (i.e. lipohypertrophy present). The highlighted areas are areas that the model believes are significant for deriving the prediction. We can observe that the model is highlighting areas where lipohypertrophy is located.
# + tags=["hide-input"]
# Display the saved feature-importance screenshot for a positive example.
fig = figure(figsize=(10, 8))
panel = fig.add_subplot(1, 1, 1)
imshow(imread("proposal_feature_importance.PNG"))
axis('off')
# Caption below the image; assign to _ to suppress notebook echo.
txt = "Figure 2: Feature Importance. The lighter orange and yellow areas on the right represent areas that \n the baseline model thinks are important for detecting lipohypertrophy."
_ = figtext(0.5, 0.10, txt, horizontalalignment='center', fontsize=14)
# -
# To evaluate the CNN model's performance, we will consider both accuracy and recall scores. Recall is a ratio of true positives to false negatives with a higher score reflecting less false negatives which is of interest here. False negatives are critical to avoid from the healthcare providers’ perspective as administering insulin in a region that the model detected did not have lipohypertrophy when in fact there is lipohypertrophy present would be detrimental in managing diabetes.
#
# Our review of the literature flagged other popular deep learning architectures that have been successful across a wide range of problems. As a second approach, we plan on utilizing the *VGG* architecture, proposed by Karen Simonyan and Andrew Zisserman in the paper "Very Deep Convolutional Networks for Large-Scale Image Recognition" {cite}`simonyan2014very`. The *VGG* architecture has proven successful in applications with small images and combats the large memory requirement of the *densemodels* architecture. Our research also demonstrated that a *VGG* pre-trained CNN model is slow to train as its learned weights are rather large. Therefore, we plan on exploring *Inception*, proposed in the paper "Going Deeper with Convolutions" {cite}`szegedy2015going`. We plan on modeling the performance of a variety of tuned CNN architectures against the baseline model to determine the most appropriate architecture.
# <br>
# ## 4. Timeline
# We have divided our project up into 3 phases.
#
# 1. In Phase 1 (weeks 1 to 3), we aim to configure the most appropriate image pre-processing and CNN architectures by testing a variety of transfer learning techniques.
#
# 2. In Phase 2 (weeks 4 to 6), we aim to implement higher levels of sophistication to our data product to improve its accessibility to a non-technical audience.
#
# 3. In Phase 3 (weeks 6 to 8), we aim to complete our final data product, final report, and final presentation and add additional functionality such as object detection if time permits.
#
# 
#
# <br>
|
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# language: python
# name: python3
# ---
# # Introduction
# This notebook presents **Convolutional Neural Network** applied to [CIFAR-10](https://www.cs.toronto.edu/~kriz/cifar.html) dataset.
# **Contents**
#
# * [CIFAR-10 Dataset](#CIFAR-10-Dataset) - load and preprocess dataset
# * [Keras Model](#Keras-Model) - make and train model
# # Imports
import numpy as np
import matplotlib.pyplot as plt
# Limit TensorFlow GPU memory usage
#
# +
import tensorflow as tf
# TF1-style session configuration: `allow_growth` makes TensorFlow
# allocate GPU memory on demand instead of grabbing it all up front.
config = tf.ConfigProto()
config.gpu_options.allow_growth = True
# Open (and immediately close) a session so the GPU option takes effect
# for this process.
with tf.Session(config=config):
    pass # init session with allow_growth
# -
# # CIFAR-10 Dataset
# Load dataset and show example images
# Download (cached after the first run) and unpack CIFAR-10:
# 50k train / 10k test RGB 32x32 images with integer labels.
(x_train_raw, y_train_raw), (x_test_raw, y_test_raw) = tf.keras.datasets.cifar10.load_data()
# Human-readable name for each integer label (index == label).
class2txt = ['airplane', 'automobile', 'bird', 'cat', 'deer', 'dog', 'frog', 'horse', 'ship', 'truck']
# Show example images
fig, axes = plt.subplots(nrows=1, ncols=6, figsize=[16, 9])
for ax, img, lbl in zip(axes, x_train_raw, y_train_raw):
    ax.set_title(class2txt[lbl[0]])
    ax.imshow(img)
# Normalize features
# Standardize pixels with *training-set* statistics so train and test
# share the exact same transform (no test-set leakage).
mu, sigma = x_train_raw.mean(), x_train_raw.std()
x_train = (x_train_raw - mu) / sigma
x_test = (x_test_raw - mu) / sigma
print('x_train.shape', x_train.shape)
print('x_test.shape', x_test.shape)
# One-hot encode labels
y_train = tf.keras.utils.to_categorical(y_train_raw, num_classes=10)
y_test = tf.keras.utils.to_categorical(y_test_raw, num_classes=10)
print('y_train.shape', y_train.shape)
print(y_train[:3])
# # Keras Model
from tensorflow.keras.layers import Input, InputLayer, Conv2D, MaxPooling2D, Activation, Flatten, Dense, Dropout
# *Option #1:* define ConvNet model using **Keras Sequential API**
# +
# model = tf.keras.Sequential()
# model.add(InputLayer(input_shape=[32, 32, 3]))
# model.add(Conv2D(filters=16, kernel_size=3, padding='same', activation='elu'))
# model.add(MaxPooling2D(pool_size=[2,2], strides=[2, 2], padding='same'))
# model.add(Conv2D(filters=32, kernel_size=3, padding='same', activation='elu'))
# model.add(MaxPooling2D(pool_size=[2,2], strides=[2, 2], padding='same'))
# model.add(Conv2D(filters=64, kernel_size=3, padding='same', activation='elu'))
# model.add(MaxPooling2D(pool_size=[2,2], strides=[2, 2], padding='same'))
# model.add(Flatten())
# model.add(Dropout(0.2))
# model.add(Dense(512, activation='elu'))
# model.add(Dropout(0.2))
# model.add(Dense(10, activation='softmax'))
# -
# *Option #2:* define ConvNet using **Keras Functional API** (both options produce identical models)
# +
# Same ConvNet as the Sequential variant above, built with the
# Functional API: three conv/pool stages (16 -> 32 -> 64 filters),
# then flatten, dropout, a 512-unit hidden layer, and a softmax head.
inputs = Input(shape=[32, 32, 3])
net = inputs
for n_filters in (16, 32, 64):
    net = Conv2D(filters=n_filters, kernel_size=3, padding='same', activation='elu')(net)
    net = MaxPooling2D(pool_size=[2, 2], strides=[2, 2], padding='same')(net)
net = Flatten()(net)
net = Dropout(0.2)(net)
net = Dense(512, activation='elu')(net)
net = Dropout(0.2)(net)
net = Dense(10, activation='softmax')(net)
model = tf.keras.Model(inputs=inputs, outputs=net)
# -
# Adam + categorical cross-entropy; track accuracy during training.
model.compile(optimizer='adam', loss='categorical_crossentropy', metrics=['accuracy'])
model.summary()
# Train model
# NOTE(review): the test set is reused as validation data here — fine
# for a demo notebook, but not for honest model selection.
hist = model.fit(
    x=x_train, y=y_train,
    batch_size=250, epochs=20,
    validation_data=(x_test, y_test),
    verbose=2,
)
# **Final Results**
# **NOTE:** Keras calculates training loss differently than validation loss, from documentation ([source](https://keras.io/getting-started/faq/#why-is-the-training-loss-much-higher-than-the-testing-loss)):
#
# > **Why is the training loss much higher than the testing loss?**
# >
# > A Keras model has two modes: training and testing. Regularization mechanisms, such as Dropout and L1/L2 weight regularization, are turned off at testing time.
# >
# > Besides, the training loss is the average of the losses over each batch of training data. Because your model is changing over time, the loss over the first batches of an epoch is generally higher than over the last batches. On the other hand, the testing loss for an epoch is computed using the model as it is at the end of the epoch, resulting in a lower loss.
#
# This is why the train loss/accuracy below are much better than the ones calculated during training. This is also why, initially during training, the train loss is higher than the validation loss.
# Final results
# Re-evaluate both splits with the final weights (dropout disabled).
for split_name, features, labels in (('train', x_train, y_train),
                                     ('test', x_test, y_test)):
    loss, acc = model.evaluate(features, labels, batch_size=250, verbose=0)
    print(f'Accuracy on {split_name} set: {acc:.3f}')

# Plot the per-epoch loss and accuracy curves recorded by fit().
fig, axes = plt.subplots(nrows=1, ncols=2, figsize=[16, 6])
for ax, metric, title in zip(axes, ('loss', 'acc'), ('Loss', 'Accuracy')):
    ax.plot(hist.history[metric], label='train_' + metric)
    ax.plot(hist.history['val_' + metric], label='val_' + metric)
    ax.set_title(title)
    ax.legend()
    ax.grid()
# Looks like we have a bit of an overfitting issue.
|
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3.8.0 64-bit
# metadata:
# interpreter:
# hash: e404b59586357c814bc0d3940e75d6763c00a48753b225b81f7716971b8e1741
# name: python3
# ---
# +
import numpy as np
from sympy import preview
# A scalar is a single number (rank-0).
scalar = 3
print(f'Scalar Value:\n{scalar}\n')

# A vector is a rank-1 array. `np.int` was deprecated and removed in
# NumPy 1.24; the builtin `int` is the supported spelling.
vector = np.array([1, 2, 3, 4, 5], dtype=int)
print(f'Vector Value:\n{vector}\n')

# A matrix is a rank-2 array (5 rows x 4 columns here).
matrix = np.array([
    [1, 2, 3, 4],
    [5, 6, 7, 8],
    [9, 10, 11, 12],
    [13, 14, 15, 16],
    [17, 18, 19, 20],
])
print(f'Matrix Value:\n{matrix}\n')

# A tensor (rank-3 array) of shape (3, 5, 4): three consecutive 5x4
# blocks covering 1..60.
tensor = np.array([
    np.array(list(range(1, 21))).reshape((5, 4)),
    np.array(list(range(21, 41))).reshape((5, 4)),
    np.array(list(range(41, 61))).reshape((5, 4)),
])
# The raw interpreter output that had been pasted here made the script a
# SyntaxError; it was removed — the print below shows the same values.
print(f'Tensor Value:\n{tensor}\n')
# -
|
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 2
# language: python
# name: python2
# ---
# # Example 2: A fast parallel pivot, or preparing for time series analysis
from pyspark import SparkConf, SparkContext
from collections import OrderedDict
from functools import partial
# Read the raw CSV into an RDD spread over 64 partitions.
# NOTE(review): `sc` is the SparkContext injected by the pyspark shell;
# it is not defined anywhere in this file.
partitions = 64
parcsv = sc.textFile("/user/milroy/lustre_timeseries.csv", partitions)
# Peek at the first few raw lines.
parcsv.take(5)
# Each of these lines contains 6 semi-colon delimited columns: hostname, metric name, value reported, type, units, and Unix epoch time. Can we assume all do? The example data is an excerpt of one day of Lustre data, but we have hundreds of full days which may contain dropped writes and malformed data. I'll apply a filter to the data to select all lines with six columns.
#
# Sometimes it isn't evident whether filters are needed until a succeeding RDD action fails.
# Keep only well-formed rows: exactly 6 semicolon-delimited fields.
filtered = parcsv.filter(lambda line: len(line.split(';')) == 6)
# As seen above, the lines are Unicode, but in anticipation of necessary transformations the timestamp and values will need to be cast to appropriate types. We'll need to create a function that takes each line as an argument and returns a 4-tuple (quadruple?), organized to facilitate intuitive indexing. Let's pick the following ordering: (timestamp, host, metric, value). We don't need the other values, so they are discarded.
#
# Since the values in the third column are currently Unicode, a try-except structure is used to attempt to cast them to floats. If unsuccessful we set them to zero rather than NaN, since these don't work with the forthcoming eigendecomposition.
#
# An alternative to the try-except would be to apply a filter for lines whose third column can't be cast as a float. I haven't compared the performance between these two.
def cast(line):
    """Parse one semicolon-delimited Lustre log line.

    Returns a ``(timestamp, host, metric, value)`` tuple where the epoch
    timestamp is an ``int`` and the reported value a ``float``.  A value
    that cannot be parsed as a float is mapped to 0.0 (not NaN), because
    NaNs break the downstream eigendecomposition.
    """
    # Split once instead of once per field (the original split the same
    # line four times).
    fields = line.split(';')
    try:
        val = float(fields[2])
    except ValueError:
        # Malformed value column only; a bare `except:` here would also
        # have swallowed KeyboardInterrupt and friends.
        val = 0.0
    return (int(fields[5]), fields[0], fields[1], val)
# Parse every well-formed line into (timestamp, host, metric, value).
parsed = filtered.map(cast)
# Metrics aren't reported continuously, nor are the monitoring systems flawless. We need to assemble a unique set (dictionary) of metrics for the pivot, but they must be ordered to make sure the covariance structure (for PCA) isn't distorted.
#
# PySpark's ".distinct()" method accomplishes this; we issue a ".collect()" as well to assign the RDD's values to a variable.
# Collect the distinct metric names; these become the pivot's columns.
columns = parsed.map(lambda row: row[2]).distinct().collect()
# Template row: every known metric initialized to 0.0.
basedict = {metric: 0.0 for metric in columns}
# Now we create an ordered dictionary to preserve the metric (and consequently, column) ordering. If we did not create this OrderedDict, the keys' ordering may be permuted. This will render the eigendecomposition of the covariance matrix meaningless.
#
# The object is broadcast to all executors to be used in a future mapped function.
# Sort the template by metric name so every pivoted row shares one fixed
# column order, then broadcast it so executors can copy it cheaply.
ordered = sc.broadcast(OrderedDict(sorted(basedict.items(), key=lambda y: y[0])))
# The two functions below are adapted from user patricksurry's answer to this Stack Overflow question: http://stackoverflow.com/questions/30260015/reshaping-pivoting-data-in-spark-rdd-and-or-spark-dataframes. Beware, patricksurry's answer is predominantly serial!
# +
def combine(u1, u2):
    """combOp for aggregateByKey: merge two per-partition row dicts in place."""
    u1.update(u2)
    return u1

def sequential(u, v):
    """seqOp for aggregateByKey: fold row v = (ts, host, metric, value) into dict u.

    The zero value is None, so the first call per key/partition starts a
    fresh dictionary.
    """
    acc = u if u else {}
    acc[v[2]] = v[3]
    return acc
# -
# We need to perform an aggregation by key. This operation takes two functions as arguments: the sequential and combination functions. The sequential op constructs a dictionary from (metric, value) in each row, and the combine op combines row dictionaries based on identical (timestamp, host) keys.
#
# <img src="aggregateByKey.png">
# Key each row by (timestamp, host), then fold rows into per-key
# {metric: value} dicts (sequential) and merge partition results (combine).
keyed = parsed.keyBy(lambda row: (row[0], row[1]))
aggregated = keyed.aggregateByKey(None, sequential, combine)
# Now we need to impose the structure of our OrderedDict on each aggregated key, value pair. We create a new function to copy our canonical dictionary (of ordered keys, and 0.0 values) and update it with the dictionaries created in the aggregateByKey step.
def mergedicts(new):
    """Overlay one aggregated row onto the canonical ordered template.

    ``new`` is a ((timestamp, host), {metric: value}) pair; its sparse dict
    is merged into a copy of the broadcast OrderedDict so every pivoted row
    carries the full, consistently ordered set of metric columns.
    """
    key, sparse = new
    row = ordered.value.copy()
    row.update(sparse)
    return key, row

pivoted = aggregated.map(mergedicts)
# Let's take a look at the results.
# Pull the 10 earliest (timestamp, host) rows for a quick sanity check.
final_ordered = pivoted.takeOrdered(10, key=lambda x: x[0])
final_ordered[0][0]
# To sort the entire RDD, we use a sortByKey.
final_sorted = pivoted.sortByKey(keyfunc= lambda k: k[0])
# Drop the keys; keep only each row's metric values (already in the
# canonical OrderedDict column order).
final_dict = final_sorted.map(lambda row: row[1].values())
# Writing the lists to disk takes quite a long time. This is not optimized for Hadoop, and not writing in parallel. An exercise for the reader!
final_dict.coalesce(2).saveAsTextFile("/home/milroy/pyspark/processed.txt")
# # Now on to Scala Spark for time series PCA
# Now exit the pyspark shell, and run spark-shell with the following options.
# The line below is a *shell* command, not Python — run it from a
# terminal to launch the Scala Spark REPL. It is commented out so this
# script remains syntactically valid Python:
# spark-shell --master $MASTER --driver-memory 12g
# +
# The cell below is Scala, intended to be pasted into spark-shell — it is
# not Python and previously made this file unparseable. Commented out so
# the file stays importable:
#
# import org.apache.spark.mllib.linalg.Matrix
# import org.apache.spark.mllib.linalg.distributed.RowMatrix
# import org.apache.spark.mllib.linalg.{Vector, Vectors}
# val datafilePattern = "/user/milroy/pivoted.txt"
# val lustreData = sc.textFile(datafilePattern).cache()
# val vecData = lustreData.map(line => line.split(",").map(
#     line => line.drop(1).dropRight(1)).map(
#     v => v.toDouble)).map(arr => Vectors.dense(arr))
# val rmat: RowMatrix = new RowMatrix(vecData)
# val pc: Matrix = rmat.computePrincipalComponents(15)
|
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python [default]
# language: python
# name: python3
# ---
# <img src="../../images/qiskit-heading.gif" alt="Note: In order for images to show up in this jupyter notebook you need to select File => Trusted Notebook" width="500 px" align="left">
# ## _*Superposition and Entanglement*_
#
#
# The latest version of this notebook is available on https://github.com/QISKit/qiskit-tutorial.
#
# ***
# ### Contributors
# Jay Gambetta, Antonio Córcoles, Andrew Cross, Anna Phan
# ## Introduction
# Many people tend to think quantum physics is hard math, but this is not actually true. Quantum concepts are very similar to those seen in the linear algebra classes you may have taken as a freshman in college, or even in high school. The challenge of quantum physics is the necessity to accept counter-intuitive ideas, and its lack of a simple underlying theory. We believe that if you can grasp the following two Principles, you will have a good start:
# 1. [A physical system in a perfectly definite state can still behave randomly.](#section1)
# 2. [Two systems that are too far apart to influence each other can nevertheless behave in ways that, though individually random, are somehow strongly correlated.](#section2)
# ## Getting Started
# Please see [Getting Started with QISKit](../tools/getting_started.ipynb) if you would like to understand how to get started with QISKit. This tutorial will be giving an introduction to superposition and entanglement. Note that all the examples will be run on a simulator to avoid the queues for running on a quantum processor, however, all examples can be run on real devices, see [this tutorial](../tools/working_with_backends.ipynb) to find out how.
import sys, getpass
# Load API credentials from Qconfig.py when present; otherwise fall back
# to interactive token entry.
try:
    sys.path.append("../../") # go to parent dir
    import Qconfig
    qx_config = {
        "APItoken": Qconfig.APItoken,
        "url": Qconfig.config['url']}
    print('Qconfig loaded from %s.' % Qconfig.__file__)
except Exception:
    # Was a bare `except:`, which would also have swallowed
    # KeyboardInterrupt/SystemExit; `Exception` still covers the intended
    # ImportError / KeyError / AttributeError failure modes.
    APItoken = getpass.getpass('Please input your token and hit enter: ')
    qx_config = {
        "APItoken": APItoken,
        "url":"https://quantumexperience.ng.bluemix.net/api"}
    print('Qconfig.py not found in qiskit-tutorial directory; Qconfig loaded using user input.')
# +
# useful additional packages
import matplotlib.pyplot as plt
# %matplotlib inline
import numpy as np
from pprint import pprint
# importing QISKit
from qiskit import QuantumProgram, QuantumCircuit, QuantumRegister, ClassicalRegister
from qiskit import available_backends, get_backend, execute, register
# import basic plot tools
from qiskit.tools.visualization import plot_histogram, circuit_drawer
# -
def lowest_pending_jobs():
    """Return the name of the real (non-local, non-simulator) backend
    that currently has the fewest pending jobs.

    Raises ValueError (from ``min``) if no backend is operational.
    """
    backends = available_backends({'local': False, 'simulator': False})
    statuses = (get_backend(name).status for name in backends)
    operational = [s for s in statuses if s['operational'] is True]
    best = min(operational, key=lambda s: s['pending_jobs'])
    return best['name']
# +
# All circuits below run on the local simulator unless the real-device
# lines are uncommented (requires valid qx_config credentials).
backend = 'local_qasm_simulator' # run on local simulator by default
# Uncomment the following lines to run on a real device
# register(qx_config['APItoken'], qx_config['url'])
# backend = lowest_pending_jobs()
# print("the best backend is " + backend)
# -
# ## Quantum States - Basis States and Superpositions<a id='section1'></a>
#
# The first Principle above tells us that the results of measuring a quantum state may be random or deterministic, depending on what basis is used. To demonstrate, we will first introduce the computational (or standard) basis for a qubit.
#
# The computational basis is the set containing the ground and excited state $\{|0\rangle,|1\rangle\}$, which also corresponds to the following vectors:
#
# $$|0\rangle =\begin{pmatrix} 1 \\ 0 \end{pmatrix}$$
# $$|1\rangle =\begin{pmatrix} 0 \\ 1 \end{pmatrix}$$
#
# In Python these are represented by
# Column vectors for the computational-basis states |0> and |1>.
zero = np.array([1, 0]).reshape(2, 1)
one = np.array([0, 1]).reshape(2, 1)
# In our quantum processor system (and many other physical quantum processors) it is natural for all qubits to start in the $|0\rangle$ state, known as the ground state. To make the $|1\rangle$ (or excited) state, we use the operator
#
# $$ X =\begin{pmatrix} 0 & 1 \\ 1 & 0 \end{pmatrix}.$$
#
# This $X$ operator is often called a bit-flip because it exactly implements the following:
#
# $$X: |0\rangle \rightarrow |1\rangle$$
# $$X: |1\rangle \rightarrow |0\rangle.$$
#
# In Python this can be represented by the following:
# The Pauli-X (bit-flip) operator: X|0> = |1>, X|1> = |0>.
X = np.array([[0, 1],
              [1, 0]])
for basis_state in (zero, one):
    print(np.dot(X, basis_state))
# Next, we give the two quantum circuits for preparing and measuring a single qubit in the ground and excited states using QISKit.
# +
# Create a one-qubit register and one classical bit for the result.
qr = QuantumRegister(1)
cr = ClassicalRegister(1)
# Ground-state circuit: qubits start in |0>, so just measure.
qc_ground = QuantumCircuit(qr, cr)
qc_ground.measure(qr[0], cr[0])
# Excited-state circuit: flip to |1> with an X gate, then measure.
qc_excited = QuantumCircuit(qr, cr)
qc_excited.x(qr)
qc_excited.measure(qr[0], cr[0])
# -
# Render both circuits.
circuit_drawer(qc_ground)
circuit_drawer(qc_excited)
# Here we have created two jobs with different quantum circuits; the first to prepare the ground state, and the second to prepare the excited state. Now we can run the prepared jobs.
# Submit both circuits as one job to the chosen backend and block on results.
circuits = [qc_ground, qc_excited]
job = execute(circuits, backend)
result = job.result()
# After the run has been completed, the data can be extracted from the API output and plotted.
plot_histogram(result.get_counts(qc_ground))
plot_histogram(result.get_counts(qc_excited))
# Here we see that the qubit is in the $|0\rangle$ state with 100% probability for the first circuit and in the $|1\rangle$ state with 100% probability for the second circuit. If we had run on a quantum processor rather than the simulator, there would be a difference from the ideal perfect answer due to a combination of measurement error, preparation error, and gate error (for the $|1\rangle$ state).
#
# Up to this point, nothing is different from a classical system of a bit. To go beyond, we must explore what it means to make a superposition. The operation in the quantum circuit language for generating a superposition is the Hadamard gate, $H$. Let's assume for now that this gate is like flipping a fair coin. The result of a flip has two possible outcomes, heads or tails, each occurring with equal probability. If we repeat this simple thought experiment many times, we would expect that on average we will measure as many heads as we do tails. Let heads be $|0\rangle$ and tails be $|1\rangle$.
#
# Let's run the quantum version of this experiment. First we prepare the qubit in the ground state $|0\rangle$. We then apply the Hadamard gate (coin flip). Finally, we measure the state of the qubit. Repeat the experiment 1024 times (shots). As you likely predicted, half the outcomes will be in the $|0\rangle$ state and half will be in the $|1\rangle$ state.
#
# Try the program below.
# +
# Quantum circuit superposition: H creates (|0>+|1>)/sqrt(2), then measure.
qc_superposition = QuantumCircuit(qr, cr)
qc_superposition.h(qr)
qc_superposition.measure(qr[0], cr[0])
circuit_drawer(qc_superposition)
# +
# Run it; expect roughly 50/50 counts of 0 and 1.
job = execute(qc_superposition, backend)
result = job.result()
plot_histogram(result.get_counts(qc_superposition))
# -
# Indeed, much like a coin flip, the results are close to 50/50 with some non-ideality due to errors (again due to state preparation, measurement, and gate errors). So far, this is still not unexpected. Let's run the experiment again, but this time with two $H$ gates in succession. If we consider the $H$ gate to be analog to a coin flip, here we would be flipping it twice, and still expecting a 50/50 distribution.
# +
# Quantum circuit two Hadamards: H then H (with a barrier between) —
# HH = I, so the measurement should return 0 almost always.
qc_twohadamard = QuantumCircuit(qr, cr)
qc_twohadamard.h(qr)
qc_twohadamard.barrier()
qc_twohadamard.h(qr)
qc_twohadamard.measure(qr[0], cr[0])
circuit_drawer(qc_twohadamard)
# +
job = execute(qc_twohadamard, backend)
result = job.result()
plot_histogram(result.get_counts(qc_twohadamard))
# -
# This time, the results are surprising. Unlike the classical case, with high probability the outcome is not random, but in the $|0\rangle$ state. *Quantum randomness* is not simply like a classical random coin flip. In both of the above experiments, the system (without noise) is in a definite state, but only in the first case does it behave randomly. This is because, in the first case, via the $H$ gate, we make a uniform superposition of the ground and excited state, $(|0\rangle+|1\rangle)/\sqrt{2}$, but then follow it with a measurement in the computational basis. The act of measurement in the computational basis forces the system to be in either the $|0\rangle$ state or the $|1\rangle$ state with an equal probability (due to the uniformity of the superposition). In the second case, we can think of the second $H$ gate as being a part of the final measurement operation; it changes the measurement basis from the computational basis to a *superposition* basis. The following equations illustrate the action of the $H$ gate on the computational basis states:
# $$H: |0\rangle \rightarrow |+\rangle=\frac{|0\rangle+|1\rangle}{\sqrt{2}}$$
# $$H: |1\rangle \rightarrow |-\rangle=\frac{|0\rangle-|1\rangle}{\sqrt{2}}.$$
# We can redefine this new transformed basis, the superposition basis, as the set {$|+\rangle$, $|-\rangle$}. We now have a different way of looking at the second experiment above. The first $H$ gate prepares the system into a superposition state, namely the $|+\rangle$ state. The second $H$ gate followed by the standard measurement changes it into a measurement in the superposition basis. If the measurement gives 0, we can conclude that the system was in the $|+\rangle$ state before the second $H$ gate, and if we obtain 1, it means the system was in the $|-\rangle$ state. In the above experiment we see that the outcome is mainly 0, suggesting that our system was in the $|+\rangle$ superposition state before the second $H$ gate.
#
#
# The math is best understood if we represent the quantum superposition state $|+\rangle$ and $|-\rangle$ by:
#
# $$|+\rangle =\frac{1}{\sqrt{2}}\begin{pmatrix} 1 \\ 1 \end{pmatrix}$$
# $$|-\rangle =\frac{1}{\sqrt{2}}\begin{pmatrix} 1 \\ -1 \end{pmatrix}$$
#
# A standard measurement, known in quantum mechanics as a projective or von Neumann measurement, takes any superposition state of the qubit and projects it to either the state $|0\rangle$ or the state $|1\rangle$ with a probability determined by:
#
# $$P(i|\psi) = |\langle i|\psi\rangle|^2$$
#
# where $P(i|\psi)$ is the probability of measuring the system in state $i$ given preparation $\psi$.
#
# We have written the Python function ```state_overlap``` to return this:
def state_overlap(state1, state2):
    """Return |<state1|state2>|^2, the projective-measurement probability.

    Both arguments are column vectors of shape (n, 1); the result is a
    1x1 array whose single entry is P(state1 | state2).  (Replaces a
    lambda assigned to a name, per PEP 8 E731.)
    """
    return np.absolute(np.dot(state1.conj().T, state2)) ** 2
# Now that we have a simple way of going from a state to the probability distribution of a standard measurement, we can go back to the case of a superposition made from the Hadamard gate. The Hadamard gate is defined by the matrix:
#
# $$ H =\frac{1}{\sqrt{2}}\begin{pmatrix} 1 & 1 \\ 1 & -1 \end{pmatrix}$$
#
# The $H$ gate acting on the state $|0\rangle$ gives:
# H|0> yields the uniform superposition; project it onto |0> and |1> to
# recover the ideal 50/50 measurement distribution.
Hadamard = np.array([[1, 1], [1, -1]], dtype=complex) / np.sqrt(2)
psi1 = np.dot(Hadamard, zero)
P0, P1 = (state_overlap(basis, psi1) for basis in (zero, one))
plot_histogram({'0': P0.item(0), '1': P1.item(0)})
# which is the ideal version of the first superposition experiment.
#
# The second experiment involves applying the Hadamard gate twice. While matrix multiplication shows that the product of two Hadamards is the identity operator (meaning that the state $|0\rangle$ remains unchanged), here (as previously mentioned) we prefer to interpret this as doing a measurement in the superposition basis. Using the above definitions, you can show that $H$ transforms the computational basis to the superposition basis.
# H maps the computational basis onto the superposition basis:
# H|0> = |+>, H|1> = |->.
print(np.dot(Hadamard,zero))
print(np.dot(Hadamard,one))
# ## Entanglement<a id='section2'></a>
#
# The core idea behind the second Principle is *entanglement*. Upon reading the Principle, one might be inclined to think that entanglement is simply strong correlation between two entitities -- but entanglement goes well beyond mere perfect (classical) correlation. If you and I read the same paper, we will have learned the same information. If a third person comes along and reads the same paper they <i>also</i> will have learned this information. All three persons in this case are perfectly correlated, and they will remain correlated even if they are separated from each other.
#
# The situation with quantum entanglement is a bit more subtle. In the quantum world, you and I could read the same quantum paper, and yet we will not learn what information is actually contained in the paper until we get together and share our information. However, when we are together, we find that we can unlock more information from the paper than we initially thought possible. Thus, quantum entanglement goes much further than perfect correlation.
#
# To demonstrate this, we will define the controlled-NOT (CNOT) gate and the composition of two systems. The convention we use in the Quantum Experience is to label states by writing the first qubit's name in the rightmost position, thereby allowing us to easily convert from binary to decimal. As a result, we define the tensor product between operators $q_0$ and $q_1$ by $q_1\otimes q_0$.
#
# Taking $q_0$ as the control and $q_1$ as the target, the CNOT with this representation is given by
#
# $$ CNOT =\begin{pmatrix} 1 & 0 & 0 & 0\\ 0 & 0 & 0 & 1\\0& 0& 1 & 0\\0 & 1 & 0 & 0 \end{pmatrix},$$
#
# which is non-standard in the quantum community, but more easily connects to classical computing, where the least significant bit (LSB) is typically on the right. An entangled state of the two qubits can be made via an $H$ gate on the control qubit, followed by the CNOT gate. This generates a particular maximally entangled two-qubit state known as a Bell state, named after John Stewart Bell ([learn more about Bell and his contributions to quantum physics and entanglement](https://en.wikipedia.org/wiki/John_Stewart_Bell)).
#
# To explore this, we can prepare an entangled state of two qubits, and then ask questions about the qubit states. The questions we can ask are:
# * What is the state of the first qubit in the standard basis?
# * What is the state of the first qubit in the superposition basis?
# * What is the state of the second qubit in the standard basis?
# * What is the state of the second qubit in the superposition basis?
# * What is the state of both qubits in the standard basis?
# * What is the state of both qubits in the superposition basis?
#
# Below is a program with six such circuits for these six questions.
# +
# Create a two-qubit register and two classical bits for the results.
q2 = QuantumRegister(2)
c2 = ClassicalRegister(2)
# Bell-state preparation: H on q0 then CNOT(q0 -> q1) gives (|00>+|11>)/sqrt(2).
bell = QuantumCircuit(q2, c2)
bell.h(q2[0])
bell.cx(q2[0], q2[1])
# Measure q0 in the standard (computational) basis.
measureIZ = QuantumCircuit(q2, c2)
measureIZ.measure(q2[0], c2[0])
bellIZ = bell+measureIZ
# Measure q0 in the superposition basis (an H before measuring rotates the basis).
measureIX = QuantumCircuit(q2, c2)
measureIX.h(q2[0])
measureIX.measure(q2[0], c2[0])
bellIX = bell+measureIX
# Measure q1 in the standard basis.
measureZI = QuantumCircuit(q2, c2)
measureZI.measure(q2[1], c2[1])
bellZI = bell+measureZI
# Measure q1 in the superposition basis.
measureXI = QuantumCircuit(q2, c2)
measureXI.h(q2[1])
measureXI.measure(q2[1], c2[1])
bellXI = bell+measureXI
# Measure both qubits in the standard basis.
measureZZ = QuantumCircuit(q2, c2)
measureZZ.measure(q2[0], c2[0])
measureZZ.measure(q2[1], c2[1])
bellZZ = bell+measureZZ
# Measure both qubits in the superposition basis.
measureXX = QuantumCircuit(q2, c2)
measureXX.h(q2[0])
measureXX.h(q2[1])
measureXX.measure(q2[0], c2[0])
measureXX.measure(q2[1], c2[1])
bellXX = bell+measureXX
# -
circuit_drawer(bellIZ)
circuit_drawer(bellIX)
circuit_drawer(bellZI)
circuit_drawer(bellXI)
circuit_drawer(bellZZ)
circuit_drawer(bellXX)
# Let's begin by running just the first two questions, looking at the results of the first qubit ($q_0$) using a computational and then a superposition measurement.
# +
# Run all six measurement circuits in a single job, then plot the counts of
# the q0 standard-basis (Z) experiment
circuits = [bellIZ,bellIX,bellZI,bellXI,bellZZ,bellXX]
job = execute(circuits, backend)
result = job.result()
plot_histogram(result.get_counts(bellIZ))
# -
result.get_data(bellIZ)
# We find that the result is random. Half the time $q_0$ is in $|0\rangle$, and the other half it is in the $|1\rangle$ state. You may wonder whether this is like the superposition from earlier in the tutorial. Maybe the qubit has a perfectly definite state, and we are simply measuring in another basis. What would you expect if you did the experiment and measured in the superposition basis? Recall we do this by adding an $H$ gate before the measurement...which is exactly what we have checked with the second question.
plot_histogram(result.get_counts(bellIX))
# In this case, we see that the result is still random, regardless of whether we measure in the computational or the superposition basis. This tells us that we actually know nothing about the first qubit. What about the second qubit, $q_1$? The next lines will run experiments measuring the second qubit in both the computational and superposition bases.
# q1 measured in the standard (Z) and superposition (X) bases
plot_histogram(result.get_counts(bellZI))
plot_histogram(result.get_counts(bellXI))
# Once again, all the experiments give random outcomes. It seems we know nothing about either qubit in our system! In our previous analogy, this is equivalent to two readers separately reading a quantum paper and extracting no information whatsoever from it on their own.
#
# What do you expect, however, when the readers get together? Below we will measure both in the joint computational basis.
plot_histogram(result.get_counts(bellZZ))
# Here we see that with high probability, if $q_0$ is in state 0, $q_1$ will be in 0 as well; the same goes if $q_0$ is in state 1. They are perfectly correlated.
#
# What about if we measure both in the superposition basis?
plot_histogram(result.get_counts(bellXX))
# Here we see that the system **also** has perfect correlations (accounting for experimental noise). Therefore, if $q_0$ is measured in state $|0\rangle$, we know $q_1$ is in this state as well; likewise, if $q_0$ is measured in state $|+\rangle$, we know $q_1$ is also in this state. These correlations have led to much confusion in science, because any attempt to relate the unusual behavior of quantum entanglement to our everyday experiences is a fruitless endeavor.
#
# Finally, we need to point out that having correlated outcomes does not necessarily imply that what we are observing is an entangled state. What would we observe, for example, if we prepared half of our shots in the $|00\rangle$ state and half of the shots in the $|11\rangle$ state? Let's have a look
# +
# quantum circuit to make a mixed state
# mixed1 leaves both qubits in their ground state; mixed2 flips both with X.
# Running half the shots on each mimics a classical 50/50 mixture of the
# all-zeros and all-ones states.
mixed1 = QuantumCircuit(q2, c2)
mixed2 = QuantumCircuit(q2, c2)
mixed2.x(q2)
mixed1.measure(q2[0], c2[0])
mixed1.measure(q2[1], c2[1])
mixed2.measure(q2[0], c2[0])
mixed2.measure(q2[1], c2[1])
circuit_drawer(mixed1)
# -
circuit_drawer(mixed2)
# +
# Execute both mixture circuits and merge their count dictionaries by
# converting them to Counters (Counter addition sums per-key counts)
mixed_state = [mixed1,mixed2]
job = execute(mixed_state, backend)
result = job.result()
counts1 = result.get_counts(mixed_state[0])
counts2 = result.get_counts(mixed_state[1])
from collections import Counter
ground = Counter(counts1)
excited = Counter(counts2)
plot_histogram(ground+excited)
# -
# We do indeed see the same kind of correlation as we observed in the "bell_measureZZ" circuit. But we know this is not an entangled state! All we have done is leave the qubits in their ground state for some of the shots and flip both qubits for the rest of the shots. This is called a mixed state, and it is a classical state. Now, would we observe a similar outcome if we measured this mixed state in the superposition basis? We will leave this for the reader to try.
#
# This is just a taste of what happens in the quantum world. Please continue to [Entanglement revisited](entanglement_revisited.ipynb) to explore further!
|
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# language: python
# name: python3
# ---
import numpy as np
import seaborn as sb
import pandas
import sys
import itertools
import matplotlib.pyplot as plt
import nltk
import csv
import datetime
import tensorflow as tf
# %matplotlib notebook
# # Introduction into tensorflow
#
# Let's extend our Python journey into deep networks to one of the standard neural network packages.
#
# First, installation:
#
# ## Installing tensorflow
#
# You can follow the many examples given here:
#
# https://www.tensorflow.org/install/
#
# To reduce the hassle, I would recommend a **CPU-installation**, since GPU-installs are notoriously difficult and may result in you spending several hours tweaking drivers and installation files.
#
# I actually had good success with installing the pip-version on my computer, so I did:
#
# `pip3 install tensorflow`
#
# and it just rolled - after downloading for several minutes on a high-speed network.
# ## tensorflow basics: the computational graph
#
# In tensorflow, everything is structured as a computational graph, which is just a fancy word for a flow-chart.
#
# A computational graph is a series of operations arranged into a graph with nodes. You can visualize this graph as a flow-chart with tools shipped in tensorflow as well, which makes for nice debugging and a different intuition about the computations that are going on in your code.
#
# ### tensors
#
# As the name says, tensorflow works on tensors, which are mathematical entities that basically generalize matrices. In Python terms that means roughly that they are like multi-dimensional arrays - a matrix has two indices `m[i][j]`. This matrix is actually simply a tensor of Rank 2.
#
# Hence, a tensor of Rank 3 would be `t[i][j][k]`. Things get a little tricky with summations and multiplications of tensors, but in principle tensors are basically multi-dimensionally-indexed "matrices".
#
# ### nodes
#
# Each node in tensorflow takes zero or more tensors as input, and produces a tensor as an output.
#
# The most basic node is a "constant" node that takes zero inputs (since it is constant) and produces a Rank 1 tensor as output (a number).
#
# Let's create two of these very simple nodes:
# Two constant nodes in the (TensorFlow 1.x) computational graph; printing
# them shows the tensor objects, not their values.
nodeBoring1 = tf.constant(5.0)
nodeBoring2 = tf.constant(10.0)
print(nodeBoring1,nodeBoring2)
# The important thing is that printing the nodes does not print their values. Instead a node is a structure in a computational graph that needs to be evaluated in order to produce output!
#
# We evaluate the computational graph by running a session, like so:
sess = tf.Session()
print(sess.run([nodeBoring1,nodeBoring2]))
# ### simple computations with nodes
#
# Let's multiply two nodes together:
nodeMult = tf.multiply(nodeBoring1,nodeBoring2)
print(nodeMult)
print(sess.run(nodeMult))
# Ain't that awesome? We can use a multi-megabyte code-based to multiply two numbers in about 4 lines of code...
#
# Ok, sarcasm off. Let's try to visualize the computational graph. Since we are using jupyter, we first have to teach it how to display the input from tensorboard, which is tensorflow's official visualization tool for these graphs.
#
# The following code should allow us to use its visualization in jupyter:
# +
from IPython.display import clear_output, Image, display, HTML
def strip_consts(graph_def, max_const_size=32):
    """Strip large constant values from graph_def.

    Returns a copy of *graph_def* in which the raw tensor content of any
    `Const` node larger than *max_const_size* bytes is replaced by a short
    placeholder, so the serialized graph stays small enough to embed in a
    notebook iframe.
    """
    strip_def = tf.GraphDef()
    for n0 in graph_def.node:
        n = strip_def.node.add()
        n.MergeFrom(n0)
        if n.op == 'Const':
            tensor = n.attr['value'].tensor
            size = len(tensor.tensor_content)
            if size > max_const_size:
                # BUG FIX: tensor_content is a protobuf *bytes* field;
                # assigning a str raises TypeError under Python 3 (this
                # notebook runs a python3 kernel), so encode the placeholder.
                tensor.tensor_content = ("<stripped %d bytes>" % size).encode()
    return strip_def
def show_graph(graph_def, max_const_size=32):
    """Visualize a TensorFlow graph inline in a Jupyter notebook.

    Accepts either a `GraphDef` or an object exposing `as_graph_def()`
    (e.g. a `tf.Graph`), strips oversized constants via `strip_consts`,
    and renders the graph with the hosted tf-graph-basic TensorBoard
    widget inside an iframe.
    """
    if hasattr(graph_def, 'as_graph_def'):
        graph_def = graph_def.as_graph_def()
    strip_def = strip_consts(graph_def, max_const_size=max_const_size)
    code = """
        <script>
        function load() {{
            document.getElementById("{id}").pbtxt = {data};
        }}
        </script>
        <link rel="import" href="https://tensorboard.appspot.com/tf-graph-basic.build.html" onload=load()>
        <div style="height:600px">
        <tf-graph-basic id="{id}"></tf-graph-basic>
        </div>
    """.format(data=repr(str(strip_def)), id='graph'+str(np.random.rand()))
    # BUG FIX: the inner document is embedded in the srcdoc="..." attribute,
    # so every double quote in it must be HTML-escaped. The original replaced
    # '"' with itself (a no-op), which terminated srcdoc at the first quote
    # and broke the rendered iframe.
    iframe = """
        <iframe seamless style="width:1200px;height:620px;border:0" srcdoc="{}"></iframe>
    """.format(code.replace('"', '&quot;'))
    display(HTML(iframe))
# -
show_graph(tf.get_default_graph().as_graph_def())
# Awesome again! We have two constant input nodes, and apparently they are combined in a multiply node! <"Totally faints">
# ### adding variable nodes
#
# Ok, enough with the constants. Let's use variable nodes in our graph, so that we can go forward with something interesting.
#
# Let's try to do a simple linear regression.
#
# For this, we need:
#
# * two variables $w$ and $b$ that hold the slope and the intercept of the line
#
# * a placeholder $x$ that will hold our input data
#
# * a model $ym=wx+b$ that combines everything together
#
# * a loss function $l=\sum(y-ym)^2$ that evaluates how the predictions fit the actual data $y$
#
# Here's the full code in tensorflow:
# +
# Linear regression model ym = w*x + b with a sum-of-squares loss, built as
# TF1 graph nodes; placeholders are fed concrete data at sess.run time.
# components of the model
w = tf.Variable([.5])
b = tf.Variable([-.5])
# input data - we need to tell tensorflow the datatype!
x = tf.placeholder(tf.float32)
# actual data
y = tf.placeholder(tf.float32)
# linear model that produces predictions
ym = w*x + b
##### IMPORTANT
# we need to initialize variables before use!!!
init = tf.global_variables_initializer()
sess.run(init)
#####
# let's see the output of the model with some input data
print("linear predictions",sess.run(ym, {x:[0,1,2,3,4,5]}))
# now how good are we?
l = tf.reduce_sum(tf.square(ym-y))
print("loss =",sess.run(l, {x:[0,1,2,3,4,5], y:[0,0.3,0.6,0.9,1.2,1.5]}))
# -
# So the predictions are off, since the model parameters are, of course, not ideal. So let's change them using the `tf.assign` method, which changes already-initialized variables:
# Manually set w=0.3, b=0 (the exact fit for the y = 0.3*x data above) and
# re-evaluate the loss — it should now be (numerically) zero.
optw = tf.assign(w,[0.3])
optb = tf.assign(b,[0])
sess.run([optw,optb])
print("loss =",sess.run(l, {x:[0,1,2,3,4,5], y:[0,0.3,0.6,0.9,1.2,1.5]}))
# ## tensorflow basics: training something
#
# Of course, we would like to get $w,b$ automatically from the input data and the actual data! For this, we need to use some sort of optimization scheme in tensorflow.
#
# The most general (and simple) optimization scheme is gradient descent, so let's use this:
# +
# Learn w and b from the data with 1000 steps of gradient descent.
# choose the optimizer and the learning rate
optimizer = tf.train.GradientDescentOptimizer(0.01)
# determine the loss function to optimize
train = optimizer.minimize(l)
# this will return our variables to the initial state!!
sess.run(init)
for i in np.arange(1000):
    sess.run(train,{x:[0,1,2,3,4,5], y:[0,0.3,0.6,0.9,1.2,1.5]})
print("final parameters:",sess.run([w,b]))
print("final loss:",sess.run(l,{x:[0,1,2,3,4,5], y:[0,0.3,0.6,0.9,1.2,1.5]}))
# -
# That's better - after a few iterations, the optimizer has successfully converged and we get our optimal solutions.
#
# The computational graph of our problem so far, however, now looks vastly more complicated due to the inclusion of the gradient descent optimizer:
show_graph(tf.get_default_graph().as_graph_def())
|
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# language: python
# name: python3
# ---
# # Rationale
# We want to map the chemicals/medicines in the ZIB data to the [ATC ontology](http://bioportal.bioontology.org/ontologies/ATC/?p=summary). Here we explore how to get these mappings from the bioportal API.
import requests
import json
from itertools import chain
import pandas as pd
MAPPING_URL = 'http://data.bioontology.org/ontologies/ATC/mappings'
# NOTE(review): hard-coded API key committed to source — consider loading it
# from an environment variable or a config file, and rotating this key.
API_KEY = '8b5b7825-538d-40e0-9e9e-5ab9274a9aeb'
MAPPINGS_FILE = 'mappings.json'
# ### Inspect mapping data
# +
# Get some sample data (one page)
result = requests.get(MAPPING_URL, params={'apikey':API_KEY})
j = result.json()
display(j)
# -
# Each mapping in the page's 'collection' joins exactly two classes
len(j['collection'][1]['classes'])
# Every page has a "collection" of mappings. Every item in a collection contains a list 'classes' with 2 classes that are mapped by a 'source'.
# ## Fetch data from single page
def get_mappings_from_page(page):
    """Yield (source, atc_class, same_as_class) tuples for one result page.

    Each item in the page's 'collection' carries a 'source' label and a
    two-element 'classes' list whose '@id' fields identify the mapped
    ontology classes.
    """
    for entry in page['collection']:
        first, second = entry['classes'][0], entry['classes'][1]
        yield entry['source'], first['@id'], second['@id']
# Tabulate the sample page's mappings as (source, atc_class, same_as_class)
mappings = get_mappings_from_page(j)
pd.DataFrame(mappings, columns=['source', 'atc_class', 'same_as_class'])
|
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3 (ipykernel)
# language: python
# name: python3
# ---
# # Introduction to scikit-learn (sklearn)
# This notebook demonstrates some of the most useful functions of the beautiful scikit-learn library
#
# What we're going to cover:
#
# 0. An end-to-end scikit-learn workflow
# 1. Getting the data ready
# 2. Choose the right estimator/algorithm for our problems
# 3. Fit the model/algorithm and use it to make predictions on our data
# 4. Evaluating a model
# 5. Improve a model
# 6. Save and load a trained model
# 7. Putting it all together!
# # 0. An end-to-end scikit-learn workflow
import numpy as np
# 1. Get the data ready
import pandas as pd
# Load the heart-disease dataset; "target" is the label column
heart_disease=pd.read_csv("heart-disease.csv")
heart_disease
# +
# create X (features matrix)
X=heart_disease.drop("target",axis=1)
# create y (labels)
y=heart_disease["target"]
# -
pip install sklearn
# +
# choose the right model and hyperparameters
from sklearn.ensemble import RandomForestClassifier
clf=RandomForestClassifier(n_estimators=100)
# we'll keep the default hyperparameters
clf.get_params()
# +
# 3. Fit the model to the training data
from sklearn.model_selection import train_test_split
X_train, X_test, y_train, y_test=train_test_split(X,y,test_size=0.2)
# -
import sklearn
# trailing ";" suppresses the cell's output in the notebook
sklearn.show_versions();
clf.fit(X_train,y_train);
X_train
X_test
# make a prediction
y_pred=clf.predict(X_test)
y_pred
y_test
# 4. Evaluate the model on the training data and test data
clf.score(X_train, y_train)
clf.score(X_test,y_test)
# +
from sklearn.metrics import classification_report, confusion_matrix, accuracy_score
print(classification_report(y_test,y_pred))
# -
confusion_matrix(y_test,y_pred)
accuracy_score(y_test,y_pred)
# 5. Improve a model
# Try different amounts of n_estimators and report test accuracy for each
np.random.seed(42)
for i in range(10,100,10):
    print(f"Trying model with {i} estimators..")
    clf=RandomForestClassifier(n_estimators=i).fit(X_train,y_train)
    print(f"Model accuracy on test set: {clf.score(X_test,y_test)*100:.2f}%")
    print("")
# 6. Save a model and load it
import pickle

# Use context managers so the file handles are closed deterministically
# (the original bare open() calls leaked the handles).
with open("random_forest_model_1.plk", "wb") as f:
    pickle.dump(clf, f)
with open("random_forest_model_1.plk", "rb") as f:
    loaded_model = pickle.load(f)
loaded_model.score(X_test,y_test)
import warnings
# Restore the default warning behaviour for the rest of the notebook
warnings.filterwarnings("default")
# Standard imports
import numpy as np
import pandas as pd
import matplotlib.pyplot as plt
# %matplotlib inline
# ## 1. Getting our data ready to be used with machine learning
# Three main things we have to do:
# 1. Split the data into features and labels (usually 'X' & 'y')
# 2. Filling (also called imputing) or disregarding missing values
# 3. Converting non-numerical values to numerical values (also called feature encoding)
# Reload the heart-disease data and split it into features X and labels y
heart_disease=pd.read_csv("heart-disease.csv")
heart_disease
X=heart_disease.drop("target",axis=1)
X.head()
y=heart_disease["target"]
y.head()
# Split the data into training and test sets
from sklearn.model_selection import train_test_split
X_train,X_test,y_train,y_test=train_test_split(X,y,test_size=0.2)
X_train
X_train.shape, X_test.shape, y_train.shape, y_test.shape
# Sanity check: 80% of the rows should land in the training set
X.shape[0]*0.8
len(heart_disease)
242.4+61
# ### 1.1 Make sure it's all numerical
# Load the car-sales data; Make/Colour are strings, so they must be encoded
# to numbers before fitting a scikit-learn model.
car_sales=pd.read_csv("car-sales-extended.csv")
car_sales.head()
len(car_sales)
car_sales.dtypes
# +
# split into X/y
X=car_sales.drop("Price",axis=1)
y=car_sales["Price"]
# split into training and test
X_train, X_test, y_train, y_test= train_test_split(X,
                                                   y,
                                                   test_size=0.2)
# +
# Build machine learning model
from sklearn.ensemble import RandomForestRegressor
model=RandomForestRegressor()
model.fit(X_train,y_train)
model.score(X_test,y_test)
# +
# Turn the categories into numbers
from sklearn.preprocessing import OneHotEncoder
from sklearn.compose import ColumnTransformer
categorical_features=["Make","Colour","Doors"]
one_hot=OneHotEncoder()
transformer=ColumnTransformer([("one_hot",
                                one_hot,
                                categorical_features)],
                                remainder="passthrough")
transformed_X= transformer.fit_transform(X)
transformed_X
# -
X.head()
pd.DataFrame(transformed_X)
# pd.get_dummies is the pandas equivalent of one-hot encoding
dummies=pd.get_dummies(car_sales[["Make","Colour","Doors"]])
dummies
# +
# Let's refit the model
np.random.seed(42)
X_train, X_test, y_train, y_test=train_test_split(transformed_X,
                                                  y,
                                                  test_size=0.2)
model.fit(X_train,y_train)
# -
model.score(X_test,y_test)
# ### 1.2 What if there were missing values?
#
# 1. Fill them with some value (also known as imputation).
# 2. Remove the samples with missing data altogether.
# import car sales missing data
car_sales_missing=pd.read_csv("car-sales-extended-missing-data.csv")
car_sales_missing.head()
car_sales_missing
car_sales_missing["Doors"].value_counts()
car_sales_missing.isna().sum()
# Create X & y
X=car_sales_missing.drop("Price",axis=1)
y=car_sales_missing["Price"]
# +
# Let's try and convert our data to numbers
# (this is expected to fail while NaNs are still present)
from sklearn.preprocessing import OneHotEncoder
from sklearn.compose import ColumnTransformer
categorical_features=["Make","Colour","Doors"]
one_hot=OneHotEncoder()
transformer=ColumnTransformer([("one_hot",
                                one_hot,
                                categorical_features)],
                                remainder="passthrough")
transformed_X= transformer.fit_transform(X)
transformed_X
# -
# ### Option 1: Fill missing data with Pandas
# +
# Fill the "Make" column
car_sales_missing["Make"].fillna("missing", inplace=True)
# Fill the "colour" column
car_sales_missing["Colour"].fillna("missing",inplace=True)
# Fill the "Odometer (KM)" column with the column mean
car_sales_missing["Odometer (KM)"].fillna(car_sales_missing["Odometer (KM)"].mean(),inplace=True)
# Fill the "Door" column (4 doors is the most common value)
car_sales_missing["Doors"].fillna(4,inplace=True)
# -
# Check our dataframe again
car_sales_missing.isna().sum()
# Remove rows with missing Price value
car_sales_missing.dropna(inplace=True)
car_sales_missing.isna().sum()
len(car_sales_missing)
X=car_sales_missing.drop("Price",axis=1)
y=car_sales_missing["Price"]
# +
# Let's try and convert our data to numbers
# Turn the categories into numbers
from sklearn.preprocessing import OneHotEncoder
from sklearn.compose import ColumnTransformer
categorical_features=["Make","Colour","Doors"]
one_hot=OneHotEncoder()
transformer=ColumnTransformer([("one_hot",
                                one_hot,
                                categorical_features)],
                                remainder="passthrough")
# NOTE(review): this transforms the full dataframe (including Price) rather
# than X as in the earlier, otherwise-identical cell — verify this is intended
transformed_X= transformer.fit_transform(car_sales_missing)
transformed_X
# -
# -
# ### Option 2: Fill missing values with Scikit-learn
import pandas as pd
# Reload the raw data so the sklearn imputation path starts from scratch
car_sales_missing=pd.read_csv("car-sales-extended-missing-data.csv")
car_sales_missing.head()
car_sales_missing.isna().sum()
# Rows with a missing label (Price) can't be used for supervised learning
car_sales_missing.dropna(subset=["Price"],inplace=True)
car_sales_missing.isna().sum()
# Split into X & Y
X=car_sales_missing.drop("Price",axis=1)
y=car_sales_missing["Price"]
X
y
# +
# Fill missing values with Scikit-Learn
from sklearn.impute import SimpleImputer
from sklearn.compose import ColumnTransformer
# Fill categorical values with 'missing' and numerical values with mean
cat_imputer=SimpleImputer(strategy="constant",fill_value="missing")
door_imputer=SimpleImputer(strategy="constant",fill_value=4)
num_imputer=SimpleImputer(strategy="mean")
#Define columns
cat_features=["Make","Colour"]
door_features=["Doors"]
num_features=["Odometer (KM)"]
#Create an imputer (something that fills missing data)
imputer=ColumnTransformer([(
    "cat_imputer",cat_imputer,cat_features),
    ("door_imputer",door_imputer,door_features),
    ("num_imputer",num_imputer,num_features)])
#Transform the data
filled_X=imputer.fit_transform(X)
filled_X
# -
# Wrap the imputed array back into a DataFrame with the original column names
car_sales_filled=pd.DataFrame(filled_X,
                              columns=["Make","Colour","Doors","Odometer (KM)"])
car_sales_filled.head()
car_sales_filled.isna().sum()
# +
#Turn the categories into numbers
from sklearn.preprocessing import OneHotEncoder
from sklearn.compose import ColumnTransformer
categorical_features=["Make","Colour","Doors"]
one_hot=OneHotEncoder()
transformer=ColumnTransformer([("one_hot",
                                one_hot,
                                categorical_features)],
                                remainder="passthrough")
transformed_X=transformer.fit_transform(car_sales_filled)
transformed_X
# +
# Now we've got our data as numbers and filled (no missing values)
# Let's fit a model
import numpy as np
np.random.seed(42)
from sklearn.ensemble import RandomForestRegressor
from sklearn.model_selection import train_test_split
X_train, X_test, y_train, y_test=train_test_split(transformed_X,
                                                  y,
                                                  test_size=0.2)
model=RandomForestRegressor(n_estimators=100)
model.fit(X_train,y_train)
model.score(X_test,y_test)
# -
car_sales=pd.read_csv("car-sales-extended.csv")
len(car_sales_filled),len(car_sales)
# ## 2. Choosing the right estimator/algorithm for your problem
#
# Some things to note:
# * Sklearn refers to machine learning model, algorithms as estimators
# * Classification problem - predicting a category (heart disease or not)
# * Sometimes you'll see 'clf' (short for classifier) used as a classification estimators
# * Regression problem - predicting a number (selling price of a car)
#
# If you're working on a machine learning problem, looking to use Sklearn, and not sure what model you should use, refer to the
# sklearn machine learning map: https://scikit-learn.org/stable/tutorial/machine_learning_map/index.html
#
# ### 2.1 Picking a machine learning model for a regression problem
#
# Lets's use the California Housing dataset.
# Get California Housing dataset
from sklearn.datasets import fetch_california_housing
housing=fetch_california_housing()
housing_df=pd.DataFrame(housing["data"],columns=housing["feature_names"])
housing_df.head(100)
# Add the target (median house value, in $100,000s) as the "target" column
housing_df["target"]=housing["target"]
housing_df.head()
# BUG FIX: "MedHouseVal" is not among feature_names (the target was stored in
# the "target" column above), so dropping it unconditionally raised KeyError.
# errors="ignore" keeps the original intent — drop the column if present.
housing_df=housing_df.drop("MedHouseVal",axis=1,errors="ignore")
housing_df.head(10)
# +
# Baseline regression model: Ridge (linear regression with L2 regularization)
# import algorithm/estimator
from sklearn.linear_model import Ridge
# setup random seed
np.random.seed(42)
# Create the data
X=housing_df.drop("target",axis=1)
y=housing_df["target"] #median house price in $100,000s
# split into train and test sets
X_train, X_test, y_train, y_test=train_test_split(X,y,test_size=0.2)
# instantiate and fit the model (on the training set)
model=Ridge()
model.fit(X_train,y_train)
model.score(X_test,y_test)
# -
# What if Ridge didn't work or the score didn't fit our needs?
#
# Well, we could always try a different model...
#
# How about we try an ensemble model (an ensemble is a combination of smaller models that tries to make better predictions than just a single model)?
#
# sklearn's ensemble models can be found here: https://scikit-learn.org/stable/modules/ensemble.html
# +
# Same regression problem with an ensemble model for comparison against Ridge
# import the RandomForestRegressor model class from the ensemble module
from sklearn.ensemble import RandomForestRegressor
# setup random seed
np.random.seed(42)
#create the data
X=housing_df.drop("target",axis=1)
y=housing_df["target"]
#Split into train and test sets
X_train, X_test, y_train, y_test=train_test_split(X,y,test_size=0.2)
# create random forest model
model=RandomForestRegressor(n_estimators=100)
model.fit(X_train,y_train)
# check the score of the model (on the test set)
model.score(X_test, y_test)
# -
# ## 2.2 Picking a machine learning model for a classification problem
# Reload the classification dataset for the model-picking examples below
heart_disease=pd.read_csv("heart-disease.csv")
heart_disease.head()
len(heart_disease)
# Consulting the map and it says to try 'LinearSVC'.
# +
# Import the linearSVC estimator class
from sklearn.svm import LinearSVC
# Setup random seed
np.random.seed(42)
# make the data
X=heart_disease.drop("target",axis=1)
y=heart_disease["target"]
# split the data
X_train, X_test, y_train, y_test=train_test_split(X,y,test_size=0.2)
# instantiate LinearSVC
clf=LinearSVC()
clf.fit(X_train, y_train)
# evaluate the linearSVC
clf.score(X_test, y_test)
# -
# Class balance of the labels (for judging whether accuracy is meaningful)
heart_disease["target"].value_counts()
# +
# Import the RandomForestClassifier estimator class
from sklearn.ensemble import RandomForestClassifier
# Setup random seed
np.random.seed(42)
# make the data
X=heart_disease.drop("target",axis=1)
y=heart_disease["target"]
# split the data
X_train, X_test, y_train, y_test=train_test_split(X,y,test_size=0.2)
# instantiate RandomForestClassifier
clf=RandomForestClassifier(n_estimators=100)
clf.fit(X_train, y_train)
# evaluate the RandomForestClassifier
clf.score(X_test, y_test)
# -
# Tidbit:
# 1. if you have structured data, use ensemble methods
# 2. if you have unstructured data, use deep learning or transfer learning
heart_disease.head()
# ## 3. Fit the model/algorithm on our data and use it to make predictions
#
# ### 3.1 Fitting the model to the data
#
# Different names for:
#
# * X= features, features variables, data
# * y=labels, targets, target variables
# +
# Import the RandomForestClassifier estimator class
from sklearn.ensemble import RandomForestClassifier
from sklearn.model_selection import train_test_split
# Setup random seed
np.random.seed(42)
# make the data
X=heart_disease.drop("target",axis=1)
y=heart_disease["target"]
# split the data
X_train, X_test, y_train, y_test=train_test_split(X,y,test_size=0.2)
# instantiate RandomForestClassifier
clf=RandomForestClassifier(n_estimators=100)
#Fit the model to the data (training the machine learning model)
clf.fit(X_train, y_train)
# evaluate the RandomForestClassifier (use the patterns the model has learned)
clf.score(X_test, y_test)
# -
X.head()
y.tail()
# ### 3.2 Make predictions using a machine learning model
#
# 2 ways to make predictions
# 1. predict()
# 2. predict_proba()
# Use a trained model to make predictions
clf.predict(np.array([1,7,8,3,4])) #this doesn't work..
X_test.head()
X_test.shape
clf.predict(X_test)
y_test
np.array(y_test)
# Compare predictions to truth labels to evaluate the model
y_preds=clf.predict(X_test)
np.mean(y_preds==y_test)
clf.score(X_test,y_test)
from sklearn.metrics import accuracy_score
accuracy_score(y_test,y_preds)
# +
# predict_proba() returns probabilities of a classufication label
# -
# make predictions with predict_proba()
clf.predict_proba(X_test[:5])
# Let's predict() on the same data
# index del valor mayor
clf.predict(X_test[:5])
X_test[:5]
heart_disease["target"].value_counts()
# predict() can also be used for regression models.
housing_df.head()
# +
from sklearn.ensemble import RandomForestRegressor
np.random.seed(42)
# Create the data
X=housing_df.drop("target",axis=1)
y=housing_df["target"]
# split into training and test sets
X_train, X_test, y_train, y_test=train_test_split(X,y,test_size=0.2)
# create model instance
model= RandomForestRegressor(n_estimators=100)
# fit the model to the data
model.fit(X_train,y_train)
# make predictions
y_preds=model.predict(X_test)
# -
y_preds[:10]
np.array(y_test[:10])
# Compare the predictions to the truth
from sklearn.metrics import mean_absolute_error
mean_absolute_error(y_test,y_preds)
housing_df["target"].head()
# ## 4. Evaluating a machine learning model
#
# Three ways to evaluate Scikit-learn models/estimators:
# 1. Estimator's built-in 'score()' method
# 2. The 'scoring' parameter
# 3. Problem specific metric functions
# ### 4.1 evaluating a model with 'score' method
from sklearn.ensemble import RandomForestClassifier
from sklearn.ensemble import RandomForestRegressor
from sklearn.model_selection import train_test_split
# +
import pandas as pd
import numpy as np
heart_disease=pd.read_csv("heart-disease.csv")
np.random.seed(42)
heart_disease.head()
# +
# Create X & y
X=heart_disease.drop("target",axis=1)
y=heart_disease["target"]
# Create train/test
X_train, X_test, y_train, y_test=train_test_split(X,y,test_size=0.2)
# Instantiate Random Forest Classifier
clf=RandomForestClassifier(n_estimators=100)
# fit the model to the data (training the machine learning model)
clf.fit(X_train,y_train)
# -
# The highest value from .score() method is 1.0, the lowest is 0.0
clf.score(X_train,y_train)
clf.score(X_test,y_test)
# Let's use the .score() method on our regression problem...
# Get California Housing dataset
from sklearn.datasets import fetch_california_housing
housing=fetch_california_housing()
housing_df=pd.DataFrame(housing["data"],columns=housing["feature_names"])
housing_df.head()
# +
np.random.seed(42)
housing_df["target"]=housing["target"]
X=housing_df.drop("target",axis=1)
y=housing_df["target"]
X_train,X_test,y_train,y_test=train_test_split(X,y,test_size=0.2)
model=RandomForestRegressor(n_estimators=100)
model.fit(X_train,y_train)
# -
# the default score() evaluation metric is r_squared for regression algorithms
# the highest =1.0 the lowest 0.0
model.score(X_test,y_test)
model.score(X_test,y_test)
housing_df.head()
y_test.head()
y_test.mean()
# # 4.2 Evaluating a model using a 'scoring' parameter
# +
from sklearn.model_selection import cross_val_score
np.random.seed(42)
X=heart_disease.drop("target",axis=1)
y=heart_disease["target"]
# Create train/test
X_train, X_test, y_train, y_test=train_test_split(X,y,test_size=0.2)
# Instantiate Random Forest Classifier
clf=RandomForestClassifier(n_estimators=100)
# fit the model to the data (training the machine learning model)
clf.fit(X_train,y_train);
# -
clf.score(X_test,y_test)
# cross_val_score refits the model on each of the cv folds and returns one
# score per fold
cross_val_score(clf,X,y,cv=5)
cross_val_score(clf,X,y,cv=10)
# +
np.random.seed(42)
# single training and test split score
clf_single_score=clf.score(X_test,y_test)
# take the mean of 5-fold cross validation score
clf_cross_val_score=np.mean(cross_val_score(clf,X,y,cv=5))
#compare two
clf_single_score,clf_cross_val_score
# -
# default scoring parameter of classifier = mean accuracy
# BUG FIX: .score() requires the data to score on; calling it with no
# arguments raises TypeError.
clf.score(X_test,y_test)
# scoring parameter set to None for default (mean accuracy for classifiers)
cross_val_score(clf,X,y,cv=5,scoring=None)
# ### Classifications model evaluation metrics
# 1. Accuracy
# 2. Area under ROC curve
# 3. Confusion matrix
# 4. Classification report
# +
from sklearn.model_selection import cross_val_score
from sklearn.ensemble import RandomForestClassifier
np.random.seed(42)
X=heart_disease.drop("target",axis=1)
y=heart_disease["target"]
clf=RandomForestClassifier(n_estimators=100)
# FIX: store the fold scores under their own name instead of rebinding
# "cross_val_score", which shadowed the imported function and would break
# any later call to it.
cv_scores=cross_val_score(clf,X,y,cv=5)
# -
np.mean(cv_scores)
print(f"heart disease Classifier Cross-Validated Accuracy: {np.mean(cv_scores)*100:.2f}%")
# **Area under the receiver operating characteristic curve (AUC/ROC)**
#
# * Area under curve (AUC)
# * ROC curve
#
# ROC curves are a comparison of a model's true positive rate (tpr) versus the model's false positive rate (fpr).
#
# * True positive = model predicts 1 when truth is 1
# * False positive = model predicts 1 when truth is 0
# * True negative = model predicts 0 when truth is 0
# * False negative = model predicts 0 when truth is 1
# +
# create X_test...
X_train,X_test,y_train,y_test=train_test_split(X,y,test_size=0.2)
# +
from sklearn.metrics import roc_curve
#fit the classifier
clf.fit(X_train,y_train)
# make predictions with probabilities
y_probs=clf.predict_proba(X_test)
y_probs[:10],len(y_probs)
# -
# keep only the probability of the positive class (column 1)
y_probs_positive=y_probs[:,1]
y_probs_positive[:10]
# +
# calculate fpr, tpr and thresholds
fpr, tpr, thresholds=roc_curve(y_test,y_probs_positive)
# check the false positive rates
fpr
# +
# create a function for plotting ROC curve
import matplotlib.pyplot as plt
def plot_roc_curve(fpr, tpr):
    """Draw a ROC curve from a model's false positive rates (fpr)
    and true positive rates (tpr), plus a diagonal no-skill baseline.
    """
    # Model's ROC curve.
    plt.plot(fpr, tpr, color="orange", label="ROC")
    # Diagonal baseline: what a classifier with no predictive power looks like.
    plt.plot([0, 1], [0, 1], color="darkblue", linestyle="--", label="Guessing")
    # Axis labels, title and legend.
    plt.xlabel("False positive rate (fpr)")
    plt.ylabel("True positive rate (tpr)")
    plt.title("Receiver operating characteristic (ROC) curve")
    plt.legend()
    plt.show()
plot_roc_curve(fpr,tpr)
# +
from sklearn.metrics import roc_auc_score
roc_auc_score(y_test,y_probs_positive)
# -
# plot perfect roc curve and AUC score
fpr,tpr,thresholds=roc_curve(y_test,y_test)
plot_roc_curve(fpr,tpr)
# perfect AUC score
roc_auc_score(y_test,y_test)
# **Confusion matrix**
#
# A confusion matrix is a quick way to compare the labels a model predicts with the actual labels it was supposed to predict.
#
# In essence, it gives you an idea of where the model is getting confused.
#
#
# +
from sklearn.metrics import confusion_matrix
y_preds=clf.predict(X_test)
confusion_matrix(y_test,y_preds)
# +
# visualize confusing matrix with pd.crosstab()
pd.crosstab(y_test,y_preds,rownames=["Actual labels"],colnames=["Predicted labels"])
# -
24+5+3+29
len(X_test)
# +
# make confusion matrix more visual with Seaborn's heatmap()
import seaborn as sns
# set the font scale
sns.set(font_scale=1.5)
# create a confusion matrix
conf_mat=confusion_matrix(y_test,y_preds)
# plot it using seaborn
sns.heatmap(conf_mat)
# -
# # Creating a confusion matrix using Scikit-Learn
import sklearn
sklearn.__version__
clf
from sklearn.metrics import ConfusionMatrixDisplay
ConfusionMatrixDisplay.from_estimator(estimator=clf,X=X,y=y);
ConfusionMatrixDisplay.from_predictions(y_true=y_test,y_pred=y_preds);
# **Classification report**
# +
from sklearn.metrics import classification_report
print(classification_report(y_test,y_preds))
# +
# where precision and recall become valuable
disease_true=np.zeros(10000)
disease_true[0]=1 # only one positive case
disease_preds=np.zeros(10000) #models predict every case as 0
pd.DataFrame(classification_report(disease_true,disease_preds,output_dict=True))
# -
# ## 4.2.2 Regression model evaluation metrics
#
# The ones we're going to cover are:
# 1. R^2 (pronounced r-squared) or coefficient of determination
# 2. Mean absolute error (MAE)
# 3. Mean squared error (MSE)
# +
from sklearn.ensemble import RandomForestRegressor
np.random.seed(42)
X=housing_df.drop("target",axis=1)
y=housing_df["target"]
X_train,X_test,y_train,y_test=train_test_split(X,y,test_size=0.2)
model=RandomForestRegressor(n_estimators=100)
model.fit(X_train,y_train)
# -
model.score(X_test,y_test)
housing_df.head()
y_test[:5]
y_test.mean()
# +
from sklearn.metrics import r2_score
# fill an array with y_test mean
y_test_mean=np.full(len(y_test),y_test.mean())
# -
y_test_mean[:10]
r2_score(y_true=y_test,
y_pred=y_test_mean)
r2_score(y_true=y_test,
y_pred=y_test)
# **Mean absolute error (MAE)**
#
# MAE is the average of the absolute differences between predictions and actual values.
#
# It gives you idea of how wrong your models predictions are.
# +
# MAE
from sklearn.metrics import mean_absolute_error
y_preds=model.predict(X_test)
mae=mean_absolute_error(y_test,y_preds)
mae
# -
y_preds
y_test[:20]
df=pd.DataFrame(data={"actual values":y_test,
"predicted values":y_preds})
df["differences"]=df["predicted values"] -df["actual values"]
df.head()
df["differences"].mean()
#MAE using formulas and differences
np.abs(df.differences).mean()
# Get California Housing dataset
from sklearn.datasets import fetch_california_housing
housing=fetch_california_housing()
import pandas as pd
housing_df=pd.DataFrame(housing["data"],columns=housing["feature_names"])
# +
from sklearn.ensemble import RandomForestRegressor
import numpy as np
from sklearn.model_selection import train_test_split
np.random.seed(42)
housing_df["target"]=housing["target"]
X=housing_df.drop("target",axis=1)
y=housing_df["target"]
X_train,X_test,y_train,y_test=train_test_split(X,y,test_size=0.2)
model=RandomForestRegressor(n_estimators=100)
model.fit(X_train,y_train)
# -
# **Mean squared error (MSE)**
#
# MSE is the mean of the squared of the errors between actual and predicted values.
# +
# Mean squared error
from sklearn.metrics import mean_squared_error
y_preds=model.predict(X_test)
mse=mean_squared_error(y_test,y_preds)
mse
# -
df["squared differences"]=np.square(df["differences"])
df.head()
# calculate msi by hand
squared=np.square(df["differences"])
squared.mean()
df.iloc[0]["squared differences"].mean()
# ### 4.2.3 Finally using the scoring parameter
# +
from sklearn.model_selection import cross_val_score
from sklearn.ensemble import RandomForestClassifier
heart_disease=pd.read_csv("heart-disease.csv")
np.random.seed(42)
X=heart_disease.drop("target",axis=1)
y=heart_disease["target"]
clf=RandomForestClassifier(n_estimators=100)
# +
np.random.seed(42)
#cross validation accuracy
cv_acc=cross_val_score(clf,X,y,cv=5,scoring=None) #if scoring =None, estimator's default scoring evaluating metric is used(accuracy for classification models)
cv_acc
# -
# cross validation accuracy
print(f"The cross validated accuracy is: {np.mean(cv_acc)*100:.2f}%")
np.random.seed(42)
cv_acc=cross_val_score(clf,X,y,cv=5, scoring="accuracy")
cv_acc
# # Precision
np.random.seed(42)
cv_precision=cross_val_score(clf,X,y,cv=5, scoring="precision")
cv_precision
print(f"The cross-validated precision is: {np.mean(cv_precision)}")
# # Recall
np.random.seed(42)
cv_recall=cross_val_score(clf,X,y,cv=5,scoring="recall")
cv_recall
print(f"The cross-validated recall is: {np.mean(cv_recall)}")
# Let's see the scoring parameter being using for a regression problem...
# +
from sklearn.model_selection import cross_val_score
from sklearn.ensemble import RandomForestRegressor
np.random.seed(42)
X=housing_df.drop("target",axis=1)
y=housing_df["target"]
model=RandomForestRegressor(n_estimators=100)
# -
np.random.seed(42)
cv_r2=cross_val_score(model,X,y,cv=3,scoring=None)
np.mean(cv_r2)
cv_r2
# Mean squared error
cv_mse=cross_val_score(model,X,y,cv=3,scoring="neg_mean_squared_error")
np.mean(cv_mse)
cv_mse
# Mean absolute error
cv_mae=cross_val_score(model,X,y,cv=3,scoring="neg_mean_absolute_error")
np.mean(cv_mae)
cv_mae
# ### 4.3 Using different evaluation metrics as Scikit-learn functions
#
# The 3rd way to evaluate scikit-learn machine learning models/estimators is to use the 'sklearn.metrics' module
# +
from sklearn.metrics import accuracy_score,precision_score,recall_score,f1_score
from sklearn.ensemble import RandomForestClassifier
from sklearn.model_selection import train_test_split
import numpy as np
np.random.seed(42)
#create X and y
heart_disease=pd.read_csv("heart-disease.csv")
X=heart_disease.drop("target",axis=1)
y=heart_disease["target"]
#split the data
X_train,X_test,y_train,y_test=train_test_split(X,y,test_size=0.2)
#create model
clf=RandomForestClassifier()
#fit the model
clf.fit(X_train,y_train)
#make predictions
y_preds=clf.predict(X_test)
#evaluate model using evaluation functions
print("Classifier metrics on the test set")
print(f"Accuracy: {accuracy_score(y_test,y_preds)*100:.2f}%")
print(f"Precison: {precision_score(y_test,y_preds)*100:.2f}%")
print(f"Recall: {recall_score(y_test,y_preds)*100:.2f}%")
print(f"F1: {f1_score(y_test,y_preds)*100:.2f}%")
# +
from sklearn.metrics import r2_score,mean_absolute_error,mean_squared_error
from sklearn.ensemble import RandomForestRegressor
from sklearn.model_selection import train_test_split
np.random.seed(42)
#create X and y
X=heart_disease.drop("target",axis=1)
y=heart_disease["target"]
#split the data
X_train,X_test,y_train,y_test=train_test_split(X,y,test_size=0.2)
#create model
model=RandomForestRegressor()
#fit the model
model.fit(X_train,y_train)
#make predictions
y_preds=model.predict(X_test)
#evaluate model using evaluation functions
print("Regression metrics on the test set")
print(f"R2 score: {r2_score(y_test,y_preds)}")
print(f"MAE: {mean_absolute_error(y_test,y_preds)}")
print(f"MSE: {mean_squared_error(y_test,y_preds)}")
# -
# ## 5. Improving a model
#
# First predictions=baseline predictions.
# First model = baseline model.
#
# From a data perspective
# * Could we collect more data? (generally, the more data, the better)
# * Could we improve our data?
#
# From a model perspective:
# * Is there a better model we could use?
# * Could we improve the current model?
#
# Hyperparameters vs. parameters
# * Parameters = patterns the model finds in the data on its own
# * Hyperparameters = settings on a model you can adjust to (potentially) improve its ability to find patterns
#
# Three ways to adjust hyperparameters:
# 1. By hand
# 2. Randomly with RandomSearchCV
# 3. Exhaustively with GridSearchCV
from sklearn.ensemble import RandomForestClassifier
clf=RandomForestClassifier()
clf.get_params()
# ### 5.1 Tuning hyperparameters by hand
#
# Let's make 3 sets, training, validation and test.
clf.get_params()
# We're going to try and adjust.
#
# * `max_depth`
# * `max_features`
# * `min_samples_leaf`
# * `min_samples_split`
# * `n_estimators`
from sklearn.metrics import accuracy_score,precision_score,recall_score,f1_score
def evaluate_preds(y_true, y_preds):
    """
    Perform an evaluation comparison of y_true labels vs. y_preds labels
    for a binary classification problem.

    Prints accuracy, precision, recall and F1, and returns the four
    metrics rounded to 2 decimals in a dict keyed by metric name.
    """
    accuracy = accuracy_score(y_true, y_preds)
    precision = precision_score(y_true, y_preds)
    recall = recall_score(y_true, y_preds)
    f1 = f1_score(y_true, y_preds)
    metric_dict = {"accuracy": round(accuracy, 2),
                   "precision": round(precision, 2),
                   "recall": round(recall, 2),
                   "f1": round(f1, 2)}
    print(f"Acc: {accuracy*100:.2f}%")
    # Fix: precision was scaled by 100 but printed without a '%' sign,
    # which read as a raw (0-1) score two orders of magnitude too large.
    print(f"Precision: {precision*100:.2f}%")
    print(f"Recall: {recall:.2f}")
    print(f"F1 score: {f1:.2f}")
    return metric_dict
from sklearn.model_selection import train_test_split
# +
import pandas as pd
heart_disease=pd.read_csv("heart-disease.csv")
heart_disease.head()
# +
from sklearn.ensemble import RandomForestClassifier
import numpy as np
np.random.seed(42)
# shuffle the data
heart_disease_shuffled=heart_disease.sample(frac=1)
# split into X & y
X=heart_disease_shuffled.drop("target",axis=1)
y=heart_disease_shuffled["target"]
# split the data into train, validation
train_split=round(0.7*len(heart_disease_shuffled)) # 70% of data
valid_split=round(train_split+0.15*len(heart_disease_shuffled)) # 15% of data
X_train,y_train=X[:train_split],y[:train_split]
X_valid,y_valid=X[train_split:valid_split],y[train_split:valid_split]
X_test,y_test=X[valid_split:],y[valid_split:]
len(X_train),len(X_valid),len(X_test)
clf=RandomForestClassifier()
clf.fit(X_train,y_train)
# Make baseline predictions
y_preds= clf.predict(X_valid)
# Evaluate the classifier on validation set
baseline_metrics=evaluate_preds(y_valid,y_preds)
baseline_metrics
# +
np.random.seed(42)
clf_2=RandomForestClassifier(n_estimators=100)
clf_2.fit(X_train,y_train)
# make predicitions
y_preds2=clf_2.predict(X_valid)
# evaluate the second classifier
clf_2_metrics=evaluate_preds(y_valid,y_preds2)
# -
# ### 5.2 Hyperparameters tuning with RandomizedSearchCV
# +
from sklearn.model_selection import RandomizedSearchCV
import numpy as np
grid={"n_estimators":[10,100,200,500,1000,1200],
"max_depth":[None,5,10,20,30],
"max_features":["auto","sqrt"],
"min_samples_split":[2,4,6],
"min_samples_leaf":[1,2,6]}
np.random.seed(42)
# split into X & y
X=heart_disease_shuffled.drop("target",axis=1)
y=heart_disease_shuffled["target"]
# split into train and test sets
X_train,X_test,y_train,y_test=train_test_split(X,y,test_size=0.2)
# Instantiate RandomForestClassifier
clf=RandomForestClassifier(n_jobs=1)
# setup RandomizedSearchCV
rs_clf=RandomizedSearchCV(estimator=clf,
param_distributions=grid,
n_iter=10,
cv=5,
verbose=2)
# fit the RandomizedSearchCV version of clf
rs_clf.fit(X_train,y_train)
# -
rs_clf.best_params_
# +
# make predictions with the best hyperparameters
rs_y_preds=rs_clf.predict(X_test)
# EVALUATE The predictions
rs_metrics=evaluate_preds(y_test,rs_y_preds)
# -
# ### 5.3 Hyperparameters tuning with GridSearchCV
grid
grid_2={"n_estimators":[100,200,500],
"max_depth":[None],
"max_features":["auto","sqrt"],
"min_samples_split":[6],
"min_samples_leaf":[1,2]}
# +
from sklearn.model_selection import GridSearchCV, train_test_split
np.random.seed(42)
# split into X & y
X=heart_disease_shuffled.drop("target",axis=1)
y=heart_disease_shuffled["target"]
# split into train and test sets
X_train,X_test,y_train,y_test=train_test_split(X,y,test_size=0.2)
# Instantiate RandomForestClassifier
clf=RandomForestClassifier(n_jobs=1)
# setup GridSearchCV
gs_clf=GridSearchCV(estimator=clf,
param_grid=grid_2,
cv=5,
verbose=2)
# fit the GridSearchCV version of clf
gs_clf.fit(X_train,y_train)
# -
gs_clf.best_params_
# +
gs_y_preds=gs_clf.predict(X_test)
# evaluate the predictions
gs_metrics=evaluate_preds(y_test,gs_y_preds)
# -
# Let's compare our different models metric
import matplotlib.pyplot as plt
compare_metrics=pd.DataFrame({"baseline":baseline_metrics,
"clf_2":clf_2_metrics,
"random search":rs_metrics,
"grid search":gs_metrics})
compare_metrics.plot.bar(figsize=(10,8));
# ## 6 Saving and loading machine learning models
#
# Two ways to save and load machine learning models:
# 1. With python's `pickle` module
# 2. With the `joblib` module
#
# **Pickle**
# +
import pickle
# save an existing model to file
pickle.dump(gs_clf,open("gs_random_random_forest_model_1.plk","wb"))
# -
# laod a save model
loaded_pickle_model=pickle.load(open("gs_random_random_forest_model_1.plk","rb"))
# make some predictions
pickle_y_preds=loaded_pickle_model.predict(X_test)
evaluate_preds(y_test,pickle_y_preds)
# **Joblib**
# +
from joblib import dump,load
# save model to file
dump(gs_clf,filename="gs_random_forest_model_1.joblib")
# -
# import a save joblib model
loaded_job_model=load(filename="gs_random_forest_model_1.joblib")
# make and evaluate joblib predictions
joblib_y_preds=loaded_job_model.predict(X_test)
evaluate_preds(y_test,joblib_y_preds)
# ## 7. Putting all together!
data=pd.read_csv("car-sales-extended-missing-data.csv")
data.head()
data.dtypes
data.isna().sum()
# Steps we want to do (all in one cell):
# 1. Fill missing data
# 2. Convert data to numbers
# 3. Build a model on data
# +
# getting data ready
import pandas as pd
from sklearn.compose import ColumnTransformer
from sklearn.pipeline import Pipeline
from sklearn.impute import SimpleImputer
from sklearn.preprocessing import OneHotEncoder
# modelling
from sklearn.ensemble import RandomForestRegressor
from sklearn.model_selection import train_test_split,GridSearchCV
# setup random seed
import numpy as np
np.random.seed(42)
# import data and drop rows with the missing labels
data=pd.read_csv("car-sales-extended-missing-data.csv")
data.dropna(subset=["Price"],inplace=True)
# define different features on transformer pipeline
categorical_features=["Make","Colour"]
categorical_transformer=Pipeline(steps=[
("imputer",SimpleImputer(strategy="constant",fill_value="missing")),
("onehot",OneHotEncoder(handle_unknown="ignore"))])
door_feature=["Doors"]
door_transformer=Pipeline(steps=[
("imputer",SimpleImputer(strategy="constant",fill_value=4))
])
numerical_features=["Odometer (KM)"]
numerical_transformer=Pipeline(steps=[
("imputer",SimpleImputer(strategy="mean"))
])
# setup the preprocessing steps(fill missing values, then convert to numbers)
preprocessor=ColumnTransformer( transformers=[
("cat",categorical_transformer,categorical_features),
("door",door_transformer,door_feature),
("num",numerical_transformer,numerical_features)
])
# create a preprocessing and modelling pipeline
model=Pipeline(steps=[("preprocessor",preprocessor),
("model",RandomForestRegressor())])
# split data
X = data.drop("Price",axis=1)
y = data["Price"]
X_train,X_test,y_train,y_test=train_test_split(X,y,test_size=0.2)
# fit and score model
model.fit(X_train,y_train)
model.score(X_test,y_test)
# -
data.isna().sum()
# it's also possible to use `GridSearchCV` or `RandomizedSearchCV` with our `Pipeline`
# +
# use GridSearchCV with our regression Pipeline
from sklearn.model_selection import GridSearchCV
pipe_grid={
"preprocessor__num__imputer__strategy":["mean","median"],
"model__n_estimators":[100,1000],
"model__max_depth":[None,5],
"model__max_features":["auto"],
"model__min_samples_split":[2,4]
}
gs_model=GridSearchCV(model,pipe_grid,cv=5,verbose=2)
gs_model.fit(X_train,y_train)
# -
gs_model.score(X_test,y_test)
# +
import pickle
# save an existing model to file
pickle.dump(gs_model,open("final_example.plk","wb"))
|
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# language: python
# name: python3
# ---
# # MySQL 테이블을 생성하고 데이터를 넣기
# +
import os
import pandas as pd
import MySQLdb
# +
DATABASE_HOST = "localhost" # "localhost" == "127.0.0.1"
DATABASE_USERNAME = os.environ.get("DATABASE_USERNAME", "YOUR_USERNAME")
DATABASE_PASSWORD = os.environ.get("DATABASE_PASSWORD", "YOUR_PASSWORD")
DATABASE_NAME = "fastcampus_data_science_db"
# -
connection = MySQLdb.connect(
DATABASE_HOST,
DATABASE_USERNAME,
DATABASE_PASSWORD,
DATABASE_NAME,
charset='utf8',
)
# +
connection
cursor = connection.cursor()
# +
SQL_QUERY = """
CREATE TABLE IF NOT EXISTS fastroom
(
email varchar(255),
phonenumber varchar(255),
address varchar(255),
deposit int,
rent int
);
"""
cursor.execute(SQL_QUERY)
# +
SQL_QUERY = """
SHOW TABLES;
"""
cursor.execute(SQL_QUERY)
cursor.fetchall()
# +
SQL_QUERY = """
SELECT *
FROM fastroom;
"""
pd.read_sql(SQL_QUERY, connection)
# +
# 지금까지 추가된 row 데이터 제거
# SQL_QUERY
SQL_QUERY = """
DELETE FROM fastroom;
"""
cursor.execute(SQL_QUERY)
# +
SQL_QUERY = """
INSERT INTO fastroom (email, phonenumber, address, deposit, rent)
VALUES (
"dobestan@gmail.com",
"010-2220-5736",
"서울시 강남구 논현1동 2-9 대기빌딩 1,4층",
1000,
50
);
"""
cursor.execute(SQL_QUERY)
# +
SQL_QUERY = """
SELECT *
FROM fastroom;
"""
pd.read_sql(SQL_QUERY, connection)
|
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# language: python
# name: python3
# ---
import numpy as np
import pandas as pd
types = {
'age': np.float64,
'bp': np.float64,
'sg': 'category',
'al': 'category',
'su': 'category',
'rbc': 'category',
'pc': 'category',
'pcc': 'category',
'ba': 'category',
'bgr': np.float64,
'bu': np.float64,
'sc': np.float64,
'sod': np.float64,
'pot': np.float64,
'hemo': np.float64,
'pcv': np.float64,
'wbcc': np.float64,
'rbcc': np.float64,
'htn': 'category',
'dm': 'category',
'cad': 'category',
'appet': 'category',
'pe': 'category',
'ane': 'category',
'class': 'category',
}
# Load the kidney-disease dataset; '?' marks missing values.
df = pd.read_csv(
    'kd.csv',
    sep=r',\s*',       # fix: raw string — '\s' in a plain string is an invalid escape
    engine='python',   # fix: regex separators require the python parsing engine
    na_values='?',
    dtype=types,
)
ckd = df[ df['class']=='ckd' ]
assert len(ckd) == 250, 'different number of affected patients than claimed'
# Basic statistics about patients with CKD.
ckd.describe()
# Correlation matrix for patients with CKD.
# +
import matplotlib.pyplot as plt
# correlation matrix
cm = ckd.corr()
# plot correlation matrix
labels = cm.axes[0]
fig = plt.figure()
ax = fig.add_subplot(111)
cax = ax.matshow(cm, vmin=-1, vmax=1)
fig.colorbar(cax)
ticks = np.arange(0,11,1)
ax.set_xticks(ticks)
ax.set_yticks(ticks)
ax.set_xticklabels(labels)
ax.set_yticklabels(labels)
plt.xticks(rotation=60)
plt.show()
|
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# language: python
# name: python3
# ---
# +
import numpy as np
import matplotlib.pyplot as plt
import pandas as pd
import rosbag
import pymap3d as pm
from scipy.signal import savgol_filter
# %matplotlib inline
def wrap_angle(angle):
    """Wrap an angle in radians into the interval [-pi, pi).

    NOTE(review): this helper is redefined identically in the next cell;
    one of the two definitions could be removed.
    """
    return (angle + np.pi) % (2 * np.pi) - np.pi
# +
def wrap_angle(angle):
    """Wrap an angle in radians into the interval [-pi, pi)."""
    shifted = angle + np.pi
    return shifted % (2 * np.pi) - np.pi
def to_euler(x, y, z, w):
    """Convert a quaternion (x, y, z, w) to xyz (roll, pitch, yaw) Euler angles."""
    sinr_cosp = 2 * (w * x + y * z)
    cosr_cosp = 1 - 2 * (x**2 + y**2)
    sinp = 2 * (w * y - z * x)
    siny_cosp = 2 * (w * z + x * y)
    cosy_cosp = 1 - 2 * (y**2 + z**2)
    return np.array([
        np.arctan2(sinr_cosp, cosr_cosp),
        np.arcsin(sinp),
        np.arctan2(siny_cosp, cosy_cosp),
    ])
# -
# ls
bag = rosbag.Bag('waypoint_u_turn_lab_kanan.bag')
bag.get_type_and_topic_info()
# # CHECK
lat0, lon0, h0 = -6.8712, 107.5738, 768
gps_t = []
gps_pos = []
gps_cov = []
for topic, msg, _ in bag.read_messages(topics=['/fix']):
gps_t.append(msg.header.stamp.to_sec())
gps_pos.append(pm.geodetic2enu(msg.latitude, msg.longitude, msg.altitude, lat0, lon0, h0))
gps_cov.append(msg.position_covariance)
gps_t = np.array(gps_t)
gps_pos = np.array(gps_pos)
gps_cov = np.array(gps_cov).reshape(-1,3,3)
# Ambil sb. x & sb. y saja dan ga ampe abis
#limm = 300 # u_turn_kanan_2
limm=-1 # u_turn_kiri_1
X_fm = (-1) * np.copy(gps_pos[:limm,:2])
ttime = np.copy(gps_t[:limm])
lim = [0, -1]
plt.scatter(X_fm[lim[0]:lim[1],0], X_fm[lim[0]:lim[1],1], s=3.)
plt.plot(X_fm[:,0], X_fm[:,1])
# +
# Filter the waypoints and remove the outlier !
X_in = np.copy(X_fm)
t_in = np.copy(ttime)
for m in range(6):
X_in_filtered = np.empty(X_in.shape)
dt = np.empty(X_in.shape[0])
dt[:-1] = t_in[1:] - t_in[:-1]
dt[-1] = dt[-2]
for i in range(2):
X_in_filtered[:,i] = savgol_filter(X_in[:,i], 51, 3)
dist = np.sqrt(np.sum((X_in_filtered[:, :2] - X_in[:, :2])**2, axis=-1))
X_in = X_in[dist <= 0.5, :]
t_in = t_in[dist <= 0.5]
if m % 1 == 0:
plt.scatter(X_fm[:,0], X_fm[:,1], label='Raw Data', s=0.5,color='red')
plt.plot(X_in[:,0], X_in[:,1], label='Outlier Rejection')
plt.legend()
plt.title('Iterasi ke-{}'.format(m))
plt.show()
# -
X_in_filtered = np.empty(X_in.shape)
for i in range(2):
X_in_filtered[:,i] = savgol_filter(X_in[:,i], 51, 3)
plt.plot(X_in_filtered[:,0], X_in_filtered[:,1])
plt.scatter(X_fm[:,0], X_fm[:,1], color='red',s=0.5)
dst = 0.5
wp = [X_in[0]]
for i in range(1, X_in.shape[0]):
while True:
dist = np.linalg.norm(X_in[i] - wp[-1])
if dist >= dst:
wp.append(wp[-1] + dst*(X_in[i] - wp[-1])/dist)
else:
break
wp = np.array(wp)
plt.scatter(wp[:,0], wp[:,1], s=2.)
# +
num_f = 15
dst = 0.5
wp = [X_in[0]]
for i in range(1, X_in.shape[0]):
while True:
dist = np.linalg.norm(X_in[i] - wp[-1])
if dist >= dst:
wp.append(wp[-1] + dst*(X_in[i] - wp[-1])/dist)
else:
break
if dist >= dst:
wp.append(wp[-1] + dst*(X_in[i] - wp[-1])/dist)
wp = np.array(wp)
print("YES")
for i in range(2):
wp[:,i] = savgol_filter(wp[:,i], num_f, 3)
print("YES")
yaw = np.empty(wp.shape[0])
yaw[1:] = np.arctan2(wp[1:, 1] - wp[:-1, 1], wp[1:, 0] - wp[:-1, 0])
yaw[0] = yaw[1];
yawf = wrap_angle(savgol_filter(np.unwrap(yaw), num_f, 3))
s = np.zeros(wp.shape[0])
for i in range(1, s.shape[0]):
s[i] = s[i-1] + np.linalg.norm(wp[i] - wp[i-1])
curvature = np.gradient(np.unwrap(yawf), s)
curvaturef = savgol_filter(curvature, num_f, 3)
steer = np.arctan(2.5 * curvaturef)*180/np.pi
steerf = savgol_filter(steer, num_f, 3)
steerf = np.copy(steer)
# Limiting the steering angle
steerf[steerf > 35.] = 35
steerf[steerf < -28.] = -28.
width = 15
height = 15
plt.figure(figsize=(width, height))
plt.subplot(2,2,1)
plt.plot(wp[:,0], wp[:,1], label='Processed')
plt.scatter(X_fm[:,0], X_fm[:,1], color='red',s=2., label='RAW')
plt.xlabel("X (m)")
plt.ylabel("Y (m)")
plt.legend()
plt.title("PATH")
plt.subplot(2,2,2)
plt.plot(s, yaw)
plt.plot(s, yawf, label='post filtered')
plt.title("YAW")
plt.xlabel('s (m)')
plt.ylabel('rad')
plt.legend()
plt.subplot(2,2,3)
plt.plot(s, curvature)
plt.plot(s, curvaturef, label='post filtered')
plt.legend()
plt.title("CURVATURE")
plt.xlabel('s (m)')
plt.ylabel(r'$\kappa\;(1/m)$')
plt.subplot(2,2,4)
plt.plot(s, steer)
plt.plot(s, steerf, label='post filtered')
plt.legend()
plt.title("FEED FORWARD STEERING ANGLE")
plt.xlabel('s (m)')
plt.ylabel(r'$(\degree)$')
plt.savefig('waypoints.png', dpi=600, transparent=True)
plt.show()
# -
# # INTERPOLATING THE WAYPOINTS
# Generate the velocity
v0 = 0.3 #m/s ~ 1.8 km/jam
vmax = 1.5 #m/s ~ 5.4 km/jam
accel = 0.2 #m/s2
num = wp.shape[0]
vel = [v0]
for i in range(1, num):
vel.append(min(np.sqrt(vel[-1]**2 + 2*accel*dst), vmax))
vel = np.array(vel)
for i in range(num):
if s[i] >= 36:
vel[i] = 0.0
elif s[i] >= 25:
vel[i] = max(np.sqrt(max(vel[i-1]**2 - 2*accel*dst, 0.0)), v0)
waypoints_np = np.empty((wp.shape[0], 5))
waypoints_np[:,0] = wp[:,0]
waypoints_np[:,1] = wp[:,1]
waypoints_np[:,2] = yawf
waypoints_np[:,3] = vel
waypoints_np[:,4] = curvaturef
plt.plot(s, waypoints_np[:,3])
# +
INTERP_DISTANCE_RES = 0.05 # distance between interpolated points
# Linear interpolation computations
# Compute a list of distances between waypoints
wp_distance = [] # distance array
for i in range(1, waypoints_np.shape[0]):
wp_distance.append(
np.sqrt((waypoints_np[i, 0] - waypoints_np[i-1, 0])**2 +
(waypoints_np[i, 1] - waypoints_np[i-1, 1])**2))
wp_distance.append(0) # last distance is 0 because it is the distance
# from the last waypoint to the last waypoint
# Linearly interpolate between waypoints and store in a list
wp_interp = [] # interpolated values
# (rows = waypoints, columns = [x, y, v])
wp_interp_hash = [] # hash table which indexes waypoints_np
# to the index of the waypoint in wp_interp
interp_counter = 0 # counter for current interpolated point index
for i in range(waypoints_np.shape[0] - 1):
# Add original waypoint to interpolated waypoints list (and append
# it to the hash table)
wp_interp.append(list(waypoints_np[i]))
wp_interp_hash.append(interp_counter)
interp_counter+=1
# Interpolate to the next waypoint. First compute the number of
# points to interpolate based on the desired resolution and
# incrementally add interpolated points until the next waypoint
# is about to be reached.
num_pts_to_interp = int(np.floor(wp_distance[i] /\
float(INTERP_DISTANCE_RES)) - 1)
wp_vector = waypoints_np[i+1] - waypoints_np[i]
wp_uvector = wp_vector / np.linalg.norm(wp_vector)
for j in range(num_pts_to_interp):
next_wp_vector = INTERP_DISTANCE_RES * float(j+1) * wp_uvector
wp_interp.append(list(waypoints_np[i] + next_wp_vector))
interp_counter+=1
# add last waypoint at the end
wp_interp.append(list(waypoints_np[-1]))
wp_interp_hash.append(interp_counter)
wp_interp = np.array(wp_interp)
wp_interp_hash = np.array(wp_interp_hash)
interp_counter+=1
# -
#np.save('waypoints_u_turn_kanan_2', waypoints_np)
np.save('waypoints_u_lab_kanan', waypoints_np)
np.save('waypoints_u_lab_kanan_interp', wp_interp)
plt.plot(wp_interp[:,0], wp_interp[:,1])
# # CHECK
wp_interp.shape
waypoints_np[50]
wp_interp[60]
bag = rosbag.Bag('waypoint_u_turn_lab_kiri.bag')
bag.get_type_and_topic_info()
# # CHECK
lat0, lon0, h0 = -6.8712, 107.5738, 768
gps_t = []
gps_pos = []
gps_cov = []
for topic, msg, _ in bag.read_messages(topics=['/fix']):
gps_t.append(msg.header.stamp.to_sec())
gps_pos.append(pm.geodetic2enu(msg.latitude, msg.longitude, msg.altitude, lat0, lon0, h0))
gps_cov.append(msg.position_covariance)
gps_t = np.array(gps_t)
gps_pos = np.array(gps_pos)
gps_cov = np.array(gps_cov).reshape(-1,3,3)
# Ambil sb. x & sb. y saja dan ga ampe abis
#limm = 300 # u_turn_kanan_2
limm=-1 # u_turn_kiri_1
X_fm = (-1) * np.copy(gps_pos[:limm,:2])
ttime = np.copy(gps_t[:limm])
plt.scatter(X_fm[:,0], X_fm[:,1], s=2.) # u_turn_kiri
plt.plot(wp_interp[:,0], wp_interp[:,1]) # u_turn_kanan
|
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# language: python
# name: python3
# ---
# hide
# %load_ext autoreload
# %autoreload 2
# %load_ext pycodestyle_magic
# # Matrix multiplication in CKKS
#
# This notebook implements the paper https://eprint.iacr.org/2018/1041.pdf which allows to perform matrix multiplication with $\mathcal{O}(d)$ operations on matrices of size $d \times d$.
# export
import numpy as np
import tenseal.sealapi as seal
from typing import List
# To perform matrix multiplication we must implement 4 matrix operators :
#
# - $\sigma(A)_{i,j} = A_{i,i+j}$
# - $\tau(A)_{i,j} = A_{i+j,j}$
# - $\phi(A)_{i,j} = A_{i,j+1}$
# - $\psi(A)_{i,j} = A_{i+1,j}$
# ## Base class
def sigma_diagonal_vector(d: int, k: int) -> np.array:
    """Create the k-th diagonal mask vector for the sigma operator
    on flattened d x d matrices (1 on the selected diagonal, 0 elsewhere)."""
    positions = np.arange(d ** 2)
    if k < 0:
        # Negative diagonals live in the last |k| rows of the matrix.
        offset = positions - d * (d + k)
        inside = (offset >= -k) & (offset < d)
    else:
        inside = (positions >= d * k) & (positions < d * k + d - k)
    return np.where(inside, 1, 0)
def tau_diagonal_vector(d: int, k: int) -> np.array:
    """Create the k-th diagonal mask vector for the tau operator
    on flattened d x d matrices.

    The mask is 1 at positions k, k + d, ..., k + (d-1)*d — one entry
    per matrix row — and 0 everywhere else.
    """
    u = np.zeros(d ** 2)
    # Vectorised equivalent of the original "u[k + d*i] = 1 for each i" loop.
    u[k + d * np.arange(d)] = 1
    return u
def row_diagonal_vector(d, k):
    """Create the two mask vectors used by the k-th row-shift transform.

    Returns (v_k, v_k_d): v_k selects the in-row entries that rotate by k,
    v_k_d selects the wrap-around entries that rotate by k - d.
    """
    cols = np.arange(d ** 2) % d
    v_k = np.where(cols < d - k, 1, 0)
    v_k_d = np.where((cols >= d - k) & (cols < d), 1, 0)
    return v_k, v_k_d
def column_diagonal_vector(d, k):
    """Mask for the k-th column-shift transform: all ones (no masking needed,
    since rotating by whole rows already wraps correctly)."""
    return np.ones(d ** 2)
class MatrixMultiplicator:
    """Base class to create a matrix multiplicator operator.

    Implements the matrix-multiplication scheme of the paper cited above
    (https://eprint.iacr.org/2018/1041.pdf) on d x d matrices encoded as
    length-d**2 vectors.  The elementary vector operations (rotate, add,
    plain multiply, ciphertext multiply) are injected as callables so the
    same algorithm can run on plain numpy vectors or on ciphertexts.
    """
    def __init__(self, d, create_zero, sigma_diagonal_vector, tau_diagonal_vector,
                 row_diagonal_vector, column_diagonal_vector,
                 rotate=None, add=None, pmult=None, cmult=None):
        # d: matrix dimension; create_zero: factory returning a zero vector
        # of the right backing type; the *_diagonal_vector callables build
        # the plaintext mask vectors for each linear transform.
        self.d = d
        self.create_zero = create_zero
        self.sigma_diagonal_vector = sigma_diagonal_vector
        self.tau_diagonal_vector = tau_diagonal_vector
        self.row_diagonal_vector = row_diagonal_vector
        self.column_diagonal_vector = column_diagonal_vector
        # Default operators implement the plaintext (numpy) semantics.
        # Note np.roll(x, -k) rotates *left* by k.
        if not rotate:
            rotate = lambda x, k: np.roll(x, -k)
        if not add:
            add = lambda x, y: x + y
        if not pmult:
            pmult = lambda x, y: x * y
        if not cmult:
            cmult = lambda x, y: x * y
        self.rotate, self.add, self.pmult, self.cmult = rotate, add, pmult, cmult

    def sigma_lin_transform(self, input):
        """Apply the sigma operator (sigma(A)[i,j] = A[i, i+j]) to an
        encoded matrix, as a sum of masked rotations over the 2d-1 diagonals."""
        sigma = []
        d = self.d
        for k in range(-d + 1, d):
            sigma.append(self.sigma_diagonal_vector(d, k))
        output = self.create_zero()
        # Accumulate one masked rotation per diagonal k in [-d+1, d-1].
        for sigma_vector, k in zip(sigma, range(-d + 1, d)):
            output = self.add(output, self.pmult(self.rotate(input, k), sigma_vector))
        return output

    def tau_lin_transform(self, input):
        """Apply the tau operator (tau(A)[i,j] = A[i+j, j]) to an encoded
        matrix; rotations here are by whole rows (multiples of d)."""
        tau = []
        d = self.d
        for k in range(d):
            tau.append(self.tau_diagonal_vector(d, k))
        output = self.create_zero()
        for tau_vector, k in zip(tau, range(d)):
            output = self.add(output, self.pmult(self.rotate(input, k * d), tau_vector))
        return output

    def row_lin_transform(self, input, k):
        """Apply the k-th in-row shift (the phi^k operator): two masked
        rotations, one for the in-row part and one for the wrap-around."""
        d = self.d
        v_k, v_k_d = self.row_diagonal_vector(d, k)
        output = self.create_zero()
        output = self.add(output, self.pmult(self.rotate(input, k), v_k))
        output = self.add(output, self.pmult(self.rotate(input, k - d), v_k_d))
        return output

    def column_lin_transform(self, input, k):
        """Apply the k-th whole-row shift (the psi^k operator): a single
        rotation by d*k with an all-ones mask."""
        d = self.d
        v_k = self.column_diagonal_vector(d, k)
        output = self.create_zero()
        output = self.add(output, self.pmult(self.rotate(input, d * k), v_k))
        return output

    def matmul(self, A, B):
        """Multiply two encoded d x d matrices: A @ B in encoded form.

        Computes sum over k of phi^k(sigma(A)) * psi^k(tau(B)), using
        O(d) rotations/multiplications per the paper's algorithm.
        """
        d = self.d
        sigma_A = self.create_zero()
        sigma_A = self.sigma_lin_transform(A)
        tau_B = self.create_zero()
        tau_B = self.tau_lin_transform(B)
        # k = 0 term, then accumulate the d-1 shifted products.
        output = self.cmult(sigma_A, tau_B)
        for k in range(1, d):
            shift_A = self.row_lin_transform(sigma_A, k)
            shift_B = self.column_lin_transform(tau_B, k)
            output = self.add(output, self.cmult(shift_A, shift_B))
        return output
# +
def encode_matrices_to_vector(matrix):
    """Interleave a stack of g square d x d matrices into one vector of
    length g*d*d, so that entry g*(d*i + j) + k holds matrix[k, i, j]."""
    shape = matrix.shape
    assert len(shape) == 3, "Non tridimensional tensor"
    assert shape[1] == shape[2], "Non square matrices"
    # Entry g*(d*i + j) + k of the output is matrix[k, i, j]: exactly the
    # C-order flattening of the (i, j, k)-transposed tensor.  Cast to float
    # to match the np.zeros-backed buffer of the element-wise version.
    return matrix.transpose(1, 2, 0).astype(float).ravel()
def decode_vector_to_matrices(vector, d):
    """Inverse of encode_matrices_to_vector: unpack an interleaved vector
    into a stack of g square d x d matrices."""
    n = len(vector)
    g = n // (d ** 2)
    out = np.zeros((g, d, d))
    data = np.asarray(vector)
    # The entries of matrix k sit at positions k, k+g, k+2g, ... in row-major order.
    for k in range(g):
        out[k] = data[k::g].reshape(d, d)
    return out
# +
def encode_matrix_to_vector(matrix: np.array) -> np.array:
    """Encodes a d*d matrix to a vector of size d*d (row-major order).

    output[d*i + j] = matrix[i, j].
    """
    shape = matrix.shape
    assert len(shape) == 2 and shape[0] == shape[1], "Non square matrix"
    # Row-major flatten; astype(float) copies, so the result is independent
    # of the input array (matching the original loop's fresh output).
    return matrix.astype(float).reshape(-1)
def decode_vector_to_matrix(vector):
    """Decode a length-d**2 vector into a d*d matrix (row-major order).

    Inverse of encode_matrix_to_vector: output[i, j] = vector[d*i + j].
    """
    n = len(vector)
    # FIX: the original used np.sqrt(n).is_integer(), which depends on float
    # precision (and on np.float64 exposing is_integer); an exact integer
    # check is robust for any n.
    d = int(round(np.sqrt(n)))
    assert len(vector.shape) == 1 and d * d == n, "Non square matrix"
    # astype(float) copies, so the result does not alias the input vector.
    return vector.astype(float).reshape(d, d)
# -
def weave(vector, g):
    """Repeat each entry of `vector` g times in place: [a, b] -> [a]*g + [b]*g.

    Used to broadcast a diagonal mask across g interleaved matrix copies.
    """
    # np.repeat reproduces the original slice-assignment loop in C speed;
    # the float cast matches the original np.zeros(float) output dtype.
    return np.repeat(np.asarray(vector, dtype=float), g)
# +
# Sanity-check cell: build a plain-numpy MatrixMultiplicator for 3x3 matrices
# encoded as length-9 vectors. sigma/tau/row/column diagonal vectors are
# defined earlier in the file.
d = 3
A = np.random.randn(d**2)
B = np.random.randn(d**2)
# NOTE: this closes over the *global* d, so it tracks later reassignments.
create_zero = lambda : np.zeros(d**2)
mm = MatrixMultiplicator(d, create_zero, sigma_diagonal_vector, tau_diagonal_vector,
                        row_diagonal_vector, column_diagonal_vector)
# -
l2_error = lambda x,y : ((x - y) ** 2).mean()
# +
# Ground truth via numpy: decode both vectors, matmul, re-encode, and compare
# with the diagonal-decomposition evaluation (expect ~0 MSE).
expected = encode_matrix_to_vector(np.matmul(decode_vector_to_matrix(A),decode_vector_to_matrix(B)))
predicted = mm.matmul(A,B)
l2_error(expected,predicted)
# +
# Parallel (batched) plaintext test: pack g copies of the same d x d matrices
# into one length g*d**2 vector and multiply all copies at once with a single
# MatrixMultiplicator whose callbacks act on weaved vectors.
d = 3
g = 2
A = np.random.randn(d**2)
B = np.random.randn(d**2)
# C / D hold g stacked copies of A / B, flattened into interleaved vectors.
C = np.concatenate([decode_vector_to_matrix(A).reshape(1,d,d)]*g)
C = encode_matrices_to_vector(C)
D = np.concatenate([decode_vector_to_matrix(B).reshape(1,d,d)]*g)
D = encode_matrices_to_vector(D)
# Each diagonal mask entry is duplicated g times (weave) so the same mask
# applies to every copy; rotations are scaled by g to match the interleaving.
parallel_sigma_diagonal_vector = lambda d,k: weave(sigma_diagonal_vector(d,k),g)
parallel_tau_diagonal_vector = lambda d,k: weave(tau_diagonal_vector(d,k),g)
parallel_row_diagonal_vector = lambda d,k: [weave(vector,g) for vector in row_diagonal_vector(d,k)]
parallel_column_diagonal_vector = lambda d,k: weave(column_diagonal_vector(d,k),g)
parallel_create_zero = lambda: np.zeros(g * (d** 2))
parallel_rotate = lambda x,k: np.roll(x, -(k * g))
pmm = MatrixMultiplicator(d,parallel_create_zero , parallel_sigma_diagonal_vector, parallel_tau_diagonal_vector,
                         parallel_row_diagonal_vector, parallel_column_diagonal_vector, parallel_rotate)
# +
# Compare the batched multiplier against numpy's batched matmul.
# FIX: the reference product was computed as C @ C (decode_vector_to_matrices
# called with C twice) while `predicted` multiplies C and D; the reference
# must use D as the second operand.
expected = encode_matrices_to_vector(np.matmul(decode_vector_to_matrices(C, d), decode_vector_to_matrices(D, d)))
predicted = pmm.matmul(C,D)
l2_error(expected, predicted)
# -
# Display the decoded products of both the single and the batched multiplier.
decode_vector_to_matrix(mm.matmul(A,B))
decode_vector_to_matrices(pmm.matmul(C,D),d)
# +
# SEAL / CKKS setup: create_seal_globals populates globals() with the context,
# keys, encoder, encryptor, decryptor, evaluator, galois_keys and scale; they
# are then injected into builtins so helper functions can resolve them by name.
import builtins
from cryptotree.seal_helper import print_ctx, print_ptx, create_seal_globals, append_globals_to_builtins
# Ring degree 8192 with a 5-prime coefficient-modulus chain (bit sizes below);
# presumably this yields N/2 = 4096 CKKS slots — confirm against seal_helper.
poly_modulus_degree = 8192
moduli = [40,30,30,30,40]
PRECISION_BITS = 30
create_seal_globals(globals(), poly_modulus_degree, moduli, PRECISION_BITS)
append_globals_to_builtins(globals(), builtins)
# +
# Encrypted single-matrix test at d = 28 (784 = 28**2 encoded slots).
d = 28
A = np.random.randn(d ** 2)
B = np.random.randn(d ** 2)
# +
# Encrypt A and B into ctA / ctB (the single Plaintext ptx is reused for both
# encodings, which is safe because encrypt copies its input).
ptx = seal.Plaintext()
encoder.encode(A, scale, ptx)
ctA = seal.Ciphertext()
encryptor.encrypt(ptx, ctA)
encoder.encode(B, scale, ptx)
ctB = seal.Ciphertext()
encryptor.encrypt(ptx, ctB)
# +
def get_vector(ctx):
    """Decrypt a ciphertext and return its decoded slots as a numpy array."""
    plain = seal.Plaintext()
    decryptor.decrypt(ctx, plain)
    return np.array(encoder.decode_double(plain))
def encode(vector):
    """CKKS-encode a plain vector at the global scale; returns the Plaintext."""
    plain = seal.Plaintext()
    encoder.encode(vector, scale, plain)
    return plain
def encrypt(vector):
    """Encode then encrypt a plain vector; returns the Ciphertext."""
    cipher = seal.Ciphertext()
    encryptor.encrypt(encode(vector), cipher)
    return cipher
# +
def ckks_create_zero():
    """Return a fresh encryption of the all-zero vector (every slot zero)."""
    plain = seal.Plaintext()
    encoder.encode(np.zeros(encoder.slot_count()), scale, plain)
    cipher = seal.Ciphertext()
    encryptor.encrypt(plain, cipher)
    return cipher
def ckks_rotate(ctx, k):
    """Rotate the encrypted slot vector by k positions (new ciphertext)."""
    rotated = seal.Ciphertext()
    evaluator.rotate_vector(ctx, k, galois_keys, rotated)
    return rotated
def ckks_add(ctx1, ctx2):
    """Homomorphic addition of two ciphertexts.

    If their modulus levels differ, ctx1 is switched down *in place* to
    ctx2's level before adding (same side effect as the original).
    """
    if ctx1.parms_id() != ctx2.parms_id():
        evaluator.mod_switch_to_inplace(ctx1, ctx2.parms_id())
    result = seal.Ciphertext()
    evaluator.add(ctx1, ctx2, result)
    return result
def ckks_pmult(ctx, ptx):
    """Ciphertext-plaintext product, rescaled and reset to the global scale.

    The plaintext is mod-switched *in place* to the ciphertext's level if
    needed (same side effect as the original).
    """
    if ptx.parms_id() != ctx.parms_id():
        evaluator.mod_switch_to_inplace(ptx, ctx.parms_id())
    result = seal.Ciphertext()
    evaluator.multiply_plain(ctx, ptx, result)
    evaluator.rescale_to_next_inplace(result)
    result.scale = scale
    return result
def ckks_cmult(ctx1, ctx2):
    """Ciphertext-ciphertext product, rescaled and reset to the global scale.

    ctx2 is mod-switched *in place* down to ctx1's level if needed (same
    side effect as the original).
    """
    if ctx2.parms_id() != ctx1.parms_id():
        evaluator.mod_switch_to_inplace(ctx2, ctx1.parms_id())
    result = seal.Ciphertext()
    evaluator.multiply(ctx1, ctx2, result)
    evaluator.rescale_to_next_inplace(result)
    result.scale = scale
    return result
# CKKS-encoded diagonal vectors: same plaintext diagonals, pre-encoded once.
ckks_sigma_diagonal_vector = lambda d,k: encode(sigma_diagonal_vector(d,k))
ckks_tau_diagonal_vector = lambda d,k: encode(tau_diagonal_vector(d,k))
ckks_row_diagonal_vector = lambda d,k: [encode(vector) for vector in row_diagonal_vector(d,k)]
ckks_column_diagonal_vector = lambda d,k: encode(column_diagonal_vector(d,k))
# Fully homomorphic multiplier: every callback operates on SEAL objects.
cmm = MatrixMultiplicator(d, ckks_create_zero, ckks_sigma_diagonal_vector, ckks_tau_diagonal_vector,
                         ckks_row_diagonal_vector, ckks_column_diagonal_vector, ckks_rotate, ckks_add,
                         ckks_pmult, ckks_cmult)
# -
# Plaintext reference multiplier at the current d (create_zero closes over
# the global d, now 28), then compare against the decrypted product on the
# first 784 = 28**2 slots.
mm = MatrixMultiplicator(d, create_zero, sigma_diagonal_vector, tau_diagonal_vector,
                        row_diagonal_vector, column_diagonal_vector)
predicted = cmm.matmul(ctA, ctB)
# +
expected = mm.matmul(A,B)
l2_error(get_vector(predicted)[:784], expected)
# +
# Encrypted batched test: g = 16 copies of 16 x 16 matrices, i.e.
# g * d**2 = 4096 packed values (presumably filling the CKKS slot count for
# poly_modulus_degree 8192 — confirm).
d = 16
g = 16
A = np.random.randn(d ** 2)
B = np.random.randn(d ** 2)
# Pack and encrypt g copies of A and B (same layout as the plaintext test).
C = np.concatenate([decode_vector_to_matrix(A).reshape(1,d,d)]*g)
C = encode_matrices_to_vector(C)
ctC = encrypt(C)
D = np.concatenate([decode_vector_to_matrix(B).reshape(1,d,d)]*g)
D = encode_matrices_to_vector(D)
ctD = encrypt(D)
# Weaved diagonals, CKKS-encoded; rotations are scaled by g to match the
# interleaved layout.
parallel_ckks_sigma_diagonal_vector = lambda d,k: encode(weave(sigma_diagonal_vector(d,k),g))
parallel_ckks_tau_diagonal_vector = lambda d,k: encode(weave(tau_diagonal_vector(d,k),g))
parallel_ckks_row_diagonal_vector = lambda d,k: [encode(weave(vector,g)) for vector in row_diagonal_vector(d,k)]
parallel_ckks_column_diagonal_vector = lambda d,k: encode(weave(column_diagonal_vector(d,k),g))
parallel_ckks_rotate = lambda ctx,k: ckks_rotate(ctx, k*g)
pcmm = MatrixMultiplicator(d,ckks_create_zero , parallel_ckks_sigma_diagonal_vector, parallel_ckks_tau_diagonal_vector,
                          parallel_ckks_row_diagonal_vector, parallel_ckks_column_diagonal_vector, parallel_ckks_rotate,
                          ckks_add, ckks_pmult, ckks_cmult)
# -
# Scratch: 2**13 == 8192, the poly_modulus_degree chosen above — presumably a
# quick slot-budget check for the batched layout; verify and remove.
2**13
# +
# Plaintext reference for the encrypted batched test: the same weaved
# diagonals and g-scaled rotation, operating on plain numpy vectors.
parallel_sigma_diagonal_vector = lambda d,k: weave(sigma_diagonal_vector(d,k),g)
parallel_tau_diagonal_vector = lambda d,k: weave(tau_diagonal_vector(d,k),g)
parallel_row_diagonal_vector = lambda d,k: [weave(vector,g) for vector in row_diagonal_vector(d,k)]
parallel_column_diagonal_vector = lambda d,k: weave(column_diagonal_vector(d,k),g)
parallel_create_zero = lambda: np.zeros(g * (d** 2))
parallel_rotate = lambda x,k: np.roll(x, -(k * g))
# FIX: pmm was constructed twice in a row with identical arguments (a stray
# duplicated cell line); a single construction suffices.
pmm = MatrixMultiplicator(d,parallel_create_zero , parallel_sigma_diagonal_vector, parallel_tau_diagonal_vector,
                         parallel_row_diagonal_vector, parallel_column_diagonal_vector, parallel_rotate)
# -
predicted = pcmm.matmul(ctC,ctD)
# +
expected = pmm.matmul(C,D)
l2_error(get_vector(predicted), expected)
# -
# FIX: the original line read ((v - expected) ** 2).mean() where `v` was never
# defined (NameError); the decrypted prediction is the intended operand,
# making this the same MSE computed by l2_error above.
((get_vector(predicted) - expected) ** 2).mean()
# NOTE(review): `ctx` is not defined at module scope here (it is only a local
# inside the helper functions above), so this scratch cell NameErrors unless a
# ctx was left over interactively — verify before keeping it.
evaluator.rotate_vector_inplace(ctx, 1, galois_keys)
print_ctx(ctx)
# +
# export
from fastcore.test import test_close
def test_sum(x: List[float], evaluator, encoder, encryptor, decryptor, scale, eps=1e-2):
    """Check that homomorphic slot summation (sum_reduce) matches np.sum(x).

    Encrypts x, runs sum_reduce, decrypts, and asserts slot 0 is within eps
    of the plaintext sum.

    NOTE(review): unlike test_dot_product_plain below, galois_keys is not a
    parameter here — the name resolves via the globals injected into builtins
    by append_globals_to_builtins; that setup cell must have run first.
    """
    n_slot = len(x)
    ptx = seal.Plaintext()
    encoder.encode(x, scale, ptx)
    ctx = seal.Ciphertext()
    encryptor.encrypt(ptx, ctx)
    output = sum_reduce(ctx, evaluator, galois_keys, n_slot)
    decryptor.decrypt(output, ptx)
    values = encoder.decode_double(ptx)
    # The reduction leaves the total in slot 0.
    homomorphic_output = values[0]
    expected_output = np.sum(x)
    test_close(homomorphic_output, expected_output, eps)
def test_dot_product_plain(x: List[float], y: List[float],
                           evaluator, encoder, encryptor, decryptor,
                           galois_keys,
                           scale, eps=1e-2):
    """Check that the homomorphic ciphertext-plaintext dot product matches
    np.dot(x, y): encrypt x, encode y, run dot_product_plain, decrypt, and
    assert slot 0 is within eps of the plaintext result."""
    assert len(x) == len(y), f"x and y must have same length {len(x)} != {len(y)}"
    slots = len(x)
    plain_x = seal.Plaintext()
    encoder.encode(x, scale, plain_x)
    cipher_x = seal.Ciphertext()
    encryptor.encrypt(plain_x, cipher_x)
    plain_y = seal.Plaintext()
    encoder.encode(y, scale, plain_y)
    result = dot_product_plain(cipher_x, plain_y, evaluator, galois_keys, slots)
    # Reuse plain_x as the decryption target (as the original did with ptx).
    decryptor.decrypt(result, plain_x)
    decoded = encoder.decode_double(plain_x)
    test_close(decoded[0], np.dot(x, y), eps)
# -
# NOTE(review): ctx, ptx and n_slot are locals of the helpers above, not
# module globals — the next two print lines will NameError unless those names
# were defined interactively; verify before keeping them.
print_ctx(sum_reduce(ctx, evaluator, galois_keys, n_slot))
print_ctx(dot_product_plain(ctx, ptx, evaluator, galois_keys, n_slot))
# Run the homomorphic-vs-plaintext checks on small concrete vectors.
test_sum([1,2], evaluator, encoder, encryptor, decryptor, scale)
test_sum([1,2,3], evaluator, encoder, encryptor, decryptor, scale)
test_dot_product_plain([1,2,3], [1,1,1], evaluator, encoder, encryptor, decryptor, galois_keys, scale)
test_dot_product_plain([1,2,3,5], [1,1,1,-6], evaluator, encoder, encryptor, decryptor, galois_keys, scale)
# NOTE(review): the four lines below are dataset-viewer scraping residue
# ("Subsets and Splits" footer), not code; commented out so the file parses.
# |
# Subsets and Splits
# No community queries yet
# The top public SQL queries from the community will appear here once available.